Diffstat (limited to 'lib/gcc/i686-linux-android/4.6')
57 files changed, 15931 insertions, 0 deletions
diff --git a/lib/gcc/i686-linux-android/4.6/crtbegin.o b/lib/gcc/i686-linux-android/4.6/crtbegin.o
Binary files differ
new file mode 100644
index 0000000..59617ed
--- /dev/null
+++ b/lib/gcc/i686-linux-android/4.6/crtbegin.o
diff --git a/lib/gcc/i686-linux-android/4.6/crtbeginS.o b/lib/gcc/i686-linux-android/4.6/crtbeginS.o
Binary files differ
new file mode 100644
index 0000000..b25e395
--- /dev/null
+++ b/lib/gcc/i686-linux-android/4.6/crtbeginS.o
diff --git a/lib/gcc/i686-linux-android/4.6/crtbeginT.o b/lib/gcc/i686-linux-android/4.6/crtbeginT.o
Binary files differ
new file mode 100644
index 0000000..59617ed
--- /dev/null
+++ b/lib/gcc/i686-linux-android/4.6/crtbeginT.o
diff --git a/lib/gcc/i686-linux-android/4.6/crtend.o b/lib/gcc/i686-linux-android/4.6/crtend.o
Binary files differ
new file mode 100644
index 0000000..e9eaf9d
--- /dev/null
+++ b/lib/gcc/i686-linux-android/4.6/crtend.o
diff --git a/lib/gcc/i686-linux-android/4.6/crtendS.o b/lib/gcc/i686-linux-android/4.6/crtendS.o
Binary files differ
new file mode 100644
index 0000000..e9eaf9d
--- /dev/null
+++ b/lib/gcc/i686-linux-android/4.6/crtendS.o
diff --git a/lib/gcc/i686-linux-android/4.6/crtfastmath.o b/lib/gcc/i686-linux-android/4.6/crtfastmath.o
Binary files differ
new file mode 100644
index 0000000..2c42ae6
--- /dev/null
+++ b/lib/gcc/i686-linux-android/4.6/crtfastmath.o
diff --git a/lib/gcc/i686-linux-android/4.6/crtprec32.o b/lib/gcc/i686-linux-android/4.6/crtprec32.o
Binary files differ
new file mode 100644
index 0000000..5e23637
--- /dev/null
+++ b/lib/gcc/i686-linux-android/4.6/crtprec32.o
diff --git a/lib/gcc/i686-linux-android/4.6/crtprec64.o b/lib/gcc/i686-linux-android/4.6/crtprec64.o
Binary files differ
new file mode 100644
index 0000000..df1e3ec
--- /dev/null
+++ b/lib/gcc/i686-linux-android/4.6/crtprec64.o
diff --git a/lib/gcc/i686-linux-android/4.6/crtprec80.o b/lib/gcc/i686-linux-android/4.6/crtprec80.o
Binary files differ
new file mode 100644
index 0000000..06e4ffa
--- /dev/null
+++ b/lib/gcc/i686-linux-android/4.6/crtprec80.o
diff --git a/lib/gcc/i686-linux-android/4.6/gcov-src/gcov-io.c b/lib/gcc/i686-linux-android/4.6/gcov-src/gcov-io.c
new file mode 100644
index 0000000..61ecd46
--- /dev/null
+++ b/lib/gcc/i686-linux-android/4.6/gcov-src/gcov-io.c
@@ -0,0 +1,980 @@
+/* File format for coverage information
+   Copyright (C) 1996, 1997, 1998, 2000, 2002, 2003, 2004, 2005, 2007,
+   2008 Free Software Foundation, Inc.
+   Contributed by Bob Manson <manson@cygnus.com>.
+   Completely remangled by Nathan Sidwell <nathan@codesourcery.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+<http://www.gnu.org/licenses/>.  */
+
+/* Routines declared in gcov-io.h.
+   This file should be #included by
+   another source file, after having #included gcov-io.h.  */
+
+/* Redefine these here, rather than using the ones in system.h since
+ * including system.h leads to conflicting definitions of other
+ * symbols and macros.  */
+#undef MIN
+#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
+
+#if !IN_GCOV
+static void gcov_write_block (unsigned);
+static gcov_unsigned_t *gcov_write_words (unsigned);
+#endif
+static const gcov_unsigned_t *gcov_read_words (unsigned);
+#if !IN_LIBGCOV
+static void gcov_allocate (unsigned);
+#endif
+
+#ifdef __GCOV_KERNEL__
+struct gcov_var gcov_var ATTRIBUTE_HIDDEN;
+#endif
+
+static inline gcov_unsigned_t from_file (gcov_unsigned_t value)
+{
+#if !IN_LIBGCOV
+  if (gcov_var.endian)
+    {
+      value = (value >> 16) | (value << 16);
+      value = ((value & 0xff00ff) << 8) | ((value >> 8) & 0xff00ff);
+    }
+#endif
+  return value;
+}
+
+/* Open a gcov file.  NAME is the name of the file to open and MODE
+   indicates whether a new file should be created, or an existing file
+   opened.  If MODE is >= 0 an existing file will be opened, if
+   possible, and if MODE is <= 0, a new file will be created.  Use
+   MODE=0 to attempt to reopen an existing file and then fall back on
+   creating a new one.  If MODE < 0, the file will be opened in
+   read-only mode.  Otherwise it will be opened for modification.
+   Return zero on failure, >0 on opening an existing file and <0 on
+   creating a new one.  */
+
+#ifndef __GCOV_KERNEL__
+GCOV_LINKAGE int
+#if IN_LIBGCOV
+gcov_open (const char *name)
+#else
+gcov_open (const char *name, int mode)
+#endif
+{
+#if IN_LIBGCOV
+  const int mode = 0;
+#endif
+#if GCOV_LOCKED
+  struct flock s_flock;
+  int fd;
+
+  s_flock.l_whence = SEEK_SET;
+  s_flock.l_start = 0;
+  s_flock.l_len = 0; /* Until EOF.  */
+  s_flock.l_pid = getpid ();
+#endif
+
+  gcc_assert (!gcov_var.file);
+  gcov_var.start = 0;
+  gcov_var.offset = gcov_var.length = 0;
+  gcov_var.overread = -1u;
+  gcov_var.error = 0;
+#if !IN_LIBGCOV
+  gcov_var.endian = 0;
+#endif
+#if GCOV_LOCKED
+  if (mode > 0)
+    {
+      /* Read-only mode - acquire a read-lock.  */
+      s_flock.l_type = F_RDLCK;
+      fd = open (name, O_RDONLY);
+    }
+  else
+    {
+      /* Write mode - acquire a write-lock.  */
+      s_flock.l_type = F_WRLCK;
+      fd = open (name, O_RDWR | O_CREAT, 0666);
+    }
+  if (fd < 0)
+    return 0;
+
+  while (fcntl (fd, F_SETLKW, &s_flock) && errno == EINTR)
+    continue;
+
+  gcov_var.file = fdopen (fd, (mode > 0) ? "rb" : "r+b");
+
+  if (!gcov_var.file)
+    {
+      close (fd);
+      return 0;
+    }
+
+  if (mode > 0)
+    gcov_var.mode = 1;
+  else if (mode == 0)
+    {
+      struct stat st;
+
+      if (fstat (fd, &st) < 0)
+        {
+          fclose (gcov_var.file);
+          gcov_var.file = 0;
+          return 0;
+        }
+      if (st.st_size != 0)
+        gcov_var.mode = 1;
+      else
+        gcov_var.mode = mode * 2 + 1;
+    }
+  else
+    gcov_var.mode = mode * 2 + 1;
+#else
+  if (mode >= 0)
+    gcov_var.file = fopen (name, (mode > 0) ? "rb" : "r+b");
+
+  if (gcov_var.file)
+    gcov_var.mode = 1;
+  else if (mode <= 0)
+    {
+      gcov_var.file = fopen (name, "w+b");
+      if (gcov_var.file)
+        gcov_var.mode = mode * 2 + 1;
+    }
+  if (!gcov_var.file)
+    return 0;
+#endif
+
+  setbuf (gcov_var.file, (char *)0);
+
+  return 1;
+}
+#else /* __GCOV_KERNEL__ */
+
+extern _GCOV_FILE *gcov_current_file;
+
+GCOV_LINKAGE int
+gcov_open (const char *name)
+{
+  gcov_var.start = 0;
+  gcov_var.offset = gcov_var.length = 0;
+  gcov_var.overread = -1u;
+  gcov_var.error = 0;
+  gcov_var.file = gcov_current_file;
+  gcov_var.mode = 1;
+
+  return 1;
+}
+#endif /* __GCOV_KERNEL__ */
+
+/* Close the current gcov file.  Flushes data to disk.
+   Returns nonzero
+   on failure or error flag set.  */
+
+GCOV_LINKAGE int
+gcov_close (void)
+{
+  if (gcov_var.file)
+    {
+#if !IN_GCOV
+      if (gcov_var.offset && gcov_var.mode < 0)
+        gcov_write_block (gcov_var.offset);
+#endif
+      _GCOV_fclose (gcov_var.file);
+      gcov_var.file = 0;
+      gcov_var.length = 0;
+    }
+#if !IN_LIBGCOV
+  free (gcov_var.buffer);
+  gcov_var.alloc = 0;
+  gcov_var.buffer = 0;
+#endif
+  gcov_var.mode = 0;
+  return gcov_var.error;
+}
+
+#if !IN_LIBGCOV
+/* Modify FILENAME to a canonical form after stripping known prefixes
+   in place.  It removes '/proc/self/cwd' and '/proc/self/cwd/.'.
+   Returns the in-place modified filename.  */
+
+GCOV_LINKAGE char *
+gcov_canonical_filename (char *filename)
+{
+  static char cwd_dot_str[] = "/proc/self/cwd/./";
+  int cwd_dot_len = strlen (cwd_dot_str);
+  int cwd_len = cwd_dot_len - 2; /* without trailing './' */
+  int filename_len = strlen (filename);
+  /* delete the longer prefix first */
+  if (0 == strncmp (filename, cwd_dot_str, cwd_dot_len))
+    {
+      memmove (filename, filename + cwd_dot_len, filename_len - cwd_dot_len);
+      filename[filename_len - cwd_dot_len] = '\0';
+      return filename;
+    }
+
+  if (0 == strncmp (filename, cwd_dot_str, cwd_len))
+    {
+      memmove (filename, filename + cwd_len, filename_len - cwd_len);
+      filename[filename_len - cwd_len] = '\0';
+      return filename;
+    }
+  return filename;
+}
+
+/* Read LEN words and construct load latency info LL_INFO.  */
+
+GCOV_LINKAGE void
+gcov_read_pmu_load_latency_info (gcov_pmu_ll_info_t *ll_info,
+                                 gcov_unsigned_t len ATTRIBUTE_UNUSED)
+{
+  const char *filename;
+  ll_info->counts = gcov_read_unsigned ();
+  ll_info->self = gcov_read_unsigned ();
+  ll_info->cum = gcov_read_unsigned ();
+  ll_info->lt_10 = gcov_read_unsigned ();
+  ll_info->lt_32 = gcov_read_unsigned ();
+  ll_info->lt_64 = gcov_read_unsigned ();
+  ll_info->lt_256 = gcov_read_unsigned ();
+  ll_info->lt_1024 = gcov_read_unsigned ();
+  ll_info->gt_1024 = gcov_read_unsigned ();
+  ll_info->wself = gcov_read_unsigned ();
+  ll_info->code_addr = gcov_read_counter ();
+  ll_info->line = gcov_read_unsigned ();
+  ll_info->discriminator = gcov_read_unsigned ();
+  filename = gcov_read_string ();
+  if (filename)
+    ll_info->filename = gcov_canonical_filename (xstrdup (filename));
+  else
+    ll_info->filename = 0;
+}
+
+/* Read LEN words and construct branch mispredict info BRM_INFO.  */
+
+GCOV_LINKAGE void
+gcov_read_pmu_branch_mispredict_info (gcov_pmu_brm_info_t *brm_info,
+                                      gcov_unsigned_t len ATTRIBUTE_UNUSED)
+{
+  const char *filename;
+  brm_info->counts = gcov_read_unsigned ();
+  brm_info->self = gcov_read_unsigned ();
+  brm_info->cum = gcov_read_unsigned ();
+  brm_info->code_addr = gcov_read_counter ();
+  brm_info->line = gcov_read_unsigned ();
+  brm_info->discriminator = gcov_read_unsigned ();
+  filename = gcov_read_string ();
+  if (filename)
+    brm_info->filename = gcov_canonical_filename (xstrdup (filename));
+  else
+    brm_info->filename = 0;
+}
+
+/* Read LEN words from an open gcov file and construct data into pmu
+   tool header TOOL_HEADER.  */
+
+GCOV_LINKAGE void gcov_read_pmu_tool_header (gcov_pmu_tool_header_t *header,
+                                             gcov_unsigned_t len ATTRIBUTE_UNUSED)
+{
+  const char *str;
+  str = gcov_read_string ();
+  header->host_cpu = str ? xstrdup (str) : 0;
+  str = gcov_read_string ();
+  header->hostname = str ? xstrdup (str) : 0;
+  str = gcov_read_string ();
+  header->kernel_version = str ? xstrdup (str) : 0;
+  str = gcov_read_string ();
+  header->column_header = str ?
+    xstrdup (str) : 0;
+  str = gcov_read_string ();
+  header->column_description = str ? xstrdup (str) : 0;
+  str = gcov_read_string ();
+  header->full_header = str ? xstrdup (str) : 0;
+}
+#endif
+
+#if !IN_LIBGCOV
+/* Check if MAGIC is EXPECTED.  Use it to determine endianness of the
+   file.  Returns +1 for same endian, -1 for other endian and zero for
+   not EXPECTED.  */
+
+GCOV_LINKAGE int
+gcov_magic (gcov_unsigned_t magic, gcov_unsigned_t expected)
+{
+  if (magic == expected)
+    return 1;
+  magic = (magic >> 16) | (magic << 16);
+  magic = ((magic & 0xff00ff) << 8) | ((magic >> 8) & 0xff00ff);
+  if (magic == expected)
+    {
+      gcov_var.endian = 1;
+      return -1;
+    }
+  return 0;
+}
+#endif
+
+#if !IN_LIBGCOV
+static void
+gcov_allocate (unsigned length)
+{
+  size_t new_size = gcov_var.alloc;
+
+  if (!new_size)
+    new_size = GCOV_BLOCK_SIZE;
+  new_size += length;
+  new_size *= 2;
+
+  gcov_var.alloc = new_size;
+  gcov_var.buffer = XRESIZEVAR (gcov_unsigned_t, gcov_var.buffer, new_size << 2);
+}
+#endif
+
+#if !IN_GCOV
+/* Write out the current block, if needs be.  */
+
+static void
+gcov_write_block (unsigned size)
+{
+  if (_GCOV_fwrite (gcov_var.buffer, size << 2, 1, gcov_var.file) != 1)
+    gcov_var.error = 1;
+  gcov_var.start += size;
+  gcov_var.offset -= size;
+}
+
+#if IN_LIBGCOV
+/* Return the number of words STRING would need including the length
+   field in the output stream itself.  This should be identical to
+   "alloc" calculation in gcov_write_string().  */
+
+GCOV_LINKAGE gcov_unsigned_t
+gcov_string_length (const char *string)
+{
+  gcov_unsigned_t len = (string) ? strlen (string) : 0;
+  /* + 1 because of the length field.  */
+  gcov_unsigned_t alloc = 1 + ((len + 4) >> 2);
+
+  /* Cannot write a string bigger than GCOV_BLOCK_SIZE yet.  */
+  gcc_assert (alloc < GCOV_BLOCK_SIZE);
+  return alloc;
+}
+#endif
+
+/* Allocate space to write WORDS words to the gcov file.  Return a
+   pointer to those words, or NULL on failure.  */
+
+static gcov_unsigned_t *
+gcov_write_words (unsigned words)
+{
+  gcov_unsigned_t *result;
+
+  gcc_assert (gcov_var.mode < 0);
+#if IN_LIBGCOV
+  if (gcov_var.offset + words >= GCOV_BLOCK_SIZE)
+    {
+      gcov_write_block (MIN (gcov_var.offset, GCOV_BLOCK_SIZE));
+      if (gcov_var.offset)
+        {
+          gcc_assert (gcov_var.offset < GCOV_BLOCK_SIZE);
+          memcpy (gcov_var.buffer,
+                  gcov_var.buffer + GCOV_BLOCK_SIZE,
+                  gcov_var.offset << 2);
+        }
+    }
+#else
+  if (gcov_var.offset + words > gcov_var.alloc)
+    gcov_allocate (gcov_var.offset + words);
+#endif
+  result = &gcov_var.buffer[gcov_var.offset];
+  gcov_var.offset += words;
+
+  return result;
+}
+
+/* Write unsigned VALUE to coverage file.  Sets error flag
+   appropriately.  */
+
+GCOV_LINKAGE void
+gcov_write_unsigned (gcov_unsigned_t value)
+{
+  gcov_unsigned_t *buffer = gcov_write_words (1);
+
+  buffer[0] = value;
+}
+
+/* Write counter VALUE to coverage file.  Sets error flag
+   appropriately.  */
+
+#if IN_LIBGCOV
+GCOV_LINKAGE void
+gcov_write_counter (gcov_type value)
+{
+  gcov_unsigned_t *buffer = gcov_write_words (2);
+
+  buffer[0] = (gcov_unsigned_t) value;
+  if (sizeof (value) > sizeof (gcov_unsigned_t))
+    buffer[1] = (gcov_unsigned_t) (value >> 32);
+  else
+    buffer[1] = 0;
+}
+#endif /* IN_LIBGCOV */
+
+/* Write STRING to coverage file.
+   Sets error flag on file
+   error, overflow flag on overflow */
+
+GCOV_LINKAGE void
+gcov_write_string (const char *string)
+{
+  unsigned length = 0;
+  unsigned alloc = 0;
+  gcov_unsigned_t *buffer;
+
+  if (string)
+    {
+      length = strlen (string);
+      alloc = (length + 4) >> 2;
+    }
+
+  buffer = gcov_write_words (1 + alloc);
+
+  buffer[0] = alloc;
+  buffer[alloc] = 0;
+  memcpy (&buffer[1], string, length);
+}
+
+#if !IN_LIBGCOV
+/* Write a tag TAG and reserve space for the record length.  Return a
+   value to be used for gcov_write_length.  */
+
+GCOV_LINKAGE gcov_position_t
+gcov_write_tag (gcov_unsigned_t tag)
+{
+  gcov_position_t result = gcov_var.start + gcov_var.offset;
+  gcov_unsigned_t *buffer = gcov_write_words (2);
+
+  buffer[0] = tag;
+  buffer[1] = 0;
+
+  return result;
+}
+
+/* Write a record length using POSITION, which was returned by
+   gcov_write_tag.  The current file position is the end of the
+   record, and is restored before returning.  The error flag is
+   set on failure.  */
+
+GCOV_LINKAGE void
+gcov_write_length (gcov_position_t position)
+{
+  unsigned offset;
+  gcov_unsigned_t length;
+  gcov_unsigned_t *buffer;
+
+  gcc_assert (gcov_var.mode < 0);
+  gcc_assert (position + 2 <= gcov_var.start + gcov_var.offset);
+  gcc_assert (position >= gcov_var.start);
+  offset = position - gcov_var.start;
+  length = gcov_var.offset - offset - 2;
+  buffer = (gcov_unsigned_t *) &gcov_var.buffer[offset];
+  buffer[1] = length;
+  if (gcov_var.offset >= GCOV_BLOCK_SIZE)
+    gcov_write_block (gcov_var.offset);
+}
+
+#else /* IN_LIBGCOV */
+
+/* Write a tag TAG and length LENGTH.  */
+
+GCOV_LINKAGE void
+gcov_write_tag_length (gcov_unsigned_t tag, gcov_unsigned_t length)
+{
+  gcov_unsigned_t *buffer = gcov_write_words (2);
+
+  buffer[0] = tag;
+  buffer[1] = length;
+}
+
+/* Write a summary structure to the gcov file.  */
+
+GCOV_LINKAGE void
+gcov_write_summary (gcov_unsigned_t tag, const struct gcov_summary *summary)
+{
+  unsigned ix;
+  const struct gcov_ctr_summary *csum;
+
+  gcov_write_tag_length (tag, GCOV_TAG_SUMMARY_LENGTH);
+  gcov_write_unsigned (summary->checksum);
+  for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
+    {
+      gcov_write_unsigned (csum->num);
+      gcov_write_unsigned (csum->runs);
+      gcov_write_counter (csum->sum_all);
+      gcov_write_counter (csum->run_max);
+      gcov_write_counter (csum->sum_max);
+    }
+}
+#endif /* IN_LIBGCOV */
+
+#endif /*!IN_GCOV */
+
+/* Return a pointer from which to read WORDS words from the gcov
+   file.  Returns NULL on failure (read past EOF).
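+
+   A typical caller checks the result before use, exactly as
+   gcov_read_unsigned below does:
+
+     const gcov_unsigned_t *buffer = gcov_read_words (1);
+     if (!buffer)
+       return 0;     <- NULL means the read went past EOF
+     value = from_file (buffer[0]);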
+   */
+
+static const gcov_unsigned_t *
+gcov_read_words (unsigned words)
+{
+  const gcov_unsigned_t *result;
+  unsigned excess = gcov_var.length - gcov_var.offset;
+
+  gcc_assert (gcov_var.mode > 0);
+  gcc_assert (words < GCOV_BLOCK_SIZE);
+  if (excess < words)
+    {
+      gcov_var.start += gcov_var.offset;
+#if IN_LIBGCOV
+      if (excess)
+        {
+          gcc_assert (excess < GCOV_BLOCK_SIZE);
+          memmove (gcov_var.buffer, gcov_var.buffer + gcov_var.offset, excess * 4);
+        }
+#else
+      memmove (gcov_var.buffer, gcov_var.buffer + gcov_var.offset, excess * 4);
+#endif
+      gcov_var.offset = 0;
+      gcov_var.length = excess;
+#if IN_LIBGCOV
+      excess = (sizeof (gcov_var.buffer) / sizeof (gcov_var.buffer[0])) - gcov_var.length;
+#else
+      if (gcov_var.length + words > gcov_var.alloc)
+        gcov_allocate (gcov_var.length + words);
+      excess = gcov_var.alloc - gcov_var.length;
+#endif
+      excess = _GCOV_fread (gcov_var.buffer + gcov_var.length,
+                            1, excess << 2, gcov_var.file) >> 2;
+      gcov_var.length += excess;
+      if (gcov_var.length < words)
+        {
+          gcov_var.overread += words - gcov_var.length;
+          gcov_var.length = 0;
+          return 0;
+        }
+    }
+  result = &gcov_var.buffer[gcov_var.offset];
+  gcov_var.offset += words;
+  return result;
+}
+
+/* Read unsigned value from a coverage file.  Sets error flag on file
+   error, overflow flag on overflow */
+
+GCOV_LINKAGE gcov_unsigned_t
+gcov_read_unsigned (void)
+{
+  gcov_unsigned_t value;
+  const gcov_unsigned_t *buffer = gcov_read_words (1);
+
+  if (!buffer)
+    return 0;
+  value = from_file (buffer[0]);
+  return value;
+}
+
+/* Read counter value from a coverage file.  Sets error flag on file
+   error, overflow flag on overflow */
+
+GCOV_LINKAGE gcov_type
+gcov_read_counter (void)
+{
+  gcov_type value;
+  const gcov_unsigned_t *buffer = gcov_read_words (2);
+
+  if (!buffer)
+    return 0;
+  value = from_file (buffer[0]);
+  if (sizeof (value) > sizeof (gcov_unsigned_t))
+    value |= ((gcov_type) from_file (buffer[1])) << 32;
+  else if (buffer[1])
+    gcov_var.error = -1;
+
+  return value;
+}
+
+/* Read string from coverage file.  Returns a pointer to a static
+   buffer, or NULL on empty string.  You must copy the string before
+   calling another gcov function.  */
+
+GCOV_LINKAGE const char *
+gcov_read_string (void)
+{
+  unsigned length = gcov_read_unsigned ();
+
+  if (!length)
+    return 0;
+
+  return (const char *) gcov_read_words (length);
+}
+
+GCOV_LINKAGE void
+gcov_read_summary (struct gcov_summary *summary)
+{
+  unsigned ix;
+  struct gcov_ctr_summary *csum;
+
+  summary->checksum = gcov_read_unsigned ();
+  for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
+    {
+      csum->num = gcov_read_unsigned ();
+      csum->runs = gcov_read_unsigned ();
+      csum->sum_all = gcov_read_counter ();
+      csum->run_max = gcov_read_counter ();
+      csum->sum_max = gcov_read_counter ();
+    }
+}
+
+#if !IN_LIBGCOV && IN_GCOV != 1
+/* Read LEN words (unsigned type) and construct MOD_INFO.
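+   As read below, the record body is laid out word by word as (a
+   descriptive sketch of what the loop consumes, not a separate
+   format):
+
+     ident is_primary is_exported lang
+     num_quote_paths num_bracket_paths num_cpp_defines
+     num_cpp_includes num_cl_args
+     int32:len  word-packed da_filename
+     int32:len  word-packed source_filename
+     {int32:len word-packed string} for each quote/bracket path,
+       cpp define/include and command-line argument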
+   */
+
+GCOV_LINKAGE void
+gcov_read_module_info (struct gcov_module_info *mod_info,
+                       gcov_unsigned_t len)
+{
+  gcov_unsigned_t src_filename_len, filename_len, i, j, num_strings;
+  mod_info->ident = gcov_read_unsigned ();
+  mod_info->is_primary = gcov_read_unsigned ();
+  mod_info->is_exported = gcov_read_unsigned ();
+  mod_info->lang = gcov_read_unsigned ();
+  mod_info->num_quote_paths = gcov_read_unsigned ();
+  mod_info->num_bracket_paths = gcov_read_unsigned ();
+  mod_info->num_cpp_defines = gcov_read_unsigned ();
+  mod_info->num_cpp_includes = gcov_read_unsigned ();
+  mod_info->num_cl_args = gcov_read_unsigned ();
+  len -= 9;
+
+  filename_len = gcov_read_unsigned ();
+  mod_info->da_filename = (char *) xmalloc (filename_len *
+                                            sizeof (gcov_unsigned_t));
+  for (i = 0; i < filename_len; i++)
+    ((gcov_unsigned_t *) mod_info->da_filename)[i] = gcov_read_unsigned ();
+  len -= (filename_len + 1);
+
+  src_filename_len = gcov_read_unsigned ();
+  mod_info->source_filename = (char *) xmalloc (src_filename_len *
+                                                sizeof (gcov_unsigned_t));
+  for (i = 0; i < src_filename_len; i++)
+    ((gcov_unsigned_t *) mod_info->source_filename)[i] = gcov_read_unsigned ();
+  len -= (src_filename_len + 1);
+
+  num_strings = mod_info->num_quote_paths + mod_info->num_bracket_paths +
+    mod_info->num_cpp_defines + mod_info->num_cpp_includes +
+    mod_info->num_cl_args;
+  for (j = 0; j < num_strings; j++)
+    {
+      gcov_unsigned_t string_len = gcov_read_unsigned ();
+      mod_info->string_array[j] =
+        (char *) xmalloc (string_len * sizeof (gcov_unsigned_t));
+      for (i = 0; i < string_len; i++)
+        ((gcov_unsigned_t *) mod_info->string_array[j])[i] =
+          gcov_read_unsigned ();
+      len -= (string_len + 1);
+    }
+  gcc_assert (!len);
+}
+#endif
+
+#if !IN_LIBGCOV
+/* Reset to a known position.  BASE should have been obtained from
+   gcov_position, LENGTH should be a record length.  */
+
+GCOV_LINKAGE void
+gcov_sync (gcov_position_t base, gcov_unsigned_t length)
+{
+#ifdef __GCOV_KERNEL__
+  /* should not reach this point */
+  gcc_assert (0);
+#else /* __GCOV_KERNEL__ */
+  gcc_assert (gcov_var.mode > 0);
+  base += length;
+  if (base - gcov_var.start <= gcov_var.length)
+    gcov_var.offset = base - gcov_var.start;
+  else
+    {
+      gcov_var.offset = gcov_var.length = 0;
+      _GCOV_fseek (gcov_var.file, base << 2, SEEK_SET);
+      gcov_var.start = _GCOV_ftell (gcov_var.file) >> 2;
+    }
+#endif /* __GCOV_KERNEL__ */
+}
+#endif
+
+#if IN_LIBGCOV
+/* Move to a given position in a gcov file.  */
+
+GCOV_LINKAGE void
+gcov_seek (gcov_position_t base)
+{
+  gcc_assert (gcov_var.mode < 0);
+  if (gcov_var.offset)
+    gcov_write_block (gcov_var.offset);
+  _GCOV_fseek (gcov_var.file, base << 2, SEEK_SET);
+  gcov_var.start = _GCOV_ftell (gcov_var.file) >> 2;
+}
+
+/* Truncate the gcov file at the current position.  */
+
+GCOV_LINKAGE void
+gcov_truncate (void)
+{
+#ifdef __GCOV_KERNEL__
+  /* should not reach this point */
+  gcc_assert (0);
+#else /* __GCOV_KERNEL__ */
+  long offs;
+  int filenum;
+  gcc_assert (gcov_var.mode < 0);
+  if (gcov_var.offset)
+    gcov_write_block (gcov_var.offset);
+  offs = ftell (gcov_var.file);
+  filenum = fileno (gcov_var.file);
+  if (offs == -1 || filenum == -1 || ftruncate (filenum, offs))
+    gcov_var.error = 1;
+#endif /* __GCOV_KERNEL__ */
+}
+#endif
+
+#ifndef __GCOV_KERNEL__
+/* Convert an unsigned NUMBER to a percentage after dividing by
+   100.
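+   The PMU records in this file store percentages as integers out of
+   10000 (the "per 10k" fields below), so, for example, a stored
+   value of 1234 converts to 12.34 and prints as 12.34%.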
+   */
+
+GCOV_LINKAGE float
+convert_unsigned_to_pct (const unsigned number)
+{
+  return (float)number / 100.0f;
+}
+#endif
+
+#if !IN_LIBGCOV && IN_GCOV != 1
+/* Print load latency information given by LL_INFO in a human readable
+   format into an open output file pointed by FP.  NEWLINE specifies
+   whether or not to print a trailing newline.  */
+
+GCOV_LINKAGE void
+print_load_latency_line (FILE *fp, const gcov_pmu_ll_info_t *ll_info,
+                         const enum print_newline newline)
+{
+  if (!ll_info)
+    return;
+  fprintf (fp, " %u %.2f%% %.2f%% %.2f%% %.2f%% %.2f%% %.2f%% %.2f%% "
+           "%.2f%% %.2f%% " HOST_WIDEST_INT_PRINT_HEX " %s %d %d",
+           ll_info->counts,
+           convert_unsigned_to_pct (ll_info->self),
+           convert_unsigned_to_pct (ll_info->cum),
+           convert_unsigned_to_pct (ll_info->lt_10),
+           convert_unsigned_to_pct (ll_info->lt_32),
+           convert_unsigned_to_pct (ll_info->lt_64),
+           convert_unsigned_to_pct (ll_info->lt_256),
+           convert_unsigned_to_pct (ll_info->lt_1024),
+           convert_unsigned_to_pct (ll_info->gt_1024),
+           convert_unsigned_to_pct (ll_info->wself),
+           ll_info->code_addr,
+           ll_info->filename,
+           ll_info->line,
+           ll_info->discriminator);
+  if (newline == add_newline)
+    fprintf (fp, "\n");
+}
+
+/* Print BRM_INFO into the file pointed by FP.  NEWLINE specifies
+   whether or not to print a trailing newline.  */
+
+GCOV_LINKAGE void
+print_branch_mispredict_line (FILE *fp, const gcov_pmu_brm_info_t *brm_info,
+                              const enum print_newline newline)
+{
+  if (!brm_info)
+    return;
+  fprintf (fp, " %u %.2f%% %.2f%% " HOST_WIDEST_INT_PRINT_HEX " %s %d %d",
+           brm_info->counts,
+           convert_unsigned_to_pct (brm_info->self),
+           convert_unsigned_to_pct (brm_info->cum),
+           brm_info->code_addr,
+           brm_info->filename,
+           brm_info->line,
+           brm_info->discriminator);
+  if (newline == add_newline)
+    fprintf (fp, "\n");
+}
+
+/* Print TOOL_HEADER into the file pointed by FP.  NEWLINE specifies
+   whether or not to print a trailing newline.  */
+
+GCOV_LINKAGE void
+print_pmu_tool_header (FILE *fp, gcov_pmu_tool_header_t *tool_header,
+                       const enum print_newline newline)
+{
+  if (!tool_header)
+    return;
+  fprintf (fp, "\nhost_cpu: %s\n", tool_header->host_cpu);
+  fprintf (fp, "hostname: %s\n", tool_header->hostname);
+  fprintf (fp, "kernel_version: %s\n", tool_header->kernel_version);
+  fprintf (fp, "column_header: %s\n", tool_header->column_header);
+  fprintf (fp, "column_description: %s\n", tool_header->column_description);
+  fprintf (fp, "full_header: %s\n", tool_header->full_header);
+  if (newline == add_newline)
+    fprintf (fp, "\n");
+}
+#endif
+
+#if IN_GCOV > 0
+/* Return the modification time of the current gcov file.  */
+
+GCOV_LINKAGE time_t
+gcov_time (void)
+{
+  struct stat status;
+
+  if (fstat (fileno (gcov_var.file), &status))
+    return 0;
+  else
+    return status.st_mtime;
+}
+#endif /* IN_GCOV */
+
+#ifdef __GCOV_KERNEL__
+
+/* File fclose operation in kernel mode.  */
+
+int
+kernel_file_fclose (gcov_kernel_vfile *fp)
+{
+  return 0;
+}
+
+/* File ftell operation in kernel mode.  It currently should not
+   be called.  */
+
+long
+kernel_file_ftell (gcov_kernel_vfile *fp)
+{
+  gcc_assert (0); /* should not reach here */
+  return 0;
+}
+
+/* File fseek operation in kernel mode.  It should only be called
+   with OFFSET==0 and WHENCE==0 to a freshly opened file.  */
+
+int
+kernel_file_fseek (gcov_kernel_vfile *fp, long offset, int whence)
+{
+  gcc_assert (offset == 0 && whence == 0 && fp->count == 0);
+  return 0;
+}
+
+/* File ftruncate operation in kernel mode.  It currently should not
+   be called.
+   */
+
+int
+kernel_file_ftruncate (gcov_kernel_vfile *fp, off_t value)
+{
+  gcc_assert (0); /* should not reach here */
+  return 0;
+}
+
+/* File fread operation in kernel mode.  It currently should not
+   be called.  */
+
+int
+kernel_file_fread (void *ptr, size_t size, size_t nitems,
+                   gcov_kernel_vfile *fp)
+{
+  gcc_assert (0); /* should not reach here */
+  return 0;
+}
+
+/* File fwrite operation in kernel mode.  It outputs the data
+   to a buffer in the virtual file.  */
+
+int
+kernel_file_fwrite (const void *ptr, size_t size,
+                    size_t nitems, gcov_kernel_vfile *fp)
+{
+  char *vbuf;
+  unsigned vsize, vpos;
+  unsigned len;
+
+  if (!fp) return 0;
+
+  vbuf = fp->buf;
+  vsize = fp->size;
+  vpos = fp->count;
+
+  if (vsize <= vpos)
+    {
+      printk (KERN_ERR
+              "GCOV_KERNEL: something wrong: vbuf=%p vsize=%u vpos=%u\n",
+              vbuf, vsize, vpos);
+      return 0;
+    }
+  len = vsize - vpos;
+  len /= size;
+
+  if (len > nitems)
+    len = nitems;
+
+  memcpy (vbuf+vpos, ptr, size*len);
+  fp->count += len*size;
+
+  if (len != nitems)
+    printk (KERN_ERR
+            "GCOV_KERNEL: something wrong: size=%lu nitems=%lu ret=%d\n",
+            size, nitems, len);
+  return len;
+}
+
+/* File fileno operation in kernel mode.  It currently should not
+   be called.  */
+
+int
+kernel_file_fileno (gcov_kernel_vfile *fp)
+{
+  gcc_assert (0); /* should not reach here */
+  return 0;
+}
+#else /* __GCOV_KERNEL__ */
+
+#if IN_GCOV != 1
+/* Delete pmu tool header TOOL_HEADER.  */
+
+GCOV_LINKAGE void
+destroy_pmu_tool_header (gcov_pmu_tool_header_t *tool_header)
+{
+  if (!tool_header)
+    return;
+  if (tool_header->host_cpu)
+    free (tool_header->host_cpu);
+  if (tool_header->hostname)
+    free (tool_header->hostname);
+  if (tool_header->kernel_version)
+    free (tool_header->kernel_version);
+  if (tool_header->column_header)
+    free (tool_header->column_header);
+  if (tool_header->column_description)
+    free (tool_header->column_description);
+  if (tool_header->full_header)
+    free (tool_header->full_header);
+}
+#endif
+
+#endif /* __GCOV_KERNEL__ */
diff --git a/lib/gcc/i686-linux-android/4.6/gcov-src/gcov-io.h b/lib/gcc/i686-linux-android/4.6/gcov-src/gcov-io.h
new file mode 100644
index 0000000..fe0ae20
--- /dev/null
+++ b/lib/gcc/i686-linux-android/4.6/gcov-src/gcov-io.h
@@ -0,0 +1,968 @@
+/* File format for coverage information
+   Copyright (C) 1996, 1997, 1998, 2000, 2002,
+   2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+   Contributed by Bob Manson <manson@cygnus.com>.
+   Completely remangled by Nathan Sidwell <nathan@codesourcery.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+<http://www.gnu.org/licenses/>.  */
+
+
+/* Coverage information is held in two files.
+   A notes file, which is
+   generated by the compiler, and a data file, which is generated by
+   the program under test.  Both files use a similar structure.  We do
+   not attempt to make these files backwards compatible with previous
+   versions, as you only need coverage information when developing a
+   program.  We do hold version information, so that mismatches can be
+   detected, and we use a format that allows tools to skip information
+   they do not understand or are not interested in.
+
+   Numbers are recorded in the 32 bit unsigned binary form of the
+   endianness of the machine generating the file.  64 bit numbers are
+   stored as two 32 bit numbers, the low part first.  Strings are
+   padded with 1 to 4 NUL bytes, to bring the length up to a multiple
+   of 4.  The number of 4-byte words is stored, followed by the padded
+   string.  Zero length and NULL strings are simply stored as a length
+   of zero (they have no trailing NUL or padding).
+
+       int32:  byte3 byte2 byte1 byte0 | byte0 byte1 byte2 byte3
+       int64:  int32:low int32:high
+       string: int32:0 | int32:length char* char:0 padding
+       padding: | char:0 | char:0 char:0 | char:0 char:0 char:0
+       item: int32 | int64 | string
+
+   The basic format of the files is
+
+       file : int32:magic int32:version int32:stamp record*
+
+   The magic ident is different for the notes and the data files.  The
+   magic ident is used to determine the endianness of the file, when
+   reading.  The version is the same for both files and is derived
+   from gcc's version number.  The stamp value is used to synchronize
+   note and data files and to synchronize merging within a data
+   file.  It need not be an absolute time stamp, merely a ticker that
+   increments fast enough and cycles slow enough to distinguish
+   different compile/run/compile cycles.
+
+   Although the ident and version are formally 32 bit numbers, they
+   are derived from 4 character ASCII strings.  The version number
+   consists of the single character major version number, a two
+   character minor version number (leading zero for versions less than
+   10), and a single character indicating the status of the release.
+   That will be 'e' experimental, 'p' prerelease and 'r' for release.
+   Because, by good fortune, these are in alphabetical order, string
+   collating can be used to compare version strings.  Be aware that
+   the 'e' designation will (naturally) be unstable and might be
+   incompatible with itself.  For gcc 3.4 experimental, it would be
+   '304e' (0x33303465).  When the major version reaches 10, the
+   letters A-Z will be used.  Assuming a minor increment is released
+   every 6 months, we have to make a major increment every 50 years.
+   Assuming a major increment is released every 5 years, we're ok for
+   the next 155 years -- good enough for me.
+
+   A record has a tag, length and variable amount of data.
+
+       record: header data
+       header: int32:tag int32:length
+       data: item*
+
+   Records are not nested, but there is a record hierarchy.  Tag
+   numbers reflect this hierarchy.  Tags are unique across note and
+   data files.  Some record types have a varying amount of data.  The
+   LENGTH is the number of 4-byte words that follow and is usually
+   used to determine how much data there is.  The tag value is split
+   into 4 8-bit fields, one for each of four possible levels.  The
+   most significant is allocated first.  Unused levels are zero.
+   Active levels are odd-valued, so that the LSB of the level is one.
+   A sub-level incorporates the values of its superlevels.
+   This formatting allows
+   you to determine the tag hierarchy, without understanding the tags
+   themselves, and is similar to the standard section numbering used
+   in technical documents.  Level values [1..3f] are used for common
+   tags, values [41..9f] for the notes file and [a1..ff] for the data
+   file.
+
+   The basic block graph file contains the following records
+       note: unit function-graph*
+       unit: header int32:checksum string:source
+       function-graph: announce_function basic_blocks {arcs | lines}*
+       announce_function: header int32:ident
+               int32:lineno_checksum int32:cfg_checksum
+               string:name string:source int32:lineno
+       basic_block: header int32:flags*
+       arcs: header int32:block_no arc*
+       arc: int32:dest_block int32:flags
+       lines: header int32:block_no line*
+               int32:0 string:NULL
+       line: int32:line_no | int32:0 string:filename
+
+   The BASIC_BLOCK record holds per-bb flags.  The number of blocks
+   can be inferred from its data length.  There is one ARCS record per
+   basic block.  The number of arcs from a bb is implicit from the
+   data length.  It enumerates the destination bb and per-arc flags.
+   There is one LINES record per basic block, it enumerates the source
+   lines which belong to that basic block.  Source file names are
+   introduced by a line number of 0, following lines are from the new
+   source file.  The initial source file for the function is NULL, but
+   the current source file should be remembered from one LINES record
+   to the next.  The end of a block is indicated by an empty filename
+   - this does not reset the current source file.  Note there is no
+   ordering of the ARCS and LINES records: they may be in any order,
+   interleaved in any manner.  The current filename follows the order
+   the LINES records are stored in the file, *not* the ordering of the
+   blocks they are for.
+
+   The data file contains the following records.
+       data: {unit function-data* summary:object summary:program*}*
+       unit: header int32:checksum
+       function-data: announce_function arc_counts
+       announce_function: header int32:ident
+               int32:lineno_checksum int32:cfg_checksum
+       arc_counts: header int64:count*
+       summary: int32:checksum {count-summary}GCOV_COUNTERS
+       count-summary: int32:num int32:runs int64:sum
+               int64:max int64:sum_max
+
+   The ANNOUNCE_FUNCTION record is the same as that in the note file,
+   but without the source location.  The ARC_COUNTS gives the counter
+   values for those arcs that are instrumented.  The SUMMARY records
+   give information about the whole object file and about the whole
+   program.  The checksum is used for whole program summaries, and
+   disambiguates different programs which include the same
+   instrumented object file.  There may be several program summaries,
+   each with a unique checksum.  The object summary's checksum is zero.
+   Note that the data file might contain information from several runs
+   concatenated, or the data might be merged.
+
+   This file is included by both the compiler, gcov tools and the
+   runtime support library libgcov.  IN_LIBGCOV and IN_GCOV are used to
+   distinguish which case is which.  If IN_LIBGCOV is nonzero,
+   libgcov is being built.  If IN_GCOV is nonzero, the gcov tools are
+   being built.  Otherwise the compiler is being built.  IN_GCOV may be
+   positive or negative.  If positive, we are compiling a tool that
+   requires additional functions (see the code for knowledge of what
+   those functions are).
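+
+   As an illustrative sketch (not part of this header), a reader built
+   on the API declared below starts like:
+
+     gcov_open (filename, 1);                      <- read-only
+     if (!gcov_magic (gcov_read_unsigned (), GCOV_DATA_MAGIC))
+       bail out: wrong kind of file
+     version = gcov_read_unsigned ();
+     stamp = gcov_read_unsigned ();
+     while ((tag = gcov_read_unsigned ()))         <- zero tag ends file
+       {
+         gcov_unsigned_t length = gcov_read_unsigned ();
+         gcov_position_t base = gcov_position ();
+         decode the records of interest here
+         gcov_sync (base, length);                 <- skip the remainder
+       }
+     gcov_close ();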
+   */
+
+#ifndef GCC_GCOV_IO_H
+#define GCC_GCOV_IO_H
+
+#ifdef __KERNEL__
+#ifndef __GCOV_KERNEL__
+#define __GCOV_KERNEL__
+#endif /* __GCOV_KERNEL__ */
+#endif /* __KERNEL__ */
+
+#ifdef __GCOV_KERNEL__
+#define GCOV_LINKAGE /* nothing */
+
+/* We need the definitions for
+     BITS_PER_UNIT and
+     LONG_LONG_TYPE_SIZE
+   They are defined in gcc/defaults.h and gcc/config/<arch_depend_files>
+   (like, gcc/config/i386/i386.h).  And it can be overridden by setting
+   in build scripts.  Here I hardcoded the value for x86.
+   Todo: using a program to auto-generate the values at build time.  */
+#define BITS_PER_UNIT 8
+#define LONG_LONG_TYPE_SIZE 64
+
+/* There are many gcc_assertions.  Set the value to 1 if we want a warning
+   message if the assertion fails.  */
+#ifndef ENABLE_ASSERT_CHECKING
+#define ENABLE_ASSERT_CHECKING 1
+#endif
+
+#include <linux/fs.h>
+#endif /* __GCOV_KERNEL__ */
+
+/* Wrappers to the file operations.  */
+#ifndef __GCOV_KERNEL__
+# define _GCOV_FILE      FILE
+# define _GCOV_fclose    fclose
+# define _GCOV_ftell     ftell
+# define _GCOV_fseek     fseek
+# define _GCOV_ftruncate ftruncate
+# define _GCOV_fread     fread
+# define _GCOV_fwrite    fwrite
+# define _GCOV_fileno    fileno
+#else /* __GCOV_KERNEL__ */
+/* In Linux kernel mode, a virtual file is used for file operations.  */
+struct gcov_info;
+typedef struct {
+  long size; /* size of buf */
+  long count; /* element written into buf */
+  struct gcov_info *info;
+  char buf[0];
+} gcov_kernel_vfile;
+
+# define _GCOV_FILE gcov_kernel_vfile
+
+/* gcc_assert() prints out a warning if the check fails.  It
+   will not abort.  */
+#if ENABLE_ASSERT_CHECKING
+# define gcc_assert(EXPR) \
+    ((void)(!(EXPR) ? printk (KERN_WARNING \
+      "GCOV assertion fails: func=%s line=%d\n", \
+      __FUNCTION__, __LINE__), 0 : 0))
+#else
+# define gcc_assert(EXPR) ((void)(0 && (EXPR)))
+#endif
+
+/* Wrappers to the file operations.  */
+# define _GCOV_fclose    kernel_file_fclose
+# define _GCOV_ftell     kernel_file_ftell
+# define _GCOV_fseek     kernel_file_fseek
+# define _GCOV_ftruncate kernel_file_ftruncate
+# define _GCOV_fread     kernel_file_fread
+# define _GCOV_fwrite    kernel_file_fwrite
+# define _GCOV_fileno    kernel_file_fileno
+
+/* Declarations for virtual file operations.
+   */
+extern int kernel_file_fclose (gcov_kernel_vfile *);
+extern long kernel_file_ftell (gcov_kernel_vfile *);
+extern int kernel_file_fseek (gcov_kernel_vfile *, long, int);
+extern int kernel_file_ftruncate (gcov_kernel_vfile *, off_t);
+extern int kernel_file_fread (void *, size_t, size_t,
+                              gcov_kernel_vfile *);
+extern int kernel_file_fwrite (const void *, size_t, size_t,
+                               gcov_kernel_vfile *);
+extern int kernel_file_fileno(gcov_kernel_vfile *);
+#endif /* __GCOV_KERNEL__ */
+#if IN_LIBGCOV
+
+#undef FUNC_ID_WIDTH
+#undef FUNC_ID_MASK
+/* About the target */
+
+#if BITS_PER_UNIT == 8
+typedef unsigned gcov_unsigned_t __attribute__ ((mode (SI)));
+typedef unsigned gcov_position_t __attribute__ ((mode (SI)));
+#if LONG_LONG_TYPE_SIZE > 32
+typedef signed gcov_type __attribute__ ((mode (DI)));
+#define FUNC_ID_WIDTH 32
+#define FUNC_ID_MASK ((1ll << FUNC_ID_WIDTH) - 1)
+#else
+typedef signed gcov_type __attribute__ ((mode (SI)));
+#define FUNC_ID_WIDTH 16
+#define FUNC_ID_MASK ((1 << FUNC_ID_WIDTH) - 1)
+#endif
+#else /* BITS_PER_UNIT != 8 */
+#if BITS_PER_UNIT == 16
+typedef unsigned gcov_unsigned_t __attribute__ ((mode (HI)));
+typedef unsigned gcov_position_t __attribute__ ((mode (HI)));
+#if LONG_LONG_TYPE_SIZE > 32
+typedef signed gcov_type __attribute__ ((mode (SI)));
+#define FUNC_ID_WIDTH 32
+#define FUNC_ID_MASK ((1ll << FUNC_ID_WIDTH) - 1)
+#else
+typedef signed gcov_type __attribute__ ((mode (HI)));
+#define FUNC_ID_WIDTH 16
+#define FUNC_ID_MASK ((1 << FUNC_ID_WIDTH) - 1)
+#endif
+#else /* BITS_PER_UNIT != 16 */
+typedef unsigned gcov_unsigned_t __attribute__ ((mode (QI)));
+typedef unsigned gcov_position_t __attribute__ ((mode (QI)));
+#if LONG_LONG_TYPE_SIZE > 32
+typedef signed gcov_type __attribute__ ((mode (HI)));
+#define FUNC_ID_WIDTH 32
+#define FUNC_ID_MASK ((1ll << FUNC_ID_WIDTH) - 1)
+#else
+typedef signed gcov_type __attribute__ ((mode (QI)));
+#define FUNC_ID_WIDTH 16
+#define FUNC_ID_MASK ((1 << FUNC_ID_WIDTH) - 1)
+#endif
+#endif /* BITS_PER_UNIT == 16 */
+
+#endif /* BITS_PER_UNIT == 8 */
+
+#undef EXTRACT_MODULE_ID_FROM_GLOBAL_ID
+#undef EXTRACT_FUNC_ID_FROM_GLOBAL_ID
+#undef GEN_FUNC_GLOBAL_ID
+#define EXTRACT_MODULE_ID_FROM_GLOBAL_ID(gid) \
+                (gcov_unsigned_t)(((gid) >> FUNC_ID_WIDTH) & FUNC_ID_MASK)
+#define EXTRACT_FUNC_ID_FROM_GLOBAL_ID(gid) \
+                (gcov_unsigned_t)((gid) & FUNC_ID_MASK)
+#define GEN_FUNC_GLOBAL_ID(m,f) ((((gcov_type) (m)) << FUNC_ID_WIDTH) | (f))
+
+
+#if defined (TARGET_POSIX_IO)
+#define GCOV_LOCKED 1
+#else
+#define GCOV_LOCKED 0
+#endif
+
+#else /* !IN_LIBGCOV */
+/* About the host */
+
+typedef unsigned gcov_unsigned_t;
+typedef unsigned gcov_position_t;
+
+/* gcov_type is typedef'd elsewhere for the compiler */
+#if IN_GCOV
+#define GCOV_LINKAGE static
+typedef HOST_WIDEST_INT gcov_type;
+#if IN_GCOV > 0
+#include <sys/types.h>
+#endif
+
+#define FUNC_ID_WIDTH HOST_BITS_PER_WIDE_INT/2
+#define FUNC_ID_MASK ((1L << FUNC_ID_WIDTH) - 1)
+#define EXTRACT_MODULE_ID_FROM_GLOBAL_ID(gid) (unsigned)(((gid) >> FUNC_ID_WIDTH) & FUNC_ID_MASK)
+#define EXTRACT_FUNC_ID_FROM_GLOBAL_ID(gid) (unsigned)((gid) & FUNC_ID_MASK)
+#define FUNC_GLOBAL_ID(m,f) ((((HOST_WIDE_INT) (m)) << FUNC_ID_WIDTH) | (f))
+
+#else /*!IN_GCOV */
+#define GCOV_TYPE_SIZE (LONG_LONG_TYPE_SIZE > 32 ? 64 : 32)
+#endif
+
+#if defined (HOST_HAS_F_SETLKW)
+#define GCOV_LOCKED 1
+#else
+#define GCOV_LOCKED 0
+#endif
+
+#endif /* !IN_LIBGCOV */
+
+/* In gcov we want function linkage to be static.  In the compiler we want
+   it extern, so that they can be accessed from elsewhere.
+   In libgcov we
+   need these functions to be extern, so prefix them with __gcov.  In
+   libgcov they must also be hidden so that the instance in the executable
+   is not also used in a DSO.  */
+#if IN_LIBGCOV
+
+#ifndef __GCOV_KERNEL__
+#include "tconfig.h"
+#endif /* __GCOV_KERNEL__ */
+
+#define gcov_var __gcov_var
+#define gcov_open __gcov_open
+#define gcov_close __gcov_close
+#define gcov_write_tag_length __gcov_write_tag_length
+#define gcov_position __gcov_position
+#define gcov_seek __gcov_seek
+#define gcov_rewrite __gcov_rewrite
+#define gcov_truncate __gcov_truncate
+#define gcov_is_error __gcov_is_error
+#define gcov_write_unsigned __gcov_write_unsigned
+#define gcov_write_counter __gcov_write_counter
+#define gcov_write_summary __gcov_write_summary
+#define gcov_write_module_info __gcov_write_module_info
+#define gcov_write_string __gcov_write_string
+#define gcov_string_length __gcov_string_length
+#define gcov_read_unsigned __gcov_read_unsigned
+#define gcov_read_counter __gcov_read_counter
+#define gcov_read_string __gcov_read_string
+#define gcov_read_summary __gcov_read_summary
+#define gcov_read_module_info __gcov_read_module_info
+#define gcov_sort_n_vals __gcov_sort_n_vals
+#define gcov_canonical_filename _gcov_canonical_filename
+#define gcov_read_pmu_load_latency_info __gcov_read_pmu_load_latency_info
+#define gcov_read_pmu_branch_mispredict_info __gcov_read_pmu_branch_mispredict_info
+#define gcov_read_pmu_tool_header __gcov_read_pmu_tool_header
+#define destroy_pmu_tool_header __destroy_pmu_tool_header
+
+
+/* Poison these, so they don't accidentally slip in.  */
+#pragma GCC poison gcov_write_tag gcov_write_length
+#pragma GCC poison gcov_sync gcov_time gcov_magic
+
+#ifdef HAVE_GAS_HIDDEN
+#define ATTRIBUTE_HIDDEN __attribute__ ((__visibility__ ("hidden")))
+#else
+#define ATTRIBUTE_HIDDEN
+#endif
+
+#else
+
+#define ATTRIBUTE_HIDDEN
+
+#endif
+
+#ifndef GCOV_LINKAGE
+#define GCOV_LINKAGE extern
+#endif
+
+/* File suffixes.  */
+#define GCOV_DATA_SUFFIX ".gcda"
+#define GCOV_NOTE_SUFFIX ".gcno"
+
+/* File magic.  Must not be palindromes.  */
+#define GCOV_DATA_MAGIC ((gcov_unsigned_t)0x67636461) /* "gcda" */
+#define GCOV_NOTE_MAGIC ((gcov_unsigned_t)0x67636e6f) /* "gcno" */
+
+/* gcov-iov.h is automatically generated by the makefile from
+   version.c; it looks like
+       #define GCOV_VERSION ((gcov_unsigned_t)0x89abcdef)
+*/
+#include "gcov-iov.h"
+
+/* Convert a magic or version number to a 4 character string.  */
+#define GCOV_UNSIGNED2STRING(ARRAY,VALUE) \
+  ((ARRAY)[0] = (char)((VALUE) >> 24), \
+   (ARRAY)[1] = (char)((VALUE) >> 16), \
+   (ARRAY)[2] = (char)((VALUE) >> 8), \
+   (ARRAY)[3] = (char)((VALUE) >> 0))
+
+/* The record tags.  Values [1..3f] are for tags which may be in either
+   file.  Values [41..9f] for those in the note file and [a1..ff] for
+   the data file.  The tag value zero is used as an explicit end of
+   file marker -- it is not required to be present.
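+
+   For illustration, with the values defined below, GCOV_TAG_BLOCKS
+   (0x01410000) is an immediate subtag of GCOV_TAG_FUNCTION
+   (0x01000000): GCOV_TAG_MASK (0x01000000) is 0x01ffffff, shifting it
+   right by 8 gives 0x0001ffff, which is exactly GCOV_TAG_MASK
+   (0x01410000), and the two tags differ only within the masked bits.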
+   */
+
+#define GCOV_TAG_FUNCTION        ((gcov_unsigned_t)0x01000000)
+#define GCOV_TAG_FUNCTION_LENGTH (3)
+#define GCOV_TAG_BLOCKS          ((gcov_unsigned_t)0x01410000)
+#define GCOV_TAG_BLOCKS_LENGTH(NUM) (NUM)
+#define GCOV_TAG_BLOCKS_NUM(LENGTH) (LENGTH)
+#define GCOV_TAG_ARCS            ((gcov_unsigned_t)0x01430000)
+#define GCOV_TAG_ARCS_LENGTH(NUM)  (1 + (NUM) * 2)
+#define GCOV_TAG_ARCS_NUM(LENGTH)  (((LENGTH) - 1) / 2)
+#define GCOV_TAG_LINES           ((gcov_unsigned_t)0x01450000)
+#define GCOV_TAG_COUNTER_BASE    ((gcov_unsigned_t)0x01a10000)
+#define GCOV_TAG_COUNTER_LENGTH(NUM) ((NUM) * 2)
+#define GCOV_TAG_COUNTER_NUM(LENGTH) ((LENGTH) / 2)
+#define GCOV_TAG_OBJECT_SUMMARY  ((gcov_unsigned_t)0xa1000000)
+#define GCOV_TAG_PROGRAM_SUMMARY ((gcov_unsigned_t)0xa3000000)
+#define GCOV_TAG_SUMMARY_LENGTH  \
+        (1 + GCOV_COUNTERS_SUMMABLE * (2 + 3 * 2))
+#define GCOV_TAG_MODULE_INFO ((gcov_unsigned_t)0xa4000000)
+#define GCOV_TAG_PMU_LOAD_LATENCY_INFO ((gcov_unsigned_t)0xa5000000)
+#define GCOV_TAG_PMU_LOAD_LATENCY_LENGTH(filename)  \
+        (gcov_string_length (filename) + 12 + 2)
+#define GCOV_TAG_PMU_BRANCH_MISPREDICT_INFO ((gcov_unsigned_t)0xa7000000)
+#define GCOV_TAG_PMU_BRANCH_MISPREDICT_LENGTH(filename) \
+        (gcov_string_length (filename) + 5 + 2)
+#define GCOV_TAG_PMU_TOOL_HEADER ((gcov_unsigned_t)0xa9000000)
+
+/* Counters that are collected.  */
+#define GCOV_COUNTER_ARCS       0  /* Arc transitions.  */
+#define GCOV_COUNTERS_SUMMABLE  1  /* Counters which can be
+                                      summarized.  */
+#define GCOV_FIRST_VALUE_COUNTER 1 /* The first of counters used for value
+                                      profiling.  They must form a consecutive
+                                      interval and their order must match
+                                      the order of HIST_TYPEs in
+                                      value-prof.h.  */
+#define GCOV_COUNTER_V_INTERVAL 1  /* Histogram of value inside an interval.  */
+#define GCOV_COUNTER_V_POW2     2  /* Histogram of exact power2 logarithm
+                                      of a value.  */
+#define GCOV_COUNTER_V_SINGLE   3  /* The most common value of expression.  */
+#define GCOV_COUNTER_V_DELTA    4  /* The most common difference between
+                                      consecutive values of expression.  */
+
+#define GCOV_COUNTER_V_INDIR    5  /* The most common indirect address */
+#define GCOV_COUNTER_AVERAGE    6  /* Compute average value passed to the
+                                      counter.  */
+#define GCOV_COUNTER_IOR        7  /* IOR of all the values passed to
+                                      counter.  */
+#define GCOV_COUNTER_ICALL_TOPNV 8 /* Top N value tracking for indirect calls */
+#define GCOV_LAST_VALUE_COUNTER 8  /* The last of counters used for value
+                                      profiling.  */
+#define GCOV_COUNTER_DIRECT_CALL 9 /* Direct call counts.  */
+#define GCOV_COUNTER_REUSE_DIST 10 /* Reuse distance measure.  */
+#define GCOV_COUNTERS           11
+
+/* Number of counters used for value profiling.  */
+#define GCOV_N_VALUE_COUNTERS \
+  (GCOV_LAST_VALUE_COUNTER - GCOV_FIRST_VALUE_COUNTER + 1)
+
+  /* A list of human readable names of the counters */
+#define GCOV_COUNTER_NAMES {"arcs", "interval", "pow2", "single", \
+                            "delta", "indirect_call", "average", "ior", \
+                            "indirect_call_topn", "direct_call", \
+                            "reuse_distance"}
+
+#define GCOV_ICALL_TOPN_VAL  2   /* Track two hottest callees */
+#define GCOV_ICALL_TOPN_NCOUNTS  9 /* The number of counter entries per icall callsite */
+  /* Names of merge functions for counters.  */
+#define GCOV_MERGE_FUNCTIONS {"__gcov_merge_add", \
+                              "__gcov_merge_add", \
+                              "__gcov_merge_add", \
+                              "__gcov_merge_single", \
+                              "__gcov_merge_delta", \
+                              "__gcov_merge_single", \
+                              "__gcov_merge_add", \
+                              "__gcov_merge_ior", \
+                              "__gcov_merge_icall_topn",\
+                              "__gcov_merge_dc",\
+                              "__gcov_merge_reusedist" }
+
+/* Convert a counter index to a tag.
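+   For example, counter index 1 (GCOV_COUNTER_V_INTERVAL above) maps
+   to 0x01a10000 + (1 << 17) = 0x01a30000, and GCOV_COUNTER_FOR_TAG
+   recovers (0x01a30000 - 0x01a10000) >> 17 = 1.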
+   */
+#define GCOV_TAG_FOR_COUNTER(COUNT) \
+        (GCOV_TAG_COUNTER_BASE + ((gcov_unsigned_t)(COUNT) << 17))
+/* Convert a tag to a counter.  */
+#define GCOV_COUNTER_FOR_TAG(TAG) \
+        ((unsigned)(((TAG) - GCOV_TAG_COUNTER_BASE) >> 17))
+/* Check whether a tag is a counter tag.  */
+#define GCOV_TAG_IS_COUNTER(TAG) \
+        (!((TAG) & 0xFFFF) && GCOV_COUNTER_FOR_TAG (TAG) < GCOV_COUNTERS)
+
+/* The tag level mask has 1's in the position of the inner levels, &
+   the lsb of the current level, and zero on the current and outer
+   levels.  */
+#define GCOV_TAG_MASK(TAG) (((TAG) - 1) ^ (TAG))
+
+/* Return nonzero if SUB is an immediate subtag of TAG.  */
+#define GCOV_TAG_IS_SUBTAG(TAG,SUB) \
+        (GCOV_TAG_MASK (TAG) >> 8 == GCOV_TAG_MASK (SUB) \
+         && !(((SUB) ^ (TAG)) & ~GCOV_TAG_MASK(TAG)))
+
+/* Return nonzero if SUB is at a sublevel to TAG.  */
+#define GCOV_TAG_IS_SUBLEVEL(TAG,SUB) \
+        (GCOV_TAG_MASK (TAG) > GCOV_TAG_MASK (SUB))
+
+/* Basic block flags.  */
+#define GCOV_BLOCK_UNEXPECTED   (1 << 1)
+
+/* Arc flags.  */
+#define GCOV_ARC_ON_TREE        (1 << 0)
+#define GCOV_ARC_FAKE           (1 << 1)
+#define GCOV_ARC_FALLTHROUGH    (1 << 2)
+
+/* Structured records.  */
+
+/* Cumulative counter data.  */
+struct gcov_ctr_summary
+{
+  gcov_unsigned_t num;          /* number of counters.  */
+  gcov_unsigned_t runs;         /* number of program runs */
+  gcov_type sum_all;            /* sum of all counters accumulated.  */
+  gcov_type run_max;            /* maximum value on a single run.  */
+  gcov_type sum_max;            /* sum of individual run max values.  */
+};
+
+/* Object & program summary record.  */
+struct gcov_summary
+{
+  gcov_unsigned_t checksum;     /* checksum of program */
+  struct gcov_ctr_summary ctrs[GCOV_COUNTERS_SUMMABLE];
+};
+
+#define GCOV_MODULE_UNKNOWN_LANG  0
+#define GCOV_MODULE_C_LANG    1
+#define GCOV_MODULE_CPP_LANG  2
+#define GCOV_MODULE_FORT_LANG 3
+
+#define GCOV_MODULE_ASM_STMTS (1 << 16)
+#define GCOV_MODULE_LANG_MASK 0xffff
+
+enum print_newline {no_newline, add_newline};
+
+/* Source module info.  The data structure is used in
+   both runtime and profile-use phase.  Make sure to allocate
+   enough space for the variable length member.  */
+struct gcov_module_info
+{
+  gcov_unsigned_t ident;
+  gcov_unsigned_t is_primary;   /* this is overloaded to mean two things:
+                                   (1) means FDO/LIPO in instrumented binary.
+                                   (2) means IS_PRIMARY in persistent file or
+                                       memory copy used in profile-use.  */
+  gcov_unsigned_t is_exported;
+  gcov_unsigned_t lang;         /* lower 16 bits encode the language, and the upper
+                                   16 bits encode other attributes, such as whether
+                                   any assembler is present in the source, etc.  */
+  char *da_filename;
+  char *source_filename;
+  gcov_unsigned_t num_quote_paths;
+  gcov_unsigned_t num_bracket_paths;
+  gcov_unsigned_t num_cpp_defines;
+  gcov_unsigned_t num_cpp_includes;
+  gcov_unsigned_t num_cl_args;
+  char *string_array[1];
+};
+
+extern struct gcov_module_info **module_infos;
+extern unsigned primary_module_id;
+#define PRIMARY_MODULE_EXPORTED \
+  (module_infos[0]->is_exported \
+   && !((module_infos[0]->lang & GCOV_MODULE_ASM_STMTS) \
+        && flag_ripa_disallow_asm_modules))
+
+/* Information about the hardware performance monitoring unit.  */
+struct gcov_pmu_info
+{
+  const char *pmu_profile_filename;  /* pmu profile filename */
+  const char *pmu_tool;              /* canonical pmu tool options */
+  gcov_unsigned_t pmu_top_n_address; /* how many top addresses to symbolize */
+};
+
+/* Information about the PMU tool header.
+   */
+typedef struct gcov_pmu_tool_header {
+  char *host_cpu;
+  char *hostname;
+  char *kernel_version;
+  char *column_header;
+  char *column_description;
+  char *full_header;
+} gcov_pmu_tool_header_t;
+
+/* Available only for PMUs which support PEBS or IBS using pfmon
+   tool.  If any field here is changed, the length computation in
+   GCOV_TAG_PMU_LOAD_LATENCY_LENGTH must be updated as well.  All
+   percentages are multiplied by 100 to make them out of 10000 and
+   only the integer part is kept.  */
+typedef struct gcov_pmu_load_latency_info
+{
+  gcov_unsigned_t counts;        /* raw count of samples */
+  gcov_unsigned_t self;          /* per 10k of total samples */
+  gcov_unsigned_t cum;           /* per 10k cumulative weight */
+  gcov_unsigned_t lt_10;         /* per 10k with latency <= 10 cycles */
+  gcov_unsigned_t lt_32;         /* per 10k with latency <= 32 cycles */
+  gcov_unsigned_t lt_64;         /* per 10k with latency <= 64 cycles */
+  gcov_unsigned_t lt_256;        /* per 10k with latency <= 256 cycles */
+  gcov_unsigned_t lt_1024;       /* per 10k with latency <= 1024 cycles */
+  gcov_unsigned_t gt_1024;       /* per 10k with latency > 1024 cycles */
+  gcov_unsigned_t wself;         /* weighted average cost of this miss in cycles */
+  gcov_type code_addr;           /* the actual miss address (pc+1 for Intel) */
+  gcov_unsigned_t line;          /* line number corresponding to this miss */
+  gcov_unsigned_t discriminator; /* discriminator information for this miss */
+  char *filename;                /* filename corresponding to this miss */
+} gcov_pmu_ll_info_t;
+
+/* This structure is used during runtime as well as in gcov.  */
+typedef struct load_latency_infos
+{
+  /* An array describing the total number of load latency fields.  */
+  gcov_pmu_ll_info_t **ll_array;
+  /* The total number of entries in the load latency array.  */
+  unsigned ll_count;
+  /* The total number of entries currently allocated in the array.
+     Used for bookkeeping.  */
+  unsigned alloc_ll_count;
+  /* PMU tool header */
+  gcov_pmu_tool_header_t *pmu_tool_header;
+} ll_infos_t;
+
+/* Available only for PMUs which support PEBS or IBS using pfmon
+   tool.  If any field here is changed, the length computation in
+   GCOV_TAG_PMU_BRANCH_MISPREDICT_LENGTH must be updated as well.  All
+   percentages are multiplied by 100 to make them out of 10000 and
+   only the integer part is kept.  */
+typedef struct gcov_pmu_branch_mispredict_info
+{
+  gcov_unsigned_t counts;        /* raw count of samples */
+  gcov_unsigned_t self;          /* per 10k of total samples */
+  gcov_unsigned_t cum;           /* per 10k cumulative weight */
+  gcov_type code_addr;           /* the actual mispredict address */
+  gcov_unsigned_t line;          /* line number corresponding to this event */
+  gcov_unsigned_t discriminator; /* discriminator for this event */
+  char *filename;                /* filename corresponding to this event */
+} gcov_pmu_brm_info_t;
+
+/* This structure is used during runtime as well as in gcov.  */
+typedef struct branch_mispredict_infos
+{
+  /* An array describing the total number of mispredict entries.  */
+  gcov_pmu_brm_info_t **brm_array;
+  /* The total number of entries in the above array.  */
+  unsigned brm_count;
+  /* The total number of entries currently allocated in the array.
+     Used for bookkeeping.  */
+  unsigned alloc_brm_count;
+  /* PMU tool header */
+  gcov_pmu_tool_header_t *pmu_tool_header;
+} brm_infos_t;
+
+/* Structures embedded in the covered program.  The structures generated
+   by write_profile must match these.  */
+
+#if IN_LIBGCOV
+/* Information about a single function.  This uses the trailing array
+   idiom.  The number of counters is determined from the ctr_mask
+   in gcov_info.  We hold an array of function info, so have to
+   explicitly calculate the correct array stride.
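+   A sketch of that stride computation (the names here are
+   illustrative, not declarations from this header):
+
+     num_ctr_types = number of bits set in gcov_info.ctr_mask;
+     stride = sizeof (struct gcov_fn_info)
+              + num_ctr_types * sizeof (unsigned);
+     stride is then rounded up to the alignment of struct gcov_fn_info.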
We hold an array of function info, so have to + explicitly calculate the correct array stride. */ + +struct gcov_fn_info +{ + gcov_unsigned_t ident; /* unique ident of function */ + gcov_unsigned_t lineno_checksum; /* function lineo_checksum */ + gcov_unsigned_t cfg_checksum; /* function cfg checksum */ + gcov_unsigned_t dc_offset; /* direct call offset */ + unsigned n_ctrs[0]; /* instrumented counters */ +}; + +/* Type of function used to merge counters. */ +typedef void (*gcov_merge_fn) (gcov_type *, gcov_unsigned_t); + +/* Information about counters. */ +struct gcov_ctr_info +{ + gcov_unsigned_t num; /* number of counters. */ + gcov_type *values; /* their values. */ + gcov_merge_fn merge; /* The function used to merge them. */ +}; + +/* Information about a single object file. */ +struct gcov_info +{ + gcov_unsigned_t version; /* expected version number */ + struct gcov_module_info *mod_info; /* addtional module info. */ + struct gcov_info *next; /* link to next, used by libgcov */ + + gcov_unsigned_t stamp; /* uniquifying time stamp */ + const char *filename; /* output file name */ + gcov_unsigned_t eof_pos; /* end position of profile data */ + unsigned n_functions; /* number of functions */ + const struct gcov_fn_info *functions; /* table of functions */ + + unsigned ctr_mask; /* mask of counters instrumented. */ + struct gcov_ctr_info counts[0]; /* count data. The number of bits + set in the ctr_mask field + determines how big this array + is. */ +}; + +/* Information about a single imported module. */ +struct dyn_imp_mod +{ + const struct gcov_info *imp_mod; + double weight; +}; + +/* Register a new object file module. */ +extern void __gcov_init (struct gcov_info *) ATTRIBUTE_HIDDEN; + +/* Called before fork, to avoid double counting. */ +extern void __gcov_flush (void) ATTRIBUTE_HIDDEN; + +/* The merge function that just sums the counters. */ +extern void __gcov_merge_add (gcov_type *, unsigned) ATTRIBUTE_HIDDEN; + +/* The merge function to choose the most common value. */ +extern void __gcov_merge_single (gcov_type *, unsigned) ATTRIBUTE_HIDDEN; + +/* The merge function to choose the most common difference between + consecutive values. */ +extern void __gcov_merge_delta (gcov_type *, unsigned) ATTRIBUTE_HIDDEN; + +/* The merge function that just ors the counters together. */ +extern void __gcov_merge_ior (gcov_type *, unsigned) ATTRIBUTE_HIDDEN; + +/* The merge function used for direct call counters. */ +extern void __gcov_merge_dc (gcov_type *, unsigned) ATTRIBUTE_HIDDEN; + +/* The merge function used for reuse distance counters. */ +extern void __gcov_merge_reusedist (gcov_type *, unsigned) ATTRIBUTE_HIDDEN; + +/* The merge function used for indirect call counters. */ +extern void __gcov_merge_icall_topn (gcov_type *, unsigned) ATTRIBUTE_HIDDEN; + +/* The profiler functions. 
+   */
+extern void __gcov_interval_profiler (gcov_type *, gcov_type, int, unsigned);
+extern void __gcov_pow2_profiler (gcov_type *, gcov_type);
+extern void __gcov_one_value_profiler (gcov_type *, gcov_type);
+extern void __gcov_indirect_call_profiler (gcov_type *, gcov_type, void *, void *);
+extern void __gcov_indirect_call_topn_profiler (void *, void *, gcov_unsigned_t) ATTRIBUTE_HIDDEN;
+extern void __gcov_direct_call_profiler (void *, void *, gcov_unsigned_t) ATTRIBUTE_HIDDEN;
+extern void __gcov_average_profiler (gcov_type *, gcov_type);
+extern void __gcov_ior_profiler (gcov_type *, gcov_type);
+extern void __gcov_sort_n_vals (gcov_type *value_array, int n);
+
+/* Initialize/start/stop/dump the performance monitoring unit (PMU)
+   profile.  */
+void __gcov_init_pmu_profiler (struct gcov_pmu_info *) ATTRIBUTE_HIDDEN;
+void __gcov_start_pmu_profiler (void) ATTRIBUTE_HIDDEN;
+void __gcov_stop_pmu_profiler (void) ATTRIBUTE_HIDDEN;
+void __gcov_end_pmu_profiler (int gcda_error) ATTRIBUTE_HIDDEN;
+
+#ifndef inhibit_libc
+/* The wrappers around some library functions.  */
+extern pid_t __gcov_fork (void) ATTRIBUTE_HIDDEN;
+extern int __gcov_execl (const char *, char *, ...) ATTRIBUTE_HIDDEN;
+extern int __gcov_execlp (const char *, char *, ...) ATTRIBUTE_HIDDEN;
+extern int __gcov_execle (const char *, char *, ...) ATTRIBUTE_HIDDEN;
+extern int __gcov_execv (const char *, char *const []) ATTRIBUTE_HIDDEN;
+extern int __gcov_execvp (const char *, char *const []) ATTRIBUTE_HIDDEN;
+extern int __gcov_execve (const char *, char *const [], char *const [])
+  ATTRIBUTE_HIDDEN;
+#endif
+
+#endif /* IN_LIBGCOV */
+
+#if IN_LIBGCOV >= 0
+
+/* Optimum number of gcov_unsigned_t's read from or written to disk.  */
+#define GCOV_BLOCK_SIZE (1 << 10)
+
+struct gcov_var
+{
+  _GCOV_FILE *file;
+  gcov_position_t start; /* Position of first byte of block.  */
+  unsigned offset;       /* Read/write position within the block.  */
+  unsigned length;       /* Read limit in the block.  */
+  unsigned overread;     /* Number of words overread.  */
+  int error;             /* < 0 overflow, > 0 disk error.  */
+  int mode;              /* < 0 writing, > 0 reading.  */
+#if IN_LIBGCOV
+  /* Holds one block plus 4 bytes, thus all coverage reads & writes
+     fit within this buffer and we can always transfer GCOV_BLOCK_SIZE
+     to and from the disk.  libgcov never backtracks and only writes 4
+     or 8 byte objects.  */
+  gcov_unsigned_t buffer[GCOV_BLOCK_SIZE + 1];
+#else
+  int endian; /* Swap endianness.  */
+  /* Holds a variable length block, as the compiler can write
+     strings and needs to backtrack.  */
+  size_t alloc;
+  gcov_unsigned_t *buffer;
+#endif
+};
+
+/* In kernel mode, move the gcov_var definition to gcov-io.c
+   to avoid duplicate definitions.  */
+#ifndef __GCOV_KERNEL__
+GCOV_LINKAGE struct gcov_var gcov_var ATTRIBUTE_HIDDEN;
+#else
+extern struct gcov_var gcov_var;
+#endif
+
+/* Functions for reading and writing gcov files.  In libgcov you can
+   open the file for reading then writing.  Elsewhere you can open the
+   file either for reading or for writing.  When reading a file you may
+   use the gcov_read_* functions, gcov_sync, gcov_position, &
+   gcov_error.  When writing a file you may use the gcov_write
+   functions, gcov_seek & gcov_error.  When a file is to be rewritten
+   you use the functions for reading, then gcov_rewrite, then the
+   functions for writing.  Your file may become corrupted if you break
+   these invariants.
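+
+   A typical libgcov update cycle, as a sketch (error handling
+   omitted; names as declared below):
+
+     if (gcov_open (name))
+       {
+         ... gcov_read_unsigned () / gcov_read_counter () ...
+         gcov_rewrite ();
+         ... gcov_write_unsigned () / gcov_write_counter () ...
+         gcov_close ();
+       }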
*/ +#if IN_LIBGCOV +GCOV_LINKAGE int gcov_open (const char */*name*/) ATTRIBUTE_HIDDEN; +#else +GCOV_LINKAGE int gcov_open (const char */*name*/, int /*direction*/); +GCOV_LINKAGE int gcov_magic (gcov_unsigned_t, gcov_unsigned_t); +#endif +GCOV_LINKAGE int gcov_close (void) ATTRIBUTE_HIDDEN; + +/* Available everywhere. */ +static gcov_position_t gcov_position (void); +static int gcov_is_error (void); + +GCOV_LINKAGE const char *gcov_read_string (void) ATTRIBUTE_HIDDEN; +GCOV_LINKAGE gcov_unsigned_t gcov_read_unsigned (void) ATTRIBUTE_HIDDEN; +GCOV_LINKAGE gcov_type gcov_read_counter (void) ATTRIBUTE_HIDDEN; +GCOV_LINKAGE void gcov_read_summary (struct gcov_summary *) ATTRIBUTE_HIDDEN; +GCOV_LINKAGE char *gcov_canonical_filename (char *filename) ATTRIBUTE_HIDDEN; +GCOV_LINKAGE void +gcov_read_pmu_load_latency_info (gcov_pmu_ll_info_t *ll_info, + gcov_unsigned_t len) ATTRIBUTE_HIDDEN; +GCOV_LINKAGE void +gcov_read_pmu_branch_mispredict_info (gcov_pmu_brm_info_t *brm_info, + gcov_unsigned_t len) ATTRIBUTE_HIDDEN; +GCOV_LINKAGE void +gcov_read_pmu_tool_header (gcov_pmu_tool_header_t *tool_header, + gcov_unsigned_t len) ATTRIBUTE_HIDDEN; +#ifndef __GCOV_KERNEL__ +GCOV_LINKAGE float convert_unsigned_to_pct ( + const unsigned number) ATTRIBUTE_HIDDEN; +#endif /* __GCOV_KERNEL__ */ + +#if !IN_LIBGCOV && IN_GCOV != 1 +GCOV_LINKAGE void gcov_read_module_info (struct gcov_module_info *mod_info, + gcov_unsigned_t len) ATTRIBUTE_HIDDEN; +GCOV_LINKAGE void print_load_latency_line (FILE *fp, + const gcov_pmu_ll_info_t *ll_info, + const enum print_newline); +GCOV_LINKAGE void +print_branch_mispredict_line (FILE *fp, const gcov_pmu_brm_info_t *brm_info, + const enum print_newline); +GCOV_LINKAGE void print_pmu_tool_header (FILE *fp, + gcov_pmu_tool_header_t *tool_header, + const enum print_newline); +#endif + +#if IN_GCOV != 1 +GCOV_LINKAGE void destroy_pmu_tool_header (gcov_pmu_tool_header_t *tool_header) + ATTRIBUTE_HIDDEN; +#endif + +#if IN_LIBGCOV +/* Available only in libgcov */ +GCOV_LINKAGE void gcov_write_counter (gcov_type) ATTRIBUTE_HIDDEN; +GCOV_LINKAGE void gcov_write_tag_length (gcov_unsigned_t, gcov_unsigned_t) + ATTRIBUTE_HIDDEN; +GCOV_LINKAGE void gcov_write_summary (gcov_unsigned_t /*tag*/, + const struct gcov_summary *) + ATTRIBUTE_HIDDEN; + +GCOV_LINKAGE void gcov_write_module_infos (struct gcov_info *mod_info) + ATTRIBUTE_HIDDEN; +GCOV_LINKAGE const struct dyn_imp_mod ** +gcov_get_sorted_import_module_array (struct gcov_info *mod_info, unsigned *len) + ATTRIBUTE_HIDDEN; +static void gcov_rewrite (void); +GCOV_LINKAGE void gcov_seek (gcov_position_t /*position*/) ATTRIBUTE_HIDDEN; +GCOV_LINKAGE void gcov_truncate (void) ATTRIBUTE_HIDDEN; +GCOV_LINKAGE gcov_unsigned_t gcov_string_length (const char *) ATTRIBUTE_HIDDEN; +GCOV_LINKAGE unsigned gcov_gcda_file_size (struct gcov_info *); +#else +/* Available outside libgcov */ +GCOV_LINKAGE void gcov_sync (gcov_position_t /*base*/, + gcov_unsigned_t /*length */); +#endif + +#if !IN_GCOV +/* Available outside gcov */ +GCOV_LINKAGE void gcov_write_unsigned (gcov_unsigned_t) ATTRIBUTE_HIDDEN; +GCOV_LINKAGE void gcov_write_string (const char *) ATTRIBUTE_HIDDEN; +#endif + +#if !IN_GCOV && !IN_LIBGCOV +/* Available only in compiler */ +GCOV_LINKAGE gcov_position_t gcov_write_tag (gcov_unsigned_t); +GCOV_LINKAGE void gcov_write_length (gcov_position_t /*position*/); +#endif + +#if IN_GCOV > 0 +/* Available in gcov */ +GCOV_LINKAGE time_t gcov_time (void); +#endif + +/* Save the current position in the gcov file. 
*/ + +static inline gcov_position_t +gcov_position (void) +{ + return gcov_var.start + gcov_var.offset; +} + +/* Return nonzero if the error flag is set. */ + +static inline int +gcov_is_error (void) +{ + return gcov_var.file ? gcov_var.error : 1; +} + +#if IN_LIBGCOV +/* Move to beginning of file and initialize for writing. */ + +static inline void +gcov_rewrite (void) +{ + gcc_assert (gcov_var.mode > 0); + gcov_var.mode = -1; + gcov_var.start = 0; + gcov_var.offset = 0; + _GCOV_fseek (gcov_var.file, 0L, SEEK_SET); +} +#endif + +#endif /* IN_LIBGCOV >= 0 */ + +#endif /* GCC_GCOV_IO_H */ diff --git a/lib/gcc/i686-linux-android/4.6/gcov-src/gcov-iov.h b/lib/gcc/i686-linux-android/4.6/gcov-src/gcov-iov.h new file mode 100644 index 0000000..3c8c9ef --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/gcov-src/gcov-iov.h @@ -0,0 +1,4 @@ +/* Generated automatically by the program `build/gcov-iov' + from `4.6 (4 6) and prerelease (p)'. */ + +#define GCOV_VERSION ((gcov_unsigned_t)0x34303670) /* 406p */ diff --git a/lib/gcc/i686-linux-android/4.6/gcov-src/libgcov.c b/lib/gcc/i686-linux-android/4.6/gcov-src/libgcov.c new file mode 100644 index 0000000..613c1f4 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/gcov-src/libgcov.c @@ -0,0 +1,1989 @@ +/* Routines required for instrumenting a program. */ +/* Compile this one with gcc. */ +/* Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, + 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009, 2010 + Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +<http://www.gnu.org/licenses/>. */ + +/* Assume compiling for Linux Kernel if __KERNEL__ is defined. */ +#ifdef __KERNEL__ + /* Define MACROs to be used by kernel compilation. */ +# define L_gcov +# define L_gcov_interval_profiler +# define L_gcov_pow2_profiler +# define L_gcov_one_value_profiler +# define L_gcov_indirect_call_profiler +# define L_gcov_average_profiler +# define L_gcov_ior_profiler + +# define HAVE_CC_TLS 0 +# define __GCOV_KERNEL__ + +# define IN_LIBGCOV 1 +# define IN_GCOV 0 +#else /* __KERNEL__ */ +#include "tconfig.h" +#include "tsystem.h" +#include "coretypes.h" +#include "tm.h" +#endif /* __KERNEL__ */ + +#if 1 +#define THREAD_PREFIX __thread +#else +#define THREAD_PREFIX +#endif + +#ifndef __GCOV_KERNEL__ +#if defined(inhibit_libc) +#define IN_LIBGCOV (-1) +#else +#undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */ +#include <stdio.h> +#define IN_LIBGCOV 1 +#if defined(L_gcov) +#define GCOV_LINKAGE /* nothing */ +#endif +#endif +#endif /* __GCOV_KERNEL__ */ + +#include "gcov-io.h" + +#if defined(inhibit_libc) +/* If libc and its header files are not available, provide dummy functions. 
*/
+
+#ifdef L_gcov
+void __gcov_init (struct gcov_info *p __attribute__ ((unused))) {}
+void __gcov_flush (void) {}
+#endif
+
+#ifdef L_gcov_merge_add
+void __gcov_merge_add (gcov_type *counters __attribute__ ((unused)),
+                       unsigned n_counters __attribute__ ((unused))) {}
+#endif
+
+#ifdef L_gcov_merge_single
+void __gcov_merge_single (gcov_type *counters __attribute__ ((unused)),
+                          unsigned n_counters __attribute__ ((unused))) {}
+#endif
+
+#ifdef L_gcov_merge_delta
+void __gcov_merge_delta (gcov_type *counters __attribute__ ((unused)),
+                         unsigned n_counters __attribute__ ((unused))) {}
+#endif
+
+#else
+
+#ifndef __GCOV_KERNEL__
+#include <string.h>
+#if GCOV_LOCKED
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/stat.h>
+#endif
+#endif /* !__GCOV_KERNEL__ */
+
+#ifdef L_gcov
+#include "gcov-io.c"
+
+/* Utility function for outputting errors.  */
+static int
+gcov_error (const char *fmt, ...)
+{
+  int ret;
+  va_list argp;
+  va_start (argp, fmt);
+#ifdef __GCOV_KERNEL__
+  ret = vprintk (fmt, argp);
+#else
+  ret = vfprintf (stderr, fmt, argp);
+#endif
+  va_end (argp);
+  return ret;
+}
+
+#ifndef __GCOV_KERNEL__
+/* Emitted in coverage.c.  */
+extern char * __gcov_pmu_profile_filename;
+extern char * __gcov_pmu_profile_options;
+extern gcov_unsigned_t __gcov_pmu_top_n_address;
+
+/* Sampling rate.  */
+extern gcov_unsigned_t __gcov_sampling_rate;
+static int gcov_sampling_rate_initialized = 0;
+void __gcov_set_sampling_rate (unsigned int rate);
+
+/* Set sampling rate to RATE.  */
+
+void __gcov_set_sampling_rate (unsigned int rate)
+{
+  __gcov_sampling_rate = rate;
+}
+
+/* Per thread sample counter.  */
+THREAD_PREFIX gcov_unsigned_t __gcov_sample_counter = 0;
+
+/* Chain of per-object gcov structures.  */
+extern struct gcov_info *__gcov_list;
+
+/* Size of the longest file name.  */
+static size_t gcov_max_filename = 0;
+#endif /* !__GCOV_KERNEL__ */
+
+/* Unique identifier assigned to each module (object file).  */
+static gcov_unsigned_t gcov_cur_module_id = 0;
+
+/* Pointer to the direct-call counters (per call-site counters).
+   Initialized by the caller.  */
+THREAD_PREFIX gcov_type *__gcov_direct_call_counters ATTRIBUTE_HIDDEN;
+
+/* Direct call callee address.  */
+THREAD_PREFIX void *__gcov_direct_call_callee ATTRIBUTE_HIDDEN;
+
+/* Pointer to the indirect-call counters (per call-site counters).
+   Initialized by the caller.  */
+THREAD_PREFIX gcov_type *__gcov_indirect_call_topn_counters ATTRIBUTE_HIDDEN;
+
+/* Indirect call callee address.  */
+THREAD_PREFIX void *__gcov_indirect_call_topn_callee ATTRIBUTE_HIDDEN;
+
+/* A program checksum allows us to distinguish program data for an
+   object file included in multiple programs.  */
+static gcov_unsigned_t gcov_crc32;
+
+/* Build the dynamic call graph and form module groups.  */
+void __gcov_compute_module_groups (void) ATTRIBUTE_HIDDEN;
+void __gcov_finalize_dyn_callgraph (void) ATTRIBUTE_HIDDEN;
+
+/* Profile summary for the gcda file, used for sanity checking.  */
+static struct gcov_summary all;
+
+/* Profile summary for this program in the current execution.  */
+static struct gcov_summary this_program;
+
+/* Profile summary for this object in the current execution.  */
+static struct gcov_summary this_object;
+
+/* Merged profile summary for this program.  */
+static struct gcov_summary program;
+
+/* Merged profile summary for this object.  */
+static struct gcov_summary object;
+
+/* Record the position of the summary info.  */
+static gcov_position_t summary_pos = 0;
+
+/* Record the position of eof.
*/ +static gcov_position_t eof_pos = 0; + +/* Number of chars in prefix to be stripped. */ +static int gcov_prefix_strip = 0; + +/* The length of path prefix. */ +static size_t prefix_length = 0; + +/* gi_filename is current object filename. + gi_filename_up points to the stripped filename. */ +static char *gi_filename, *gi_filename_up; + +static int gcov_open_by_filename (char * gi_filename); +static int gcov_exit_init (void); +static void gcov_dump_one_gcov (struct gcov_info *gi_ptr); + +/* Make sure path component of the given FILENAME exists, create + missing directories. FILENAME must be writable. + Returns zero on success, or -1 if an error occurred. */ + +static int +create_file_directory (char *filename) +{ +#if !defined(TARGET_POSIX_IO) && !defined(_WIN32) + (void) filename; + return -1; +#else + char *s; + + s = filename; + + if (HAS_DRIVE_SPEC(s)) + s += 2; + if (IS_DIR_SEPARATOR(*s)) + ++s; + for (; *s != '\0'; s++) + if (IS_DIR_SEPARATOR(*s)) + { + char sep = *s; + *s = '\0'; + + /* Try to make directory if it doesn't already exist. */ + if (access (filename, F_OK) == -1 +#ifdef TARGET_POSIX_IO + && mkdir (filename, 0755) == -1 +#else + && mkdir (filename) == -1 +#endif + /* The directory might have been made by another process. */ + && errno != EEXIST) + { + fprintf (stderr, "profiling:%s:Cannot create directory\n", + filename); + *s = sep; + return -1; + }; + + *s = sep; + }; + return 0; +#endif +} + +/* Open a file with the specified name. */ + +static int +gcov_open_by_filename (char * gi_filename) +{ + if (!gcov_open (gi_filename)) + { + /* Open failed likely due to missed directory. + Create directory and retry to open file. */ + if (create_file_directory (gi_filename)) + { + gcov_error ("profiling:%s:Skip\n", gi_filename); + return -1; + } + if (!gcov_open (gi_filename)) + { + gcov_error ("profiling:%s:Cannot open\n", gi_filename); + return -1; + } + } + return 0; +} + + +/* Determine whether a counter is active. */ + +static inline int +gcov_counter_active (const struct gcov_info *info, unsigned int type) +{ + return (1 << type) & info->ctr_mask; +} + +#ifndef __GCOV_KERNEL__ +/* Check if VERSION of the info block PTR matches libgcov one. + Return 1 on success, or zero in case of versions mismatch. + If FILENAME is not NULL, its value used for reporting purposes + instead of value from the info block. */ + +static int +gcov_version (struct gcov_info *ptr __attribute__ ((unused)), + gcov_unsigned_t version, const char *filename) +{ + if (version != GCOV_VERSION) + { + char v[4], e[4]; + + GCOV_UNSIGNED2STRING (v, version); + GCOV_UNSIGNED2STRING (e, GCOV_VERSION); + + if (filename) + gcov_error ("profiling:%s:Version mismatch - expected %.4s got %.4s\n", + filename, e, v); + else + gcov_error ("profiling:Version mismatch - expected %.4s got %.4s\n", e, v); + return 0; + } + return 1; +} + +#define GCOV_GET_FILENAME gcov_strip_leading_dirs + +/* Strip GCOV_PREFIX_STRIP levels of leading '/' from FILENAME and + put the result into GI_FILENAME_UP. */ + +static void +gcov_strip_leading_dirs (int prefix_length, int gcov_prefix_strip, + const char *filename, char *gi_filename_up) +{ + /* Avoid to add multiple drive letters into combined path. */ + if (prefix_length != 0 && HAS_DRIVE_SPEC(filename)) + filename += 2; + + /* Build relocated filename, stripping off leading + directories from the initial filename if requested. */ + if (gcov_prefix_strip > 0) + { + int level = 0; + const char *s = filename; + if (IS_DIR_SEPARATOR(*s)) + ++s; + + /* Skip selected directory levels. 
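+     For example, with GCOV_PREFIX=/target and GCOV_PREFIX_STRIP=2,
+     an original name of /build/obj/foo.gcda is reduced to /foo.gcda
+     here and later relocated to /target/foo.gcda.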
+     */
+      for (; (*s != '\0') && (level < gcov_prefix_strip); s++)
+        if (IS_DIR_SEPARATOR (*s))
+          {
+            filename = s;
+            level++;
+          }
+    }
+  /* Update the complete filename with the stripped original.  */
+  if (prefix_length != 0 && !IS_DIR_SEPARATOR (*filename))
+    {
+      /* If a prefix is given, add a directory separator.  */
+      strcpy (gi_filename_up, "/");
+      strcpy (gi_filename_up + 1, filename);
+    }
+  else
+    strcpy (gi_filename_up, filename);
+}
+
+/* This function allocates the space to store the current file name.  */
+
+static void
+gcov_alloc_filename (void)
+{
+  /* Get file name relocation prefix.  Non-absolute values are ignored.  */
+  char *gcov_prefix = 0;
+
+  prefix_length = 0;
+  gcov_prefix_strip = 0;
+
+  {
+    /* Check if the level of dirs to strip off is specified.  */
+    char *tmp = getenv ("GCOV_PREFIX_STRIP");
+    if (tmp)
+      {
+        gcov_prefix_strip = atoi (tmp);
+        /* Do not consider negative values.  */
+        if (gcov_prefix_strip < 0)
+          gcov_prefix_strip = 0;
+      }
+  }
+  /* Get file name relocation prefix.  Non-absolute values are ignored.  */
+  gcov_prefix = getenv ("GCOV_PREFIX");
+  if (gcov_prefix)
+    {
+      prefix_length = strlen (gcov_prefix);
+
+      /* Remove an unnecessary trailing '/'.  */
+      if (IS_DIR_SEPARATOR (gcov_prefix[prefix_length - 1]))
+        prefix_length--;
+    }
+  else
+    prefix_length = 0;
+
+  /* If no prefix was specified but a prefix strip was, assume the
+     prefix is relative.  */
+  if (gcov_prefix_strip != 0 && prefix_length == 0)
+    {
+      gcov_prefix = ".";
+      prefix_length = 1;
+    }
+
+  /* Allocate and initialize the filename scratch space.  */
+  gi_filename = (char *) malloc (prefix_length + gcov_max_filename + 2);
+  if (prefix_length)
+    memcpy (gi_filename, gcov_prefix, prefix_length);
+
+  gi_filename_up = gi_filename + prefix_length;
+}
+
+/* Stop the pmu profiler and dump pmu profile info into the global file.  */
+
+static void
+pmu_profile_stop (void)
+{
+  const char *pmu_profile_filename = __gcov_pmu_profile_filename;
+  const char *pmu_options = __gcov_pmu_profile_options;
+  size_t filename_length;
+  int gcda_error;
+
+  if (!pmu_profile_filename || !pmu_options)
+    return;
+
+  __gcov_stop_pmu_profiler ();
+
+  filename_length = strlen (pmu_profile_filename);
+  if (filename_length > gcov_max_filename)
+    gcov_max_filename = filename_length;
+  /* Allocate and initialize the filename scratch space.  */
+  gcov_alloc_filename ();
+  GCOV_GET_FILENAME (prefix_length, gcov_prefix_strip, pmu_profile_filename,
+                     gi_filename_up);
+  /* Open the gcda file for writing.  We don't support merge yet.  */
+  gcda_error = gcov_open_by_filename (gi_filename);
+  __gcov_end_pmu_profiler (gcda_error);
+  if ((gcda_error = gcov_close ()))
+    gcov_error (gcda_error < 0 ? "pmu_profile_stop:%s:Overflow writing\n"
+                : "pmu_profile_stop:%s:Error writing\n",
+                gi_filename);
+}
+
+/* Sort the N-element VALUE_ARRAY in descending order.  Each entry in
+   VALUE_ARRAY is a (value, count) pair occupying two elements; the
+   sorting is based on the second (count) value.  */
+
+GCOV_LINKAGE void
+gcov_sort_n_vals (gcov_type *value_array, int n)
+{
+  int j, k;
+  for (j = 2; j < n; j += 2)
+    {
+      gcov_type cur_ent[2];
+      cur_ent[0] = value_array[j];
+      cur_ent[1] = value_array[j + 1];
+      k = j - 2;
+      while (k >= 0 && value_array[k + 1] < cur_ent[1])
+        {
+          value_array[k + 2] = value_array[k];
+          value_array[k + 3] = value_array[k + 1];
+          k -= 2;
+        }
+      value_array[k + 2] = cur_ent[0];
+      value_array[k + 3] = cur_ent[1];
+    }
+}
+
+/* Sort the profile counters for all indirect call sites.  Counters
+   for each call site are allocated in array COUNTERS.
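+   As a worked example, gcov_sort_n_vals above turns the six-element
+   pair array { 7, 2, 9, 5, 3, 1 } into { 9, 5, 7, 2, 3, 1 },
+   ordering the (value, count) pairs by descending count.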
*/ + +static void +gcov_sort_icall_topn_counter (const struct gcov_ctr_info *counters) +{ + int i; + gcov_type *values; + int n = counters->num; + gcc_assert (!(n % GCOV_ICALL_TOPN_NCOUNTS)); + + values = counters->values; + + for (i = 0; i < n; i += GCOV_ICALL_TOPN_NCOUNTS) + { + gcov_type *value_array = &values[i + 1]; + gcov_sort_n_vals (value_array, GCOV_ICALL_TOPN_NCOUNTS - 1); + } +} + +/* Write imported files (auxiliary modules) for primary module GI_PTR + into file GI_FILENAME. */ + +static void +gcov_write_import_file (char *gi_filename, struct gcov_info *gi_ptr) +{ + char *gi_imports_filename; + const char *gcov_suffix; + FILE *imports_file; + size_t prefix_length, suffix_length; + + gcov_suffix = getenv ("GCOV_IMPORTS_SUFFIX"); + if (!gcov_suffix || !strlen (gcov_suffix)) + gcov_suffix = ".imports"; + suffix_length = strlen (gcov_suffix); + prefix_length = strlen (gi_filename); + gi_imports_filename = (char *) alloca (prefix_length + suffix_length + 1); + memset (gi_imports_filename, 0, prefix_length + suffix_length + 1); + memcpy (gi_imports_filename, gi_filename, prefix_length); + memcpy (gi_imports_filename + prefix_length, gcov_suffix, suffix_length); + imports_file = fopen (gi_imports_filename, "w"); + if (imports_file) + { + const struct dyn_imp_mod **imp_mods; + unsigned i, imp_len; + imp_mods = gcov_get_sorted_import_module_array (gi_ptr, &imp_len); + if (imp_mods) + { + for (i = 0; i < imp_len; i++) + { + fprintf (imports_file, "%s\n", + imp_mods[i]->imp_mod->mod_info->source_filename); + fprintf (imports_file, "%s%s\n", + imp_mods[i]->imp_mod->mod_info->da_filename, GCOV_DATA_SUFFIX); + } + free (imp_mods); + } + fclose (imports_file); + } +} + +static void +gcov_dump_module_info (void) +{ + struct gcov_info *gi_ptr; + + __gcov_compute_module_groups (); + + /* Now write out module group info. */ + for (gi_ptr = __gcov_list; gi_ptr; gi_ptr = gi_ptr->next) + { + int error; + + GCOV_GET_FILENAME (prefix_length, gcov_prefix_strip, gi_ptr->filename, + gi_filename_up); + error = gcov_open_by_filename (gi_filename); + if (error != 0) + continue; + + /* Overwrite the zero word at the of the file. */ + gcov_rewrite (); + gcov_seek (gi_ptr->eof_pos); + + gcov_write_module_infos (gi_ptr); + gcov_truncate (); + + if ((error = gcov_close ())) + gcov_error (error < 0 ? "profiling:%s:Overflow writing\n" : + "profiling:%s:Error writing\n", + gi_filename); + gcov_write_import_file (gi_filename, gi_ptr); + } + __gcov_finalize_dyn_callgraph (); +} + +/* Dump the coverage counts. We merge with existing counts when + possible, to avoid growing the .da files ad infinitum. We use this + program's checksum to make sure we only accumulate whole program + statistics to the correct summary. An object file might be embedded + in two separate programs, and we must keep the two program + summaries separate. */ + +static void +gcov_exit (void) +{ + struct gcov_info *gi_ptr; + int dump_module_info; + + /* Stop and write the PMU profile data into the global file. */ + pmu_profile_stop (); + + dump_module_info = gcov_exit_init (); + + for (gi_ptr = __gcov_list; gi_ptr; gi_ptr = gi_ptr->next) + gcov_dump_one_gcov (gi_ptr); + + if (dump_module_info) + gcov_dump_module_info (); + + free (gi_filename); +} + +/* Add a new object file onto the bb chain. Invoked automatically + when running an object file's global ctors. 
*/ + +void +__gcov_init (struct gcov_info *info) +{ + if (!gcov_sampling_rate_initialized) + { + const char* env_value_str = getenv ("GCOV_SAMPLING_RATE"); + if (env_value_str) + { + int env_value_int = atoi(env_value_str); + if (env_value_int >= 1) + __gcov_sampling_rate = env_value_int; + } + gcov_sampling_rate_initialized = 1; + } + + if (!info->version) + return; + + if (gcov_version (info, info->version, 0)) + { + const char *ptr = info->filename; + gcov_unsigned_t crc32 = gcov_crc32; + size_t filename_length = strlen (info->filename); + struct gcov_pmu_info pmu_info; + + /* Refresh the longest file name information. */ + if (filename_length > gcov_max_filename) + gcov_max_filename = filename_length; + + /* Initialize the pmu profiler. */ + pmu_info.pmu_profile_filename = __gcov_pmu_profile_filename; + pmu_info.pmu_tool = __gcov_pmu_profile_options; + pmu_info.pmu_top_n_address = __gcov_pmu_top_n_address; + __gcov_init_pmu_profiler (&pmu_info); + if (pmu_info.pmu_profile_filename) + { + /* Refresh the longest file name information. */ + filename_length = strlen (pmu_info.pmu_profile_filename); + if (filename_length > gcov_max_filename) + gcov_max_filename = filename_length; + } + + /* Assign the module ID (starting at 1). */ + info->mod_info->ident = (++gcov_cur_module_id); + gcc_assert (EXTRACT_MODULE_ID_FROM_GLOBAL_ID (GEN_FUNC_GLOBAL_ID ( + info->mod_info->ident, 0)) + == info->mod_info->ident); + + do + { + unsigned ix; + gcov_unsigned_t value = *ptr << 24; + + for (ix = 8; ix--; value <<= 1) + { + gcov_unsigned_t feedback; + + feedback = (value ^ crc32) & 0x80000000 ? 0x04c11db7 : 0; + crc32 <<= 1; + crc32 ^= feedback; + } + } while (*ptr++); + + gcov_crc32 = crc32; + + if (!__gcov_list) + { + atexit (gcov_exit); + /* Start pmu profiler. */ + __gcov_start_pmu_profiler (); + } + + info->next = __gcov_list; + __gcov_list = info; + } + info->version = 0; +} + +/* Called before fork or exec - write out profile information gathered so + far and reset it to zero. This avoids duplication or loss of the + profile information gathered so far. */ + +void +__gcov_flush (void) +{ + const struct gcov_info *gi_ptr; + + __gcov_stop_pmu_profiler (); + gcov_exit (); + for (gi_ptr = __gcov_list; gi_ptr; gi_ptr = gi_ptr->next) + { + unsigned t_ix; + const struct gcov_ctr_info *ci_ptr; + + for (t_ix = 0, ci_ptr = gi_ptr->counts; t_ix != GCOV_COUNTERS; t_ix++) + if (gcov_counter_active (gi_ptr, t_ix)) + { + memset (ci_ptr->values, 0, sizeof (gcov_type) * ci_ptr->num); + ci_ptr++; + } + } + __gcov_start_pmu_profiler (); +} + +#else /* __GCOV_KERNEL__ */ + +#define GCOV_GET_FILENAME gcov_get_filename + +/* Copy the filename to the buffer. */ + +static inline void +gcov_get_filename (int prefix_length __attribute__ ((unused)), + int gcov_prefix_strip __attribute__ ((unused)), + const char *filename, char *gi_filename_up) +{ + strcpy (gi_filename_up, filename); +} + +/* Sort the profile counters for all indirect call sites. Counters + for each call site are allocated in array COUNTERS. */ + +static void +gcov_sort_icall_topn_counter (const struct gcov_ctr_info *counters) +{ + /* Empty */ +} + +/* Reserves a buffer to store the name of the file being processed. */ +static char _kernel_gi_filename[520]; + +/* This function allocates the space to store current file name. 
*/ + +static void +gcov_alloc_filename (void) +{ + prefix_length = 0; + gcov_prefix_strip = 0; + gi_filename = _kernel_gi_filename; + gi_filename_up = _kernel_gi_filename; +} + +#endif /* __GCOV_KERNEL__ */ + +/* Determine number of active counters in gcov_info INFO, + the counter arrays are stored in VALUES if the coming + value of VALUES !=0. If FLAG_SORT_ICALL_TOPN_COUNTER !=0, + the icall_topn_counter in INFO will be sorted. + Return: the number of active counter types. */ + +static unsigned int +gcov_counter_array (const struct gcov_info *info, + gcov_type *values[GCOV_COUNTERS], + int flag_sort_icall_topn_counter) +{ + unsigned int i; + unsigned int result = 0; + + for (i = 0; i < GCOV_COUNTERS; i++) { + if (gcov_counter_active (info, i)) + { + if (values) + values[result] = info->counts[result].values; + if (flag_sort_icall_topn_counter && + (i == GCOV_COUNTER_ICALL_TOPNV)) + gcov_sort_icall_topn_counter (&info->counts[result]); + result++; + } + } + return result; +} + +/* Compute object summary recored in gcov_info INFO. The result is + stored in OBJ_SUM. Note that the caller is responsible for + zeroing out OBJ_SUM, otherwise the summary is accumulated. */ + +static void +gcov_object_summary (struct gcov_info *info, + struct gcov_summary *obj_sum) +{ + const struct gcov_ctr_info *ci_ptr; + struct gcov_ctr_summary *cs_ptr; + gcov_unsigned_t c_num; + unsigned t_ix; + + /* Totals for this object file. */ + ci_ptr = info->counts; + for (t_ix = 0; t_ix < GCOV_COUNTERS_SUMMABLE; t_ix++) + { + if (!gcov_counter_active (info, t_ix)) + continue; + + cs_ptr = &(obj_sum->ctrs[t_ix]); + cs_ptr->num += ci_ptr->num; + for (c_num = 0; c_num < ci_ptr->num; c_num++) + { + cs_ptr->sum_all += ci_ptr->values[c_num]; + if (cs_ptr->run_max < ci_ptr->values[c_num]) + cs_ptr->run_max = ci_ptr->values[c_num]; + } + ci_ptr++; + } +} + +/* Merge with existing gcda file in the same directory to avoid + excessive growthe of the files. */ + +static int +gcov_merge_gcda_file (struct gcov_info *info, + gcov_type *values[GCOV_COUNTERS], + unsigned fi_stride) +{ + struct gcov_ctr_summary *cs_obj, *cs_tobj, *cs_prg, *cs_tprg, *cs_all; + unsigned t_ix, f_ix; + +#ifndef __GCOV_KERNEL__ + const struct gcov_fn_info *fi_ptr; + unsigned c_ix, n_counts; + int error = 0; + gcov_unsigned_t tag, length; + + eof_pos = 0; + summary_pos = 0; + + tag = gcov_read_unsigned (); + if (tag) + { + /* Merge data from file. */ + if (tag != GCOV_DATA_MAGIC) + { + gcov_error ("profiling:%s:Not a gcov data file\n", gi_filename); + goto read_fatal; + } + length = gcov_read_unsigned (); + if (!gcov_version (info, length, gi_filename)) + goto read_fatal; + + length = gcov_read_unsigned (); + if (length != info->stamp) + /* Read from a different compilation. Overwrite the file. */ + goto rewrite; + + /* Merge execution counts for each function. */ + for (f_ix = 0; f_ix < info->n_functions; f_ix++) + { + fi_ptr = (const struct gcov_fn_info *) + ((const char *) info->functions + f_ix * fi_stride); + tag = gcov_read_unsigned (); + length = gcov_read_unsigned (); + + /* Check function. 
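+         Each record is expected to be framed as
+
+           GCOV_TAG_FUNCTION, GCOV_TAG_FUNCTION_LENGTH,
+           ident, lineno_checksum, cfg_checksum
+
+         followed by one counter record per active counter type; any
+         deviation is treated as a merge mismatch below.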
*/ + if (tag != GCOV_TAG_FUNCTION + || length != GCOV_TAG_FUNCTION_LENGTH + || gcov_read_unsigned () != fi_ptr->ident + || gcov_read_unsigned () != fi_ptr->lineno_checksum + || gcov_read_unsigned () != fi_ptr->cfg_checksum) + goto read_mismatch; + + c_ix = 0; + for (t_ix = 0; t_ix < GCOV_COUNTERS; t_ix++) + { + gcov_merge_fn merge; + + if (!((1 << t_ix) & info->ctr_mask)) + continue; + + n_counts = fi_ptr->n_ctrs[c_ix]; + merge = info->counts[c_ix].merge; + + tag = gcov_read_unsigned (); + length = gcov_read_unsigned (); + if (tag != GCOV_TAG_FOR_COUNTER (t_ix) + || length != GCOV_TAG_COUNTER_LENGTH (n_counts)) + goto read_mismatch; + (*merge) (values[c_ix], n_counts); + values[c_ix] += n_counts; + c_ix++; + } + if ((error = gcov_is_error ())) + goto read_error; + } + + f_ix = ~0u; + /* Check program & object summary. */ + while (1) + { + int is_program; + + eof_pos = gcov_position (); + tag = gcov_read_unsigned (); + if (!tag) + break; + + length = gcov_read_unsigned (); + is_program = tag == GCOV_TAG_PROGRAM_SUMMARY; + if (length != GCOV_TAG_SUMMARY_LENGTH + || (!is_program && tag != GCOV_TAG_OBJECT_SUMMARY)) + goto read_mismatch; + gcov_read_summary (is_program ? &program : &object); + if ((error = gcov_is_error ())) + goto read_error; + if (is_program && program.checksum == gcov_crc32) + { + summary_pos = eof_pos; + goto rewrite; + } + } + } + + goto rewrite; + +read_error:; + gcov_error (error < 0 ? "profiling:%s:Overflow merging\n" + : "profiling:%s:Error merging\n", gi_filename); + goto read_fatal; + +#endif /* __GCOV_KERNEL__ */ + + goto rewrite; + +read_mismatch:; + gcov_error ("profiling:%s:Merge mismatch for %s\n", gi_filename, + f_ix + 1 ? "function" : "summaries"); + goto read_fatal; /* work-around the compiler warning */ + +read_fatal:; + gcov_close (); + return 1; + +rewrite:; + gcov_rewrite (); + if (!summary_pos) + memset (&program, 0, sizeof (program)); + + /* Merge the summaries. */ + f_ix = ~0u; + for (t_ix = 0; t_ix < GCOV_COUNTERS_SUMMABLE; t_ix++) + { + cs_obj = &object.ctrs[t_ix]; + cs_tobj = &this_object.ctrs[t_ix]; + cs_prg = &program.ctrs[t_ix]; + cs_tprg = &this_program.ctrs[t_ix]; + cs_all = &all.ctrs[t_ix]; + + if ((1 << t_ix) & info->ctr_mask) + { + if (!cs_obj->runs++) + cs_obj->num = cs_tobj->num; + else if (cs_obj->num != cs_tobj->num) + goto read_mismatch; + cs_obj->sum_all += cs_tobj->sum_all; + if (cs_obj->run_max < cs_tobj->run_max) + cs_obj->run_max = cs_tobj->run_max; + cs_obj->sum_max += cs_tobj->run_max; + + if (!cs_prg->runs++) + cs_prg->num = cs_tprg->num; + else if (cs_prg->num != cs_tprg->num) + goto read_mismatch; + cs_prg->sum_all += cs_tprg->sum_all; + if (cs_prg->run_max < cs_tprg->run_max) + cs_prg->run_max = cs_tprg->run_max; + cs_prg->sum_max += cs_tprg->run_max; + } + else if (cs_obj->num || cs_prg->num) + goto read_mismatch; + + if (!cs_all->runs && cs_prg->runs) + memcpy (cs_all, cs_prg, sizeof (*cs_all)); + else if (!all.checksum + && (!GCOV_LOCKED || cs_all->runs == cs_prg->runs) + && memcmp (cs_all, cs_prg, sizeof (*cs_all))) + { + gcov_error ("profiling:%s:Invocation mismatch - " + "some data files may have been removed%s", + gi_filename, GCOV_LOCKED + ? "" : " or concurrent update without locking support"); + all.checksum = ~0u; + } + } + + return 0; +} + +/* Calculate the function_info stride. This depends on the + number of counter types being measured. + NUM_COUNTER_TYPES is number of counter types recorded. + Return: the number of bytes for accessing next fn_info + (aligned to gcov_fn_info). 
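+   As a hypothetical example: on a target where
+   offsetof (struct gcov_fn_info, n_ctrs) is 16, sizeof (unsigned) is
+   4, and the struct needs only 4-byte alignment, two active counter
+   types give a stride of 16 + 2 * 4 == 24 bytes.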
*/ + +static unsigned +gcov_compute_fi_stride (unsigned num_counter_types) +{ + unsigned fi_stride; + + fi_stride = offsetof (struct gcov_fn_info, n_ctrs) + + num_counter_types * sizeof (unsigned); + if (__alignof__ (struct gcov_fn_info) > sizeof (unsigned)) + { + fi_stride += __alignof__ (struct gcov_fn_info) - 1; + fi_stride &= ~(__alignof__ (struct gcov_fn_info) - 1); + } + return fi_stride; +} + +/* This function returns the size of gcda file to be written. Note + the size is in units of gcov_type. */ + +GCOV_LINKAGE unsigned +gcov_gcda_file_size (struct gcov_info *gi_ptr) +{ + unsigned size; + const struct gcov_fn_info *fi_ptr; + unsigned f_ix, t_ix, c_ix; + unsigned n_counts; + unsigned fi_stride; + gcov_type *values[GCOV_COUNTERS]; + + c_ix = gcov_counter_array (gi_ptr, values, 0); + fi_stride = gcov_compute_fi_stride (c_ix); + + /* GCOV_DATA_MAGIC, GCOV_VERSION and time_stamp. */ + size = 3; + + /* size for each function. */ + for (f_ix = 0; f_ix < gi_ptr->n_functions; f_ix++) + { + fi_ptr = (const struct gcov_fn_info *) + ((const char *) gi_ptr->functions + f_ix * fi_stride); + + size += 2 /* tag_length itself */ + + GCOV_TAG_FUNCTION_LENGTH; /* ident, lineno_cksum, cfg_cksm */ + + c_ix = 0; + for (t_ix = 0; t_ix < GCOV_COUNTERS; t_ix++) + { + if (!((1 << t_ix) & gi_ptr->ctr_mask)) + continue; + + n_counts = fi_ptr->n_ctrs[c_ix]; + size += 2 + GCOV_TAG_COUNTER_LENGTH (n_counts); + c_ix++; + } + } + + /* Object summary. */ + size += 2 + GCOV_TAG_SUMMARY_LENGTH; + + /* Program summary. */ + size += 2 + GCOV_TAG_SUMMARY_LENGTH; + + size += 1; + + return size*4; +} + +/* Write profile data (including summary and module grouping information, + if available, to file. */ + +static void +gcov_write_gcda_file (struct gcov_info *gi_ptr, + unsigned fi_stride) +{ + const struct gcov_fn_info *fi_ptr; + gcov_type *values[GCOV_COUNTERS]; + unsigned t_ix, c_ix, f_ix, n_counts; + int error = 0; + + /* Write out the data. */ + gcov_write_tag_length (GCOV_DATA_MAGIC, GCOV_VERSION); + gcov_write_unsigned (gi_ptr->stamp); + + gcov_counter_array (gi_ptr, values, 0); + + /* Write execution counts for each function. */ + for (f_ix = 0; f_ix < gi_ptr->n_functions; f_ix++) + { + fi_ptr = (const struct gcov_fn_info *) + ((const char *) gi_ptr->functions + f_ix * fi_stride); + + /* Announce function. */ + gcov_write_tag_length (GCOV_TAG_FUNCTION, GCOV_TAG_FUNCTION_LENGTH); + gcov_write_unsigned (fi_ptr->ident); + gcov_write_unsigned (fi_ptr->lineno_checksum); + gcov_write_unsigned (fi_ptr->cfg_checksum); + + c_ix = 0; + for (t_ix = 0; t_ix < GCOV_COUNTERS; t_ix++) + { + gcov_type *c_ptr; + + if (!((1 << t_ix) & gi_ptr->ctr_mask)) + continue; + + n_counts = fi_ptr->n_ctrs[c_ix]; + + gcov_write_tag_length (GCOV_TAG_FOR_COUNTER (t_ix), + GCOV_TAG_COUNTER_LENGTH (n_counts)); + c_ptr = values[c_ix]; + while (n_counts--) + gcov_write_counter (*c_ptr++); + + values[c_ix] = c_ptr; + c_ix++; + } + } + + /* Object file summary. */ + gcov_write_summary (GCOV_TAG_OBJECT_SUMMARY, &object); + + /* Generate whole program statistics. */ + program.checksum = gcov_crc32; + if (eof_pos) + gcov_seek (eof_pos); + gcov_write_summary (GCOV_TAG_PROGRAM_SUMMARY, &program); + if (!summary_pos) + gcov_write_unsigned (0); + + /* TODO: there is a problem here -- if there are other program + summary data after the matching one, setting eof_pos to this + position means that the module info table will overwrite the + those other program summary. 
It also means a mismatch error + may occur at the next merge if no matching program summary is + found before the module info data. */ + if (!summary_pos) + gi_ptr->eof_pos = gcov_position () - 1; + else + gi_ptr->eof_pos = gcov_position (); + + if ((error = gcov_close ())) + gcov_error (error < 0 ? + "profiling:%s:Overflow writing\n" : + "profiling:%s:Error writing\n", + gi_filename); +} + +/* Do some preparation work before calling the actual dumping + routine. + Return: 1 when module grouping info needs to be dumped, + 0 otherwise. */ + +static int +gcov_exit_init (void) +{ + struct gcov_info *gi_ptr; + int dump_module_info = 0; + + dump_module_info = 0; + gcov_prefix_strip = 0; + + memset (&all, 0, sizeof (all)); + + /* Find the totals for this execution. */ + memset (&this_program, 0, sizeof (this_program)); + for (gi_ptr = __gcov_list; gi_ptr; gi_ptr = gi_ptr->next) + { + gcov_object_summary (gi_ptr, &this_program); + + /* The IS_PRIMARY field is overloaded to indicate if this module + is FDO/LIPO. */ + dump_module_info |= gi_ptr->mod_info->is_primary; + } + + gcov_alloc_filename (); + + return dump_module_info; +} + +/* Dump one entry in the gcov_info list (for one object). */ + +static void +gcov_dump_one_gcov (struct gcov_info *gi_ptr) +{ + gcov_type *values[GCOV_COUNTERS]; + unsigned fi_stride; + unsigned c_ix; + int ret; + + memset (&this_object, 0, sizeof (this_object)); + memset (&object, 0, sizeof (object)); + + gcov_object_summary (gi_ptr, &this_object); + + c_ix = gcov_counter_array (gi_ptr, values, 1); + + fi_stride = gcov_compute_fi_stride (c_ix); + + GCOV_GET_FILENAME (prefix_length, gcov_prefix_strip, gi_ptr->filename, + gi_filename_up); + + if (gcov_open_by_filename (gi_filename) == -1) + return; + + /* Now merge this file. */ + ret = gcov_merge_gcda_file (gi_ptr, values, fi_stride); + if (ret != 0 ) return; + + gcov_write_gcda_file (gi_ptr, fi_stride); +} + +#endif /* L_gcov */ + +#ifdef L_gcov_merge_add +/* The profile merging function that just adds the counters. It is given + an array COUNTERS of N_COUNTERS old counters and it reads the same number + of counters from the gcov file. */ +void +__gcov_merge_add (gcov_type *counters, unsigned n_counters) +{ + for (; n_counters; counters++, n_counters--) + *counters += gcov_read_counter (); +} +#endif /* L_gcov_merge_add */ + +#ifdef L_gcov_merge_ior +/* The profile merging function that just adds the counters. It is given + an array COUNTERS of N_COUNTERS old counters and it reads the same number + of counters from the gcov file. */ +void +__gcov_merge_ior (gcov_type *counters, unsigned n_counters) +{ + for (; n_counters; counters++, n_counters--) + *counters |= gcov_read_counter (); +} +#endif + +#ifdef L_gcov_merge_reusedist + +/* Return the weighted arithmetic mean of two values. */ + +static gcov_type +__gcov_weighted_mean2 (gcov_type value1, gcov_type count1, + gcov_type value2, gcov_type count2) +{ + if (count1 + count2 == 0) + return 0; + else + return (value1 * count1 + value2 * count2) / (count1 + count2); +} + +void +__gcov_merge_reusedist (gcov_type *counters, unsigned n_counters) +{ + unsigned i; + + gcc_assert(!(n_counters % 4)); + + for (i = 0; i < n_counters; i += 4) + { + /* Decode current values. */ + gcov_type c_mean_dist = counters[i]; + gcov_type c_mean_size = counters[i+1]; + gcov_type c_count = counters[i+2]; + gcov_type c_dist_x_size = counters[i+3]; + + /* Read and decode values in file. 
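+         These are then combined with __gcov_weighted_mean2 above;
+         e.g. merging an in-memory mean distance of 10 over 2 samples
+         with a file mean of 4 over 6 samples yields
+         (10*2 + 4*6) / 8 == 5 (integer division).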
*/ + gcov_type f_mean_dist = __gcov_read_counter (); + gcov_type f_mean_size = __gcov_read_counter (); + gcov_type f_count = __gcov_read_counter (); + gcov_type f_dist_x_size = __gcov_read_counter (); + + /* Compute aggregates. */ + gcov_type a_mean_dist = __gcov_weighted_mean2 ( + f_mean_dist, f_count, c_mean_dist, c_count); + gcov_type a_mean_size = __gcov_weighted_mean2 ( + f_mean_size, f_count, c_mean_size, c_count); + gcov_type a_count = f_count + c_count; + gcov_type a_dist_x_size = f_dist_x_size + c_dist_x_size; + + /* Encode back into counters. */ + counters[i] = a_mean_dist; + counters[i+1] = a_mean_size; + counters[i+2] = a_count; + counters[i+3] = a_dist_x_size; + } +} + +#endif + +#ifdef L_gcov_merge_dc + +/* Returns 1 if the function global id GID is not valid. */ + +static int +__gcov_is_gid_insane (gcov_type gid) +{ + if (EXTRACT_MODULE_ID_FROM_GLOBAL_ID (gid) == 0 + || EXTRACT_FUNC_ID_FROM_GLOBAL_ID (gid) == 0) + return 1; + return 0; +} + +/* The profile merging function used for merging direct call counts + This function is given array COUNTERS of N_COUNTERS old counters and it + reads the same number of counters from the gcov file. */ + +void +__gcov_merge_dc (gcov_type *counters, unsigned n_counters) +{ + unsigned i; + + gcc_assert (!(n_counters % 2)); + for (i = 0; i < n_counters; i += 2) + { + gcov_type global_id = gcov_read_counter (); + gcov_type call_count = gcov_read_counter (); + + /* Note that global id counter may never have been set if no calls were + made from this call-site. */ + if (counters[i] && global_id) + { + /* TODO race condition requires us do the following correction. */ + if (__gcov_is_gid_insane (counters[i])) + counters[i] = global_id; + else if (__gcov_is_gid_insane (global_id)) + global_id = counters[i]; + + gcc_assert (counters[i] == global_id); + } + else if (global_id) + counters[i] = global_id; + + counters[i + 1] += call_count; + + /* Reset. */ + if (__gcov_is_gid_insane (counters[i])) + counters[i] = counters[i + 1] = 0; + + /* Assert that the invariant (global_id == 0) <==> (call_count == 0) + holds true after merging. */ + if (counters[i] == 0) + counters[i+1] = 0; + if (counters[i + 1] == 0) + counters[i] = 0; + } +} +#endif + +#ifdef L_gcov_merge_icall_topn +/* The profile merging function used for merging indirect call counts + This function is given array COUNTERS of N_COUNTERS old counters and it + reads the same number of counters from the gcov file. */ + +void +__gcov_merge_icall_topn (gcov_type *counters, unsigned n_counters) +{ + unsigned i, j, k, m; + + gcc_assert (!(n_counters % GCOV_ICALL_TOPN_NCOUNTS)); + for (i = 0; i < n_counters; i += GCOV_ICALL_TOPN_NCOUNTS) + { + gcov_type *value_array = &counters[i + 1]; + unsigned tmp_size = 2 * (GCOV_ICALL_TOPN_NCOUNTS - 1); + gcov_type *tmp_array + = (gcov_type *) alloca (tmp_size * sizeof (gcov_type)); + + for (j = 0; j < tmp_size; j++) + tmp_array[j] = 0; + + for (j = 0; j < GCOV_ICALL_TOPN_NCOUNTS - 1; j += 2) + { + tmp_array[j] = value_array[j]; + tmp_array[j + 1] = value_array [j + 1]; + } + + /* Skip the number_of_eviction entry. 
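+         Each GCOV_ICALL_TOPN_NCOUNTS block in the file is laid out
+         as one eviction counter followed by (global id, count)
+         pairs; the file-side eviction count is read and deliberately
+         dropped here.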
*/ + gcov_read_counter (); + for (k = 0; k < GCOV_ICALL_TOPN_NCOUNTS - 1; k += 2) + { + int found = 0; + gcov_type global_id = gcov_read_counter (); + gcov_type call_count = gcov_read_counter (); + for (m = 0; m < j; m += 2) + { + if (tmp_array[m] == global_id) + { + found = 1; + tmp_array[m + 1] += call_count; + break; + } + } + if (!found) + { + tmp_array[j] = global_id; + tmp_array[j + 1] = call_count; + j += 2; + } + } + /* Now sort the temp array */ + gcov_sort_n_vals (tmp_array, j); + + /* Now copy back the top half of the temp array */ + for (k = 0; k < GCOV_ICALL_TOPN_NCOUNTS - 1; k += 2) + { + value_array[k] = tmp_array[k]; + value_array[k + 1] = tmp_array[k + 1]; + } + } +} +#endif + + +#ifdef L_gcov_merge_single +/* The profile merging function for choosing the most common value. + It is given an array COUNTERS of N_COUNTERS old counters and it + reads the same number of counters from the gcov file. The counters + are split into 3-tuples where the members of the tuple have + meanings: + + -- the stored candidate on the most common value of the measured entity + -- counter + -- total number of evaluations of the value */ +void +__gcov_merge_single (gcov_type *counters, unsigned n_counters) +{ + unsigned i, n_measures; + gcov_type value, counter, all; + + gcc_assert (!(n_counters % 3)); + n_measures = n_counters / 3; + for (i = 0; i < n_measures; i++, counters += 3) + { + value = gcov_read_counter (); + counter = gcov_read_counter (); + all = gcov_read_counter (); + + if (counters[0] == value) + counters[1] += counter; + else if (counter > counters[1]) + { + counters[0] = value; + counters[1] = counter - counters[1]; + } + else + counters[1] -= counter; + counters[2] += all; + } +} +#endif /* L_gcov_merge_single */ + +#ifdef L_gcov_merge_delta +/* The profile merging function for choosing the most common + difference between two consecutive evaluations of the value. It is + given an array COUNTERS of N_COUNTERS old counters and it reads the + same number of counters from the gcov file. The counters are split + into 4-tuples where the members of the tuple have meanings: + + -- the last value of the measured entity + -- the stored candidate on the most common difference + -- counter + -- total number of evaluations of the value */ +void +__gcov_merge_delta (gcov_type *counters, unsigned n_counters) +{ + unsigned i, n_measures; + gcov_type value, counter, all; + + gcc_assert (!(n_counters % 4)); + n_measures = n_counters / 4; + for (i = 0; i < n_measures; i++, counters += 4) + { + /* last = */ gcov_read_counter (); + value = gcov_read_counter (); + counter = gcov_read_counter (); + all = gcov_read_counter (); + + if (counters[1] == value) + counters[2] += counter; + else if (counter > counters[2]) + { + counters[1] = value; + counters[2] = counter - counters[2]; + } + else + counters[2] -= counter; + counters[3] += all; + } +} +#endif /* L_gcov_merge_delta */ + +#ifdef L_gcov_interval_profiler +/* If VALUE is in interval <START, START + STEPS - 1>, then increases the + corresponding counter in COUNTERS. If the VALUE is above or below + the interval, COUNTERS[STEPS] or COUNTERS[STEPS + 1] is increased + instead. */ + +void +__gcov_interval_profiler (gcov_type *counters, gcov_type value, + int start, unsigned steps) +{ + gcov_type delta = value - start; + if (delta < 0) + counters[steps + 1]++; + else if (delta >= steps) + counters[steps]++; + else + counters[delta]++; +} +#endif + +#ifdef L_gcov_pow2_profiler +/* If VALUE is a power of two, COUNTERS[1] is incremented. 
Otherwise + COUNTERS[0] is incremented. */ + +void +__gcov_pow2_profiler (gcov_type *counters, gcov_type value) +{ + if (value & (value - 1)) + counters[0]++; + else + counters[1]++; +} +#endif + +/* Tries to determine the most common value among its inputs. Checks if the + value stored in COUNTERS[0] matches VALUE. If this is the case, COUNTERS[1] + is incremented. If this is not the case and COUNTERS[1] is not zero, + COUNTERS[1] is decremented. Otherwise COUNTERS[1] is set to one and + VALUE is stored to COUNTERS[0]. This algorithm guarantees that if this + function is called more than 50% of the time with one value, this value + will be in COUNTERS[0] in the end. + + In any case, COUNTERS[2] is incremented. */ + +static inline void +__gcov_one_value_profiler_body (gcov_type *counters, gcov_type value) +{ + if (value == counters[0]) + counters[1]++; + else if (counters[1] == 0) + { + counters[1] = 1; + counters[0] = value; + } + else + counters[1]--; + counters[2]++; +} + +#ifdef L_gcov_indirect_call_topn_profiler +/* Tries to keep track the most frequent N values in the counters where + N is specified by parameter TOPN_VAL. To track top N values, 2*N counter + entries are used. + counter[0] --- the accumative count of the number of times one entry in + in the counters gets evicted/replaced due to limited capacity. + When this value reaches a threshold, the bottom N values are + cleared. + counter[1] through counter[2*N] records the top 2*N values collected so far. + Each value is represented by two entries: count[2*i+1] is the ith value, and + count[2*i+2] is the number of times the value is seen. */ + +static void +__gcov_topn_value_profiler_body (gcov_type *counters, gcov_type value, + gcov_unsigned_t topn_val) +{ + unsigned i, found = 0, have_zero_count = 0; + + gcov_type *entry; + gcov_type *lfu_entry = &counters[1]; + gcov_type *value_array = &counters[1]; + gcov_type *num_eviction = &counters[0]; + + /* There are 2*topn_val values tracked, each value takes two slots in the + counter array */ + for ( i = 0; i < (topn_val << 2); i += 2) + { + entry = &value_array[i]; + if ( entry[0] == value) + { + entry[1]++ ; + found = 1; + break; + } + else if (entry[1] == 0) + { + lfu_entry = entry; + have_zero_count = 1; + } + else if (entry[1] < lfu_entry[1]) + lfu_entry = entry; + } + + if (found) + return; + + /* lfu_entry is either an empty entry or an entry + with lowest count, which will be evicted. */ + lfu_entry[0] = value; + lfu_entry[1] = 1; + +#define GCOV_ICALL_COUNTER_CLEAR_THRESHOLD 3000 + + /* Too many evictions -- time to clear bottom entries to + avoid hot values bumping each other out. */ + if ( !have_zero_count + && ++*num_eviction >= GCOV_ICALL_COUNTER_CLEAR_THRESHOLD) + { + unsigned i, j; + gcov_type *p, minv; + gcov_type* tmp_cnts + = (gcov_type *)alloca (topn_val * sizeof(gcov_type)); + + *num_eviction = 0; + + for ( i = 0; i < topn_val; i++ ) + tmp_cnts[i] = 0; + + /* Find the largest topn_val values from the group of + 2*topn_val values and put them into tmp_cnts. 
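+         (tmp_cnts then holds the topn_val largest counts seen; their
+         minimum, minv, becomes the threshold below which entries are
+         zeroed out further down.)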
*/ + + for ( i = 0; i < 2 * topn_val; i += 2 ) + { + p = 0; + for ( j = 0; j < topn_val; j++ ) + { + if ( !p || tmp_cnts[j] < *p ) + p = &tmp_cnts[j]; + } + if ( value_array[i + 1] > *p ) + *p = value_array[i + 1]; + } + + minv = tmp_cnts[0]; + for ( j = 1; j < topn_val; j++ ) + { + if (tmp_cnts[j] < minv) + minv = tmp_cnts[j]; + } + /* Zero out low value entries */ + for ( i = 0; i < 2 * topn_val; i += 2 ) + { + if (value_array[i + 1] < minv) + { + value_array[i] = 0; + value_array[i + 1] = 0; + } + } + } +} +#endif + +#ifdef L_gcov_one_value_profiler +void +__gcov_one_value_profiler (gcov_type *counters, gcov_type value) +{ + __gcov_one_value_profiler_body (counters, value); +} +#endif + +#ifdef L_gcov_indirect_call_profiler + +/* By default, the C++ compiler will use function addresses in the + vtable entries. Setting TARGET_VTABLE_USES_DESCRIPTORS to nonzero + tells the compiler to use function descriptors instead. The value + of this macro says how many words wide the descriptor is (normally 2), + but it may be dependent on target flags. Since we do not have access + to the target flags here we just check to see if it is set and use + that to set VTABLE_USES_DESCRIPTORS to 0 or 1. + + It is assumed that the address of a function descriptor may be treated + as a pointer to a function. */ + +#ifdef TARGET_VTABLE_USES_DESCRIPTORS +#define VTABLE_USES_DESCRIPTORS 1 +#else +#define VTABLE_USES_DESCRIPTORS 0 +#endif + +/* Tries to determine the most common value among its inputs. */ +void +__gcov_indirect_call_profiler (gcov_type* counter, gcov_type value, + void* cur_func, void* callee_func) +{ + /* If the C++ virtual tables contain function descriptors then one + function may have multiple descriptors and we need to dereference + the descriptors to see if they point to the same function. */ + if (cur_func == callee_func + || (VTABLE_USES_DESCRIPTORS && callee_func + && *(void **) cur_func == *(void **) callee_func)) + __gcov_one_value_profiler_body (counter, value); +} +#endif + + +#ifdef L_gcov_indirect_call_topn_profiler +extern THREAD_PREFIX gcov_type *__gcov_indirect_call_topn_counters ATTRIBUTE_HIDDEN; +extern THREAD_PREFIX void *__gcov_indirect_call_topn_callee ATTRIBUTE_HIDDEN; +#ifdef TARGET_VTABLE_USES_DESCRIPTORS +#define VTABLE_USES_DESCRIPTORS 1 +#else +#define VTABLE_USES_DESCRIPTORS 0 +#endif +void +__gcov_indirect_call_topn_profiler (void *cur_func, + void *cur_module_gcov_info, + gcov_unsigned_t cur_func_id) +{ + void *callee_func = __gcov_indirect_call_topn_callee; + gcov_type *counter = __gcov_indirect_call_topn_counters; + /* If the C++ virtual tables contain function descriptors then one + function may have multiple descriptors and we need to dereference + the descriptors to see if they point to the same function. */ + if (cur_func == callee_func + || (VTABLE_USES_DESCRIPTORS && callee_func + && *(void **) cur_func == *(void **) callee_func)) + { + gcov_type global_id + = ((struct gcov_info *) cur_module_gcov_info)->mod_info->ident; + global_id = GEN_FUNC_GLOBAL_ID (global_id, cur_func_id); + __gcov_topn_value_profiler_body (counter, global_id, GCOV_ICALL_TOPN_VAL); + __gcov_indirect_call_topn_callee = 0; + } +} + +#endif + +#ifdef L_gcov_direct_call_profiler +extern THREAD_PREFIX gcov_type *__gcov_direct_call_counters ATTRIBUTE_HIDDEN; +extern THREAD_PREFIX void *__gcov_direct_call_callee ATTRIBUTE_HIDDEN; +/* Direct call profiler. 
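+   The instrumented caller stores the callee address in
+   __gcov_direct_call_callee and points __gcov_direct_call_counters
+   at the call site's two-slot counter before the call; slot 0 then
+   records the callee's global id and slot 1 the call count.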
*/ +void +__gcov_direct_call_profiler (void *cur_func, + void *cur_module_gcov_info, + gcov_unsigned_t cur_func_id) +{ + if (cur_func == __gcov_direct_call_callee) + { + gcov_type global_id + = ((struct gcov_info *) cur_module_gcov_info)->mod_info->ident; + global_id = GEN_FUNC_GLOBAL_ID (global_id, cur_func_id); + __gcov_direct_call_counters[0] = global_id; + __gcov_direct_call_counters[1]++; + __gcov_direct_call_callee = 0; + } +} +#endif + + +#ifdef L_gcov_average_profiler +/* Increase corresponding COUNTER by VALUE. FIXME: Perhaps we want + to saturate up. */ + +void +__gcov_average_profiler (gcov_type *counters, gcov_type value) +{ + counters[0] += value; + counters[1] ++; +} +#endif + +#ifdef L_gcov_ior_profiler +/* Increase corresponding COUNTER by VALUE. FIXME: Perhaps we want + to saturate up. */ + +void +__gcov_ior_profiler (gcov_type *counters, gcov_type value) +{ + *counters |= value; +} +#endif + +#ifdef L_gcov_fork +/* A wrapper for the fork function. Flushes the accumulated profiling data, so + that they are not counted twice. */ + +pid_t +__gcov_fork (void) +{ + __gcov_flush (); + return fork (); +} +#endif + +#ifdef L_gcov_execl +/* A wrapper for the execl function. Flushes the accumulated profiling data, so + that they are not lost. */ + +int +__gcov_execl (const char *path, char *arg, ...) +{ + va_list ap, aq; + unsigned i, length; + char **args; + + __gcov_flush (); + + va_start (ap, arg); + va_copy (aq, ap); + + length = 2; + while (va_arg (ap, char *)) + length++; + va_end (ap); + + args = (char **) alloca (length * sizeof (void *)); + args[0] = arg; + for (i = 1; i < length; i++) + args[i] = va_arg (aq, char *); + va_end (aq); + + return execv (path, args); +} +#endif + +#ifdef L_gcov_execlp +/* A wrapper for the execlp function. Flushes the accumulated profiling data, so + that they are not lost. */ + +int +__gcov_execlp (const char *path, char *arg, ...) +{ + va_list ap, aq; + unsigned i, length; + char **args; + + __gcov_flush (); + + va_start (ap, arg); + va_copy (aq, ap); + + length = 2; + while (va_arg (ap, char *)) + length++; + va_end (ap); + + args = (char **) alloca (length * sizeof (void *)); + args[0] = arg; + for (i = 1; i < length; i++) + args[i] = va_arg (aq, char *); + va_end (aq); + + return execvp (path, args); +} +#endif + +#ifdef L_gcov_execle +/* A wrapper for the execle function. Flushes the accumulated profiling data, so + that they are not lost. */ + +int +__gcov_execle (const char *path, char *arg, ...) +{ + va_list ap, aq; + unsigned i, length; + char **args; + char **envp; + + __gcov_flush (); + + va_start (ap, arg); + va_copy (aq, ap); + + length = 2; + while (va_arg (ap, char *)) + length++; + va_end (ap); + + args = (char **) alloca (length * sizeof (void *)); + args[0] = arg; + for (i = 1; i < length; i++) + args[i] = va_arg (aq, char *); + envp = va_arg (aq, char **); + va_end (aq); + + return execve (path, args, envp); +} +#endif + +#ifdef L_gcov_execv +/* A wrapper for the execv function. Flushes the accumulated profiling data, so + that they are not lost. */ + +int +__gcov_execv (const char *path, char *const argv[]) +{ + __gcov_flush (); + return execv (path, argv); +} +#endif + +#ifdef L_gcov_execvp +/* A wrapper for the execvp function. Flushes the accumulated profiling data, so + that they are not lost. */ + +int +__gcov_execvp (const char *path, char *const argv[]) +{ + __gcov_flush (); + return execvp (path, argv); +} +#endif + +#ifdef L_gcov_execve +/* A wrapper for the execve function. 
Flushes the accumulated profiling data, so + that they are not lost. */ + +int +__gcov_execve (const char *path, char *const argv[], char *const envp[]) +{ + __gcov_flush (); + return execve (path, argv, envp); +} +#endif + +#ifdef __GCOV_KERNEL__ +/* + * Provide different implementations for the following functions: + * __gcov_init + * __gcov_exit + * + * Provide the following dummy merge functions: + * __gcov_merge_add + * __gcov_merge_single + * __gcov_merge_delta + * __gcov_merge_ior + * __gcov_merge_icall_topn + * __gcov_merge_dc + * __gcov_merge_reusedist + * + * Reuse the following functions: + * __gcov_interval_profiler() + * __gcov_pow2_profiler() + * __gcov_average_profiler() + * __gcov_ior_profiler() + * __gcov_one_value_profiler() + * __gcov_indirect_call_profiler() + * |-> __gcov_one_value_profiler_body() + * + * For LIPO: (TBD) + * Change slightly for the following functions: + * __gcov_merge_icall_topn + * __gcov_merge_dc + * + * Reuse the following functions: + * __gcov_direct_call_profiler() + * __gcov_indirect_call_topn_profiler() + * |-> __gcov_topn_value_profiler_body() + * + */ + +/* Current virtual gcda file. This is for kernel use only. */ +gcov_kernel_vfile *gcov_current_file; + +/* Set current virtual gcda file. It needs to be set before dumping + profile data. */ + +void +gcov_set_vfile (gcov_kernel_vfile *file) +{ + gcov_current_file = file; +} + +/* Dump one entry in the gcov_info list (for one object) in kernel. */ + +void +gcov_kernel_dump_one_gcov (struct gcov_info *info) +{ + gcc_assert (gcov_current_file); + + gcov_exit_init (); + + gcov_dump_one_gcov (info); +} + +#define DUMMY_FUNC(func) \ +void func (gcov_type *counters __attribute__ ((unused)), \ + unsigned n_counters __attribute__ ((unused))) {} + +DUMMY_FUNC (__gcov_merge_add) +EXPORT_SYMBOL (__gcov_merge_add); + +DUMMY_FUNC (__gcov_merge_single) +EXPORT_SYMBOL (__gcov_merge_single); + +DUMMY_FUNC (__gcov_merge_delta) +EXPORT_SYMBOL (__gcov_merge_delta); + +DUMMY_FUNC (__gcov_merge_ior) +EXPORT_SYMBOL (__gcov_merge_ior); + +DUMMY_FUNC (__gcov_merge_icall_topn) +EXPORT_SYMBOL (__gcov_merge_icall_topn); + +DUMMY_FUNC (__gcov_merge_dc) +EXPORT_SYMBOL (__gcov_merge_dc); + +DUMMY_FUNC (__gcov_merge_reusedist) +EXPORT_SYMBOL (__gcov_merge_reusedist); + +EXPORT_SYMBOL (__gcov_average_profiler); +EXPORT_SYMBOL (__gcov_indirect_call_profiler); +EXPORT_SYMBOL (__gcov_interval_profiler); +EXPORT_SYMBOL (__gcov_ior_profiler); +EXPORT_SYMBOL (__gcov_one_value_profiler); +EXPORT_SYMBOL (__gcov_pow2_profiler); + +#endif /* __GCOV_KERNEL__ */ + +#endif /* inhibit_libc */ diff --git a/lib/gcc/i686-linux-android/4.6/include-fixed/README b/lib/gcc/i686-linux-android/4.6/include-fixed/README new file mode 100644 index 0000000..7086a77 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include-fixed/README @@ -0,0 +1,14 @@ +This README file is copied into the directory for GCC-only header files +when fixincludes is run by the makefile for GCC. + +Many of the files in this directory were automatically edited from the +standard system header files by the fixincludes process. They are +system-specific, and will not work on any other kind of system. They +are also not part of GCC. The reason we have to do this is because +GCC requires ANSI C headers and many vendors supply ANSI-incompatible +headers. + +Because this is an automated process, sometimes headers get "fixed" +that do not, strictly speaking, need a fix. As long as nothing is broken +by the process, it is just an unfortunate collateral inconvenience. 
+We would like to rectify it, if it is not "too inconvenient". diff --git a/lib/gcc/i686-linux-android/4.6/include-fixed/asm/posix_types.h b/lib/gcc/i686-linux-android/4.6/include-fixed/asm/posix_types.h new file mode 100644 index 0000000..9ff36f5 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include-fixed/asm/posix_types.h @@ -0,0 +1,29 @@ +/* DO NOT EDIT THIS FILE. + + It has been auto-edited by fixincludes from: + + "/tmp/ndk-User/build/toolchain/prefix/sysroot/usr/include/asm/posix_types.h" + + This had to be done to correct non-standard usages in the + original, manufacturer supplied header file. */ + +/* This file fixes a bug in the __FD_ZERO macro + for older versions of the Linux kernel. */ +#ifndef _POSIX_TYPES_H_WRAPPER +#include <features.h> + #include_next <asm/posix_types.h> + +#if defined(__FD_ZERO) && !defined(__GLIBC__) +#undef __FD_ZERO +#define __FD_ZERO(fdsetp) \ + do { \ + int __d0, __d1; \ +__asm__ __volatile__("cld ; rep ; stosl" \ +: "=&c" (__d0), "=&D" (__d1) \ +: "a" (0), "0" (__FDSET_LONGS), \ + "1" ((__kernel_fd_set *) (fdsetp)) :"memory"); \ + } while (0) +#endif + +#define _POSIX_TYPES_H_WRAPPER +#endif /* _POSIX_TYPES_H_WRAPPER */ diff --git a/lib/gcc/i686-linux-android/4.6/include-fixed/limits.h b/lib/gcc/i686-linux-android/4.6/include-fixed/limits.h new file mode 100644 index 0000000..a1c35e9 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include-fixed/limits.h @@ -0,0 +1,172 @@ +/* Copyright (C) 1992, 1994, 1997, 1998 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +<http://www.gnu.org/licenses/>. */ + +/* This administrivia gets added to the beginning of limits.h + if the system has its own version of limits.h. */ + +/* We use _GCC_LIMITS_H_ because we want this not to match + any macros that the system's limits.h uses for its own purposes. */ +#ifndef _GCC_LIMITS_H_ /* Terminated in limity.h. */ +#define _GCC_LIMITS_H_ + +#ifndef _LIBC_LIMITS_H_ +/* Use "..." so that we find syslimits.h only in this same directory. */ +#include "syslimits.h" +#endif +/* Copyright (C) 1991, 1992, 1993, 1996, 1997, 1998, 1999, 2000, 2001, + 2002 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. 
+ +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +<http://www.gnu.org/licenses/>. */ + +#ifndef _LIMITS_H___ +#define _LIMITS_H___ + +/* Number of bits in a `char'. */ +#undef CHAR_BIT +#define CHAR_BIT __CHAR_BIT__ + +/* Maximum length of a multibyte character. */ +#ifndef MB_LEN_MAX +#define MB_LEN_MAX 1 +#endif + +/* Minimum and maximum values a `signed char' can hold. */ +#undef SCHAR_MIN +#define SCHAR_MIN (-SCHAR_MAX - 1) +#undef SCHAR_MAX +#define SCHAR_MAX __SCHAR_MAX__ + +/* Maximum value an `unsigned char' can hold. (Minimum is 0). */ +#undef UCHAR_MAX +#if __SCHAR_MAX__ == __INT_MAX__ +# define UCHAR_MAX (SCHAR_MAX * 2U + 1U) +#else +# define UCHAR_MAX (SCHAR_MAX * 2 + 1) +#endif + +/* Minimum and maximum values a `char' can hold. */ +#ifdef __CHAR_UNSIGNED__ +# undef CHAR_MIN +# if __SCHAR_MAX__ == __INT_MAX__ +# define CHAR_MIN 0U +# else +# define CHAR_MIN 0 +# endif +# undef CHAR_MAX +# define CHAR_MAX UCHAR_MAX +#else +# undef CHAR_MIN +# define CHAR_MIN SCHAR_MIN +# undef CHAR_MAX +# define CHAR_MAX SCHAR_MAX +#endif + +/* Minimum and maximum values a `signed short int' can hold. */ +#undef SHRT_MIN +#define SHRT_MIN (-SHRT_MAX - 1) +#undef SHRT_MAX +#define SHRT_MAX __SHRT_MAX__ + +/* Maximum value an `unsigned short int' can hold. (Minimum is 0). */ +#undef USHRT_MAX +#if __SHRT_MAX__ == __INT_MAX__ +# define USHRT_MAX (SHRT_MAX * 2U + 1U) +#else +# define USHRT_MAX (SHRT_MAX * 2 + 1) +#endif + +/* Minimum and maximum values a `signed int' can hold. */ +#undef INT_MIN +#define INT_MIN (-INT_MAX - 1) +#undef INT_MAX +#define INT_MAX __INT_MAX__ + +/* Maximum value an `unsigned int' can hold. (Minimum is 0). */ +#undef UINT_MAX +#define UINT_MAX (INT_MAX * 2U + 1U) + +/* Minimum and maximum values a `signed long int' can hold. + (Same as `int'). */ +#undef LONG_MIN +#define LONG_MIN (-LONG_MAX - 1L) +#undef LONG_MAX +#define LONG_MAX __LONG_MAX__ + +/* Maximum value an `unsigned long int' can hold. (Minimum is 0). */ +#undef ULONG_MAX +#define ULONG_MAX (LONG_MAX * 2UL + 1UL) + +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +/* Minimum and maximum values a `signed long long int' can hold. */ +# undef LLONG_MIN +# define LLONG_MIN (-LLONG_MAX - 1LL) +# undef LLONG_MAX +# define LLONG_MAX __LONG_LONG_MAX__ + +/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */ +# undef ULLONG_MAX +# define ULLONG_MAX (LLONG_MAX * 2ULL + 1ULL) +#endif + +#if defined (__GNU_LIBRARY__) ? defined (__USE_GNU) : !defined (__STRICT_ANSI__) +/* Minimum and maximum values a `signed long long int' can hold. */ +# undef LONG_LONG_MIN +# define LONG_LONG_MIN (-LONG_LONG_MAX - 1LL) +# undef LONG_LONG_MAX +# define LONG_LONG_MAX __LONG_LONG_MAX__ + +/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */ +# undef ULONG_LONG_MAX +# define ULONG_LONG_MAX (LONG_LONG_MAX * 2ULL + 1ULL) +#endif + +#endif /* _LIMITS_H___ */ +/* This administrivia gets added to the end of limits.h + if the system has its own version of limits.h. 
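+
+   [Editor's note, not part of the original header.]  The unsigned
+   arithmetic in the UCHAR_MAX and USHRT_MAX definitions above covers
+   the corner case where the signed type is as wide as int: there,
+   SCHAR_MAX * 2 + 1 would overflow signed int arithmetic, while
+   SCHAR_MAX * 2U + 1U wraps safely as unsigned.  On a typical target
+   with 8-bit chars the values work out as:
+
+     // __SCHAR_MAX__ == 127, so:
+     //   SCHAR_MIN == -SCHAR_MAX - 1  == -128
+     //   UCHAR_MAX == 127 * 2 + 1     ==  255
+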
*/ + +#else /* not _GCC_LIMITS_H_ */ + +#ifdef _GCC_NEXT_LIMITS_H +#include_next <limits.h> /* recurse down to the real one */ +#endif + +#endif /* not _GCC_LIMITS_H_ */ diff --git a/lib/gcc/i686-linux-android/4.6/include-fixed/linux/a.out.h b/lib/gcc/i686-linux-android/4.6/include-fixed/linux/a.out.h new file mode 100644 index 0000000..737b879 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include-fixed/linux/a.out.h @@ -0,0 +1,229 @@ +/* DO NOT EDIT THIS FILE. + + It has been auto-edited by fixincludes from: + + "/tmp/ndk-User/build/toolchain/prefix/sysroot/usr/include/linux/a.out.h" + + This had to be done to correct non-standard usages in the + original, manufacturer supplied header file. */ + +/**************************************************************************** + **************************************************************************** + *** + *** This header was automatically generated from a Linux kernel header + *** of the same name, to make information necessary for userspace to + *** call into the kernel available to libc. It contains only constants, + *** structures, and macros generated from the original header, and thus, + *** contains no copyrightable information. + *** + **************************************************************************** + ****************************************************************************/ +#ifndef __A_OUT_GNU_H__ +#define __A_OUT_GNU_H__ + +#define __GNU_EXEC_MACROS__ + +#ifndef __STRUCT_EXEC_OVERRIDE__ + +#include <asm/a.out.h> + +#endif + +enum machine_type { +#ifdef M_OLDSUN2 + M__OLDSUN2 = M_OLDSUN2, +#else + M_OLDSUN2 = 0, +#endif +#ifdef M_68010 + M__68010 = M_68010, +#else + M_68010 = 1, +#endif +#ifdef M_68020 + M__68020 = M_68020, +#else + M_68020 = 2, +#endif +#ifdef M_SPARC + M__SPARC = M_SPARC, +#else + M_SPARC = 3, +#endif + + M_386 = 100, + M_MIPS1 = 151, + M_MIPS2 = 152 +}; + +#ifndef N_MAGIC +#define N_MAGIC(exec) ((exec).a_info & 0xffff) +#endif +#define N_MACHTYPE(exec) ((enum machine_type)(((exec).a_info >> 16) & 0xff)) +#define N_FLAGS(exec) (((exec).a_info >> 24) & 0xff) +#define N_SET_INFO(exec, magic, type, flags) ((exec).a_info = ((magic) & 0xffff) | (((int)(type) & 0xff) << 16) | (((flags) & 0xff) << 24)) +#define N_SET_MAGIC(exec, magic) ((exec).a_info = (((exec).a_info & 0xffff0000) | ((magic) & 0xffff))) + +#define N_SET_MACHTYPE(exec, machtype) ((exec).a_info = ((exec).a_info&0xff00ffff) | ((((int)(machtype))&0xff) << 16)) + +#define N_SET_FLAGS(exec, flags) ((exec).a_info = ((exec).a_info&0x00ffffff) | (((flags) & 0xff) << 24)) + +#define OMAGIC 0407 + +#define NMAGIC 0410 + +#define ZMAGIC 0413 + +#define QMAGIC 0314 + +#define CMAGIC 0421 + +#ifndef N_BADMAG +#define N_BADMAG(x) (N_MAGIC(x) != OMAGIC && N_MAGIC(x) != NMAGIC && N_MAGIC(x) != ZMAGIC && N_MAGIC(x) != QMAGIC) +#endif + +#define _N_HDROFF(x) (1024 - sizeof (struct exec)) + +#ifndef N_TXTOFF +#define N_TXTOFF(x) (N_MAGIC(x) == ZMAGIC ? _N_HDROFF((x)) + sizeof (struct exec) : (N_MAGIC(x) == QMAGIC ? 0 : sizeof (struct exec))) +#endif + +#ifndef N_DATOFF +#define N_DATOFF(x) (N_TXTOFF(x) + (x).a_text) +#endif + +#ifndef N_TRELOFF +#define N_TRELOFF(x) (N_DATOFF(x) + (x).a_data) +#endif + +#ifndef N_DRELOFF +#define N_DRELOFF(x) (N_TRELOFF(x) + N_TRSIZE(x)) +#endif + +#ifndef N_SYMOFF +#define N_SYMOFF(x) (N_DRELOFF(x) + N_DRSIZE(x)) +#endif + +#ifndef N_STROFF +#define N_STROFF(x) (N_SYMOFF(x) + N_SYMSIZE(x)) +#endif + +#ifndef N_TXTADDR +#define N_TXTADDR(x) (N_MAGIC(x) == QMAGIC ? 
PAGE_SIZE : 0) +#endif + +#if defined(vax) || defined(hp300) || defined(pyr) +#define SEGMENT_SIZE page_size +#endif +#ifdef sony +#define SEGMENT_SIZE 0x2000 +#endif +#ifdef is68k +#define SEGMENT_SIZE 0x20000 +#endif +#if defined(m68k) && defined(PORTAR) +#define PAGE_SIZE 0x400 +#define SEGMENT_SIZE PAGE_SIZE +#endif + +#ifdef __linux__ +#include <asm/page.h> +#if defined(__i386__) || defined(__mc68000__) +#define SEGMENT_SIZE 1024 +#else +#ifndef SEGMENT_SIZE +#define SEGMENT_SIZE PAGE_SIZE +#endif +#endif +#endif + +#define _N_SEGMENT_ROUND(x) ALIGN(x, SEGMENT_SIZE) + +#define _N_TXTENDADDR(x) (N_TXTADDR(x)+(x).a_text) + +#ifndef N_DATADDR +#define N_DATADDR(x) (N_MAGIC(x)==OMAGIC? (_N_TXTENDADDR(x)) : (_N_SEGMENT_ROUND (_N_TXTENDADDR(x)))) +#endif + +#ifndef N_BSSADDR +#define N_BSSADDR(x) (N_DATADDR(x) + (x).a_data) +#endif + +#ifndef N_NLIST_DECLARED +struct nlist { + union { + char *n_name; + struct nlist *n_next; + long n_strx; + } n_un; + unsigned char n_type; + char n_other; + short n_desc; + unsigned long n_value; +}; +#endif + +#ifndef N_UNDF +#define N_UNDF 0 +#endif +#ifndef N_ABS +#define N_ABS 2 +#endif +#ifndef N_TEXT +#define N_TEXT 4 +#endif +#ifndef N_DATA +#define N_DATA 6 +#endif +#ifndef N_BSS +#define N_BSS 8 +#endif +#ifndef N_FN +#define N_FN 15 +#endif + +#ifndef N_EXT +#define N_EXT 1 +#endif +#ifndef N_TYPE +#define N_TYPE 036 +#endif +#ifndef N_STAB +#define N_STAB 0340 +#endif + +#define N_INDR 0xa + +#define N_SETA 0x14 +#define N_SETT 0x16 +#define N_SETD 0x18 +#define N_SETB 0x1A + +#define N_SETV 0x1C + +#ifndef N_RELOCATION_INFO_DECLARED + +struct relocation_info +{ + + int r_address; + + unsigned int r_symbolnum:24; + + unsigned int r_pcrel:1; + + unsigned int r_length:2; + + unsigned int r_extern:1; + +#ifdef NS32K + unsigned r_bsr:1; + unsigned r_disp:1; + unsigned r_pad:2; +#else + unsigned int r_pad:4; +#endif +}; +#endif + +#endif diff --git a/lib/gcc/i686-linux-android/4.6/include-fixed/stdio.h b/lib/gcc/i686-linux-android/4.6/include-fixed/stdio.h new file mode 100644 index 0000000..3656425 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include-fixed/stdio.h @@ -0,0 +1,462 @@ +/* DO NOT EDIT THIS FILE. + + It has been auto-edited by fixincludes from: + + "/tmp/ndk-User/build/toolchain/prefix/sysroot/usr/include/stdio.h" + + This had to be done to correct non-standard usages in the + original, manufacturer supplied header file. */ + +/* $OpenBSD: stdio.h,v 1.35 2006/01/13 18:10:09 miod Exp $ */ +/* $NetBSD: stdio.h,v 1.18 1996/04/25 18:29:21 jtc Exp $ */ + +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Chris Torek. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)stdio.h 5.17 (Berkeley) 6/3/91 + */ + +#ifndef _STDIO_H_ +#define _STDIO_H_ + +#include <sys/cdefs.h> +#include <sys/_types.h> + +/* __gnuc_va_list and size_t must be defined by stdio.h according to Posix */ +#define __need___va_list +#include <stdarg.h> + +/* note that this forces stddef.h to *only* define size_t */ +#define __need_size_t +#include <stddef.h> + +#include <stddef.h> + +#if __BSD_VISIBLE || __POSIX_VISIBLE || __XPG_VISIBLE +#include <sys/types.h> /* XXX should be removed */ +#endif + +#ifndef _SIZE_T_DEFINED_ +#define _SIZE_T_DEFINED_ +typedef unsigned long size_t; +#endif + +#ifndef _OFF_T_DEFINED_ +#define _OFF_T_DEFINED_ +typedef long off_t; +#endif + +#ifndef NULL +#ifdef __GNUG__ +#define NULL __null +#else +#define NULL 0L +#endif +#endif + +#define _FSTDIO /* Define for new stdio with functions. */ + +typedef off_t fpos_t; /* stdio file position type */ + +/* + * NB: to fit things in six character monocase externals, the stdio + * code uses the prefix `__s' for stdio objects, typically followed + * by a three-character attempt at a mnemonic. + */ + +/* stdio buffers */ +struct __sbuf { + unsigned char *_base; + int _size; +}; + +/* + * stdio state variables. + * + * The following always hold: + * + * if (_flags&(__SLBF|__SWR)) == (__SLBF|__SWR), + * _lbfsize is -_bf._size, else _lbfsize is 0 + * if _flags&__SRD, _w is 0 + * if _flags&__SWR, _r is 0 + * + * This ensures that the getc and putc macros (or inline functions) never + * try to write or read from a file that is in `read' or `write' mode. + * (Moreover, they can, and do, automatically switch from read mode to + * write mode, and back, on "r+" and "w+" files.) + * + * _lbfsize is used only to make the inline line-buffered output stream + * code as compact as possible. + * + * _ub, _up, and _ur are used when ungetc() pushes back more characters + * than fit in the current _bf, or when ungetc() pushes back a character + * that does not match the previous one in _bf. When this happens, + * _ub._base becomes non-nil (i.e., a stream has ungetc() data iff + * _ub._base!=NULL) and _up and _ur save the current values of _p and _r. + * + * NOTE: if you change this structure, you also need to update the + * std() initializer in findfp.c. 
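+ *
+ * [Editor's note, not part of the original header.]  The invariants
+ * above can be read directly off a stream; illustration only, using
+ * the field names declared in the __sFILE structure that follows:
+ *
+ *   // for any FILE *fp managed by this stdio:
+ *   //   (fp->_flags & __SRD) implies fp->_w == 0
+ *   //   (fp->_flags & __SWR) implies fp->_r == 0
+ *   //   ungetc() data is pending iff fp->_ub._base != NULL
+ *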
+ */ +typedef struct __sFILE { + unsigned char *_p; /* current position in (some) buffer */ + int _r; /* read space left for getc() */ + int _w; /* write space left for putc() */ + short _flags; /* flags, below; this FILE is free if 0 */ + short _file; /* fileno, if Unix descriptor, else -1 */ + struct __sbuf _bf; /* the buffer (at least 1 byte, if !NULL) */ + int _lbfsize; /* 0 or -_bf._size, for inline putc */ + + /* operations */ + void *_cookie; /* cookie passed to io functions */ + int (*_close)(void *); + int (*_read)(void *, char *, int); + fpos_t (*_seek)(void *, fpos_t, int); + int (*_write)(void *, const char *, int); + + /* extension data, to avoid further ABI breakage */ + struct __sbuf _ext; + /* data for long sequences of ungetc() */ + unsigned char *_up; /* saved _p when _p is doing ungetc data */ + int _ur; /* saved _r when _r is counting ungetc data */ + + /* tricks to meet minimum requirements even when malloc() fails */ + unsigned char _ubuf[3]; /* guarantee an ungetc() buffer */ + unsigned char _nbuf[1]; /* guarantee a getc() buffer */ + + /* separate buffer for fgetln() when line crosses buffer boundary */ + struct __sbuf _lb; /* buffer for fgetln() */ + + /* Unix stdio files get aligned to block boundaries on fseek() */ + int _blksize; /* stat.st_blksize (may be != _bf._size) */ + fpos_t _offset; /* current lseek offset */ +} FILE; + +__BEGIN_DECLS +extern FILE __sF[]; +__END_DECLS + +#define __SLBF 0x0001 /* line buffered */ +#define __SNBF 0x0002 /* unbuffered */ +#define __SRD 0x0004 /* OK to read */ +#define __SWR 0x0008 /* OK to write */ + /* RD and WR are never simultaneously asserted */ +#define __SRW 0x0010 /* open for reading & writing */ +#define __SEOF 0x0020 /* found EOF */ +#define __SERR 0x0040 /* found error */ +#define __SMBF 0x0080 /* _buf is from malloc */ +#define __SAPP 0x0100 /* fdopen()ed in append mode */ +#define __SSTR 0x0200 /* this is an sprintf/snprintf string */ +#define __SOPT 0x0400 /* do fseek() optimisation */ +#define __SNPT 0x0800 /* do not do fseek() optimisation */ +#define __SOFF 0x1000 /* set iff _offset is in fact correct */ +#define __SMOD 0x2000 /* true => fgetln modified _p text */ +#define __SALC 0x4000 /* allocate string space dynamically */ + +/* + * The following three definitions are for ANSI C, which took them + * from System V, which brilliantly took internal interface macros and + * made them official arguments to setvbuf(), without renaming them. + * Hence, these ugly _IOxxx names are *supposed* to appear in user code. + * + * Although numbered as their counterparts above, the implementation + * does not rely on this. + */ +#define _IOFBF 0 /* setvbuf should set fully buffered */ +#define _IOLBF 1 /* setvbuf should set line buffered */ +#define _IONBF 2 /* setvbuf should set unbuffered */ + +#define BUFSIZ 1024 /* size of buffer used by setbuf */ + +#define EOF (-1) + +/* + * FOPEN_MAX is a minimum maximum, and should be the number of descriptors + * that the kernel can provide without allocation of a resource that can + * fail without the process sleeping. Do not use this for anything. + */ +#define FOPEN_MAX 20 /* must be <= OPEN_MAX <sys/syslimits.h> */ +#define FILENAME_MAX 1024 /* must be <= PATH_MAX <sys/syslimits.h> */ + +/* System V/ANSI C; this is the wrong way to do this, do *not* use these. 
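+
+   [Editor's note, not part of the original header.]  The _IOxxx
+   constants defined above are what user code passes to setvbuf(),
+   declared later in this header; a minimal hypothetical use:
+
+     static char buf[BUFSIZ];
+     setvbuf (stdout, buf, _IOFBF, sizeof buf);  // fully buffered
+     setvbuf (stderr, NULL, _IONBF, 0);          // unbuffered
+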
*/ +#if __BSD_VISIBLE || __XPG_VISIBLE +#define P_tmpdir "/tmp/" +#endif +#define L_tmpnam 1024 /* XXX must be == PATH_MAX */ +#define TMP_MAX 308915776 + +#ifndef SEEK_SET +#define SEEK_SET 0 /* set file offset to offset */ +#endif +#ifndef SEEK_CUR +#define SEEK_CUR 1 /* set file offset to current plus offset */ +#endif +#ifndef SEEK_END +#define SEEK_END 2 /* set file offset to EOF plus offset */ +#endif + +#define stdin (&__sF[0]) +#define stdout (&__sF[1]) +#define stderr (&__sF[2]) + +/* + * Functions defined in ANSI C standard. + */ +__BEGIN_DECLS +void clearerr(FILE *); +int fclose(FILE *); +int feof(FILE *); +int ferror(FILE *); +int fflush(FILE *); +int fgetc(FILE *); +int fgetpos(FILE *, fpos_t *); +char *fgets(char *, int, FILE *); +FILE *fopen(const char *, const char *); +int fprintf(FILE *, const char *, ...); +int fputc(int, FILE *); +int fputs(const char *, FILE *); +size_t fread(void *, size_t, size_t, FILE *); +FILE *freopen(const char *, const char *, FILE *); +int fscanf(FILE *, const char *, ...); +int fseek(FILE *, long, int); +int fseeko(FILE *, off_t, int); +int fsetpos(FILE *, const fpos_t *); +long ftell(FILE *); +off_t ftello(FILE *); +size_t fwrite(const void *, size_t, size_t, FILE *); +int getc(FILE *); +int getchar(void); +char *gets(char *); +#if __BSD_VISIBLE && !defined(__SYS_ERRLIST) +#define __SYS_ERRLIST + +extern int sys_nerr; /* perror(3) external variables */ +extern char *sys_errlist[]; +#endif +void perror(const char *); +int printf(const char *, ...); +int putc(int, FILE *); +int putchar(int); +int puts(const char *); +int remove(const char *); +int rename(const char *, const char *); +void rewind(FILE *); +int scanf(const char *, ...); +void setbuf(FILE *, char *); +int setvbuf(FILE *, char *, int, size_t); +int sprintf(char *, const char *, ...); +int sscanf(const char *, const char *, ...); +FILE *tmpfile(void); +char *tmpnam(char *); +int ungetc(int, FILE *); +int vfprintf(FILE *, const char *, __gnuc_va_list); +int vprintf(const char *, __gnuc_va_list); +int vsprintf(char *, const char *, __gnuc_va_list); + +#if __ISO_C_VISIBLE >= 1999 || __BSD_VISIBLE +int snprintf(char *, size_t, const char *, ...) + __attribute__((__format__ (printf, 3, 4))) + __attribute__((__nonnull__ (3))); +int vfscanf(FILE *, const char *, __gnuc_va_list) + __attribute__((__format__ (scanf, 2, 0))) + __attribute__((__nonnull__ (2))); +int vscanf(const char *, __gnuc_va_list) + __attribute__((__format__ (scanf, 1, 0))) + __attribute__((__nonnull__ (1))); +int vsnprintf(char *, size_t, const char *, __gnuc_va_list) + __attribute__((__format__ (printf, 3, 0))) + __attribute__((__nonnull__ (3))); +int vsscanf(const char *, const char *, __gnuc_va_list) + __attribute__((__format__ (scanf, 2, 0))) + __attribute__((__nonnull__ (2))); +#endif /* __ISO_C_VISIBLE >= 1999 || __BSD_VISIBLE */ + +__END_DECLS + + +/* + * Functions defined in POSIX 1003.1. 
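+ *
+ * [Editor's note, not part of the original header.]  The
+ * __attribute__((__format__ (printf, 3, 4))) annotations on the
+ * snprintf() declaration above let GCC type-check format strings at
+ * call sites.  The same annotation works on user-defined wrappers;
+ * a minimal hypothetical example:
+ *
+ *   void log_msg (int level, const char *fmt, ...)
+ *       __attribute__((__format__ (printf, 2, 3)));
+ *   // gcc can now warn about log_msg (1, "%s", 42);
+ *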
+ */ +#if __BSD_VISIBLE || __POSIX_VISIBLE || __XPG_VISIBLE +#define L_ctermid 1024 /* size for ctermid(); PATH_MAX */ +#define L_cuserid 9 /* size for cuserid(); UT_NAMESIZE + 1 */ + +__BEGIN_DECLS +#if 0 /* MISSING FROM BIONIC */ +char *ctermid(char *); +char *cuserid(char *); +#endif /* MISSING */ +FILE *fdopen(int, const char *); +int fileno(FILE *); + +#if (__POSIX_VISIBLE >= 199209) +int pclose(FILE *); +FILE *popen(const char *, const char *); +#endif + +#if __POSIX_VISIBLE >= 199506 +void flockfile(FILE *); +int ftrylockfile(FILE *); +void funlockfile(FILE *); + +/* + * These are normally used through macros as defined below, but POSIX + * requires functions as well. + */ +int getc_unlocked(FILE *); +int getchar_unlocked(void); +int putc_unlocked(int, FILE *); +int putchar_unlocked(int); +#endif /* __POSIX_VISIBLE >= 199506 */ + +#if __XPG_VISIBLE +char *tempnam(const char *, const char *); +#endif +__END_DECLS + +#endif /* __BSD_VISIBLE || __POSIX_VISIBLE || __XPG_VISIBLE */ + +/* + * Routines that are purely local. + */ +#if __BSD_VISIBLE +__BEGIN_DECLS +int asprintf(char **, const char *, ...) + __attribute__((__format__ (printf, 2, 3))) + __attribute__((__nonnull__ (2))); +char *fgetln(FILE *, size_t *); +int fpurge(FILE *); +int getw(FILE *); +int putw(int, FILE *); +void setbuffer(FILE *, char *, int); +int setlinebuf(FILE *); +int vasprintf(char **, const char *, __gnuc_va_list) + __attribute__((__format__ (printf, 2, 0))) + __attribute__((__nonnull__ (2))); +__END_DECLS + +/* + * Stdio function-access interface. + */ +__BEGIN_DECLS +FILE *funopen(const void *, + int (*)(void *, char *, int), + int (*)(void *, const char *, int), + fpos_t (*)(void *, fpos_t, int), + int (*)(void *)); +__END_DECLS +#define fropen(cookie, fn) funopen(cookie, fn, 0, 0, 0) +#define fwopen(cookie, fn) funopen(cookie, 0, fn, 0, 0) +#endif /* __BSD_VISIBLE */ + +/* + * Functions internal to the implementation. + */ +__BEGIN_DECLS +int __srget(FILE *); +int __swbuf(int, FILE *); +__END_DECLS + +/* + * The __sfoo macros are here so that we can + * define function versions in the C library. + */ +#define __sgetc(p) (--(p)->_r < 0 ? __srget(p) : (int)(*(p)->_p++)) +#if defined(__GNUC__) +static __inline int __sputc(int _c, FILE *_p) { + if (--_p->_w >= 0 || (_p->_w >= _p->_lbfsize && (char)_c != '\n')) + return (*_p->_p++ = _c); + else + return (__swbuf(_c, _p)); +} +#else +/* + * This has been tuned to generate reasonable code on the vax using pcc. + */ +#define __sputc(c, p) \ + (--(p)->_w < 0 ? \ + (p)->_w >= (p)->_lbfsize ? \ + (*(p)->_p = (c)), *(p)->_p != '\n' ? 
\ + (int)*(p)->_p++ : \ + __swbuf('\n', p) : \ + __swbuf((int)(c), p) : \ + (*(p)->_p = (c), (int)*(p)->_p++)) +#endif + +#define __sfeof(p) (((p)->_flags & __SEOF) != 0) +#define __sferror(p) (((p)->_flags & __SERR) != 0) +#define __sclearerr(p) ((void)((p)->_flags &= ~(__SERR|__SEOF))) +#define __sfileno(p) ((p)->_file) + +#define feof(p) __sfeof(p) +#define ferror(p) __sferror(p) + +#ifndef _POSIX_THREADS +#define clearerr(p) __sclearerr(p) +#endif + +#if __POSIX_VISIBLE +#define fileno(p) __sfileno(p) +#endif + +#ifndef lint +#ifndef _POSIX_THREADS +#define getc(fp) __sgetc(fp) +#endif /* _POSIX_THREADS */ +#define getc_unlocked(fp) __sgetc(fp) +/* + * The macro implementations of putc and putc_unlocked are not + * fully POSIX compliant; they do not set errno on failure + */ +#if __BSD_VISIBLE +#ifndef _POSIX_THREADS +#define putc(x, fp) __sputc(x, fp) +#endif /* _POSIX_THREADS */ +#define putc_unlocked(x, fp) __sputc(x, fp) +#endif /* __BSD_VISIBLE */ +#endif /* lint */ + +#define getchar() getc(stdin) +#define putchar(x) putc(x, stdout) +#define getchar_unlocked() getc_unlocked(stdin) +#define putchar_unlocked(c) putc_unlocked(c, stdout) + +#ifdef _GNU_SOURCE +/* + * glibc defines dprintf(int, const char*, ...), which is poorly named + * and likely to conflict with locally defined debugging printfs + * fdprintf is a better name, and some programs that use fdprintf use a + * #define fdprintf dprintf for compatibility + */ +int fdprintf(int, const char*, ...); +int vfdprintf(int, const char*, __gnuc_va_list); +#endif /* _GNU_SOURCE */ + +#endif /* _STDIO_H_ */ diff --git a/lib/gcc/i686-linux-android/4.6/include-fixed/sys/types.h b/lib/gcc/i686-linux-android/4.6/include-fixed/sys/types.h new file mode 100644 index 0000000..e36e0d8 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include-fixed/sys/types.h @@ -0,0 +1,140 @@ +/* DO NOT EDIT THIS FILE. + + It has been auto-edited by fixincludes from: + + "/tmp/ndk-User/build/toolchain/prefix/sysroot/usr/include/sys/types.h" + + This had to be done to correct non-standard usages in the + original, manufacturer supplied header file. */ + +/* + * Copyright (C) 2008 The Android Open Source Project + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
*/ +#ifndef _SYS_TYPES_H_ +#define _SYS_TYPES_H_ + +#define __need_size_t +#define __need_ptrdiff_t +#include <stddef.h> +#include <stdint.h> +#include <sys/cdefs.h> + +#include <linux/posix_types.h> +#include <asm/types.h> +#include <linux/types.h> +#include <machine/kernel.h> + +typedef __u32 __kernel_dev_t; + +/* be careful with __kernel_gid_t and __kernel_uid_t + * these are defined as 16-bit for legacy reasons, but + * the kernel uses 32 bits instead. + * + * 32-bit values are required for Android, so use + * __kernel_uid32_t and __kernel_gid32_t + */ + +typedef __kernel_blkcnt_t blkcnt_t; +typedef __kernel_blksize_t blksize_t; +typedef __kernel_clock_t clock_t; +typedef __kernel_clockid_t clockid_t; +typedef __kernel_dev_t dev_t; +typedef __kernel_fsblkcnt_t fsblkcnt_t; +typedef __kernel_fsfilcnt_t fsfilcnt_t; +typedef __kernel_gid32_t gid_t; +typedef __kernel_id_t id_t; +typedef __kernel_ino_t ino_t; +typedef __kernel_key_t key_t; +typedef __kernel_mode_t mode_t; +typedef __kernel_nlink_t nlink_t; +#ifndef _OFF_T_DEFINED_ +#define _OFF_T_DEFINED_ +typedef __kernel_off_t off_t; +#endif +typedef __kernel_loff_t loff_t; +typedef loff_t off64_t; /* GLibc-specific */ + +typedef __kernel_pid_t pid_t; + +/* while POSIX wants these in <sys/types.h>, we + * declare them in <pthread.h> instead */ +#if 0 +typedef .... pthread_attr_t; +typedef .... pthread_cond_t; +typedef .... pthread_condattr_t; +typedef .... pthread_key_t; +typedef .... pthread_mutex_t; +typedef .... pthread_once_t; +typedef .... pthread_rwlock_t; +typedef .... pthread_rwlock_attr_t; +typedef .... pthread_t; +#endif + +#ifndef _SIZE_T_DEFINED_ +#define _SIZE_T_DEFINED_ +#if !defined(_GCC_SIZE_T) +#define _GCC_SIZE_T +typedef __SIZE_TYPE__ size_t; +#endif + +#endif + +/* size_t is defined by the GCC-specific <stddef.h> */ +#ifndef _SSIZE_T_DEFINED_ +#define _SSIZE_T_DEFINED_ +typedef long int ssize_t; +#endif + +typedef __kernel_suseconds_t suseconds_t; +typedef __kernel_time_t time_t; +typedef __kernel_uid32_t uid_t; +typedef signed long useconds_t; + +typedef __kernel_daddr_t daddr_t; +typedef __kernel_timer_t timer_t; +typedef __kernel_mqd_t mqd_t; + +typedef __kernel_caddr_t caddr_t; +typedef unsigned int uint_t; +typedef unsigned int uint; + +/* for some applications */ +#include <sys/sysmacros.h> + +#ifdef __BSD_VISIBLE +typedef unsigned char u_char; +typedef unsigned short u_short; +typedef unsigned int u_int; +typedef unsigned long u_long; + +typedef uint32_t u_int32_t; +typedef uint16_t u_int16_t; +typedef uint8_t u_int8_t; +typedef uint64_t u_int64_t; +#endif + +#endif diff --git a/lib/gcc/i686-linux-android/4.6/include-fixed/syslimits.h b/lib/gcc/i686-linux-android/4.6/include-fixed/syslimits.h new file mode 100644 index 0000000..a362802 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include-fixed/syslimits.h @@ -0,0 +1,8 @@ +/* syslimits.h stands for the system's own limits.h file. + If we can use it ok unmodified, then we install this text. + If fixincludes fixes it, then the fixed version is installed + instead of this text. */ + +#define _GCC_NEXT_LIMITS_H /* tell gcc's limits.h to recurse */ +#include_next <limits.h> +#undef _GCC_NEXT_LIMITS_H diff --git a/lib/gcc/i686-linux-android/4.6/include/abmintrin.h b/lib/gcc/i686-linux-android/4.6/include/abmintrin.h new file mode 100644 index 0000000..9d87f57 --- --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/abmintrin.h @@ -0,0 +1,55 @@ +/* Copyright (C) 2009 Free Software Foundation, Inc. + + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _X86INTRIN_H_INCLUDED +# error "Never use <abmintrin.h> directly; include <x86intrin.h> instead." +#endif + +#ifndef __ABM__ +# error "ABM instruction set not enabled" +#endif /* __ABM__ */ + +#ifndef _ABMINTRIN_H_INCLUDED +#define _ABMINTRIN_H_INCLUDED + +extern __inline unsigned short __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__lzcnt16 (unsigned short __X) +{ + return __builtin_clzs (__X); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__lzcnt (unsigned int __X) +{ + return __builtin_clz (__X); +} + +#ifdef __x86_64__ +extern __inline unsigned long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__lzcnt64 (unsigned long __X) +{ + return __builtin_clzl (__X); +} +#endif + +#endif /* _ABMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-linux-android/4.6/include/ammintrin.h b/lib/gcc/i686-linux-android/4.6/include/ammintrin.h new file mode 100644 index 0000000..3647b31 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/ammintrin.h @@ -0,0 +1,88 @@ +/* Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. 
*/ + +/* Implemented from the specification included in the AMD Programmers + Manual Update, version 2.x */ + +#ifndef _AMMINTRIN_H_INCLUDED +#define _AMMINTRIN_H_INCLUDED + +#ifndef __SSE4A__ +# error "SSE4A instruction set not enabled" +#else + +/* We need definitions from the SSE3, SSE2 and SSE header files*/ +#include <pmmintrin.h> + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_sd (double * __P, __m128d __Y) +{ + __builtin_ia32_movntsd (__P, (__v2df) __Y); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_ss (float * __P, __m128 __Y) +{ + __builtin_ia32_movntss (__P, (__v4sf) __Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_extract_si64 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_extrq ((__v2di) __X, (__v16qi) __Y); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_extracti_si64 (__m128i __X, unsigned const int __I, unsigned const int __L) +{ + return (__m128i) __builtin_ia32_extrqi ((__v2di) __X, __I, __L); +} +#else +#define _mm_extracti_si64(X, I, L) \ + ((__m128i) __builtin_ia32_extrqi ((__v2di)(__m128i)(X), \ + (unsigned int)(I), (unsigned int)(L))) +#endif + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_insert_si64 (__m128i __X,__m128i __Y) +{ + return (__m128i) __builtin_ia32_insertq ((__v2di)__X, (__v2di)__Y); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_inserti_si64(__m128i __X, __m128i __Y, unsigned const int __I, unsigned const int __L) +{ + return (__m128i) __builtin_ia32_insertqi ((__v2di)__X, (__v2di)__Y, __I, __L); +} +#else +#define _mm_inserti_si64(X, Y, I, L) \ + ((__m128i) __builtin_ia32_insertqi ((__v2di)(__m128i)(X), \ + (__v2di)(__m128i)(Y), \ + (unsigned int)(I), (unsigned int)(L))) +#endif + +#endif /* __SSE4A__ */ + +#endif /* _AMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-linux-android/4.6/include/avxintrin.h b/lib/gcc/i686-linux-android/4.6/include/avxintrin.h new file mode 100644 index 0000000..6d4213d --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/avxintrin.h @@ -0,0 +1,1426 @@ +/* Copyright (C) 2008, 2009 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 11.0. 
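+
+   [Editor's note, not part of the original header.]  User code is
+   expected to reach these intrinsics through <immintrin.h> (enforced
+   by the guard just below) and to compile with -mavx.  A minimal
+   hypothetical use of the vector types and arithmetic this header
+   defines:
+
+     #include <immintrin.h>
+     __m256d sum4 (__m256d a, __m256d b)
+     {
+       return _mm256_add_pd (a, b);  // four packed doubles at once
+     }
+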
*/ + +#ifndef _IMMINTRIN_H_INCLUDED +# error "Never use <avxintrin.h> directly; include <immintrin.h> instead." +#endif + +/* Internal data types for implementing the intrinsics. */ +typedef double __v4df __attribute__ ((__vector_size__ (32))); +typedef float __v8sf __attribute__ ((__vector_size__ (32))); +typedef long long __v4di __attribute__ ((__vector_size__ (32))); +typedef int __v8si __attribute__ ((__vector_size__ (32))); +typedef short __v16hi __attribute__ ((__vector_size__ (32))); +typedef char __v32qi __attribute__ ((__vector_size__ (32))); + +/* The Intel API is flexible enough that we must allow aliasing with other + vector types, and their scalar components. */ +typedef float __m256 __attribute__ ((__vector_size__ (32), + __may_alias__)); +typedef long long __m256i __attribute__ ((__vector_size__ (32), + __may_alias__)); +typedef double __m256d __attribute__ ((__vector_size__ (32), + __may_alias__)); + +/* Compare predicates for scalar and packed compare intrinsics. */ + +/* Equal (ordered, non-signaling) */ +#define _CMP_EQ_OQ 0x00 +/* Less-than (ordered, signaling) */ +#define _CMP_LT_OS 0x01 +/* Less-than-or-equal (ordered, signaling) */ +#define _CMP_LE_OS 0x02 +/* Unordered (non-signaling) */ +#define _CMP_UNORD_Q 0x03 +/* Not-equal (unordered, non-signaling) */ +#define _CMP_NEQ_UQ 0x04 +/* Not-less-than (unordered, signaling) */ +#define _CMP_NLT_US 0x05 +/* Not-less-than-or-equal (unordered, signaling) */ +#define _CMP_NLE_US 0x06 +/* Ordered (nonsignaling) */ +#define _CMP_ORD_Q 0x07 +/* Equal (unordered, non-signaling) */ +#define _CMP_EQ_UQ 0x08 +/* Not-greater-than-or-equal (unordered, signaling) */ +#define _CMP_NGE_US 0x09 +/* Not-greater-than (unordered, signaling) */ +#define _CMP_NGT_US 0x0a +/* False (ordered, non-signaling) */ +#define _CMP_FALSE_OQ 0x0b +/* Not-equal (ordered, non-signaling) */ +#define _CMP_NEQ_OQ 0x0c +/* Greater-than-or-equal (ordered, signaling) */ +#define _CMP_GE_OS 0x0d +/* Greater-than (ordered, signaling) */ +#define _CMP_GT_OS 0x0e +/* True (unordered, non-signaling) */ +#define _CMP_TRUE_UQ 0x0f +/* Equal (ordered, signaling) */ +#define _CMP_EQ_OS 0x10 +/* Less-than (ordered, non-signaling) */ +#define _CMP_LT_OQ 0x11 +/* Less-than-or-equal (ordered, non-signaling) */ +#define _CMP_LE_OQ 0x12 +/* Unordered (signaling) */ +#define _CMP_UNORD_S 0x13 +/* Not-equal (unordered, signaling) */ +#define _CMP_NEQ_US 0x14 +/* Not-less-than (unordered, non-signaling) */ +#define _CMP_NLT_UQ 0x15 +/* Not-less-than-or-equal (unordered, non-signaling) */ +#define _CMP_NLE_UQ 0x16 +/* Ordered (signaling) */ +#define _CMP_ORD_S 0x17 +/* Equal (unordered, signaling) */ +#define _CMP_EQ_US 0x18 +/* Not-greater-than-or-equal (unordered, non-signaling) */ +#define _CMP_NGE_UQ 0x19 +/* Not-greater-than (unordered, non-signaling) */ +#define _CMP_NGT_UQ 0x1a +/* False (ordered, signaling) */ +#define _CMP_FALSE_OS 0x1b +/* Not-equal (ordered, signaling) */ +#define _CMP_NEQ_OS 0x1c +/* Greater-than-or-equal (ordered, non-signaling) */ +#define _CMP_GE_OQ 0x1d +/* Greater-than (ordered, non-signaling) */ +#define _CMP_GT_OQ 0x1e +/* True (unordered, signaling) */ +#define _CMP_TRUE_US 0x1f + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_add_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_addpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_add_ps (__m256 __A, __m256 __B) +{ + return (__m256) 
__builtin_ia32_addps256 ((__v8sf)__A, (__v8sf)__B); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_addsub_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_addsubpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_addsub_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_addsubps256 ((__v8sf)__A, (__v8sf)__B); +} + + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_and_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_andpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_and_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_andps256 ((__v8sf)__A, (__v8sf)__B); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_andnot_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_andnpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_andnot_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_andnps256 ((__v8sf)__A, (__v8sf)__B); +} + +/* Double/single precision floating point blend instructions - select + data from 2 sources using constant/variable mask. */ + +#ifdef __OPTIMIZE__ +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_blend_pd (__m256d __X, __m256d __Y, const int __M) +{ + return (__m256d) __builtin_ia32_blendpd256 ((__v4df)__X, + (__v4df)__Y, + __M); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_blend_ps (__m256 __X, __m256 __Y, const int __M) +{ + return (__m256) __builtin_ia32_blendps256 ((__v8sf)__X, + (__v8sf)__Y, + __M); +} +#else +#define _mm256_blend_pd(X, Y, M) \ + ((__m256d) __builtin_ia32_blendpd256 ((__v4df)(__m256d)(X), \ + (__v4df)(__m256d)(Y), (int)(M))) + +#define _mm256_blend_ps(X, Y, M) \ + ((__m256) __builtin_ia32_blendps256 ((__v8sf)(__m256)(X), \ + (__v8sf)(__m256)(Y), (int)(M))) +#endif + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_blendv_pd (__m256d __X, __m256d __Y, __m256d __M) +{ + return (__m256d) __builtin_ia32_blendvpd256 ((__v4df)__X, + (__v4df)__Y, + (__v4df)__M); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_blendv_ps (__m256 __X, __m256 __Y, __m256 __M) +{ + return (__m256) __builtin_ia32_blendvps256 ((__v8sf)__X, + (__v8sf)__Y, + (__v8sf)__M); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_div_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_divpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_div_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_divps256 ((__v8sf)__A, (__v8sf)__B); +} + +/* Dot product instructions with mask-defined summing and zeroing parts + of result. 
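+
+   [Editor's note, not part of the original header.]  For
+   _mm256_dp_ps() below, the high four bits of the mask select which
+   elements of each 128-bit lane enter the multiply-and-add, and the
+   low four bits select which result slots receive the sum (the rest
+   are zeroed); the 256-bit form applies this per 128-bit lane.  A
+   hypothetical call:
+
+     // per lane: sum x[i] * y[i] over all i, store in slot 0 only
+     __m256 d = _mm256_dp_ps (x, y, 0xF1);
+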
*/ + +#ifdef __OPTIMIZE__ +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_dp_ps (__m256 __X, __m256 __Y, const int __M) +{ + return (__m256) __builtin_ia32_dpps256 ((__v8sf)__X, + (__v8sf)__Y, + __M); +} +#else +#define _mm256_dp_ps(X, Y, M) \ + ((__m256) __builtin_ia32_dpps256 ((__v8sf)(__m256)(X), \ + (__v8sf)(__m256)(Y), (int)(M))) +#endif + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_hadd_pd (__m256d __X, __m256d __Y) +{ + return (__m256d) __builtin_ia32_haddpd256 ((__v4df)__X, (__v4df)__Y); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_hadd_ps (__m256 __X, __m256 __Y) +{ + return (__m256) __builtin_ia32_haddps256 ((__v8sf)__X, (__v8sf)__Y); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_hsub_pd (__m256d __X, __m256d __Y) +{ + return (__m256d) __builtin_ia32_hsubpd256 ((__v4df)__X, (__v4df)__Y); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_hsub_ps (__m256 __X, __m256 __Y) +{ + return (__m256) __builtin_ia32_hsubps256 ((__v8sf)__X, (__v8sf)__Y); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_max_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_maxpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_max_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_maxps256 ((__v8sf)__A, (__v8sf)__B); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_min_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_minpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_min_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_minps256 ((__v8sf)__A, (__v8sf)__B); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mul_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_mulpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mul_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_mulps256 ((__v8sf)__A, (__v8sf)__B); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_or_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_orpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_or_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_orps256 ((__v8sf)__A, (__v8sf)__B); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shuffle_pd (__m256d __A, __m256d __B, const int __mask) +{ + return (__m256d) __builtin_ia32_shufpd256 ((__v4df)__A, (__v4df)__B, + __mask); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shuffle_ps (__m256 __A, __m256 __B, const int __mask) +{ + return (__m256) __builtin_ia32_shufps256 ((__v8sf)__A, (__v8sf)__B, + __mask); +} +#else +#define _mm256_shuffle_pd(A, B, N) \ + ((__m256d)__builtin_ia32_shufpd256 ((__v4df)(__m256d)(A), \ + 
(__v4df)(__m256d)(B), (int)(N))) + +#define _mm256_shuffle_ps(A, B, N) \ + ((__m256) __builtin_ia32_shufps256 ((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), (int)(N))) +#endif + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sub_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_subpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sub_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_subps256 ((__v8sf)__A, (__v8sf)__B); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_xor_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_xorpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_xor_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_xorps256 ((__v8sf)__A, (__v8sf)__B); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_pd (__m128d __X, __m128d __Y, const int __P) +{ + return (__m128d) __builtin_ia32_cmppd ((__v2df)__X, (__v2df)__Y, __P); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_ps (__m128 __X, __m128 __Y, const int __P) +{ + return (__m128) __builtin_ia32_cmpps ((__v4sf)__X, (__v4sf)__Y, __P); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmp_pd (__m256d __X, __m256d __Y, const int __P) +{ + return (__m256d) __builtin_ia32_cmppd256 ((__v4df)__X, (__v4df)__Y, + __P); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmp_ps (__m256 __X, __m256 __Y, const int __P) +{ + return (__m256) __builtin_ia32_cmpps256 ((__v8sf)__X, (__v8sf)__Y, + __P); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_sd (__m128d __X, __m128d __Y, const int __P) +{ + return (__m128d) __builtin_ia32_cmpsd ((__v2df)__X, (__v2df)__Y, __P); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_ss (__m128 __X, __m128 __Y, const int __P) +{ + return (__m128) __builtin_ia32_cmpss ((__v4sf)__X, (__v4sf)__Y, __P); +} +#else +#define _mm_cmp_pd(X, Y, P) \ + ((__m128d) __builtin_ia32_cmppd ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P))) + +#define _mm_cmp_ps(X, Y, P) \ + ((__m128) __builtin_ia32_cmpps ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P))) + +#define _mm256_cmp_pd(X, Y, P) \ + ((__m256d) __builtin_ia32_cmppd256 ((__v4df)(__m256d)(X), \ + (__v4df)(__m256d)(Y), (int)(P))) + +#define _mm256_cmp_ps(X, Y, P) \ + ((__m256) __builtin_ia32_cmpps256 ((__v8sf)(__m256)(X), \ + (__v8sf)(__m256)(Y), (int)(P))) + +#define _mm_cmp_sd(X, Y, P) \ + ((__m128d) __builtin_ia32_cmpsd ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P))) + +#define _mm_cmp_ss(X, Y, P) \ + ((__m128) __builtin_ia32_cmpss ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P))) +#endif + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepi32_pd (__m128i __A) +{ + return (__m256d)__builtin_ia32_cvtdq2pd256 ((__v4si) __A); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepi32_ps (__m256i __A) +{ + return (__m256)__builtin_ia32_cvtdq2ps256 
((__v8si) __A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtpd_ps (__m256d __A) +{ + return (__m128)__builtin_ia32_cvtpd2ps256 ((__v4df) __A); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtps_epi32 (__m256 __A) +{ + return (__m256i)__builtin_ia32_cvtps2dq256 ((__v8sf) __A); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtps_pd (__m128 __A) +{ + return (__m256d)__builtin_ia32_cvtps2pd256 ((__v4sf) __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvttpd_epi32 (__m256d __A) +{ + return (__m128i)__builtin_ia32_cvttpd2dq256 ((__v4df) __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtpd_epi32 (__m256d __A) +{ + return (__m128i)__builtin_ia32_cvtpd2dq256 ((__v4df) __A); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvttps_epi32 (__m256 __A) +{ + return (__m256i)__builtin_ia32_cvttps2dq256 ((__v8sf) __A); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_extractf128_pd (__m256d __X, const int __N) +{ + return (__m128d) __builtin_ia32_vextractf128_pd256 ((__v4df)__X, __N); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_extractf128_ps (__m256 __X, const int __N) +{ + return (__m128) __builtin_ia32_vextractf128_ps256 ((__v8sf)__X, __N); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_extractf128_si256 (__m256i __X, const int __N) +{ + return (__m128i) __builtin_ia32_vextractf128_si256 ((__v8si)__X, __N); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_extract_epi32 (__m256i __X, int const __N) +{ + __m128i __Y = _mm256_extractf128_si256 (__X, __N >> 2); + return _mm_extract_epi32 (__Y, __N % 4); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_extract_epi16 (__m256i __X, int const __N) +{ + __m128i __Y = _mm256_extractf128_si256 (__X, __N >> 3); + return _mm_extract_epi16 (__Y, __N % 8); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_extract_epi8 (__m256i __X, int const __N) +{ + __m128i __Y = _mm256_extractf128_si256 (__X, __N >> 4); + return _mm_extract_epi8 (__Y, __N % 16); +} + +#ifdef __x86_64__ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_extract_epi64 (__m256i __X, const int __N) +{ + __m128i __Y = _mm256_extractf128_si256 (__X, __N >> 1); + return _mm_extract_epi64 (__Y, __N % 2); +} +#endif +#else +#define _mm256_extractf128_pd(X, N) \ + ((__m128d) __builtin_ia32_vextractf128_pd256 ((__v4df)(__m256d)(X), \ + (int)(N))) + +#define _mm256_extractf128_ps(X, N) \ + ((__m128) __builtin_ia32_vextractf128_ps256 ((__v8sf)(__m256)(X), \ + (int)(N))) + +#define _mm256_extractf128_si256(X, N) \ + ((__m128i) __builtin_ia32_vextractf128_si256 ((__v8si)(__m256i)(X), \ + (int)(N))) + +#define _mm256_extract_epi32(X, N) \ + (__extension__ \ + ({ \ + __m128i __Y = _mm256_extractf128_si256 ((X), (N) >> 2); \ + _mm_extract_epi32 (__Y, (N) % 4); \ + })) + +#define _mm256_extract_epi16(X, N) \ + (__extension__ \ + ({ \ + __m128i __Y = 
_mm256_extractf128_si256 ((X), (N) >> 3); \ + _mm_extract_epi16 (__Y, (N) % 8); \ + })) + +#define _mm256_extract_epi8(X, N) \ + (__extension__ \ + ({ \ + __m128i __Y = _mm256_extractf128_si256 ((X), (N) >> 4); \ + _mm_extract_epi8 (__Y, (N) % 16); \ + })) + +#ifdef __x86_64__ +#define _mm256_extract_epi64(X, N) \ + (__extension__ \ + ({ \ + __m128i __Y = _mm256_extractf128_si256 ((X), (N) >> 1); \ + _mm_extract_epi64 (__Y, (N) % 2); \ + })) +#endif +#endif + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_zeroall (void) +{ + __builtin_ia32_vzeroall (); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_zeroupper (void) +{ + __builtin_ia32_vzeroupper (); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permutevar_pd (__m128d __A, __m128i __C) +{ + return (__m128d) __builtin_ia32_vpermilvarpd ((__v2df)__A, + (__v2di)__C); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permutevar_pd (__m256d __A, __m256i __C) +{ + return (__m256d) __builtin_ia32_vpermilvarpd256 ((__v4df)__A, + (__v4di)__C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permutevar_ps (__m128 __A, __m128i __C) +{ + return (__m128) __builtin_ia32_vpermilvarps ((__v4sf)__A, + (__v4si)__C); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permutevar_ps (__m256 __A, __m256i __C) +{ + return (__m256) __builtin_ia32_vpermilvarps256 ((__v8sf)__A, + (__v8si)__C); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permute_pd (__m128d __X, const int __C) +{ + return (__m128d) __builtin_ia32_vpermilpd ((__v2df)__X, __C); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permute_pd (__m256d __X, const int __C) +{ + return (__m256d) __builtin_ia32_vpermilpd256 ((__v4df)__X, __C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permute_ps (__m128 __X, const int __C) +{ + return (__m128) __builtin_ia32_vpermilps ((__v4sf)__X, __C); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permute_ps (__m256 __X, const int __C) +{ + return (__m256) __builtin_ia32_vpermilps256 ((__v8sf)__X, __C); +} +#else +#define _mm_permute_pd(X, C) \ + ((__m128d) __builtin_ia32_vpermilpd ((__v2df)(__m128d)(X), (int)(C))) + +#define _mm256_permute_pd(X, C) \ + ((__m256d) __builtin_ia32_vpermilpd256 ((__v4df)(__m256d)(X), (int)(C))) + +#define _mm_permute_ps(X, C) \ + ((__m128) __builtin_ia32_vpermilps ((__v4sf)(__m128)(X), (int)(C))) + +#define _mm256_permute_ps(X, C) \ + ((__m256) __builtin_ia32_vpermilps256 ((__v8sf)(__m256)(X), (int)(C))) +#endif + +#ifdef __OPTIMIZE__ +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permute2f128_pd (__m256d __X, __m256d __Y, const int __C) +{ + return (__m256d) __builtin_ia32_vperm2f128_pd256 ((__v4df)__X, + (__v4df)__Y, + __C); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permute2f128_ps (__m256 __X, __m256 __Y, const int __C) +{ + return (__m256) __builtin_ia32_vperm2f128_ps256 ((__v8sf)__X, + (__v8sf)__Y, + __C); +} + +extern __inline __m256i __attribute__((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm256_permute2f128_si256 (__m256i __X, __m256i __Y, const int __C) +{ + return (__m256i) __builtin_ia32_vperm2f128_si256 ((__v8si)__X, + (__v8si)__Y, + __C); +} +#else +#define _mm256_permute2f128_pd(X, Y, C) \ + ((__m256d) __builtin_ia32_vperm2f128_pd256 ((__v4df)(__m256d)(X), \ + (__v4df)(__m256d)(Y), \ + (int)(C))) + +#define _mm256_permute2f128_ps(X, Y, C) \ + ((__m256) __builtin_ia32_vperm2f128_ps256 ((__v8sf)(__m256)(X), \ + (__v8sf)(__m256)(Y), \ + (int)(C))) + +#define _mm256_permute2f128_si256(X, Y, C) \ + ((__m256i) __builtin_ia32_vperm2f128_si256 ((__v8si)(__m256i)(X), \ + (__v8si)(__m256i)(Y), \ + (int)(C))) +#endif + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_broadcast_ss (float const *__X) +{ + return (__m128) __builtin_ia32_vbroadcastss (__X); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcast_sd (double const *__X) +{ + return (__m256d) __builtin_ia32_vbroadcastsd256 (__X); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcast_ss (float const *__X) +{ + return (__m256) __builtin_ia32_vbroadcastss256 (__X); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcast_pd (__m128d const *__X) +{ + return (__m256d) __builtin_ia32_vbroadcastf128_pd256 (__X); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcast_ps (__m128 const *__X) +{ + return (__m256) __builtin_ia32_vbroadcastf128_ps256 (__X); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_insertf128_pd (__m256d __X, __m128d __Y, const int __O) +{ + return (__m256d) __builtin_ia32_vinsertf128_pd256 ((__v4df)__X, + (__v2df)__Y, + __O); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_insertf128_ps (__m256 __X, __m128 __Y, const int __O) +{ + return (__m256) __builtin_ia32_vinsertf128_ps256 ((__v8sf)__X, + (__v4sf)__Y, + __O); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_insertf128_si256 (__m256i __X, __m128i __Y, const int __O) +{ + return (__m256i) __builtin_ia32_vinsertf128_si256 ((__v8si)__X, + (__v4si)__Y, + __O); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_insert_epi32 (__m256i __X, int __D, int const __N) +{ + __m128i __Y = _mm256_extractf128_si256 (__X, __N >> 2); + __Y = _mm_insert_epi32 (__Y, __D, __N % 4); + return _mm256_insertf128_si256 (__X, __Y, __N >> 2); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_insert_epi16 (__m256i __X, int __D, int const __N) +{ + __m128i __Y = _mm256_extractf128_si256 (__X, __N >> 3); + __Y = _mm_insert_epi16 (__Y, __D, __N % 8); + return _mm256_insertf128_si256 (__X, __Y, __N >> 3); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_insert_epi8 (__m256i __X, int __D, int const __N) +{ + __m128i __Y = _mm256_extractf128_si256 (__X, __N >> 4); + __Y = _mm_insert_epi8 (__Y, __D, __N % 16); + return _mm256_insertf128_si256 (__X, __Y, __N >> 4); +} + +#ifdef __x86_64__ +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_insert_epi64 (__m256i __X, long long 
__D, int const __N) +{ + __m128i __Y = _mm256_extractf128_si256 (__X, __N >> 1); + __Y = _mm_insert_epi64 (__Y, __D, __N % 2); + return _mm256_insertf128_si256 (__X, __Y, __N >> 1); +} +#endif +#else +#define _mm256_insertf128_pd(X, Y, O) \ + ((__m256d) __builtin_ia32_vinsertf128_pd256 ((__v4df)(__m256d)(X), \ + (__v2df)(__m128d)(Y), \ + (int)(O))) + +#define _mm256_insertf128_ps(X, Y, O) \ + ((__m256) __builtin_ia32_vinsertf128_ps256 ((__v8sf)(__m256)(X), \ + (__v4sf)(__m128)(Y), \ + (int)(O))) + +#define _mm256_insertf128_si256(X, Y, O) \ + ((__m256i) __builtin_ia32_vinsertf128_si256 ((__v8si)(__m256i)(X), \ + (__v4si)(__m128i)(Y), \ + (int)(O))) + +#define _mm256_insert_epi32(X, D, N) \ + (__extension__ \ + ({ \ + __m128i __Y = _mm256_extractf128_si256 ((X), (N) >> 2); \ + __Y = _mm_insert_epi32 (__Y, (D), (N) % 4); \ + _mm256_insertf128_si256 ((X), __Y, (N) >> 2); \ + })) + +#define _mm256_insert_epi16(X, D, N) \ + (__extension__ \ + ({ \ + __m128i __Y = _mm256_extractf128_si256 ((X), (N) >> 3); \ + __Y = _mm_insert_epi16 (__Y, (D), (N) % 8); \ + _mm256_insertf128_si256 ((X), __Y, (N) >> 3); \ + })) + +#define _mm256_insert_epi8(X, D, N) \ + (__extension__ \ + ({ \ + __m128i __Y = _mm256_extractf128_si256 ((X), (N) >> 4); \ + __Y = _mm_insert_epi8 (__Y, (D), (N) % 16); \ + _mm256_insertf128_si256 ((X), __Y, (N) >> 4); \ + })) + +#ifdef __x86_64__ +#define _mm256_insert_epi64(X, D, N) \ + (__extension__ \ + ({ \ + __m128i __Y = _mm256_extractf128_si256 ((X), (N) >> 1); \ + __Y = _mm_insert_epi64 (__Y, (D), (N) % 2); \ + _mm256_insertf128_si256 ((X), __Y, (N) >> 1); \ + })) +#endif +#endif + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_load_pd (double const *__P) +{ + return *(__m256d *)__P; +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_store_pd (double *__P, __m256d __A) +{ + *(__m256d *)__P = __A; +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_load_ps (float const *__P) +{ + return *(__m256 *)__P; +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_store_ps (float *__P, __m256 __A) +{ + *(__m256 *)__P = __A; +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_loadu_pd (double const *__P) +{ + return (__m256d) __builtin_ia32_loadupd256 (__P); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_storeu_pd (double *__P, __m256d __A) +{ + __builtin_ia32_storeupd256 (__P, (__v4df)__A); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_loadu_ps (float const *__P) +{ + return (__m256) __builtin_ia32_loadups256 (__P); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_storeu_ps (float *__P, __m256 __A) +{ + __builtin_ia32_storeups256 (__P, (__v8sf)__A); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_load_si256 (__m256i const *__P) +{ + return *__P; +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_store_si256 (__m256i *__P, __m256i __A) +{ + *__P = __A; +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_loadu_si256 (__m256i const *__P) +{ + return (__m256i) __builtin_ia32_loaddqu256 ((char const *)__P); +} + +extern 
__inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_storeu_si256 (__m256i *__P, __m256i __A) +{ + __builtin_ia32_storedqu256 ((char *)__P, (__v32qi)__A); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskload_pd (double const *__P, __m128i __M) +{ + return (__m128d) __builtin_ia32_maskloadpd ((const __v2df *)__P, + (__v2di)__M); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskstore_pd (double *__P, __m128i __M, __m128d __A) +{ + __builtin_ia32_maskstorepd ((__v2df *)__P, (__v2di)__M, (__v2df)__A); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskload_pd (double const *__P, __m256i __M) +{ + return (__m256d) __builtin_ia32_maskloadpd256 ((const __v4df *)__P, + (__v4di)__M); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskstore_pd (double *__P, __m256i __M, __m256d __A) +{ + __builtin_ia32_maskstorepd256 ((__v4df *)__P, (__v4di)__M, (__v4df)__A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskload_ps (float const *__P, __m128i __M) +{ + return (__m128) __builtin_ia32_maskloadps ((const __v4sf *)__P, + (__v4si)__M); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskstore_ps (float *__P, __m128i __M, __m128 __A) +{ + __builtin_ia32_maskstoreps ((__v4sf *)__P, (__v4si)__M, (__v4sf)__A); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskload_ps (float const *__P, __m256i __M) +{ + return (__m256) __builtin_ia32_maskloadps256 ((const __v8sf *)__P, + (__v8si)__M); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskstore_ps (float *__P, __m256i __M, __m256 __A) +{ + __builtin_ia32_maskstoreps256 ((__v8sf *)__P, (__v8si)__M, (__v8sf)__A); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_movehdup_ps (__m256 __X) +{ + return (__m256) __builtin_ia32_movshdup256 ((__v8sf)__X); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_moveldup_ps (__m256 __X) +{ + return (__m256) __builtin_ia32_movsldup256 ((__v8sf)__X); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_movedup_pd (__m256d __X) +{ + return (__m256d) __builtin_ia32_movddup256 ((__v4df)__X); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_lddqu_si256 (__m256i const *__P) +{ + return (__m256i) __builtin_ia32_lddqu256 ((char const *)__P); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_stream_si256 (__m256i *__A, __m256i __B) +{ + __builtin_ia32_movntdq256 ((__v4di *)__A, (__v4di)__B); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_stream_pd (double *__A, __m256d __B) +{ + __builtin_ia32_movntpd256 (__A, (__v4df)__B); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_stream_ps (float *__P, __m256 __A) +{ + __builtin_ia32_movntps256 (__P, (__v8sf)__A); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_rcp_ps (__m256 __A) +{ + return (__m256) 
__builtin_ia32_rcpps256 ((__v8sf)__A); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_rsqrt_ps (__m256 __A) +{ + return (__m256) __builtin_ia32_rsqrtps256 ((__v8sf)__A); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sqrt_pd (__m256d __A) +{ + return (__m256d) __builtin_ia32_sqrtpd256 ((__v4df)__A); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sqrt_ps (__m256 __A) +{ + return (__m256) __builtin_ia32_sqrtps256 ((__v8sf)__A); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_round_pd (__m256d __V, const int __M) +{ + return (__m256d) __builtin_ia32_roundpd256 ((__v4df)__V, __M); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_round_ps (__m256 __V, const int __M) +{ + return (__m256) __builtin_ia32_roundps256 ((__v8sf)__V, __M); +} +#else +#define _mm256_round_pd(V, M) \ + ((__m256d) __builtin_ia32_roundpd256 ((__v4df)(__m256d)(V), (int)(M))) + +#define _mm256_round_ps(V, M) \ + ((__m256) __builtin_ia32_roundps256 ((__v8sf)(__m256)(V), (int)(M))) +#endif + +#define _mm256_ceil_pd(V) _mm256_round_pd ((V), _MM_FROUND_CEIL) +#define _mm256_floor_pd(V) _mm256_round_pd ((V), _MM_FROUND_FLOOR) +#define _mm256_ceil_ps(V) _mm256_round_ps ((V), _MM_FROUND_CEIL) +#define _mm256_floor_ps(V) _mm256_round_ps ((V), _MM_FROUND_FLOOR) + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_unpackhi_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_unpckhpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_unpacklo_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_unpcklpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_unpackhi_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_unpckhps256 ((__v8sf)__A, (__v8sf)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_unpacklo_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_unpcklps256 ((__v8sf)__A, (__v8sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testz_pd (__m128d __M, __m128d __V) +{ + return __builtin_ia32_vtestzpd ((__v2df)__M, (__v2df)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testc_pd (__m128d __M, __m128d __V) +{ + return __builtin_ia32_vtestcpd ((__v2df)__M, (__v2df)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testnzc_pd (__m128d __M, __m128d __V) +{ + return __builtin_ia32_vtestnzcpd ((__v2df)__M, (__v2df)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testz_ps (__m128 __M, __m128 __V) +{ + return __builtin_ia32_vtestzps ((__v4sf)__M, (__v4sf)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testc_ps (__m128 __M, __m128 __V) +{ + return __builtin_ia32_vtestcps ((__v4sf)__M, (__v4sf)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testnzc_ps (__m128 __M, __m128 __V) +{ + return 
__builtin_ia32_vtestnzcps ((__v4sf)__M, (__v4sf)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testz_pd (__m256d __M, __m256d __V) +{ + return __builtin_ia32_vtestzpd256 ((__v4df)__M, (__v4df)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testc_pd (__m256d __M, __m256d __V) +{ + return __builtin_ia32_vtestcpd256 ((__v4df)__M, (__v4df)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testnzc_pd (__m256d __M, __m256d __V) +{ + return __builtin_ia32_vtestnzcpd256 ((__v4df)__M, (__v4df)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testz_ps (__m256 __M, __m256 __V) +{ + return __builtin_ia32_vtestzps256 ((__v8sf)__M, (__v8sf)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testc_ps (__m256 __M, __m256 __V) +{ + return __builtin_ia32_vtestcps256 ((__v8sf)__M, (__v8sf)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testnzc_ps (__m256 __M, __m256 __V) +{ + return __builtin_ia32_vtestnzcps256 ((__v8sf)__M, (__v8sf)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testz_si256 (__m256i __M, __m256i __V) +{ + return __builtin_ia32_ptestz256 ((__v4di)__M, (__v4di)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testc_si256 (__m256i __M, __m256i __V) +{ + return __builtin_ia32_ptestc256 ((__v4di)__M, (__v4di)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testnzc_si256 (__m256i __M, __m256i __V) +{ + return __builtin_ia32_ptestnzc256 ((__v4di)__M, (__v4di)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_movemask_pd (__m256d __A) +{ + return __builtin_ia32_movmskpd256 ((__v4df)__A); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_movemask_ps (__m256 __A) +{ + return __builtin_ia32_movmskps256 ((__v8sf)__A); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setzero_pd (void) +{ + return __extension__ (__m256d){ 0.0, 0.0, 0.0, 0.0 }; +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setzero_ps (void) +{ + return __extension__ (__m256){ 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0 }; +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setzero_si256 (void) +{ + return __extension__ (__m256i)(__v4di){ 0, 0, 0, 0 }; +} + +/* Create the vector [A B C D]. */ +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set_pd (double __A, double __B, double __C, double __D) +{ + return __extension__ (__m256d){ __D, __C, __B, __A }; +} + +/* Create the vector [A B C D E F G H]. */ +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set_ps (float __A, float __B, float __C, float __D, + float __E, float __F, float __G, float __H) +{ + return __extension__ (__m256){ __H, __G, __F, __E, + __D, __C, __B, __A }; +} + +/* Create the vector [A B C D E F G H]. 
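Note that the first argument becomes the highest-numbered element: for example, _mm256_set_epi32 (7, 6, 5, 4, 3, 2, 1, 0) yields a vector whose element 0 is 0 and whose element 7 is 7.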
*/ +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set_epi32 (int __A, int __B, int __C, int __D, + int __E, int __F, int __G, int __H) +{ + return __extension__ (__m256i)(__v8si){ __H, __G, __F, __E, + __D, __C, __B, __A }; +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set_epi16 (short __q15, short __q14, short __q13, short __q12, + short __q11, short __q10, short __q09, short __q08, + short __q07, short __q06, short __q05, short __q04, + short __q03, short __q02, short __q01, short __q00) +{ + return __extension__ (__m256i)(__v16hi){ + __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07, + __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15 + }; +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set_epi8 (char __q31, char __q30, char __q29, char __q28, + char __q27, char __q26, char __q25, char __q24, + char __q23, char __q22, char __q21, char __q20, + char __q19, char __q18, char __q17, char __q16, + char __q15, char __q14, char __q13, char __q12, + char __q11, char __q10, char __q09, char __q08, + char __q07, char __q06, char __q05, char __q04, + char __q03, char __q02, char __q01, char __q00) +{ + return __extension__ (__m256i)(__v32qi){ + __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07, + __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15, + __q16, __q17, __q18, __q19, __q20, __q21, __q22, __q23, + __q24, __q25, __q26, __q27, __q28, __q29, __q30, __q31 + }; +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set_epi64x (long long __A, long long __B, long long __C, + long long __D) +{ + return __extension__ (__m256i)(__v4di){ __D, __C, __B, __A }; +} + +/* Create a vector with all elements equal to A. */ +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set1_pd (double __A) +{ + return __extension__ (__m256d){ __A, __A, __A, __A }; +} + +/* Create a vector with all elements equal to A. */ +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set1_ps (float __A) +{ + return __extension__ (__m256){ __A, __A, __A, __A, + __A, __A, __A, __A }; +} + +/* Create a vector with all elements equal to A. */ +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set1_epi32 (int __A) +{ + return __extension__ (__m256i)(__v8si){ __A, __A, __A, __A, + __A, __A, __A, __A }; +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set1_epi16 (short __A) +{ + return _mm256_set_epi16 (__A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set1_epi8 (char __A) +{ + return _mm256_set_epi8 (__A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set1_epi64x (long long __A) +{ + return __extension__ (__m256i)(__v4di){ __A, __A, __A, __A }; +} + +/* Create vectors of elements in the reversed order from the + _mm256_set_XXX functions. 
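Here the first argument supplies element 0 instead, so for example _mm256_setr_pd (a, b, c, d) is equivalent to _mm256_set_pd (d, c, b, a).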
*/ + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setr_pd (double __A, double __B, double __C, double __D) +{ + return _mm256_set_pd (__D, __C, __B, __A); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setr_ps (float __A, float __B, float __C, float __D, + float __E, float __F, float __G, float __H) +{ + return _mm256_set_ps (__H, __G, __F, __E, __D, __C, __B, __A); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setr_epi32 (int __A, int __B, int __C, int __D, + int __E, int __F, int __G, int __H) +{ + return _mm256_set_epi32 (__H, __G, __F, __E, __D, __C, __B, __A); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setr_epi16 (short __q15, short __q14, short __q13, short __q12, + short __q11, short __q10, short __q09, short __q08, + short __q07, short __q06, short __q05, short __q04, + short __q03, short __q02, short __q01, short __q00) +{ + return _mm256_set_epi16 (__q00, __q01, __q02, __q03, + __q04, __q05, __q06, __q07, + __q08, __q09, __q10, __q11, + __q12, __q13, __q14, __q15); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setr_epi8 (char __q31, char __q30, char __q29, char __q28, + char __q27, char __q26, char __q25, char __q24, + char __q23, char __q22, char __q21, char __q20, + char __q19, char __q18, char __q17, char __q16, + char __q15, char __q14, char __q13, char __q12, + char __q11, char __q10, char __q09, char __q08, + char __q07, char __q06, char __q05, char __q04, + char __q03, char __q02, char __q01, char __q00) +{ + return _mm256_set_epi8 (__q00, __q01, __q02, __q03, + __q04, __q05, __q06, __q07, + __q08, __q09, __q10, __q11, + __q12, __q13, __q14, __q15, + __q16, __q17, __q18, __q19, + __q20, __q21, __q22, __q23, + __q24, __q25, __q26, __q27, + __q28, __q29, __q30, __q31); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setr_epi64x (long long __A, long long __B, long long __C, + long long __D) +{ + return _mm256_set_epi64x (__D, __C, __B, __A); +} + +/* Casts between various SP, DP, INT vector types. Note that these do no + conversion of values, they just change the type. 
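For example, _mm256_castpd_si256 reinterprets the 256 bits of a __m256d as a __m256i without emitting any instructions, and _mm256_castsi256_pd is its inverse, so composing the two returns the original value bit for bit.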
*/ +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castpd_ps (__m256d __A) +{ + return (__m256) __A; +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castpd_si256 (__m256d __A) +{ + return (__m256i) __A; +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castps_pd (__m256 __A) +{ + return (__m256d) __A; +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castps_si256(__m256 __A) +{ + return (__m256i) __A; +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castsi256_ps (__m256i __A) +{ + return (__m256) __A; +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castsi256_pd (__m256i __A) +{ + return (__m256d) __A; +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castpd256_pd128 (__m256d __A) +{ + return (__m128d) __builtin_ia32_pd_pd256 ((__v4df)__A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castps256_ps128 (__m256 __A) +{ + return (__m128) __builtin_ia32_ps_ps256 ((__v8sf)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castsi256_si128 (__m256i __A) +{ + return (__m128i) __builtin_ia32_si_si256 ((__v8si)__A); +} + +/* When cast is done from a 128 to 256-bit type, the low 128 bits of + the 256-bit result contain source parameter value and the upper 128 + bits of the result are undefined. Those intrinsics shouldn't + generate any extra moves. */ + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castpd128_pd256 (__m128d __A) +{ + return (__m256d) __builtin_ia32_pd256_pd ((__v2df)__A); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castps128_ps256 (__m128 __A) +{ + return (__m256) __builtin_ia32_ps256_ps ((__v4sf)__A); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castsi128_si256 (__m128i __A) +{ + return (__m256i) __builtin_ia32_si256_si ((__v4si)__A); +} diff --git a/lib/gcc/i686-linux-android/4.6/include/bmiintrin.h b/lib/gcc/i686-linux-android/4.6/include/bmiintrin.h new file mode 100644 index 0000000..225f2ec --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/bmiintrin.h @@ -0,0 +1,145 @@ +/* Copyright (C) 2010 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. 
If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _X86INTRIN_H_INCLUDED +# error "Never use <bmiintrin.h> directly; include <x86intrin.h> instead." +#endif + +#ifndef __BMI__ +# error "BMI instruction set not enabled" +#endif /* __BMI__ */ + +#ifndef _BMIINTRIN_H_INCLUDED +#define _BMIINTRIN_H_INCLUDED + +extern __inline unsigned short __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__lzcnt_u16 (unsigned short __X) +{ + return __builtin_clzs (__X); +} + +extern __inline unsigned short __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__tzcnt_u16 (unsigned short __X) +{ + return __builtin_ctzs (__X); +} + + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__andn_u32 (unsigned int __X, unsigned int __Y) +{ + unsigned int tmp = ~(__X) & (__Y); + return tmp; +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__bextr_u32 (unsigned int __X, unsigned int __Y) +{ + return __builtin_ia32_bextr_u32 (__X, __Y); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blsi_u32 (unsigned int __X) +{ + unsigned int tmp = (__X) & (-(__X)); + return tmp; +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blsmsk_u32 (unsigned int __X) +{ + unsigned int tmp = (__X) ^ (__X - 1); + return tmp; +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blsr_u32 (unsigned int __X) +{ + unsigned int tmp = (__X) & (__X - 1); + return tmp; +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__lzcnt_u32 (unsigned int __X) +{ + return __builtin_clz (__X); +} + + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__tzcnt_u32 (unsigned int __X) +{ + return __builtin_ctz (__X); +} + + +#ifdef __x86_64__ +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__andn_u64 (unsigned long long __X, unsigned long long __Y) +{ + unsigned long long tmp = ~(__X) & (__Y); + return tmp; +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__bextr_u64 (unsigned long long __X, unsigned long long __Y) +{ + return __builtin_ia32_bextr_u64 (__X, __Y); +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blsi_u64 (unsigned long long __X) +{ + unsigned long long tmp = (__X) & (-(__X)); + return tmp; +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blsmsk_u64 (unsigned long long __X) +{ + unsigned long long tmp = (__X) ^ (__X - 1); + return tmp; +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blsr_u64 (unsigned long long __X) +{ + unsigned long long tmp = (__X) & (__X - 1); + return tmp; +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__lzcnt_u64 (unsigned long long __X) +{ + return __builtin_clzll (__X); +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__tzcnt_u64 (unsigned long long __X) +{ + return __builtin_ctzll (__X); +} + +#endif /* __x86_64__ */ + +#endif /* _BMIINTRIN_H_INCLUDED */ diff --git 
a/lib/gcc/i686-linux-android/4.6/include/bmmintrin.h b/lib/gcc/i686-linux-android/4.6/include/bmmintrin.h new file mode 100644 index 0000000..91d4e77 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/bmmintrin.h @@ -0,0 +1,29 @@ +/* Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _BMMINTRIN_H_INCLUDED +#define _BMMINTRIN_H_INCLUDED + +# error "SSE5 instruction set removed from compiler" + +#endif /* _BMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-linux-android/4.6/include/cpuid.h b/lib/gcc/i686-linux-android/4.6/include/cpuid.h new file mode 100644 index 0000000..3c3f47b --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/cpuid.h @@ -0,0 +1,188 @@ +/* + * Copyright (C) 2007, 2008, 2009, 2010 Free Software Foundation, Inc. + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * <http://www.gnu.org/licenses/>. 
+ */ + +/* %ecx */ +#define bit_SSE3 (1 << 0) +#define bit_PCLMUL (1 << 1) +#define bit_SSSE3 (1 << 9) +#define bit_FMA (1 << 12) +#define bit_CMPXCHG16B (1 << 13) +#define bit_SSE4_1 (1 << 19) +#define bit_SSE4_2 (1 << 20) +#define bit_MOVBE (1 << 22) +#define bit_POPCNT (1 << 23) +#define bit_AES (1 << 25) +#define bit_XSAVE (1 << 26) +#define bit_OSXSAVE (1 << 27) +#define bit_AVX (1 << 28) +#define bit_F16C (1 << 29) +#define bit_RDRND (1 << 30) + +/* %edx */ +#define bit_CMPXCHG8B (1 << 8) +#define bit_CMOV (1 << 15) +#define bit_MMX (1 << 23) +#define bit_FXSAVE (1 << 24) +#define bit_SSE (1 << 25) +#define bit_SSE2 (1 << 26) + +/* Extended Features */ +/* %ecx */ +#define bit_LAHF_LM (1 << 0) +#define bit_ABM (1 << 5) +#define bit_SSE4a (1 << 6) +#define bit_XOP (1 << 11) +#define bit_LWP (1 << 15) +#define bit_FMA4 (1 << 16) +#define bit_TBM (1 << 21) + +/* %edx */ +#define bit_MMXEXT (1 << 22) +#define bit_LM (1 << 29) +#define bit_3DNOWP (1 << 30) +#define bit_3DNOW (1 << 31) + +/* Extended Features (%eax == 7) */ +#define bit_FSGSBASE (1 << 0) +#define bit_BMI (1 << 3) + +#if defined(__i386__) && defined(__PIC__) +/* %ebx may be the PIC register. */ +#if __GNUC__ >= 3 +#define __cpuid(level, a, b, c, d) \ + __asm__ ("xchg{l}\t{%%}ebx, %1\n\t" \ + "cpuid\n\t" \ + "xchg{l}\t{%%}ebx, %1\n\t" \ + : "=a" (a), "=r" (b), "=c" (c), "=d" (d) \ + : "0" (level)) + +#define __cpuid_count(level, count, a, b, c, d) \ + __asm__ ("xchg{l}\t{%%}ebx, %1\n\t" \ + "cpuid\n\t" \ + "xchg{l}\t{%%}ebx, %1\n\t" \ + : "=a" (a), "=r" (b), "=c" (c), "=d" (d) \ + : "0" (level), "2" (count)) +#else +/* Host GCCs older than 3.0 didn't support Intel asm syntax + or alternatives in i386 code. */ +#define __cpuid(level, a, b, c, d) \ + __asm__ ("xchgl\t%%ebx, %1\n\t" \ + "cpuid\n\t" \ + "xchgl\t%%ebx, %1\n\t" \ + : "=a" (a), "=r" (b), "=c" (c), "=d" (d) \ + : "0" (level)) + +#define __cpuid_count(level, count, a, b, c, d) \ + __asm__ ("xchgl\t%%ebx, %1\n\t" \ + "cpuid\n\t" \ + "xchgl\t%%ebx, %1\n\t" \ + : "=a" (a), "=r" (b), "=c" (c), "=d" (d) \ + : "0" (level), "2" (count)) +#endif +#else +#define __cpuid(level, a, b, c, d) \ + __asm__ ("cpuid\n\t" \ + : "=a" (a), "=b" (b), "=c" (c), "=d" (d) \ + : "0" (level)) + +#define __cpuid_count(level, count, a, b, c, d) \ + __asm__ ("cpuid\n\t" \ + : "=a" (a), "=b" (b), "=c" (c), "=d" (d) \ + : "0" (level), "2" (count)) +#endif + +/* Return the highest supported input value for the cpuid instruction. ext can + be either 0x0 or 0x80000000 to return the highest supported value for + basic or extended cpuid information, respectively. Returns 0 if cpuid + is not supported, and otherwise whatever cpuid returns in the eax register. If the sig + pointer is non-null, the first four bytes of the signature + (as found in the ebx register) are stored in the location sig points to. */ + +static __inline unsigned int +__get_cpuid_max (unsigned int __ext, unsigned int *__sig) +{ + unsigned int __eax, __ebx, __ecx, __edx; + +#ifndef __x86_64__ + /* See if we can use cpuid. On AMD64 we always can. */ +#if __GNUC__ >= 3 + __asm__ ("pushf{l|d}\n\t" + "pushf{l|d}\n\t" + "pop{l}\t%0\n\t" + "mov{l}\t{%0, %1|%1, %0}\n\t" + "xor{l}\t{%2, %0|%0, %2}\n\t" + "push{l}\t%0\n\t" + "popf{l|d}\n\t" + "pushf{l|d}\n\t" + "pop{l}\t%0\n\t" + "popf{l|d}\n\t" + : "=&r" (__eax), "=&r" (__ebx) + : "i" (0x00200000)); +#else +/* Host GCCs older than 3.0 didn't support Intel asm syntax + or alternatives in i386 code. 
*/ + __asm__ ("pushfl\n\t" + "pushfl\n\t" + "popl\t%0\n\t" + "movl\t%0, %1\n\t" + "xorl\t%2, %0\n\t" + "pushl\t%0\n\t" + "popfl\n\t" + "pushfl\n\t" + "popl\t%0\n\t" + "popfl\n\t" + : "=&r" (__eax), "=&r" (__ebx) + : "i" (0x00200000)); +#endif + + if (!((__eax ^ __ebx) & 0x00200000)) + return 0; +#endif + + /* Host supports cpuid. Return highest supported cpuid input value. */ + __cpuid (__ext, __eax, __ebx, __ecx, __edx); + + if (__sig) + *__sig = __ebx; + + return __eax; +} + +/* Return cpuid data for requested cpuid level, as found in returned + eax, ebx, ecx and edx registers. The function checks if cpuid is + supported and returns 1 for valid cpuid information or 0 for + unsupported cpuid level. All pointers are required to be non-null. */ + +static __inline int +__get_cpuid (unsigned int __level, + unsigned int *__eax, unsigned int *__ebx, + unsigned int *__ecx, unsigned int *__edx) +{ + unsigned int __ext = __level & 0x80000000; + + if (__get_cpuid_max (__ext, 0) < __level) + return 0; + + __cpuid (__level, *__eax, *__ebx, *__ecx, *__edx); + return 1; +} diff --git a/lib/gcc/i686-linux-android/4.6/include/cross-stdarg.h b/lib/gcc/i686-linux-android/4.6/include/cross-stdarg.h new file mode 100644 index 0000000..7139ffa --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/cross-stdarg.h @@ -0,0 +1,73 @@ +/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 + Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef __CROSS_STDARG_H_INCLUDED +#define __CROSS_STDARG_H_INCLUDED + +/* Make sure that for non x64 targets cross builtins are defined. */ +#ifndef __x86_64__ +/* Call abi ms_abi. */ +#define __builtin_ms_va_list __builtin_va_list +#define __builtin_ms_va_copy __builtin_va_copy +#define __builtin_ms_va_start __builtin_va_start +#define __builtin_ms_va_end __builtin_va_end + +/* Call abi sysv_abi. 
*/ +#define __builtin_sysv_va_list __builtin_va_list +#define __builtin_sysv_va_copy __builtin_va_copy +#define __builtin_sysv_va_start __builtin_va_start +#define __builtin_sysv_va_end __builtin_va_end +#endif + +#define __ms_va_copy(__d,__s) __builtin_ms_va_copy(__d,__s) +#define __ms_va_start(__v,__l) __builtin_ms_va_start(__v,__l) +#define __ms_va_arg(__v,__l) __builtin_va_arg(__v,__l) +#define __ms_va_end(__v) __builtin_ms_va_end(__v) + +#define __sysv_va_copy(__d,__s) __builtin_sysv_va_copy(__d,__s) +#define __sysv_va_start(__v,__l) __builtin_sysv_va_start(__v,__l) +#define __sysv_va_arg(__v,__l) __builtin_va_arg(__v,__l) +#define __sysv_va_end(__v) __builtin_sysv_va_end(__v) + +#ifndef __GNUC_SYSV_VA_LIST +#define __GNUC_SYSV_VA_LIST + typedef __builtin_sysv_va_list __gnuc_sysv_va_list; +#endif + +#ifndef _SYSV_VA_LIST_DEFINED +#define _SYSV_VA_LIST_DEFINED + typedef __gnuc_sysv_va_list sysv_va_list; +#endif + +#ifndef __GNUC_MS_VA_LIST +#define __GNUC_MS_VA_LIST + typedef __builtin_ms_va_list __gnuc_ms_va_list; +#endif + +#ifndef _MS_VA_LIST_DEFINED +#define _MS_VA_LIST_DEFINED + typedef __gnuc_ms_va_list ms_va_list; +#endif + +#endif /* __CROSS_STDARG_H_INCLUDED */ diff --git a/lib/gcc/i686-linux-android/4.6/include/emmintrin.h b/lib/gcc/i686-linux-android/4.6/include/emmintrin.h new file mode 100644 index 0000000..fe4cd6a --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/emmintrin.h @@ -0,0 +1,1513 @@ +/* Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 + Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 9.0. */ + +#ifndef _EMMINTRIN_H_INCLUDED +#define _EMMINTRIN_H_INCLUDED + +#ifndef __SSE2__ +# error "SSE2 instruction set not enabled" +#else + +/* We need definitions from the SSE header files*/ +#include <xmmintrin.h> + +/* SSE2 */ +typedef double __v2df __attribute__ ((__vector_size__ (16))); +typedef long long __v2di __attribute__ ((__vector_size__ (16))); +typedef int __v4si __attribute__ ((__vector_size__ (16))); +typedef short __v8hi __attribute__ ((__vector_size__ (16))); +typedef char __v16qi __attribute__ ((__vector_size__ (16))); + +/* The Intel API is flexible enough that we must allow aliasing with other + vector types, and their scalar components. */ +typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__)); +typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__)); + +/* Create a selector for use with the SHUFPD instruction. 
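Each of the two selector bits picks an element from one source operand; for example, _MM_SHUFFLE2 (1, 0) evaluates to 2 and selects element 0 of the first operand for the low result element and element 1 of the second operand for the high result element.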
*/ +#define _MM_SHUFFLE2(fp1,fp0) \ + (((fp1) << 1) | (fp0)) + +/* Create a vector with element 0 as F and the rest zero. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_sd (double __F) +{ + return __extension__ (__m128d){ __F, 0.0 }; +} + +/* Create a vector with both elements equal to F. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_pd (double __F) +{ + return __extension__ (__m128d){ __F, __F }; +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_pd1 (double __F) +{ + return _mm_set1_pd (__F); +} + +/* Create a vector with the lower value X and upper value W. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_pd (double __W, double __X) +{ + return __extension__ (__m128d){ __X, __W }; +} + +/* Create a vector with the lower value W and upper value X. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_pd (double __W, double __X) +{ + return __extension__ (__m128d){ __W, __X }; +} + +/* Create a vector of zeros. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setzero_pd (void) +{ + return __extension__ (__m128d){ 0.0, 0.0 }; +} + +/* Sets the low DPFP value of A from the low value of B. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_move_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B); +} + +/* Load two DPFP values from P. The address must be 16-byte aligned. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_pd (double const *__P) +{ + return *(__m128d *)__P; +} + +/* Load two DPFP values from P. The address need not be 16-byte aligned. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadu_pd (double const *__P) +{ + return __builtin_ia32_loadupd (__P); +} + +/* Create a vector with all two elements equal to *P. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load1_pd (double const *__P) +{ + return _mm_set1_pd (*__P); +} + +/* Create a vector with element 0 as *P and the rest zero. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_sd (double const *__P) +{ + return _mm_set_sd (*__P); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_pd1 (double const *__P) +{ + return _mm_load1_pd (__P); +} + +/* Load two DPFP values in reverse order. The address must be aligned. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadr_pd (double const *__P) +{ + __m128d __tmp = _mm_load_pd (__P); + return __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,1)); +} + +/* Store two DPFP values. The address must be 16-byte aligned. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_pd (double *__P, __m128d __A) +{ + *(__m128d *)__P = __A; +} + +/* Store two DPFP values. The address need not be 16-byte aligned. 
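Unlike _mm_store_pd above, this store does not require alignment; it expands to the unaligned movupd form via __builtin_ia32_storeupd, typically at some extra cost on older processors.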
*/ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeu_pd (double *__P, __m128d __A) +{ + __builtin_ia32_storeupd (__P, __A); +} + +/* Stores the lower DPFP value. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_sd (double *__P, __m128d __A) +{ + *__P = __builtin_ia32_vec_ext_v2df (__A, 0); +} + +extern __inline double __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsd_f64 (__m128d __A) +{ + return __builtin_ia32_vec_ext_v2df (__A, 0); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storel_pd (double *__P, __m128d __A) +{ + _mm_store_sd (__P, __A); +} + +/* Stores the upper DPFP value. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeh_pd (double *__P, __m128d __A) +{ + *__P = __builtin_ia32_vec_ext_v2df (__A, 1); +} + +/* Store the lower DPFP value across two words. + The address must be 16-byte aligned. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store1_pd (double *__P, __m128d __A) +{ + _mm_store_pd (__P, __builtin_ia32_shufpd (__A, __A, _MM_SHUFFLE2 (0,0))); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_pd1 (double *__P, __m128d __A) +{ + _mm_store1_pd (__P, __A); +} + +/* Store two DPFP values in reverse order. The address must be aligned. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storer_pd (double *__P, __m128d __A) +{ + _mm_store_pd (__P, __builtin_ia32_shufpd (__A, __A, _MM_SHUFFLE2 (0,1))); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi128_si32 (__m128i __A) +{ + return __builtin_ia32_vec_ext_v4si ((__v4si)__A, 0); +} + +#ifdef __x86_64__ +/* Intel intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi128_si64 (__m128i __A) +{ + return __builtin_ia32_vec_ext_v2di ((__v2di)__A, 0); +} + +/* Microsoft intrinsic. 
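The _si64x form below behaves identically to the Intel-named _mm_cvtsi128_si64 above; only the name differs, matching the Microsoft compiler.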
*/ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi128_si64x (__m128i __A) +{ + return __builtin_ia32_vec_ext_v2di ((__v2di)__A, 0); +} +#endif + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_addpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_addsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_subpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_subsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_mulpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_mulsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_div_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_divpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_div_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_divsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sqrt_pd (__m128d __A) +{ + return (__m128d)__builtin_ia32_sqrtpd ((__v2df)__A); +} + +/* Return pair {sqrt (B[0]), A[1]}. 
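That is, the square root is taken only of the low element, which comes from B; the upper element of A is passed through unchanged by the movsd/sqrtsd pair below.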
*/ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sqrt_sd (__m128d __A, __m128d __B) +{ + __v2df __tmp = __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B); + return (__m128d)__builtin_ia32_sqrtsd ((__v2df)__tmp); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_minpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_minsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_maxpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_maxsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_and_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_andpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_andnot_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_andnpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_or_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_orpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_xor_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_xorpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpeqpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpltpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmple_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmplepd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpgtpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpge_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpgepd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpneq_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpneqpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnlt_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpnltpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnle_pd (__m128d __A, __m128d __B) +{ + return 
(__m128d)__builtin_ia32_cmpnlepd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpngt_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpngtpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnge_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpngepd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpord_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpordpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpunord_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpunordpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpeqsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpltsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmple_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmplesd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df) __A, + (__v2df) + __builtin_ia32_cmpltsd ((__v2df) __B, + (__v2df) + __A)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpge_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df) __A, + (__v2df) + __builtin_ia32_cmplesd ((__v2df) __B, + (__v2df) + __A)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpneq_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpneqsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnlt_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpnltsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnle_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpnlesd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpngt_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df) __A, + (__v2df) + __builtin_ia32_cmpnltsd ((__v2df) __B, + (__v2df) + __A)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnge_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df) __A, + (__v2df) + __builtin_ia32_cmpnlesd ((__v2df) __B, + (__v2df) + __A)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpord_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpordsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm_cmpunord_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpunordsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comieq_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdeq ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comilt_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdlt ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comile_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdle ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comigt_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdgt ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comige_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdge ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comineq_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdneq ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomieq_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdeq ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomilt_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdlt ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomile_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdle ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomigt_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdgt ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomige_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdge ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomineq_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdneq ((__v2df)__A, (__v2df)__B); +} + +/* Create a vector of Qi, where i is the element number. 
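+   The arguments are given highest element first; for example,
+   _mm_set_epi32 (3, 2, 1, 0) builds a vector whose element 0 is 0 and
+   whose element 3 is 3.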
*/ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_epi64x (long long __q1, long long __q0) +{ + return __extension__ (__m128i)(__v2di){ __q0, __q1 }; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_epi64 (__m64 __q1, __m64 __q0) +{ + return _mm_set_epi64x ((long long)__q1, (long long)__q0); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_epi32 (int __q3, int __q2, int __q1, int __q0) +{ + return __extension__ (__m128i)(__v4si){ __q0, __q1, __q2, __q3 }; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_epi16 (short __q7, short __q6, short __q5, short __q4, + short __q3, short __q2, short __q1, short __q0) +{ + return __extension__ (__m128i)(__v8hi){ + __q0, __q1, __q2, __q3, __q4, __q5, __q6, __q7 }; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_epi8 (char __q15, char __q14, char __q13, char __q12, + char __q11, char __q10, char __q09, char __q08, + char __q07, char __q06, char __q05, char __q04, + char __q03, char __q02, char __q01, char __q00) +{ + return __extension__ (__m128i)(__v16qi){ + __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07, + __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15 + }; +} + +/* Set all of the elements of the vector to A. */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_epi64x (long long __A) +{ + return _mm_set_epi64x (__A, __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_epi64 (__m64 __A) +{ + return _mm_set_epi64 (__A, __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_epi32 (int __A) +{ + return _mm_set_epi32 (__A, __A, __A, __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_epi16 (short __A) +{ + return _mm_set_epi16 (__A, __A, __A, __A, __A, __A, __A, __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_epi8 (char __A) +{ + return _mm_set_epi8 (__A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A); +} + +/* Create a vector of Qi, where i is the element number. + The parameter order is reversed from the _mm_set_epi* functions. 
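+   For example, _mm_setr_epi32 (0, 1, 2, 3) produces the same vector as
+   _mm_set_epi32 (3, 2, 1, 0).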
*/ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_epi64 (__m64 __q0, __m64 __q1) +{ + return _mm_set_epi64 (__q1, __q0); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_epi32 (int __q0, int __q1, int __q2, int __q3) +{ + return _mm_set_epi32 (__q3, __q2, __q1, __q0); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_epi16 (short __q0, short __q1, short __q2, short __q3, + short __q4, short __q5, short __q6, short __q7) +{ + return _mm_set_epi16 (__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_epi8 (char __q00, char __q01, char __q02, char __q03, + char __q04, char __q05, char __q06, char __q07, + char __q08, char __q09, char __q10, char __q11, + char __q12, char __q13, char __q14, char __q15) +{ + return _mm_set_epi8 (__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08, + __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00); +} + +/* Create a vector with element 0 as *P and the rest zero. */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_si128 (__m128i const *__P) +{ + return *__P; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadu_si128 (__m128i const *__P) +{ + return (__m128i) __builtin_ia32_loaddqu ((char const *)__P); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadl_epi64 (__m128i const *__P) +{ + return _mm_set_epi64 ((__m64)0LL, *(__m64 *)__P); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_si128 (__m128i *__P, __m128i __B) +{ + *__P = __B; +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeu_si128 (__m128i *__P, __m128i __B) +{ + __builtin_ia32_storedqu ((char *)__P, (__v16qi)__B); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storel_epi64 (__m128i *__P, __m128i __B) +{ + *(long long *)__P = __builtin_ia32_vec_ext_v2di ((__v2di)__B, 0); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movepi64_pi64 (__m128i __B) +{ + return (__m64) __builtin_ia32_vec_ext_v2di ((__v2di)__B, 0); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movpi64_epi64 (__m64 __A) +{ + return _mm_set_epi64 ((__m64)0LL, __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_move_epi64 (__m128i __A) +{ + return (__m128i)__builtin_ia32_movq128 ((__v2di) __A); +} + +/* Create a vector of zeros. 
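+   With optimization enabled this typically compiles to a single pxor of
+   a register with itself rather than a load.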
*/ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setzero_si128 (void) +{ + return __extension__ (__m128i)(__v4si){ 0, 0, 0, 0 }; +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi32_pd (__m128i __A) +{ + return (__m128d)__builtin_ia32_cvtdq2pd ((__v4si) __A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi32_ps (__m128i __A) +{ + return (__m128)__builtin_ia32_cvtdq2ps ((__v4si) __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpd_epi32 (__m128d __A) +{ + return (__m128i)__builtin_ia32_cvtpd2dq ((__v2df) __A); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpd_pi32 (__m128d __A) +{ + return (__m64)__builtin_ia32_cvtpd2pi ((__v2df) __A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpd_ps (__m128d __A) +{ + return (__m128)__builtin_ia32_cvtpd2ps ((__v2df) __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttpd_epi32 (__m128d __A) +{ + return (__m128i)__builtin_ia32_cvttpd2dq ((__v2df) __A); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttpd_pi32 (__m128d __A) +{ + return (__m64)__builtin_ia32_cvttpd2pi ((__v2df) __A); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpi32_pd (__m64 __A) +{ + return (__m128d)__builtin_ia32_cvtpi2pd ((__v2si) __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtps_epi32 (__m128 __A) +{ + return (__m128i)__builtin_ia32_cvtps2dq ((__v4sf) __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttps_epi32 (__m128 __A) +{ + return (__m128i)__builtin_ia32_cvttps2dq ((__v4sf) __A); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtps_pd (__m128 __A) +{ + return (__m128d)__builtin_ia32_cvtps2pd ((__v4sf) __A); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsd_si32 (__m128d __A) +{ + return __builtin_ia32_cvtsd2si ((__v2df) __A); +} + +#ifdef __x86_64__ +/* Intel intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsd_si64 (__m128d __A) +{ + return __builtin_ia32_cvtsd2si64 ((__v2df) __A); +} + +/* Microsoft intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsd_si64x (__m128d __A) +{ + return __builtin_ia32_cvtsd2si64 ((__v2df) __A); +} +#endif + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttsd_si32 (__m128d __A) +{ + return __builtin_ia32_cvttsd2si ((__v2df) __A); +} + +#ifdef __x86_64__ +/* Intel intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttsd_si64 (__m128d __A) +{ + return __builtin_ia32_cvttsd2si64 ((__v2df) __A); +} + +/* Microsoft intrinsic. 
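+   This is the same operation as _mm_cvttsd_si64 above; the name matches
+   the one used by the Microsoft compiler.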
*/ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttsd_si64x (__m128d __A) +{ + return __builtin_ia32_cvttsd2si64 ((__v2df) __A); +} +#endif + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsd_ss (__m128 __A, __m128d __B) +{ + return (__m128)__builtin_ia32_cvtsd2ss ((__v4sf) __A, (__v2df) __B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi32_sd (__m128d __A, int __B) +{ + return (__m128d)__builtin_ia32_cvtsi2sd ((__v2df) __A, __B); +} + +#ifdef __x86_64__ +/* Intel intrinsic. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64_sd (__m128d __A, long long __B) +{ + return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B); +} + +/* Microsoft intrinsic. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64x_sd (__m128d __A, long long __B) +{ + return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B); +} +#endif + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtss_sd (__m128d __A, __m128 __B) +{ + return (__m128d)__builtin_ia32_cvtss2sd ((__v2df) __A, (__v4sf)__B); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shuffle_pd(__m128d __A, __m128d __B, const int __mask) +{ + return (__m128d)__builtin_ia32_shufpd ((__v2df)__A, (__v2df)__B, __mask); +} +#else +#define _mm_shuffle_pd(A, B, N) \ + ((__m128d)__builtin_ia32_shufpd ((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(N))) +#endif + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_unpckhpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_unpcklpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadh_pd (__m128d __A, double const *__B) +{ + return (__m128d)__builtin_ia32_loadhpd ((__v2df)__A, __B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadl_pd (__m128d __A, double const *__B) +{ + return (__m128d)__builtin_ia32_loadlpd ((__v2df)__A, __B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movemask_pd (__m128d __A) +{ + return __builtin_ia32_movmskpd ((__v2df)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_packs_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_packsswb128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_packs_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_packssdw128 ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_packus_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_packuswb128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_epi8 (__m128i __A, __m128i __B) +{ + return 
(__m128i)__builtin_ia32_punpckhbw128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckhwd128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckhdq128 ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckhqdq128 ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpcklbw128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpcklwd128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckldq128 ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpcklqdq128 ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddb128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddd128 ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddq128 ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_adds_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddsb128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_adds_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddsw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_adds_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddusb128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_adds_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddusw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubb128 ((__v16qi)__A, (__v16qi)__B); +} + +extern 
__inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubd128 ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubq128 ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_subs_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubsb128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_subs_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubsw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_subs_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubusb128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_subs_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubusw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_madd_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmaddwd128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mulhi_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmulhw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mullo_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmullw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_su32 (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pmuludq ((__v2si)__A, (__v2si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_epu32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmuludq128 ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_slli_epi16 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psllwi128 ((__v8hi)__A, __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_slli_epi32 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_pslldi128 ((__v4si)__A, __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_slli_epi64 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psllqi128 ((__v2di)__A, __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srai_epi16 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psrawi128 ((__v8hi)__A, __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srai_epi32 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psradi128 ((__v4si)__A, __B); 
+} + +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srli_si128 (__m128i __A, const int __N) +{ + return (__m128i)__builtin_ia32_psrldqi128 (__A, __N * 8); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_slli_si128 (__m128i __A, const int __N) +{ + return (__m128i)__builtin_ia32_pslldqi128 (__A, __N * 8); +} +#else +#define _mm_srli_si128(A, N) \ + ((__m128i)__builtin_ia32_psrldqi128 ((__m128i)(A), (int)(N) * 8)) +#define _mm_slli_si128(A, N) \ + ((__m128i)__builtin_ia32_pslldqi128 ((__m128i)(A), (int)(N) * 8)) +#endif + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srli_epi16 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psrlwi128 ((__v8hi)__A, __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srli_epi32 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psrldi128 ((__v4si)__A, __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srli_epi64 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psrlqi128 ((__v2di)__A, __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sll_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psllw128((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sll_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pslld128((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sll_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psllq128((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sra_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psraw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sra_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrad128 ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srl_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrlw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srl_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrld128 ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srl_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrlq128 ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_and_si128 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pand128 ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_andnot_si128 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pandn128 ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_or_si128 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_por128 
((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_xor_si128 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pxor128 ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpeqb128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpeqw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpeqd128 ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__B, (__v16qi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__B, (__v8hi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__B, (__v4si)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__A, (__v4si)__B); +} + +#ifdef __OPTIMIZE__ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_extract_epi16 (__m128i const __A, int const __N) +{ + return (unsigned short) __builtin_ia32_vec_ext_v8hi ((__v8hi)__A, __N); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_insert_epi16 (__m128i const __A, int const __D, int const __N) +{ + return (__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)__A, __D, __N); +} +#else +#define _mm_extract_epi16(A, N) \ + ((int) (unsigned short) __builtin_ia32_vec_ext_v8hi ((__v8hi)(__m128i)(A), (int)(N))) +#define _mm_insert_epi16(A, D, N) \ + ((__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)(__m128i)(A), \ + (int)(D), (int)(N))) +#endif + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmaxsw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmaxub128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pminsw128 
((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pminub128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movemask_epi8 (__m128i __A) +{ + return __builtin_ia32_pmovmskb128 ((__v16qi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mulhi_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmulhuw128 ((__v8hi)__A, (__v8hi)__B); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shufflehi_epi16 (__m128i __A, const int __mask) +{ + return (__m128i)__builtin_ia32_pshufhw ((__v8hi)__A, __mask); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shufflelo_epi16 (__m128i __A, const int __mask) +{ + return (__m128i)__builtin_ia32_pshuflw ((__v8hi)__A, __mask); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shuffle_epi32 (__m128i __A, const int __mask) +{ + return (__m128i)__builtin_ia32_pshufd ((__v4si)__A, __mask); +} +#else +#define _mm_shufflehi_epi16(A, N) \ + ((__m128i)__builtin_ia32_pshufhw ((__v8hi)(__m128i)(A), (int)(N))) +#define _mm_shufflelo_epi16(A, N) \ + ((__m128i)__builtin_ia32_pshuflw ((__v8hi)(__m128i)(A), (int)(N))) +#define _mm_shuffle_epi32(A, N) \ + ((__m128i)__builtin_ia32_pshufd ((__v4si)(__m128i)(A), (int)(N))) +#endif + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C) +{ + __builtin_ia32_maskmovdqu ((__v16qi)__A, (__v16qi)__B, __C); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_avg_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pavgb128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_avg_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pavgw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sad_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psadbw128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_si32 (int *__A, int __B) +{ + __builtin_ia32_movnti (__A, __B); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_si128 (__m128i *__A, __m128i __B) +{ + __builtin_ia32_movntdq ((__v2di *)__A, (__v2di)__B); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_pd (double *__A, __m128d __B) +{ + __builtin_ia32_movntpd (__A, (__v2df)__B); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_clflush (void const *__A) +{ + __builtin_ia32_clflush (__A); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_lfence (void) +{ + __builtin_ia32_lfence (); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mfence (void) +{ + __builtin_ia32_mfence (); +} + +extern __inline __m128i 
__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi32_si128 (int __A) +{ + return _mm_set_epi32 (0, 0, 0, __A); +} + +#ifdef __x86_64__ +/* Intel intrinsic. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64_si128 (long long __A) +{ + return _mm_set_epi64x (0, __A); +} + +/* Microsoft intrinsic. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64x_si128 (long long __A) +{ + return _mm_set_epi64x (0, __A); +} +#endif + +/* Casts between various SP, DP, INT vector types. Note that these do no + conversion of values, they just change the type. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_castpd_ps(__m128d __A) +{ + return (__m128) __A; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_castpd_si128(__m128d __A) +{ + return (__m128i) __A; +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_castps_pd(__m128 __A) +{ + return (__m128d) __A; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_castps_si128(__m128 __A) +{ + return (__m128i) __A; +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_castsi128_ps(__m128i __A) +{ + return (__m128) __A; +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_castsi128_pd(__m128i __A) +{ + return (__m128d) __A; +} + +#endif /* __SSE2__ */ + +#endif /* _EMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-linux-android/4.6/include/float.h b/lib/gcc/i686-linux-android/4.6/include/float.h new file mode 100644 index 0000000..b78cc0c --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/float.h @@ -0,0 +1,277 @@ +/* Copyright (C) 2002, 2007, 2008, 2009, 2010 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +<http://www.gnu.org/licenses/>. */ + +/* + * ISO C Standard: 5.2.4.2.2 Characteristics of floating types <float.h> + */ + +#ifndef _FLOAT_H___ +#define _FLOAT_H___ + +/* Radix of exponent representation, b. */ +#undef FLT_RADIX +#define FLT_RADIX __FLT_RADIX__ + +/* Number of base-FLT_RADIX digits in the significand, p. 
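+   For the usual IEEE 754 binary formats (FLT_RADIX == 2) these are 24
+   for float, 53 for double and, with the x87 extended format used for
+   long double on x86, 64.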
*/ +#undef FLT_MANT_DIG +#undef DBL_MANT_DIG +#undef LDBL_MANT_DIG +#define FLT_MANT_DIG __FLT_MANT_DIG__ +#define DBL_MANT_DIG __DBL_MANT_DIG__ +#define LDBL_MANT_DIG __LDBL_MANT_DIG__ + +/* Number of decimal digits, q, such that any floating-point number with q + decimal digits can be rounded into a floating-point number with p radix b + digits and back again without change to the q decimal digits, + + p * log10(b) if b is a power of 10 + floor((p - 1) * log10(b)) otherwise +*/ +#undef FLT_DIG +#undef DBL_DIG +#undef LDBL_DIG +#define FLT_DIG __FLT_DIG__ +#define DBL_DIG __DBL_DIG__ +#define LDBL_DIG __LDBL_DIG__ + +/* Minimum int x such that FLT_RADIX**(x-1) is a normalized float, emin */ +#undef FLT_MIN_EXP +#undef DBL_MIN_EXP +#undef LDBL_MIN_EXP +#define FLT_MIN_EXP __FLT_MIN_EXP__ +#define DBL_MIN_EXP __DBL_MIN_EXP__ +#define LDBL_MIN_EXP __LDBL_MIN_EXP__ + +/* Minimum negative integer such that 10 raised to that power is in the + range of normalized floating-point numbers, + + ceil(log10(b) * (emin - 1)) +*/ +#undef FLT_MIN_10_EXP +#undef DBL_MIN_10_EXP +#undef LDBL_MIN_10_EXP +#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__ +#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__ +#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__ + +/* Maximum int x such that FLT_RADIX**(x-1) is a representable float, emax. */ +#undef FLT_MAX_EXP +#undef DBL_MAX_EXP +#undef LDBL_MAX_EXP +#define FLT_MAX_EXP __FLT_MAX_EXP__ +#define DBL_MAX_EXP __DBL_MAX_EXP__ +#define LDBL_MAX_EXP __LDBL_MAX_EXP__ + +/* Maximum integer such that 10 raised to that power is in the range of + representable finite floating-point numbers, + + floor(log10((1 - b**-p) * b**emax)) +*/ +#undef FLT_MAX_10_EXP +#undef DBL_MAX_10_EXP +#undef LDBL_MAX_10_EXP +#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__ +#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__ +#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__ + +/* Maximum representable finite floating-point number, + + (1 - b**-p) * b**emax +*/ +#undef FLT_MAX +#undef DBL_MAX +#undef LDBL_MAX +#define FLT_MAX __FLT_MAX__ +#define DBL_MAX __DBL_MAX__ +#define LDBL_MAX __LDBL_MAX__ + +/* The difference between 1 and the least value greater than 1 that is + representable in the given floating point type, b**1-p. */ +#undef FLT_EPSILON +#undef DBL_EPSILON +#undef LDBL_EPSILON +#define FLT_EPSILON __FLT_EPSILON__ +#define DBL_EPSILON __DBL_EPSILON__ +#define LDBL_EPSILON __LDBL_EPSILON__ + +/* Minimum normalized positive floating-point number, b**(emin - 1). */ +#undef FLT_MIN +#undef DBL_MIN +#undef LDBL_MIN +#define FLT_MIN __FLT_MIN__ +#define DBL_MIN __DBL_MIN__ +#define LDBL_MIN __LDBL_MIN__ + +/* Addition rounds to 0: zero, 1: nearest, 2: +inf, 3: -inf, -1: unknown. */ +/* ??? This is supposed to change with calls to fesetround in <fenv.h>. */ +#undef FLT_ROUNDS +#define FLT_ROUNDS 1 + +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +/* The floating-point expression evaluation method. + -1 indeterminate + 0 evaluate all operations and constants just to the range and + precision of the type + 1 evaluate operations and constants of type float and double + to the range and precision of the double type, evaluate + long double operations and constants to the range and + precision of the long double type + 2 evaluate all operations and constants to the range and + precision of the long double type + + ??? This ought to change with the setting of the fp control word; + the value provided by the compiler assumes the widest setting. 
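+   For example, i686 code using the x87 FPU typically reports 2 here,
+   while code compiled with -mfpmath=sse -msse2 reports 0.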
*/ +#undef FLT_EVAL_METHOD +#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__ + +/* Number of decimal digits, n, such that any floating-point number in the + widest supported floating type with pmax radix b digits can be rounded + to a floating-point number with n decimal digits and back again without + change to the value, + + pmax * log10(b) if b is a power of 10 + ceil(1 + pmax * log10(b)) otherwise +*/ +#undef DECIMAL_DIG +#define DECIMAL_DIG __DECIMAL_DIG__ + +#endif /* C99 */ + +#if defined (__STDC_VERSION__) && __STDC_VERSION__ > 199901L +/* Versions of DECIMAL_DIG for each floating-point type. */ +#undef FLT_DECIMAL_DIG +#undef DBL_DECIMAL_DIG +#undef LDBL_DECIMAL_DIG +#define FLT_DECIMAL_DIG __FLT_DECIMAL_DIG__ +#define DBL_DECIMAL_DIG __DBL_DECIMAL_DIG__ +#define LDBL_DECIMAL_DIG __DECIMAL_DIG__ + +/* Whether types support subnormal numbers. */ +#undef FLT_HAS_SUBNORM +#undef DBL_HAS_SUBNORM +#undef LDBL_HAS_SUBNORM +#define FLT_HAS_SUBNORM __FLT_HAS_DENORM__ +#define DBL_HAS_SUBNORM __DBL_HAS_DENORM__ +#define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__ + +/* Minimum positive values, including subnormals. */ +#undef FLT_TRUE_MIN +#undef DBL_TRUE_MIN +#undef LDBL_TRUE_MIN +#if __FLT_HAS_DENORM__ +#define FLT_TRUE_MIN __FLT_DENORM_MIN__ +#else +#define FLT_TRUE_MIN __FLT_MIN__ +#endif +#if __DBL_HAS_DENORM__ +#define DBL_TRUE_MIN __DBL_DENORM_MIN__ +#else +#define DBL_TRUE_MIN __DBL_MIN__ +#endif +#if __LDBL_HAS_DENORM__ +#define LDBL_TRUE_MIN __LDBL_DENORM_MIN__ +#else +#define LDBL_TRUE_MIN __LDBL_MIN__ +#endif + +#endif /* C1X */ + +#ifdef __STDC_WANT_DEC_FP__ +/* Draft Technical Report 24732, extension for decimal floating-point + arithmetic: Characteristic of decimal floating types <float.h>. */ + +/* Number of base-FLT_RADIX digits in the significand, p. */ +#undef DEC32_MANT_DIG +#undef DEC64_MANT_DIG +#undef DEC128_MANT_DIG +#define DEC32_MANT_DIG __DEC32_MANT_DIG__ +#define DEC64_MANT_DIG __DEC64_MANT_DIG__ +#define DEC128_MANT_DIG __DEC128_MANT_DIG__ + +/* Minimum exponent. */ +#undef DEC32_MIN_EXP +#undef DEC64_MIN_EXP +#undef DEC128_MIN_EXP +#define DEC32_MIN_EXP __DEC32_MIN_EXP__ +#define DEC64_MIN_EXP __DEC64_MIN_EXP__ +#define DEC128_MIN_EXP __DEC128_MIN_EXP__ + +/* Maximum exponent. */ +#undef DEC32_MAX_EXP +#undef DEC64_MAX_EXP +#undef DEC128_MAX_EXP +#define DEC32_MAX_EXP __DEC32_MAX_EXP__ +#define DEC64_MAX_EXP __DEC64_MAX_EXP__ +#define DEC128_MAX_EXP __DEC128_MAX_EXP__ + +/* Maximum representable finite decimal floating-point number + (there are 6, 15, and 33 9s after the decimal points respectively). */ +#undef DEC32_MAX +#undef DEC64_MAX +#undef DEC128_MAX +#define DEC32_MAX __DEC32_MAX__ +#define DEC64_MAX __DEC64_MAX__ +#define DEC128_MAX __DEC128_MAX__ + +/* The difference between 1 and the least value greater than 1 that is + representable in the given floating point type. */ +#undef DEC32_EPSILON +#undef DEC64_EPSILON +#undef DEC128_EPSILON +#define DEC32_EPSILON __DEC32_EPSILON__ +#define DEC64_EPSILON __DEC64_EPSILON__ +#define DEC128_EPSILON __DEC128_EPSILON__ + +/* Minimum normalized positive floating-point number. */ +#undef DEC32_MIN +#undef DEC64_MIN +#undef DEC128_MIN +#define DEC32_MIN __DEC32_MIN__ +#define DEC64_MIN __DEC64_MIN__ +#define DEC128_MIN __DEC128_MIN__ + +/* Minimum subnormal positive floating-point number. 
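+   (1E-101DF, 1E-398DD and 1E-6176DL respectively for the IEEE 754-2008
+   decimal interchange formats.)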
*/ +#undef DEC32_SUBNORMAL_MIN +#undef DEC64_SUBNORMAL_MIN +#undef DEC128_SUBNORMAL_MIN +#define DEC32_SUBNORMAL_MIN __DEC32_SUBNORMAL_MIN__ +#define DEC64_SUBNORMAL_MIN __DEC64_SUBNORMAL_MIN__ +#define DEC128_SUBNORMAL_MIN __DEC128_SUBNORMAL_MIN__ + +/* The floating-point expression evaluation method. + -1 indeterminate + 0 evaluate all operations and constants just to the range and + precision of the type + 1 evaluate operations and constants of type _Decimal32 + and _Decimal64 to the range and precision of the _Decimal64 + type, evaluate _Decimal128 operations and constants to the + range and precision of the _Decimal128 type; + 2 evaluate all operations and constants to the range and + precision of the _Decimal128 type. */ + +#undef DEC_EVAL_METHOD +#define DEC_EVAL_METHOD __DEC_EVAL_METHOD__ + +#endif /* __STDC_WANT_DEC_FP__ */ + +#endif /* _FLOAT_H___ */ diff --git a/lib/gcc/i686-linux-android/4.6/include/fma4intrin.h b/lib/gcc/i686-linux-android/4.6/include/fma4intrin.h new file mode 100644 index 0000000..ae30bfe --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/fma4intrin.h @@ -0,0 +1,236 @@ +/* Copyright (C) 2007, 2008, 2009, 2010 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _X86INTRIN_H_INCLUDED +# error "Never use <fma4intrin.h> directly; include <x86intrin.h> instead." +#endif + +#ifndef _FMA4INTRIN_H_INCLUDED +#define _FMA4INTRIN_H_INCLUDED + +#ifndef __FMA4__ +# error "FMA4 instruction set not enabled" +#else + +/* We need definitions from the SSE4A, SSE3, SSE2 and SSE header files. */ +#include <ammintrin.h> + +/* 128b Floating point multiply/add type instructions. 
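+   Each expands to a single FMA4 instruction, so the multiply and add are
+   fused with only one rounding step; _mm_macc_ps (__A, __B, __C), for
+   example, computes __A * __B + __C element-wise.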
*/ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_macc_ps (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddps ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_macc_pd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddpd ((__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_macc_ss (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddss ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_macc_sd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsd ((__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_msub_ps (__m128 __A, __m128 __B, __m128 __C) + +{ + return (__m128) __builtin_ia32_vfmaddps ((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_msub_pd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddpd ((__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_msub_ss (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddss ((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_msub_sd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsd ((__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_nmacc_ps (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddps (-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_nmacc_pd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddpd (-(__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_nmacc_ss (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddss (-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_nmacc_sd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsd (-(__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_nmsub_ps (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddps (-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_nmsub_pd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddpd (-(__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_nmsub_ss (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddss (-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +extern 
__inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_nmsub_sd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsd (-(__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maddsub_ps (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddsubps ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maddsub_pd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsubpd ((__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_msubadd_ps (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddsubps ((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_msubadd_pd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsubpd ((__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +/* 256b Floating point multiply/add type instructions. */ +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_macc_ps (__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_vfmaddps256 ((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_macc_pd (__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_vfmaddpd256 ((__v4df)__A, (__v4df)__B, (__v4df)__C); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_msub_ps (__m256 __A, __m256 __B, __m256 __C) + +{ + return (__m256) __builtin_ia32_vfmaddps256 ((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_msub_pd (__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_vfmaddpd256 ((__v4df)__A, (__v4df)__B, -(__v4df)__C); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_nmacc_ps (__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_vfmaddps256 (-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_nmacc_pd (__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_vfmaddpd256 (-(__v4df)__A, (__v4df)__B, (__v4df)__C); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_nmsub_ps (__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_vfmaddps256 (-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_nmsub_pd (__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_vfmaddpd256 (-(__v4df)__A, (__v4df)__B, -(__v4df)__C); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maddsub_ps (__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_vfmaddsubps256 ((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maddsub_pd 
(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_vfmaddsubpd256 ((__v4df)__A, (__v4df)__B, (__v4df)__C); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_msubadd_ps (__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_vfmaddsubps256 ((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_msubadd_pd (__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_vfmaddsubpd256 ((__v4df)__A, (__v4df)__B, -(__v4df)__C); +} + +#endif + +#endif diff --git a/lib/gcc/i686-linux-android/4.6/include/ia32intrin.h b/lib/gcc/i686-linux-android/4.6/include/ia32intrin.h new file mode 100644 index 0000000..76c20a6 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/ia32intrin.h @@ -0,0 +1,234 @@ +/* Copyright (C) 2009, 2010 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _X86INTRIN_H_INCLUDED +# error "Never use <ia32intrin.h> directly; include <x86intrin.h> instead." +#endif + +/* 32bit bsf */ +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__bsfd (int __X) +{ + return __builtin_ctz (__X); +} + +/* 32bit bsr */ +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__bsrd (int __X) +{ + return __builtin_ia32_bsrsi (__X); +} + +/* 32bit bswap */ +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__bswapd (int __X) +{ + return __builtin_bswap32 (__X); +} + +#ifdef __SSE4_2__ +/* 32bit accumulate CRC32 (polynomial 0x11EDC6F41) value. 
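+   These accumulate, so a buffer can be hashed by chaining the running
+   value through successive calls.  A minimal sketch, assuming a
+   hypothetical buffer __buf of __n bytes:
+
+     unsigned int __c = 0xffffffff;
+     for (size_t __i = 0; __i < __n; __i++)
+       __c = __crc32b (__c, __buf[__i]);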
*/ +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__crc32b (unsigned int __C, unsigned char __V) +{ + return __builtin_ia32_crc32qi (__C, __V); +} + +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__crc32w (unsigned int __C, unsigned short __V) +{ + return __builtin_ia32_crc32hi (__C, __V); +} + +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__crc32d (unsigned int __C, unsigned int __V) +{ + return __builtin_ia32_crc32si (__C, __V); +} +#endif /* SSE4.2 */ + +/* 32bit popcnt */ +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__popcntd (unsigned int __X) +{ + return __builtin_popcount (__X); +} + +/* rdpmc */ +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rdpmc (int __S) +{ + return __builtin_ia32_rdpmc (__S); +} + +/* rdtsc */ +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rdtsc (void) +{ + return __builtin_ia32_rdtsc (); +} + +/* rdtscp */ +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rdtscp (unsigned int *__A) +{ + return __builtin_ia32_rdtscp (__A); +} + +/* 8bit rol */ +extern __inline unsigned char +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rolb (unsigned char __X, int __C) +{ + return __builtin_ia32_rolqi (__X, __C); +} + +/* 16bit rol */ +extern __inline unsigned short +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rolw (unsigned short __X, int __C) +{ + return __builtin_ia32_rolhi (__X, __C); +} + +/* 32bit rol */ +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rold (unsigned int __X, int __C) +{ + return (__X << __C) | (__X >> (32 - __C)); +} + +/* 8bit ror */ +extern __inline unsigned char +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rorb (unsigned char __X, int __C) +{ + return __builtin_ia32_rorqi (__X, __C); +} + +/* 16bit ror */ +extern __inline unsigned short +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rorw (unsigned short __X, int __C) +{ + return __builtin_ia32_rorhi (__X, __C); +} + +/* 32bit ror */ +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rord (unsigned int __X, int __C) +{ + return (__X >> __C) | (__X << (32 - __C)); +} + +#ifdef __x86_64__ +/* 64bit bsf */ +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__bsfq (long long __X) +{ + return __builtin_ctzll (__X); +} + +/* 64bit bsr */ +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__bsrq (long long __X) +{ + return __builtin_ia32_bsrdi (__X); +} + +/* 64bit bswap */ +extern __inline long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__bswapq (long long __X) +{ + return __builtin_bswap64 (__X); +} + +#ifdef __SSE4_2__ +/* 64bit accumulate CRC32 (polynomial 0x11EDC6F41) value. 
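
   As an illustrative sketch (not from the original header; the helper
   name is hypothetical), the 64-bit form folds eight bytes per call, so
   a quadword-aligned buffer can be consumed a quadword at a time:

     unsigned int
     crc32_quadwords (unsigned int __crc, unsigned long long const *__q,
                      unsigned long __n)
     {
       unsigned long long __acc = __crc;
       while (__n--)
         __acc = __crc32q (__acc, *__q++);  // fold eight bytes at once
       return (unsigned int) __acc;         // the CRC fits in 32 bits
     }
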
*/ +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__crc32q (unsigned long long __C, unsigned long long __V) +{ + return __builtin_ia32_crc32di (__C, __V); +} +#endif + +/* 64bit popcnt */ +extern __inline long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__popcntq (unsigned long long __X) +{ + return __builtin_popcountll (__X); +} + +/* 64bit rol */ +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rolq (unsigned long long __X, int __C) +{ + return (__X << __C) | (__X >> (64 - __C)); +} + +/* 64bit ror */ +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rorq (unsigned long long __X, int __C) +{ + return (__X >> __C) | (__X << (64 - __C)); +} + +#define _bswap64(a) __bswapq(a) +#define _popcnt64(a) __popcntq(a) +#define _lrotl(a,b) __rolq((a), (b)) +#define _lrotr(a,b) __rorq((a), (b)) +#else +#define _lrotl(a,b) __rold((a), (b)) +#define _lrotr(a,b) __rord((a), (b)) +#endif + +#define _bit_scan_forward(a) __bsfd(a) +#define _bit_scan_reverse(a) __bsrd(a) +#define _bswap(a) __bswapd(a) +#define _popcnt32(a) __popcntd(a) +#define _rdpmc(a) __rdpmc(a) +#define _rdtsc() __rdtsc() +#define _rdtscp(a) __rdtscp(a) +#define _rotwl(a,b) __rolw((a), (b)) +#define _rotwr(a,b) __rorw((a), (b)) +#define _rotl(a,b) __rold((a), (b)) +#define _rotr(a,b) __rord((a), (b)) diff --git a/lib/gcc/i686-linux-android/4.6/include/immintrin.h b/lib/gcc/i686-linux-android/4.6/include/immintrin.h new file mode 100644 index 0000000..11a1a4e --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/immintrin.h @@ -0,0 +1,203 @@ +/* Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. 
*/ + +#ifndef _IMMINTRIN_H_INCLUDED +#define _IMMINTRIN_H_INCLUDED + +#ifdef __MMX__ +#include <mmintrin.h> +#endif + +#ifdef __SSE__ +#include <xmmintrin.h> +#endif + +#ifdef __SSE2__ +#include <emmintrin.h> +#endif + +#ifdef __SSE3__ +#include <pmmintrin.h> +#endif + +#ifdef __SSSE3__ +#include <tmmintrin.h> +#endif + +#if defined (__SSE4_2__) || defined (__SSE4_1__) +#include <smmintrin.h> +#endif + +#if defined (__AES__) || defined (__PCLMUL__) +#include <wmmintrin.h> +#endif + +#ifdef __AVX__ +#include <avxintrin.h> +#endif + +#ifdef __RDRND__ +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_rdrand16_step (unsigned short *__P) +{ + return __builtin_ia32_rdrand16_step (__P); +} + +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_rdrand32_step (unsigned int *__P) +{ + return __builtin_ia32_rdrand32_step (__P); +} +#endif /* __RDRND__ */ + +#ifdef __x86_64__ +#ifdef __FSGSBASE__ +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_readfsbase_u32 (void) +{ + return __builtin_ia32_rdfsbase32 (); +} + +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_readfsbase_u64 (void) +{ + return __builtin_ia32_rdfsbase64 (); +} + +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_readgsbase_u32 (void) +{ + return __builtin_ia32_rdgsbase32 (); +} + +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_readgsbase_u64 (void) +{ + return __builtin_ia32_rdgsbase64 (); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_writefsbase_u32 (unsigned int __B) +{ + __builtin_ia32_wrfsbase32 (__B); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_writefsbase_u64 (unsigned long long __B) +{ + __builtin_ia32_wrfsbase64 (__B); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_writegsbase_u32 (unsigned int __B) +{ + __builtin_ia32_wrgsbase32 (__B); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_writegsbase_u64 (unsigned long long __B) +{ + __builtin_ia32_wrgsbase64 (__B); +} +#endif /* __FSGSBASE__ */ + +#ifdef __RDRND__ +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_rdrand64_step (unsigned long long *__P) +{ + return __builtin_ia32_rdrand64_step (__P); +} +#endif /* __RDRND__ */ +#endif /* __x86_64__ */ + +#ifdef __F16C__ +extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_cvtsh_ss (unsigned short __S) +{ + __v8hi __H = __extension__ (__v8hi){ __S, 0, 0, 0, 0, 0, 0, 0 }; + __v4sf __A = __builtin_ia32_vcvtph2ps (__H); + return __builtin_ia32_vec_ext_v4sf (__A, 0); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtph_ps (__m128i __A) +{ + return (__m128) __builtin_ia32_vcvtph2ps ((__v8hi) __A); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtph_ps (__m128i __A) +{ + return (__m256) __builtin_ia32_vcvtph2ps256 ((__v8hi) __A); +} + +#ifdef __OPTIMIZE__ +extern __inline unsigned short __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_cvtss_sh (float __F, const int __I) +{ + __v4sf __A = __extension__ (__v4sf){ __F, 0, 0, 0 }; + __v8hi __H = 
__builtin_ia32_vcvtps2ph (__A, __I); + return (unsigned short) __builtin_ia32_vec_ext_v8hi (__H, 0); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtps_ph (__m128 __A, const int __I) +{ + return (__m128i) __builtin_ia32_vcvtps2ph ((__v4sf) __A, __I); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtps_ph (__m256 __A, const int __I) +{ + return (__m128i) __builtin_ia32_vcvtps2ph256 ((__v8sf) __A, __I); +} +#else +#define _cvtss_sh(__F, __I) \ + (__extension__ \ + ({ \ + __v4sf __A = __extension__ (__v4sf){ __F, 0, 0, 0 }; \ + __v8hi __H = __builtin_ia32_vcvtps2ph (__A, __I); \ + (unsigned short) __builtin_ia32_vec_ext_v8hi (__H, 0); \ + })) + +#define _mm_cvtps_ph(A, I) \ + ((__m128i) __builtin_ia32_vcvtps2ph ((__v4sf)(__m128) A, (int) (I))) + +#define _mm256_cvtps_ph(A, I) \ + ((__m128i) __builtin_ia32_vcvtps2ph256 ((__v8sf)(__m256) A, (int) (I))) +#endif + +#endif /* __F16C__ */ + +#endif /* _IMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-linux-android/4.6/include/iso646.h b/lib/gcc/i686-linux-android/4.6/include/iso646.h new file mode 100644 index 0000000..28ff9d1 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/iso646.h @@ -0,0 +1,45 @@ +/* Copyright (C) 1997, 1999, 2009 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +<http://www.gnu.org/licenses/>. */ + +/* + * ISO C Standard: 7.9 Alternative spellings <iso646.h> + */ + +#ifndef _ISO646_H +#define _ISO646_H + +#ifndef __cplusplus +#define and && +#define and_eq &= +#define bitand & +#define bitor | +#define compl ~ +#define not ! +#define not_eq != +#define or || +#define or_eq |= +#define xor ^ +#define xor_eq ^= +#endif + +#endif diff --git a/lib/gcc/i686-linux-android/4.6/include/lwpintrin.h b/lib/gcc/i686-linux-android/4.6/include/lwpintrin.h new file mode 100644 index 0000000..954b039 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/lwpintrin.h @@ -0,0 +1,100 @@ +/* Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _X86INTRIN_H_INCLUDED +# error "Never use <lwpintrin.h> directly; include <x86intrin.h> instead." +#endif + +#ifndef _LWPINTRIN_H_INCLUDED +#define _LWPINTRIN_H_INCLUDED + +#ifndef __LWP__ +# error "LWP instruction set not enabled" +#else + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__llwpcb (void *pcbAddress) +{ + __builtin_ia32_llwpcb (pcbAddress); +} + +extern __inline void * __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__slwpcb (void) +{ + return __builtin_ia32_slwpcb (); +} + +#ifdef __OPTIMIZE__ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__lwpval32 (unsigned int data2, unsigned int data1, unsigned int flags) +{ + __builtin_ia32_lwpval32 (data2, data1, flags); +} + +#ifdef __x86_64__ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__lwpval64 (unsigned long long data2, unsigned int data1, unsigned int flags) +{ + __builtin_ia32_lwpval64 (data2, data1, flags); +} +#endif +#else +#define __lwpval32(D2, D1, F) \ + (__builtin_ia32_lwpval32 ((unsigned int) (D2), (unsigned int) (D1), \ + (unsigned int) (F))) +#ifdef __x86_64__ +#define __lwpval64(D2, D1, F) \ + (__builtin_ia32_lwpval64 ((unsigned long long) (D2), (unsigned int) (D1), \ + (unsigned int) (F))) +#endif +#endif + + +#ifdef __OPTIMIZE__ +extern __inline unsigned char __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__lwpins32 (unsigned int data2, unsigned int data1, unsigned int flags) +{ + return __builtin_ia32_lwpins32 (data2, data1, flags); +} + +#ifdef __x86_64__ +extern __inline unsigned char __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__lwpins64 (unsigned long long data2, unsigned int data1, unsigned int flags) +{ + return __builtin_ia32_lwpins64 (data2, data1, flags); +} +#endif +#else +#define __lwpins32(D2, D1, F) \ + (__builtin_ia32_lwpins32 ((unsigned int) (D2), (unsigned int) (D1), \ + (unsigned int) (F))) +#ifdef __x86_64__ +#define __lwpins64(D2, D1, F) \ + (__builtin_ia32_lwpins64 ((unsigned long long) (D2), (unsigned int) (D1), \ + (unsigned int) (F))) +#endif +#endif + +#endif /* __LWP__ */ + +#endif /* _LWPINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-linux-android/4.6/include/mm3dnow.h b/lib/gcc/i686-linux-android/4.6/include/mm3dnow.h new file mode 100644 index 0000000..0d0735c --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/mm3dnow.h @@ -0,0 +1,215 @@ +/* Copyright (C) 2004, 2007, 2008, 2009 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Implemented from the mm3dnow.h (of supposedly AMD origin) included with + MSVC 7.1. */ + +#ifndef _MM3DNOW_H_INCLUDED +#define _MM3DNOW_H_INCLUDED + +#ifdef __3dNOW__ + +#include <mmintrin.h> + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_femms (void) +{ + __builtin_ia32_femms(); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pavgusb (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pavgusb ((__v8qi)__A, (__v8qi)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pf2id (__m64 __A) +{ + return (__m64)__builtin_ia32_pf2id ((__v2sf)__A); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfacc (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfacc ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfadd (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfadd ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfcmpeq (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfcmpeq ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfcmpge (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfcmpge ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfcmpgt (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfcmpgt ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfmax (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfmax ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfmin (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfmin ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfmul (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfmul ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfrcp (__m64 __A) +{ + return (__m64)__builtin_ia32_pfrcp ((__v2sf)__A); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfrcpit1 (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfrcpit1 ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfrcpit2 (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfrcpit2 ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfrsqrt (__m64 __A) +{ + return (__m64)__builtin_ia32_pfrsqrt ((__v2sf)__A); +} + +extern __inline __m64 __attribute__((__gnu_inline__, 
__always_inline__, __artificial__)) +_m_pfrsqit1 (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfrsqit1 ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfsub (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfsub ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfsubr (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfsubr ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pi2fd (__m64 __A) +{ + return (__m64)__builtin_ia32_pi2fd ((__v2si)__A); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pmulhrw (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pmulhrw ((__v4hi)__A, (__v4hi)__B); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_prefetch (void *__P) +{ + __builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_prefetchw (void *__P) +{ + __builtin_prefetch (__P, 1, 3 /* _MM_HINT_T0 */); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_from_float (float __A) +{ + return __extension__ (__m64)(__v2sf){ __A, 0.0f }; +} + +extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_to_float (__m64 __A) +{ + union { __v2sf v; float a[2]; } __tmp; + __tmp.v = (__v2sf)__A; + return __tmp.a[0]; +} + +#ifdef __3dNOW_A__ + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pf2iw (__m64 __A) +{ + return (__m64)__builtin_ia32_pf2iw ((__v2sf)__A); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfnacc (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfnacc ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfpnacc (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfpnacc ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pi2fw (__m64 __A) +{ + return (__m64)__builtin_ia32_pi2fw ((__v2si)__A); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pswapd (__m64 __A) +{ + return (__m64)__builtin_ia32_pswapdsf ((__v2sf)__A); +} + +#endif /* __3dNOW_A__ */ +#endif /* __3dNOW__ */ + +#endif /* _MM3DNOW_H_INCLUDED */ diff --git a/lib/gcc/i686-linux-android/4.6/include/mm_malloc.h b/lib/gcc/i686-linux-android/4.6/include/mm_malloc.h new file mode 100644 index 0000000..0a9f2e2 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/mm_malloc.h @@ -0,0 +1,57 @@ +/* Copyright (C) 2004, 2006, 2009 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _MM_MALLOC_H_INCLUDED +#define _MM_MALLOC_H_INCLUDED + +#include <stdlib.h> + +/* We can't depend on <stdlib.h> since the prototype of posix_memalign + may not be visible. */ +#ifndef __cplusplus +extern int posix_memalign (void **, size_t, size_t); +#else +extern "C" int posix_memalign (void **, size_t, size_t) throw (); +#endif + +static __inline void * +_mm_malloc (size_t size, size_t alignment) +{ + void *ptr; + if (alignment == 1) + return malloc (size); + if (alignment == 2 || (sizeof (void *) == 8 && alignment == 4)) + alignment = sizeof (void *); + if (posix_memalign (&ptr, alignment, size) == 0) + return ptr; + else + return NULL; +} + +static __inline void +_mm_free (void * ptr) +{ + free (ptr); +} + +#endif /* _MM_MALLOC_H_INCLUDED */ diff --git a/lib/gcc/i686-linux-android/4.6/include/mmintrin.h b/lib/gcc/i686-linux-android/4.6/include/mmintrin.h new file mode 100644 index 0000000..497e22e --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/mmintrin.h @@ -0,0 +1,921 @@ +/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 + Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 9.0. */ + +#ifndef _MMINTRIN_H_INCLUDED +#define _MMINTRIN_H_INCLUDED + +#ifndef __MMX__ +# error "MMX instruction set not enabled" +#else +/* The Intel API is flexible enough that we must allow aliasing with other + vector types, and their scalar components. */ +typedef int __m64 __attribute__ ((__vector_size__ (8), __may_alias__)); + +/* Internal data types for implementing the intrinsics. */ +typedef int __v2si __attribute__ ((__vector_size__ (8))); +typedef short __v4hi __attribute__ ((__vector_size__ (8))); +typedef char __v8qi __attribute__ ((__vector_size__ (8))); +typedef long long __v1di __attribute__ ((__vector_size__ (8))); +typedef float __v2sf __attribute__ ((__vector_size__ (8))); + +/* Empty the multimedia state. 
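
   The MMX registers alias the x87 floating-point stack, so any MMX
   sequence should end with _mm_empty before ordinary floating-point
   code runs again.  A minimal sketch (editorial illustration, not part
   of the original header; the function name is hypothetical):

     void
     use_mmx (__m64 __a, __m64 __b, double *__out)
     {
       __m64 __sum = _mm_add_pi16 (__a, __b);  // MMX work
       _mm_empty ();                           // release the x87/MMX state
       *__out = 1.0;                           // x87 code is safe again
       (void) __sum;
     }
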
*/ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_empty (void) +{ + __builtin_ia32_emms (); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_empty (void) +{ + _mm_empty (); +} + +/* Convert I to a __m64 object. The integer is zero-extended to 64 bits. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi32_si64 (int __i) +{ + return (__m64) __builtin_ia32_vec_init_v2si (__i, 0); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_from_int (int __i) +{ + return _mm_cvtsi32_si64 (__i); +} + +#ifdef __x86_64__ +/* Convert I to a __m64 object. */ + +/* Intel intrinsic. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_from_int64 (long long __i) +{ + return (__m64) __i; +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64_m64 (long long __i) +{ + return (__m64) __i; +} + +/* Microsoft intrinsic. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64x_si64 (long long __i) +{ + return (__m64) __i; +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_pi64x (long long __i) +{ + return (__m64) __i; +} +#endif + +/* Convert the lower 32 bits of the __m64 object into an integer. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64_si32 (__m64 __i) +{ + return __builtin_ia32_vec_ext_v2si ((__v2si)__i, 0); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_to_int (__m64 __i) +{ + return _mm_cvtsi64_si32 (__i); +} + +#ifdef __x86_64__ +/* Convert the __m64 object to a 64bit integer. */ + +/* Intel intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_to_int64 (__m64 __i) +{ + return (long long)__i; +} + +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtm64_si64 (__m64 __i) +{ + return (long long)__i; +} + +/* Microsoft intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64_si64x (__m64 __i) +{ + return (long long)__i; +} +#endif + +/* Pack the four 16-bit values from M1 into the lower four 8-bit values of + the result, and the four 16-bit values from M2 into the upper four 8-bit + values of the result, all with signed saturation. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_packs_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_packsswb ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_packsswb (__m64 __m1, __m64 __m2) +{ + return _mm_packs_pi16 (__m1, __m2); +} + +/* Pack the two 32-bit values from M1 into the lower two 16-bit values of + the result, and the two 32-bit values from M2 into the upper two 16-bit + values of the result, all with signed saturation.
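
   For instance (an illustrative sketch, not from the original header),
   values outside the 16-bit signed range clamp to its endpoints rather
   than wrapping:

     __m64 __w = _mm_set_pi32 (70000, -70000);
     __m64 __p = _mm_packs_pi32 (__w, __w);
     // each 16-bit lane is 32767 or -32768, the saturated endpoints
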
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_packs_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_packssdw ((__v2si)__m1, (__v2si)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_packssdw (__m64 __m1, __m64 __m2) +{ + return _mm_packs_pi32 (__m1, __m2); +} + +/* Pack the four 16-bit values from M1 into the lower four 8-bit values of + the result, and the four 16-bit values from M2 into the upper four 8-bit + values of the result, all with unsigned saturation. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_packs_pu16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_packuswb ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_packuswb (__m64 __m1, __m64 __m2) +{ + return _mm_packs_pu16 (__m1, __m2); +} + +/* Interleave the four 8-bit values from the high half of M1 with the four + 8-bit values from the high half of M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpckhbw ((__v8qi)__m1, (__v8qi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_punpckhbw (__m64 __m1, __m64 __m2) +{ + return _mm_unpackhi_pi8 (__m1, __m2); +} + +/* Interleave the two 16-bit values from the high half of M1 with the two + 16-bit values from the high half of M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpckhwd ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_punpckhwd (__m64 __m1, __m64 __m2) +{ + return _mm_unpackhi_pi16 (__m1, __m2); +} + +/* Interleave the 32-bit value from the high half of M1 with the 32-bit + value from the high half of M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpckhdq ((__v2si)__m1, (__v2si)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_punpckhdq (__m64 __m1, __m64 __m2) +{ + return _mm_unpackhi_pi32 (__m1, __m2); +} + +/* Interleave the four 8-bit values from the low half of M1 with the four + 8-bit values from the low half of M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpcklbw ((__v8qi)__m1, (__v8qi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_punpcklbw (__m64 __m1, __m64 __m2) +{ + return _mm_unpacklo_pi8 (__m1, __m2); +} + +/* Interleave the two 16-bit values from the low half of M1 with the two + 16-bit values from the low half of M2. 
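
   A sketch of the interleaving (editorial illustration, not part of the
   original header): with 16-bit lanes {a0,a1,a2,a3} and {b0,b1,b2,b3},
   the low-half unpack yields {a0,b0,a1,b1}:

     __m64 __m1 = _mm_set_pi16 (3, 2, 1, 0);       // lanes 0..3 hold 0..3
     __m64 __m2 = _mm_set_pi16 (7, 6, 5, 4);       // lanes 0..3 hold 4..7
     __m64 __lo = _mm_unpacklo_pi16 (__m1, __m2);  // lanes: 0, 4, 1, 5
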
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpcklwd ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_punpcklwd (__m64 __m1, __m64 __m2) +{ + return _mm_unpacklo_pi16 (__m1, __m2); +} + +/* Interleave the 32-bit value from the low half of M1 with the 32-bit + value from the low half of M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpckldq ((__v2si)__m1, (__v2si)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_punpckldq (__m64 __m1, __m64 __m2) +{ + return _mm_unpacklo_pi32 (__m1, __m2); +} + +/* Add the 8-bit values in M1 to the 8-bit values in M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddb ((__v8qi)__m1, (__v8qi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_paddb (__m64 __m1, __m64 __m2) +{ + return _mm_add_pi8 (__m1, __m2); +} + +/* Add the 16-bit values in M1 to the 16-bit values in M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddw ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_paddw (__m64 __m1, __m64 __m2) +{ + return _mm_add_pi16 (__m1, __m2); +} + +/* Add the 32-bit values in M1 to the 32-bit values in M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddd ((__v2si)__m1, (__v2si)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_paddd (__m64 __m1, __m64 __m2) +{ + return _mm_add_pi32 (__m1, __m2); +} + +/* Add the 64-bit values in M1 to the 64-bit values in M2. */ +#ifdef __SSE2__ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_si64 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddq ((__v1di)__m1, (__v1di)__m2); +} +#endif + +/* Add the 8-bit values in M1 to the 8-bit values in M2 using signed + saturated arithmetic. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_adds_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddsb ((__v8qi)__m1, (__v8qi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_paddsb (__m64 __m1, __m64 __m2) +{ + return _mm_adds_pi8 (__m1, __m2); +} + +/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed + saturated arithmetic. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_adds_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddsw ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_paddsw (__m64 __m1, __m64 __m2) +{ + return _mm_adds_pi16 (__m1, __m2); +} + +/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned + saturated arithmetic. 
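
   A small sketch (illustrative, not from the original header): unsigned
   saturation clamps at 0xFF instead of wrapping around:

     __m64 __a = _mm_set1_pi8 ((char) 0xF0);
     __m64 __b = _mm_set1_pi8 ((char) 0x20);
     __m64 __r = _mm_adds_pu8 (__a, __b);  // every byte is 0xFF, not 0x10
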
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_adds_pu8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddusb ((__v8qi)__m1, (__v8qi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_paddusb (__m64 __m1, __m64 __m2) +{ + return _mm_adds_pu8 (__m1, __m2); +} + +/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned + saturated arithmetic. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_adds_pu16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddusw ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_paddusw (__m64 __m1, __m64 __m2) +{ + return _mm_adds_pu16 (__m1, __m2); +} + +/* Subtract the 8-bit values in M2 from the 8-bit values in M1. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubb ((__v8qi)__m1, (__v8qi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psubb (__m64 __m1, __m64 __m2) +{ + return _mm_sub_pi8 (__m1, __m2); +} + +/* Subtract the 16-bit values in M2 from the 16-bit values in M1. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubw ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psubw (__m64 __m1, __m64 __m2) +{ + return _mm_sub_pi16 (__m1, __m2); +} + +/* Subtract the 32-bit values in M2 from the 32-bit values in M1. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubd ((__v2si)__m1, (__v2si)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psubd (__m64 __m1, __m64 __m2) +{ + return _mm_sub_pi32 (__m1, __m2); +} + +/* Subtract the 64-bit value in M2 from the 64-bit value in M1. */ +#ifdef __SSE2__ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_si64 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubq ((__v1di)__m1, (__v1di)__m2); +} +#endif + +/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed + saturating arithmetic. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_subs_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubsb ((__v8qi)__m1, (__v8qi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psubsb (__m64 __m1, __m64 __m2) +{ + return _mm_subs_pi8 (__m1, __m2); +} + +/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using + signed saturating arithmetic. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_subs_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubsw ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psubsw (__m64 __m1, __m64 __m2) +{ + return _mm_subs_pi16 (__m1, __m2); +} + +/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using + unsigned saturating arithmetic.
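
   One common idiom (an illustrative sketch, not part of the original
   header; the helper name is hypothetical) builds a per-byte absolute
   difference from two saturating subtractions, since each direction
   clamps its negative lanes to zero:

     __m64
     abs_diff_u8 (__m64 __x, __m64 __y)
     {
       return _mm_or_si64 (_mm_subs_pu8 (__x, __y),
                           _mm_subs_pu8 (__y, __x));
     }
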
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_subs_pu8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubusb ((__v8qi)__m1, (__v8qi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psubusb (__m64 __m1, __m64 __m2) +{ + return _mm_subs_pu8 (__m1, __m2); +} + +/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using + unsigned saturating arithmetic. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_subs_pu16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubusw ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psubusw (__m64 __m1, __m64 __m2) +{ + return _mm_subs_pu16 (__m1, __m2); +} + +/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing + four 32-bit intermediate results, which are then summed by pairs to + produce two 32-bit results. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_madd_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pmaddwd ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pmaddwd (__m64 __m1, __m64 __m2) +{ + return _mm_madd_pi16 (__m1, __m2); +} + +/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in + M2 and produce the high 16 bits of the 32-bit results. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mulhi_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pmulhw ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pmulhw (__m64 __m1, __m64 __m2) +{ + return _mm_mulhi_pi16 (__m1, __m2); +} + +/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce + the low 16 bits of the results. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mullo_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pmullw ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pmullw (__m64 __m1, __m64 __m2) +{ + return _mm_mullo_pi16 (__m1, __m2); +} + +/* Shift four 16-bit values in M left by COUNT. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sll_pi16 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_ia32_psllw ((__v4hi)__m, (__v4hi)__count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psllw (__m64 __m, __m64 __count) +{ + return _mm_sll_pi16 (__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_slli_pi16 (__m64 __m, int __count) +{ + return (__m64) __builtin_ia32_psllwi ((__v4hi)__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psllwi (__m64 __m, int __count) +{ + return _mm_slli_pi16 (__m, __count); +} + +/* Shift two 32-bit values in M left by COUNT. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sll_pi32 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_ia32_pslld ((__v2si)__m, (__v2si)__count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pslld (__m64 __m, __m64 __count) +{ + return _mm_sll_pi32 (__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_slli_pi32 (__m64 __m, int __count) +{ + return (__m64) __builtin_ia32_pslldi ((__v2si)__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pslldi (__m64 __m, int __count) +{ + return _mm_slli_pi32 (__m, __count); +} + +/* Shift the 64-bit value in M left by COUNT. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sll_si64 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_ia32_psllq ((__v1di)__m, (__v1di)__count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psllq (__m64 __m, __m64 __count) +{ + return _mm_sll_si64 (__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_slli_si64 (__m64 __m, int __count) +{ + return (__m64) __builtin_ia32_psllqi ((__v1di)__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psllqi (__m64 __m, int __count) +{ + return _mm_slli_si64 (__m, __count); +} + +/* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sra_pi16 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_ia32_psraw ((__v4hi)__m, (__v4hi)__count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psraw (__m64 __m, __m64 __count) +{ + return _mm_sra_pi16 (__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srai_pi16 (__m64 __m, int __count) +{ + return (__m64) __builtin_ia32_psrawi ((__v4hi)__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psrawi (__m64 __m, int __count) +{ + return _mm_srai_pi16 (__m, __count); +} + +/* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sra_pi32 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_ia32_psrad ((__v2si)__m, (__v2si)__count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psrad (__m64 __m, __m64 __count) +{ + return _mm_sra_pi32 (__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srai_pi32 (__m64 __m, int __count) +{ + return (__m64) __builtin_ia32_psradi ((__v2si)__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psradi (__m64 __m, int __count) +{ + return _mm_srai_pi32 (__m, __count); +} + +/* Shift four 16-bit values in M right by COUNT; shift in zeros. 
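
   To contrast the two right shifts (an editorial sketch, not from the
   original header): the arithmetic form propagates the sign bit while
   the logical form always inserts zeros:

     __m64 __v = _mm_set1_pi16 ((short) 0x8000);
     __m64 __sra = _mm_srai_pi16 (__v, 4);  // lanes: 0xF800, sign-filled
     __m64 __srl = _mm_srli_pi16 (__v, 4);  // lanes: 0x0800, zero-filled
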
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srl_pi16 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, (__v4hi)__count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psrlw (__m64 __m, __m64 __count) +{ + return _mm_srl_pi16 (__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srli_pi16 (__m64 __m, int __count) +{ + return (__m64) __builtin_ia32_psrlwi ((__v4hi)__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psrlwi (__m64 __m, int __count) +{ + return _mm_srli_pi16 (__m, __count); +} + +/* Shift two 32-bit values in M right by COUNT; shift in zeros. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srl_pi32 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_ia32_psrld ((__v2si)__m, (__v2si)__count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psrld (__m64 __m, __m64 __count) +{ + return _mm_srl_pi32 (__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srli_pi32 (__m64 __m, int __count) +{ + return (__m64) __builtin_ia32_psrldi ((__v2si)__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psrldi (__m64 __m, int __count) +{ + return _mm_srli_pi32 (__m, __count); +} + +/* Shift the 64-bit value in M right by COUNT; shift in zeros. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srl_si64 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_ia32_psrlq ((__v1di)__m, (__v1di)__count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psrlq (__m64 __m, __m64 __count) +{ + return _mm_srl_si64 (__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srli_si64 (__m64 __m, int __count) +{ + return (__m64) __builtin_ia32_psrlqi ((__v1di)__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psrlqi (__m64 __m, int __count) +{ + return _mm_srli_si64 (__m, __count); +} + +/* Bit-wise AND the 64-bit values in M1 and M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_and_si64 (__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_pand (__m1, __m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pand (__m64 __m1, __m64 __m2) +{ + return _mm_and_si64 (__m1, __m2); +} + +/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the + 64-bit value in M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_andnot_si64 (__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_pandn (__m1, __m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pandn (__m64 __m1, __m64 __m2) +{ + return _mm_andnot_si64 (__m1, __m2); +} + +/* Bit-wise inclusive OR the 64-bit values in M1 and M2.
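
   These three operations combine into the classic branchless select
   (an illustrative sketch, not part of the original header; the helper
   name is hypothetical): bits of __t are taken where the mask is set,
   bits of __f elsewhere:

     __m64
     select_si64 (__m64 __mask, __m64 __t, __m64 __f)
     {
       return _mm_or_si64 (_mm_and_si64 (__mask, __t),
                           _mm_andnot_si64 (__mask, __f));
     }
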
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_or_si64 (__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_por (__m1, __m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_por (__m64 __m1, __m64 __m2) +{ + return _mm_or_si64 (__m1, __m2); +} + +/* Bit-wise exclusive OR the 64-bit values in M1 and M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_xor_si64 (__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_pxor (__m1, __m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pxor (__m64 __m1, __m64 __m2) +{ + return _mm_xor_si64 (__m1, __m2); +} + +/* Compare eight 8-bit values. The result of the comparison is 0xFF if the + test is true and zero if false. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpeqb ((__v8qi)__m1, (__v8qi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pcmpeqb (__m64 __m1, __m64 __m2) +{ + return _mm_cmpeq_pi8 (__m1, __m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpgtb ((__v8qi)__m1, (__v8qi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pcmpgtb (__m64 __m1, __m64 __m2) +{ + return _mm_cmpgt_pi8 (__m1, __m2); +} + +/* Compare four 16-bit values. The result of the comparison is 0xFFFF if + the test is true and zero if false. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpeqw ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pcmpeqw (__m64 __m1, __m64 __m2) +{ + return _mm_cmpeq_pi16 (__m1, __m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpgtw ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pcmpgtw (__m64 __m1, __m64 __m2) +{ + return _mm_cmpgt_pi16 (__m1, __m2); +} + +/* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if + the test is true and zero if false. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpeqd ((__v2si)__m1, (__v2si)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pcmpeqd (__m64 __m1, __m64 __m2) +{ + return _mm_cmpeq_pi32 (__m1, __m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpgtd ((__v2si)__m1, (__v2si)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pcmpgtd (__m64 __m1, __m64 __m2) +{ + return _mm_cmpgt_pi32 (__m1, __m2); +} + +/* Creates a 64-bit zero. 
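
   The all-ones results of the comparisons above are designed to serve
   as masks.  As an editorial sketch (not from the original header; the
   helper name is hypothetical), a per-lane signed maximum:

     __m64
     max_pi16 (__m64 __a, __m64 __b)
     {
       __m64 __gt = _mm_cmpgt_pi16 (__a, __b);  // 0xFFFF where a > b
       return _mm_or_si64 (_mm_and_si64 (__gt, __a),
                           _mm_andnot_si64 (__gt, __b));
     }
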
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setzero_si64 (void) +{ + return (__m64)0LL; +} + +/* Creates a vector of two 32-bit values; I0 is least significant. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_pi32 (int __i1, int __i0) +{ + return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1); +} + +/* Creates a vector of four 16-bit values; W0 is least significant. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_pi16 (short __w3, short __w2, short __w1, short __w0) +{ + return (__m64) __builtin_ia32_vec_init_v4hi (__w0, __w1, __w2, __w3); +} + +/* Creates a vector of eight 8-bit values; B0 is least significant. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_pi8 (char __b7, char __b6, char __b5, char __b4, + char __b3, char __b2, char __b1, char __b0) +{ + return (__m64) __builtin_ia32_vec_init_v8qi (__b0, __b1, __b2, __b3, + __b4, __b5, __b6, __b7); +} + +/* Similar, but with the arguments in reverse order. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_pi32 (int __i0, int __i1) +{ + return _mm_set_pi32 (__i1, __i0); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3) +{ + return _mm_set_pi16 (__w3, __w2, __w1, __w0); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3, + char __b4, char __b5, char __b6, char __b7) +{ + return _mm_set_pi8 (__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0); +} + +/* Creates a vector of two 32-bit values, both elements containing I. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_pi32 (int __i) +{ + return _mm_set_pi32 (__i, __i); +} + +/* Creates a vector of four 16-bit values, all elements containing W. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_pi16 (short __w) +{ + return _mm_set_pi16 (__w, __w, __w, __w); +} + +/* Creates a vector of eight 8-bit values, all elements containing B. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_pi8 (char __b) +{ + return _mm_set_pi8 (__b, __b, __b, __b, __b, __b, __b, __b); +} + +#endif /* __MMX__ */ +#endif /* _MMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-linux-android/4.6/include/nmmintrin.h b/lib/gcc/i686-linux-android/4.6/include/nmmintrin.h new file mode 100644 index 0000000..2a2d264 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/nmmintrin.h @@ -0,0 +1,37 @@ +/* Copyright (C) 2007, 2009 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 10.0. */ + +#ifndef _NMMINTRIN_H_INCLUDED +#define _NMMINTRIN_H_INCLUDED + +#ifndef __SSE4_2__ +# error "SSE4.2 instruction set not enabled" +#else +/* We just include SSE4.1 header file. */ +#include <smmintrin.h> +#endif /* __SSE4_2__ */ + +#endif /* _NMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-linux-android/4.6/include/pmmintrin.h b/lib/gcc/i686-linux-android/4.6/include/pmmintrin.h new file mode 100644 index 0000000..c5c9ae2 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/pmmintrin.h @@ -0,0 +1,128 @@ +/* Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 + Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 9.0. */ + +#ifndef _PMMINTRIN_H_INCLUDED +#define _PMMINTRIN_H_INCLUDED + +#ifndef __SSE3__ +# error "SSE3 instruction set not enabled" +#else + +/* We need definitions from the SSE2 and SSE header files. */ +#include <emmintrin.h> + +/* Additional bits in the MXCSR. 
*/ +#define _MM_DENORMALS_ZERO_MASK 0x0040 +#define _MM_DENORMALS_ZERO_ON 0x0040 +#define _MM_DENORMALS_ZERO_OFF 0x0000 + +#define _MM_SET_DENORMALS_ZERO_MODE(mode) \ + _mm_setcsr ((_mm_getcsr () & ~_MM_DENORMALS_ZERO_MASK) | (mode)) +#define _MM_GET_DENORMALS_ZERO_MODE() \ + (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK) + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_addsub_ps (__m128 __X, __m128 __Y) +{ + return (__m128) __builtin_ia32_addsubps ((__v4sf)__X, (__v4sf)__Y); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadd_ps (__m128 __X, __m128 __Y) +{ + return (__m128) __builtin_ia32_haddps ((__v4sf)__X, (__v4sf)__Y); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsub_ps (__m128 __X, __m128 __Y) +{ + return (__m128) __builtin_ia32_hsubps ((__v4sf)__X, (__v4sf)__Y); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movehdup_ps (__m128 __X) +{ + return (__m128) __builtin_ia32_movshdup ((__v4sf)__X); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_moveldup_ps (__m128 __X) +{ + return (__m128) __builtin_ia32_movsldup ((__v4sf)__X); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_addsub_pd (__m128d __X, __m128d __Y) +{ + return (__m128d) __builtin_ia32_addsubpd ((__v2df)__X, (__v2df)__Y); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadd_pd (__m128d __X, __m128d __Y) +{ + return (__m128d) __builtin_ia32_haddpd ((__v2df)__X, (__v2df)__Y); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsub_pd (__m128d __X, __m128d __Y) +{ + return (__m128d) __builtin_ia32_hsubpd ((__v2df)__X, (__v2df)__Y); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loaddup_pd (double const *__P) +{ + return _mm_load1_pd (__P); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movedup_pd (__m128d __X) +{ + return _mm_shuffle_pd (__X, __X, _MM_SHUFFLE2 (0,0)); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_lddqu_si128 (__m128i const *__P) +{ + return (__m128i) __builtin_ia32_lddqu ((char const *)__P); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_monitor (void const * __P, unsigned int __E, unsigned int __H) +{ + __builtin_ia32_monitor (__P, __E, __H); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mwait (unsigned int __E, unsigned int __H) +{ + __builtin_ia32_mwait (__E, __H); +} + +#endif /* __SSE3__ */ + +#endif /* _PMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-linux-android/4.6/include/popcntintrin.h b/lib/gcc/i686-linux-android/4.6/include/popcntintrin.h new file mode 100644 index 0000000..8d4d657 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/popcntintrin.h @@ -0,0 +1,46 @@ +/* Copyright (C) 2009 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef __POPCNT__ +# error "POPCNT instruction set not enabled" +#endif /* __POPCNT__ */ + +#ifndef _POPCNTINTRIN_H_INCLUDED +#define _POPCNTINTRIN_H_INCLUDED + +/* Calculate the number of bits set to 1. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_popcnt_u32 (unsigned int __X) +{ + return __builtin_popcount (__X); +} + +#ifdef __x86_64__ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_popcnt_u64 (unsigned long long __X) +{ + return __builtin_popcountll (__X); +} +#endif + +#endif /* _POPCNTINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-linux-android/4.6/include/smmintrin.h b/lib/gcc/i686-linux-android/4.6/include/smmintrin.h new file mode 100644 index 0000000..e12c56a --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/smmintrin.h @@ -0,0 +1,831 @@ +/* Copyright (C) 2007, 2008, 2009, 2010 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 10.0. */ + +#ifndef _SMMINTRIN_H_INCLUDED +#define _SMMINTRIN_H_INCLUDED + +#ifndef __SSE4_1__ +# error "SSE4.1 instruction set not enabled" +#else + +/* We need definitions from the SSSE3, SSE3, SSE2 and SSE header + files. */ +#include <tmmintrin.h> + +/* Rounding mode macros. 
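(An editor's usage sketch for _mm_hadd_ps from <pmmintrin.h>, pulled
+   in by the include above, follows; it is illustrative only and not
+   part of the original header.)  */
+
+/* Sketch: horizontal sum of four floats by two halving additions; the
+   helper name is hypothetical.  */
+extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+__example_hsum_ps (__m128 __v)
+{
+  __v = _mm_hadd_ps (__v, __v);  /* (a+b, c+d, a+b, c+d) */
+  __v = _mm_hadd_ps (__v, __v);  /* full sum in every lane */
+  return _mm_cvtss_f32 (__v);
+}
+
+/* Rounding mode macros.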
*/ +#define _MM_FROUND_TO_NEAREST_INT 0x00 +#define _MM_FROUND_TO_NEG_INF 0x01 +#define _MM_FROUND_TO_POS_INF 0x02 +#define _MM_FROUND_TO_ZERO 0x03 +#define _MM_FROUND_CUR_DIRECTION 0x04 + +#define _MM_FROUND_RAISE_EXC 0x00 +#define _MM_FROUND_NO_EXC 0x08 + +#define _MM_FROUND_NINT \ + (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC) +#define _MM_FROUND_FLOOR \ + (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC) +#define _MM_FROUND_CEIL \ + (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC) +#define _MM_FROUND_TRUNC \ + (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC) +#define _MM_FROUND_RINT \ + (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC) +#define _MM_FROUND_NEARBYINT \ + (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC) + +/* Test Instruction */ +/* Packed integer 128-bit bitwise comparison. Return 1 if + (__V & __M) == 0. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testz_si128 (__m128i __M, __m128i __V) +{ + return __builtin_ia32_ptestz128 ((__v2di)__M, (__v2di)__V); +} + +/* Packed integer 128-bit bitwise comparison. Return 1 if + (__V & ~__M) == 0. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testc_si128 (__m128i __M, __m128i __V) +{ + return __builtin_ia32_ptestc128 ((__v2di)__M, (__v2di)__V); +} + +/* Packed integer 128-bit bitwise comparison. Return 1 if + (__V & __M) != 0 && (__V & ~__M) != 0. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testnzc_si128 (__m128i __M, __m128i __V) +{ + return __builtin_ia32_ptestnzc128 ((__v2di)__M, (__v2di)__V); +} + +/* Macros for packed integer 128-bit comparison intrinsics. */ +#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V)) + +#define _mm_test_all_ones(V) \ + _mm_testc_si128 ((V), _mm_cmpeq_epi32 ((V), (V))) + +#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128 ((M), (V)) + +/* Packed/scalar double precision floating point rounding. */ + +#ifdef __OPTIMIZE__ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_round_pd (__m128d __V, const int __M) +{ + return (__m128d) __builtin_ia32_roundpd ((__v2df)__V, __M); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_round_sd(__m128d __D, __m128d __V, const int __M) +{ + return (__m128d) __builtin_ia32_roundsd ((__v2df)__D, + (__v2df)__V, + __M); +} +#else +#define _mm_round_pd(V, M) \ + ((__m128d) __builtin_ia32_roundpd ((__v2df)(__m128d)(V), (int)(M))) + +#define _mm_round_sd(D, V, M) \ + ((__m128d) __builtin_ia32_roundsd ((__v2df)(__m128d)(D), \ + (__v2df)(__m128d)(V), (int)(M))) +#endif + +/* Packed/scalar single precision floating point rounding. */ + +#ifdef __OPTIMIZE__ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_round_ps (__m128 __V, const int __M) +{ + return (__m128) __builtin_ia32_roundps ((__v4sf)__V, __M); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_round_ss (__m128 __D, __m128 __V, const int __M) +{ + return (__m128) __builtin_ia32_roundss ((__v4sf)__D, + (__v4sf)__V, + __M); +} +#else +#define _mm_round_ps(V, M) \ + ((__m128) __builtin_ia32_roundps ((__v4sf)(__m128)(V), (int)(M))) + +#define _mm_round_ss(D, V, M) \ + ((__m128) __builtin_ia32_roundss ((__v4sf)(__m128)(D), \ + (__v4sf)(__m128)(V), (int)(M))) +#endif + +/* Macros for ceil/floor intrinsics. 
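(An editor's usage sketch for the ptest intrinsics above follows;
+   it is illustrative only and not part of the original header.)  */
+
+/* Sketch: branch-friendly all-zero test; (__v & __v) == 0 exactly when
+   every bit of __v is clear.  The helper name is hypothetical.  */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+__example_is_all_zero (__m128i __v)
+{
+  return _mm_test_all_zeros (__v, __v);
+}
+
+/* Macros for ceil/floor intrinsics.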
*/ +#define _mm_ceil_pd(V) _mm_round_pd ((V), _MM_FROUND_CEIL) +#define _mm_ceil_sd(D, V) _mm_round_sd ((D), (V), _MM_FROUND_CEIL) + +#define _mm_floor_pd(V) _mm_round_pd((V), _MM_FROUND_FLOOR) +#define _mm_floor_sd(D, V) _mm_round_sd ((D), (V), _MM_FROUND_FLOOR) + +#define _mm_ceil_ps(V) _mm_round_ps ((V), _MM_FROUND_CEIL) +#define _mm_ceil_ss(D, V) _mm_round_ss ((D), (V), _MM_FROUND_CEIL) + +#define _mm_floor_ps(V) _mm_round_ps ((V), _MM_FROUND_FLOOR) +#define _mm_floor_ss(D, V) _mm_round_ss ((D), (V), _MM_FROUND_FLOOR) + +/* SSE4.1 */ + +/* Integer blend instructions - select data from 2 sources using + constant/variable mask. */ + +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_blend_epi16 (__m128i __X, __m128i __Y, const int __M) +{ + return (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__X, + (__v8hi)__Y, + __M); +} +#else +#define _mm_blend_epi16(X, Y, M) \ + ((__m128i) __builtin_ia32_pblendw128 ((__v8hi)(__m128i)(X), \ + (__v8hi)(__m128i)(Y), (int)(M))) +#endif + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_blendv_epi8 (__m128i __X, __m128i __Y, __m128i __M) +{ + return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__X, + (__v16qi)__Y, + (__v16qi)__M); +} + +/* Single precision floating point blend instructions - select data + from 2 sources using constant/variable mask. */ + +#ifdef __OPTIMIZE__ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_blend_ps (__m128 __X, __m128 __Y, const int __M) +{ + return (__m128) __builtin_ia32_blendps ((__v4sf)__X, + (__v4sf)__Y, + __M); +} +#else +#define _mm_blend_ps(X, Y, M) \ + ((__m128) __builtin_ia32_blendps ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(M))) +#endif + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_blendv_ps (__m128 __X, __m128 __Y, __m128 __M) +{ + return (__m128) __builtin_ia32_blendvps ((__v4sf)__X, + (__v4sf)__Y, + (__v4sf)__M); +} + +/* Double precision floating point blend instructions - select data + from 2 sources using constant/variable mask. */ + +#ifdef __OPTIMIZE__ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_blend_pd (__m128d __X, __m128d __Y, const int __M) +{ + return (__m128d) __builtin_ia32_blendpd ((__v2df)__X, + (__v2df)__Y, + __M); +} +#else +#define _mm_blend_pd(X, Y, M) \ + ((__m128d) __builtin_ia32_blendpd ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(M))) +#endif + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_blendv_pd (__m128d __X, __m128d __Y, __m128d __M) +{ + return (__m128d) __builtin_ia32_blendvpd ((__v2df)__X, + (__v2df)__Y, + (__v2df)__M); +} + +/* Dot product instructions with mask-defined summing and zeroing parts + of result. 
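(An editor's usage sketch for the variable blend intrinsics above
+   follows; it is illustrative only and not part of the original
+   header.)  */
+
+/* Sketch: per-lane maximum via compare-then-blend; _mm_blendv_ps takes
+   each lane from __b where the mask sign bit is set.  _mm_cmplt_ps
+   comes from <xmmintrin.h>; the helper name is hypothetical.  */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+__example_max_ps (__m128 __a, __m128 __b)
+{
+  return _mm_blendv_ps (__a, __b, _mm_cmplt_ps (__a, __b));
+}
+
+/* Dot product instructions with mask-defined summing and zeroing parts
+   of result.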
*/ + +#ifdef __OPTIMIZE__ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_dp_ps (__m128 __X, __m128 __Y, const int __M) +{ + return (__m128) __builtin_ia32_dpps ((__v4sf)__X, + (__v4sf)__Y, + __M); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_dp_pd (__m128d __X, __m128d __Y, const int __M) +{ + return (__m128d) __builtin_ia32_dppd ((__v2df)__X, + (__v2df)__Y, + __M); +} +#else +#define _mm_dp_ps(X, Y, M) \ + ((__m128) __builtin_ia32_dpps ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(M))) + +#define _mm_dp_pd(X, Y, M) \ + ((__m128d) __builtin_ia32_dppd ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(M))) +#endif + +/* Packed integer 64-bit comparison, zeroing or filling with ones + corresponding parts of result. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_epi64 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pcmpeqq ((__v2di)__X, (__v2di)__Y); +} + +/* Min/max packed integer instructions. */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_epi8 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pminsb128 ((__v16qi)__X, (__v16qi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_epi8 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi)__X, (__v16qi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_epu16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pminuw128 ((__v8hi)__X, (__v8hi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_epu16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi)__X, (__v8hi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pminsd128 ((__v4si)__X, (__v4si)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si)__X, (__v4si)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_epu32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pminud128 ((__v4si)__X, (__v4si)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_epu32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaxud128 ((__v4si)__X, (__v4si)__Y); +} + +/* Packed integer 32-bit multiplication with truncation of upper + halves of results. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mullo_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmulld128 ((__v4si)__X, (__v4si)__Y); +} + +/* Packed integer 32-bit multiplication of 2 pairs of operands + with two 64-bit results. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__X, (__v4si)__Y); +} + +/* Insert single precision float into packed single precision array + element selected by index N. 
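(An editor's usage sketch for the min/max intrinsics above follows;
+   it is illustrative only and not part of the original header.)  */
+
+/* Sketch: per-lane signed 32-bit clamp, max (__lo, min (__v, __hi));
+   the helper name is hypothetical.  */
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+__example_clamp_epi32 (__m128i __v, __m128i __lo, __m128i __hi)
+{
+  return _mm_max_epi32 (__lo, _mm_min_epi32 (__v, __hi));
+}
+
+/* Insert single precision float into packed single precision array
+   element selected by index N.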
The bits [7-6] of N define S + index, the bits [5-4] define D index, and bits [3-0] define + zeroing mask for D. */ + +#ifdef __OPTIMIZE__ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_insert_ps (__m128 __D, __m128 __S, const int __N) +{ + return (__m128) __builtin_ia32_insertps128 ((__v4sf)__D, + (__v4sf)__S, + __N); +} +#else +#define _mm_insert_ps(D, S, N) \ + ((__m128) __builtin_ia32_insertps128 ((__v4sf)(__m128)(D), \ + (__v4sf)(__m128)(S), (int)(N))) +#endif + +/* Helper macro to create the N value for _mm_insert_ps. */ +#define _MM_MK_INSERTPS_NDX(S, D, M) (((S) << 6) | ((D) << 4) | (M)) + +/* Extract binary representation of single precision float from packed + single precision array element of X selected by index N. */ + +#ifdef __OPTIMIZE__ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_extract_ps (__m128 __X, const int __N) +{ + union { int i; float f; } __tmp; + __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)__X, __N); + return __tmp.i; +} +#else +#define _mm_extract_ps(X, N) \ + (__extension__ \ + ({ \ + union { int i; float f; } __tmp; \ + __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)(__m128)(X), (int)(N)); \ + __tmp.i; \ + })) +#endif + +/* Extract binary representation of single precision float into + D from packed single precision array element of S selected + by index N. */ +#define _MM_EXTRACT_FLOAT(D, S, N) \ + { (D) = __builtin_ia32_vec_ext_v4sf ((__v4sf)(S), (N)); } + +/* Extract specified single precision float element into the lower + part of __m128. */ +#define _MM_PICK_OUT_PS(X, N) \ + _mm_insert_ps (_mm_setzero_ps (), (X), \ + _MM_MK_INSERTPS_NDX ((N), 0, 0x0e)) + +/* Insert integer, S, into packed integer array element of D + selected by index N. */ + +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_insert_epi8 (__m128i __D, int __S, const int __N) +{ + return (__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)__D, + __S, __N); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_insert_epi32 (__m128i __D, int __S, const int __N) +{ + return (__m128i) __builtin_ia32_vec_set_v4si ((__v4si)__D, + __S, __N); +} + +#ifdef __x86_64__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_insert_epi64 (__m128i __D, long long __S, const int __N) +{ + return (__m128i) __builtin_ia32_vec_set_v2di ((__v2di)__D, + __S, __N); +} +#endif +#else +#define _mm_insert_epi8(D, S, N) \ + ((__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)(__m128i)(D), \ + (int)(S), (int)(N))) + +#define _mm_insert_epi32(D, S, N) \ + ((__m128i) __builtin_ia32_vec_set_v4si ((__v4si)(__m128i)(D), \ + (int)(S), (int)(N))) + +#ifdef __x86_64__ +#define _mm_insert_epi64(D, S, N) \ + ((__m128i) __builtin_ia32_vec_set_v2di ((__v2di)(__m128i)(D), \ + (long long)(S), (int)(N))) +#endif +#endif + +/* Extract integer from packed integer array element of X selected by + index N. 
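(An editor's usage sketch for _mm_insert_epi32 above follows; it is
+   illustrative only and not part of the original header.)  */
+
+/* Sketch: overwrite the third 32-bit lane (index 2) of __v with __s;
+   the helper name is hypothetical.  */
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+__example_set_lane2 (__m128i __v, int __s)
+{
+  return _mm_insert_epi32 (__v, __s, 2);
+}
+
+/* Extract integer from packed integer array element of X selected by
+   index N.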
*/ + +#ifdef __OPTIMIZE__ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_extract_epi8 (__m128i __X, const int __N) +{ + return (unsigned char) __builtin_ia32_vec_ext_v16qi ((__v16qi)__X, __N); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_extract_epi32 (__m128i __X, const int __N) +{ + return __builtin_ia32_vec_ext_v4si ((__v4si)__X, __N); +} + +#ifdef __x86_64__ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_extract_epi64 (__m128i __X, const int __N) +{ + return __builtin_ia32_vec_ext_v2di ((__v2di)__X, __N); +} +#endif +#else +#define _mm_extract_epi8(X, N) \ + ((int) (unsigned char) __builtin_ia32_vec_ext_v16qi ((__v16qi)(__m128i)(X), (int)(N))) +#define _mm_extract_epi32(X, N) \ + ((int) __builtin_ia32_vec_ext_v4si ((__v4si)(__m128i)(X), (int)(N))) + +#ifdef __x86_64__ +#define _mm_extract_epi64(X, N) \ + ((long long) __builtin_ia32_vec_ext_v2di ((__v2di)(__m128i)(X), (int)(N))) +#endif +#endif + +/* Return horizontal packed word minimum and its index in bits [15:0] + and bits [18:16] respectively. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_minpos_epu16 (__m128i __X) +{ + return (__m128i) __builtin_ia32_phminposuw128 ((__v8hi)__X); +} + +/* Packed integer sign-extension. */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi8_epi32 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxbd128 ((__v16qi)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi16_epi32 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxwd128 ((__v8hi)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi8_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxbq128 ((__v16qi)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi32_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxdq128 ((__v4si)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi16_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxwq128 ((__v8hi)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi8_epi16 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxbw128 ((__v16qi)__X); +} + +/* Packed integer zero-extension. 
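(An editor's usage sketch for _mm_minpos_epu16 above follows; it is
+   illustrative only and not part of the original header.)  */
+
+/* Sketch: horizontal minimum of eight unsigned 16-bit lanes.  The
+   minimum lands in bits [15:0] of the result, so extracting lane 0
+   with the SSE2 _mm_extract_epi16 recovers it; the helper name is
+   hypothetical.  */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+__example_hmin_epu16 (__m128i __v)
+{
+  return _mm_extract_epi16 (_mm_minpos_epu16 (__v), 0);
+}
+
+/* Packed integer zero-extension.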
*/ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepu8_epi32 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxbd128 ((__v16qi)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepu16_epi32 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxwd128 ((__v8hi)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepu8_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxbq128 ((__v16qi)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepu32_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxdq128 ((__v4si)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepu16_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxwq128 ((__v8hi)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepu8_epi16 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxbw128 ((__v16qi)__X); +} + +/* Pack 8 double words from 2 operands into 8 words of result with + unsigned saturation. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_packus_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_packusdw128 ((__v4si)__X, (__v4si)__Y); +} + +/* Sum absolute 8-bit integer difference of adjacent groups of 4 + byte integers in the first 2 operands. Starting offsets within + operands are determined by the 3rd mask operand. */ + +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mpsadbw_epu8 (__m128i __X, __m128i __Y, const int __M) +{ + return (__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)__X, + (__v16qi)__Y, __M); +} +#else +#define _mm_mpsadbw_epu8(X, Y, M) \ + ((__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (int)(M))) +#endif + +/* Load double quadword using non-temporal aligned hint. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_load_si128 (__m128i *__X) +{ + return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __X); +} + +#ifdef __SSE4_2__ + +/* These macros specify the source data format. */ +#define _SIDD_UBYTE_OPS 0x00 +#define _SIDD_UWORD_OPS 0x01 +#define _SIDD_SBYTE_OPS 0x02 +#define _SIDD_SWORD_OPS 0x03 + +/* These macros specify the comparison operation. */ +#define _SIDD_CMP_EQUAL_ANY 0x00 +#define _SIDD_CMP_RANGES 0x04 +#define _SIDD_CMP_EQUAL_EACH 0x08 +#define _SIDD_CMP_EQUAL_ORDERED 0x0c + +/* These macros specify the polarity. */ +#define _SIDD_POSITIVE_POLARITY 0x00 +#define _SIDD_NEGATIVE_POLARITY 0x10 +#define _SIDD_MASKED_POSITIVE_POLARITY 0x20 +#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30 + +/* These macros specify the output selection in _mm_cmpXstri (). */ +#define _SIDD_LEAST_SIGNIFICANT 0x00 +#define _SIDD_MOST_SIGNIFICANT 0x40 + +/* These macros specify the output selection in _mm_cmpXstrm (). */ +#define _SIDD_BIT_MASK 0x00 +#define _SIDD_UNIT_MASK 0x40 + +/* Intrinsics for text/string processing. 
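(An editor's usage sketch for the zero-extension and pack intrinsics
+   above follows; it is illustrative only and not part of the original
+   header.)  */
+
+/* Sketch: halve each unsigned byte by widening to 16 bits, shifting,
+   and packing back with unsigned saturation; _mm_srli_si128,
+   _mm_srli_epi16 and _mm_packus_epi16 come from <emmintrin.h>.  The
+   helper name is hypothetical.  */
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+__example_halve_epu8 (__m128i __v)
+{
+  __m128i __lo = _mm_cvtepu8_epi16 (__v);
+  __m128i __hi = _mm_cvtepu8_epi16 (_mm_srli_si128 (__v, 8));
+  return _mm_packus_epi16 (_mm_srli_epi16 (__lo, 1),
+                           _mm_srli_epi16 (__hi, 1));
+}
+
+/* Intrinsics for text/string processing.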
*/ + +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpistrm (__m128i __X, __m128i __Y, const int __M) +{ + return (__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpistri (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistri128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpestrm (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return (__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpestri (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestri128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} +#else +#define _mm_cmpistrm(X, Y, M) \ + ((__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (int)(M))) +#define _mm_cmpistri(X, Y, M) \ + ((int) __builtin_ia32_pcmpistri128 ((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (int)(M))) + +#define _mm_cmpestrm(X, LX, Y, LY, M) \ + ((__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)(__m128i)(X), \ + (int)(LX), (__v16qi)(__m128i)(Y), \ + (int)(LY), (int)(M))) +#define _mm_cmpestri(X, LX, Y, LY, M) \ + ((int) __builtin_ia32_pcmpestri128 ((__v16qi)(__m128i)(X), (int)(LX), \ + (__v16qi)(__m128i)(Y), (int)(LY), \ + (int)(M))) +#endif + +/* Intrinsics for text/string processing and reading values of + EFlags. */ + +#ifdef __OPTIMIZE__ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpistra (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistria128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpistrc (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistric128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpistro (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistrio128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpistrs (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistris128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpistrz (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistriz128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpestra (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestria128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpestrc (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestric128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpestro (__m128i __X, int __LX, __m128i 
__Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestrio128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpestrs (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestris128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpestrz (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestriz128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} +#else +#define _mm_cmpistra(X, Y, M) \ + ((int) __builtin_ia32_pcmpistria128 ((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (int)(M))) +#define _mm_cmpistrc(X, Y, M) \ + ((int) __builtin_ia32_pcmpistric128 ((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (int)(M))) +#define _mm_cmpistro(X, Y, M) \ + ((int) __builtin_ia32_pcmpistrio128 ((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (int)(M))) +#define _mm_cmpistrs(X, Y, M) \ + ((int) __builtin_ia32_pcmpistris128 ((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (int)(M))) +#define _mm_cmpistrz(X, Y, M) \ + ((int) __builtin_ia32_pcmpistriz128 ((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (int)(M))) + +#define _mm_cmpestra(X, LX, Y, LY, M) \ + ((int) __builtin_ia32_pcmpestria128 ((__v16qi)(__m128i)(X), (int)(LX), \ + (__v16qi)(__m128i)(Y), (int)(LY), \ + (int)(M))) +#define _mm_cmpestrc(X, LX, Y, LY, M) \ + ((int) __builtin_ia32_pcmpestric128 ((__v16qi)(__m128i)(X), (int)(LX), \ + (__v16qi)(__m128i)(Y), (int)(LY), \ + (int)(M))) +#define _mm_cmpestro(X, LX, Y, LY, M) \ + ((int) __builtin_ia32_pcmpestrio128 ((__v16qi)(__m128i)(X), (int)(LX), \ + (__v16qi)(__m128i)(Y), (int)(LY), \ + (int)(M))) +#define _mm_cmpestrs(X, LX, Y, LY, M) \ + ((int) __builtin_ia32_pcmpestris128 ((__v16qi)(__m128i)(X), (int)(LX), \ + (__v16qi)(__m128i)(Y), (int)(LY), \ + (int)(M))) +#define _mm_cmpestrz(X, LX, Y, LY, M) \ + ((int) __builtin_ia32_pcmpestriz128 ((__v16qi)(__m128i)(X), (int)(LX), \ + (__v16qi)(__m128i)(Y), (int)(LY), \ + (int)(M))) +#endif + +/* Packed integer 64-bit comparison, zeroing or filling with ones + corresponding parts of result. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_epi64 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pcmpgtq ((__v2di)__X, (__v2di)__Y); +} + +#ifdef __POPCNT__ +#include <popcntintrin.h> +#endif + +/* Accumulate CRC32 (polynomial 0x11EDC6F41) value. 
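(An editor's usage sketch for the string-comparison intrinsics above
+   follows; it is illustrative only and not part of the original
+   header.)  */
+
+/* Sketch: index of the first byte of __chars equal to any byte of
+   __set, or 16 when none matches.  With EQUAL_ANY, every byte of the
+   second operand is compared against the set in the first; the helper
+   name is hypothetical.  */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+__example_find_any (__m128i __set, __m128i __chars)
+{
+  return _mm_cmpistri (__set, __chars,
+                       _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY
+                       | _SIDD_POSITIVE_POLARITY | _SIDD_LEAST_SIGNIFICANT);
+}
+
+/* Accumulate CRC32 (polynomial 0x11EDC6F41) value.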
*/ +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_crc32_u8 (unsigned int __C, unsigned char __V) +{ + return __builtin_ia32_crc32qi (__C, __V); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_crc32_u16 (unsigned int __C, unsigned short __V) +{ + return __builtin_ia32_crc32hi (__C, __V); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_crc32_u32 (unsigned int __C, unsigned int __V) +{ + return __builtin_ia32_crc32si (__C, __V); +} + +#ifdef __x86_64__ +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_crc32_u64 (unsigned long long __C, unsigned long long __V) +{ + return __builtin_ia32_crc32di (__C, __V); +} +#endif + +#endif /* __SSE4_2__ */ + +#endif /* __SSE4_1__ */ + +#endif /* _SMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-linux-android/4.6/include/stdarg.h b/lib/gcc/i686-linux-android/4.6/include/stdarg.h new file mode 100644 index 0000000..54dc2e7 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/stdarg.h @@ -0,0 +1,130 @@ +/* Copyright (C) 1989, 1997, 1998, 1999, 2000, 2009 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +<http://www.gnu.org/licenses/>. */ + +/* + * ISO C Standard: 7.15 Variable arguments <stdarg.h> + */ + +#ifndef _STDARG_H +#ifndef _ANSI_STDARG_H_ +#ifndef __need___va_list +#define _STDARG_H +#define _ANSI_STDARG_H_ +#endif /* not __need___va_list */ +#undef __need___va_list + +/* Define __gnuc_va_list. */ + +#ifndef __GNUC_VA_LIST +#define __GNUC_VA_LIST +typedef __builtin_va_list __gnuc_va_list; +#endif + +/* Define the standard macros for the user, + if this invocation was from the user program. */ +#ifdef _STDARG_H + +#define va_start(v,l) __builtin_va_start(v,l) +#define va_end(v) __builtin_va_end(v) +#define va_arg(v,l) __builtin_va_arg(v,l) +#if !defined(__STRICT_ANSI__) || __STDC_VERSION__ + 0 >= 199900L || defined(__GXX_EXPERIMENTAL_CXX0X__) +#define va_copy(d,s) __builtin_va_copy(d,s) +#endif +#define __va_copy(d,s) __builtin_va_copy(d,s) + +/* Define va_list, if desired, from __gnuc_va_list. */ +/* We deliberately do not define va_list when called from + stdio.h, because ANSI C says that stdio.h is not supposed to define + va_list. stdio.h needs to have access to that data type, + but must not use that name. It should use the name __gnuc_va_list, + which is safe because it is reserved for the implementation. */ + +#ifdef _HIDDEN_VA_LIST /* On OSF1, this means varargs.h is "half-loaded". 
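(Editor's note: the sketch below is illustrative only and not part
+   of the original header; it sits outside this conditional.)  */
+#endif
+
+/* Sketch: sum __n trailing int arguments with the macros defined
+   above.  __gnuc_va_list is used because va_list may not be declared
+   yet at this point; varargs functions cannot be force-inlined, so
+   this is a plain static function, and its name is hypothetical.  */
+static __inline__ int
+__example_sum (int __n, ...)
+{
+  __gnuc_va_list __ap;
+  int __s = 0;
+  va_start (__ap, __n);
+  while (__n-- > 0)
+    __s += va_arg (__ap, int);
+  va_end (__ap);
+  return __s;
+}
+
+#ifdef _HIDDEN_VA_LIST /* On OSF1, this means varargs.h is "half-loaded".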
*/ +#undef _VA_LIST +#endif + +#ifdef _BSD_VA_LIST +#undef _BSD_VA_LIST +#endif + +#if defined(__svr4__) || (defined(_SCO_DS) && !defined(__VA_LIST)) +/* SVR4.2 uses _VA_LIST for an internal alias for va_list, + so we must avoid testing it and setting it here. + SVR4 uses _VA_LIST as a flag in stdarg.h, but we should + have no conflict with that. */ +#ifndef _VA_LIST_ +#define _VA_LIST_ +#ifdef __i860__ +#ifndef _VA_LIST +#define _VA_LIST va_list +#endif +#endif /* __i860__ */ +typedef __gnuc_va_list va_list; +#ifdef _SCO_DS +#define __VA_LIST +#endif +#endif /* _VA_LIST_ */ +#else /* not __svr4__ || _SCO_DS */ + +/* The macro _VA_LIST_ is the same thing used by this file in Ultrix. + But on BSD NET2 we must not test or define or undef it. + (Note that the comments in NET 2's ansi.h + are incorrect for _VA_LIST_--see stdio.h!) */ +#if !defined (_VA_LIST_) || defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__) || defined(WINNT) +/* The macro _VA_LIST_DEFINED is used in Windows NT 3.5 */ +#ifndef _VA_LIST_DEFINED +/* The macro _VA_LIST is used in SCO Unix 3.2. */ +#ifndef _VA_LIST +/* The macro _VA_LIST_T_H is used in the Bull dpx2 */ +#ifndef _VA_LIST_T_H +/* The macro __va_list__ is used by BeOS. */ +#ifndef __va_list__ +typedef __gnuc_va_list va_list; +#endif /* not __va_list__ */ +#endif /* not _VA_LIST_T_H */ +#endif /* not _VA_LIST */ +#endif /* not _VA_LIST_DEFINED */ +#if !(defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__)) +#define _VA_LIST_ +#endif +#ifndef _VA_LIST +#define _VA_LIST +#endif +#ifndef _VA_LIST_DEFINED +#define _VA_LIST_DEFINED +#endif +#ifndef _VA_LIST_T_H +#define _VA_LIST_T_H +#endif +#ifndef __va_list__ +#define __va_list__ +#endif + +#endif /* not _VA_LIST_, except on certain systems */ + +#endif /* not __svr4__ */ + +#endif /* _STDARG_H */ + +#endif /* not _ANSI_STDARG_H_ */ +#endif /* not _STDARG_H */ diff --git a/lib/gcc/i686-linux-android/4.6/include/stdbool.h b/lib/gcc/i686-linux-android/4.6/include/stdbool.h new file mode 100644 index 0000000..4ed911f --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/stdbool.h @@ -0,0 +1,50 @@ +/* Copyright (C) 1998, 1999, 2000, 2009 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +<http://www.gnu.org/licenses/>. */ + +/* + * ISO C Standard: 7.16 Boolean type and values <stdbool.h> + */ + +#ifndef _STDBOOL_H +#define _STDBOOL_H + +#ifndef __cplusplus + +#define bool _Bool +#define true 1 +#define false 0 + +#else /* __cplusplus */ + +/* Supporting <stdbool.h> in C++ is a GCC extension. 
*/ +#define _Bool bool +#define bool bool +#define false false +#define true true + +#endif /* __cplusplus */ + +/* Signal that all the definitions are present. */ +#define __bool_true_false_are_defined 1 + +#endif /* stdbool.h */ diff --git a/lib/gcc/i686-linux-android/4.6/include/stddef.h b/lib/gcc/i686-linux-android/4.6/include/stddef.h new file mode 100644 index 0000000..565ef7b --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/stddef.h @@ -0,0 +1,418 @@ +/* Copyright (C) 1989, 1997, 1998, 1999, 2000, 2002, 2004, 2009 + Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +<http://www.gnu.org/licenses/>. */ + +/* + * ISO C Standard: 7.17 Common definitions <stddef.h> + */ +#if (!defined(_STDDEF_H) && !defined(_STDDEF_H_) && !defined(_ANSI_STDDEF_H) \ + && !defined(__STDDEF_H__)) \ + || defined(__need_wchar_t) || defined(__need_size_t) \ + || defined(__need_ptrdiff_t) || defined(__need_NULL) \ + || defined(__need_wint_t) + +/* Any one of these symbols __need_* means that GNU libc + wants us just to define one data type. So don't define + the symbols that indicate this file's entire job has been done. */ +#if (!defined(__need_wchar_t) && !defined(__need_size_t) \ + && !defined(__need_ptrdiff_t) && !defined(__need_NULL) \ + && !defined(__need_wint_t)) +#define _STDDEF_H +#define _STDDEF_H_ +/* snaroff@next.com says the NeXT needs this. */ +#define _ANSI_STDDEF_H +/* Irix 5.1 needs this. */ +#define __STDDEF_H__ +#endif + +#ifndef __sys_stdtypes_h +/* This avoids lossage on SunOS but only if stdtypes.h comes first. + There's no way to win with the other order! Sun lossage. */ + +/* On 4.3bsd-net2, make sure ansi.h is included, so we have + one less case to deal with in the following. */ +#if defined (__BSD_NET2__) || defined (____386BSD____) || (defined (__FreeBSD__) && (__FreeBSD__ < 5)) || defined(__NetBSD__) +#include <machine/ansi.h> +#endif +/* On FreeBSD 5, machine/ansi.h does not exist anymore... */ +#if defined (__FreeBSD__) && (__FreeBSD__ >= 5) +#include <sys/_types.h> +#endif + +/* In 4.3bsd-net2, machine/ansi.h defines these symbols, which are + defined if the corresponding type is *not* defined. + FreeBSD-2.1 defines _MACHINE_ANSI_H_ instead of _ANSI_H_. + NetBSD defines _I386_ANSI_H_ and _X86_64_ANSI_H_ instead of _ANSI_H_ */ +#if defined(_ANSI_H_) || defined(_MACHINE_ANSI_H_) || defined(_X86_64_ANSI_H_) || defined(_I386_ANSI_H_) +#if !defined(_SIZE_T_) && !defined(_BSD_SIZE_T_) +#define _SIZE_T +#endif +#if !defined(_PTRDIFF_T_) && !defined(_BSD_PTRDIFF_T_) +#define _PTRDIFF_T +#endif +/* On BSD/386 1.1, at least, machine/ansi.h defines _BSD_WCHAR_T_ + instead of _WCHAR_T_. 
*/ +#if !defined(_WCHAR_T_) && !defined(_BSD_WCHAR_T_) +#ifndef _BSD_WCHAR_T_ +#define _WCHAR_T +#endif +#endif +/* Undef _FOO_T_ if we are supposed to define foo_t. */ +#if defined (__need_ptrdiff_t) || defined (_STDDEF_H_) +#undef _PTRDIFF_T_ +#undef _BSD_PTRDIFF_T_ +#endif +#if defined (__need_size_t) || defined (_STDDEF_H_) +#undef _SIZE_T_ +#undef _BSD_SIZE_T_ +#endif +#if defined (__need_wchar_t) || defined (_STDDEF_H_) +#undef _WCHAR_T_ +#undef _BSD_WCHAR_T_ +#endif +#endif /* defined(_ANSI_H_) || defined(_MACHINE_ANSI_H_) || defined(_X86_64_ANSI_H_) || defined(_I386_ANSI_H_) */ + +/* Sequent's header files use _PTRDIFF_T_ in some conflicting way. + Just ignore it. */ +#if defined (__sequent__) && defined (_PTRDIFF_T_) +#undef _PTRDIFF_T_ +#endif + +/* On VxWorks, <type/vxTypesBase.h> may have defined macros like + _TYPE_size_t which will typedef size_t. fixincludes patched the + vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is + not defined, and so that defining this macro defines _GCC_SIZE_T. + If we find that the macros are still defined at this point, we must + invoke them so that the type is defined as expected. */ +#if defined (_TYPE_ptrdiff_t) && (defined (__need_ptrdiff_t) || defined (_STDDEF_H_)) +_TYPE_ptrdiff_t; +#undef _TYPE_ptrdiff_t +#endif +#if defined (_TYPE_size_t) && (defined (__need_size_t) || defined (_STDDEF_H_)) +_TYPE_size_t; +#undef _TYPE_size_t +#endif +#if defined (_TYPE_wchar_t) && (defined (__need_wchar_t) || defined (_STDDEF_H_)) +_TYPE_wchar_t; +#undef _TYPE_wchar_t +#endif + +/* In case nobody has defined these types, but we aren't running under + GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and + __WCHAR_TYPE__ have reasonable values. This can happen if the + parts of GCC is compiled by an older compiler, that actually + include gstddef.h, such as collect2. */ + +/* Signed type of difference of two pointers. */ + +/* Define this type if we are doing the whole job, + or if we want this type in particular. */ +#if defined (_STDDEF_H) || defined (__need_ptrdiff_t) +#ifndef _PTRDIFF_T /* in case <sys/types.h> has defined it. */ +#ifndef _T_PTRDIFF_ +#ifndef _T_PTRDIFF +#ifndef __PTRDIFF_T +#ifndef _PTRDIFF_T_ +#ifndef _BSD_PTRDIFF_T_ +#ifndef ___int_ptrdiff_t_h +#ifndef _GCC_PTRDIFF_T +#define _PTRDIFF_T +#define _T_PTRDIFF_ +#define _T_PTRDIFF +#define __PTRDIFF_T +#define _PTRDIFF_T_ +#define _BSD_PTRDIFF_T_ +#define ___int_ptrdiff_t_h +#define _GCC_PTRDIFF_T +#ifndef __PTRDIFF_TYPE__ +#define __PTRDIFF_TYPE__ long int +#endif +typedef __PTRDIFF_TYPE__ ptrdiff_t; +#endif /* _GCC_PTRDIFF_T */ +#endif /* ___int_ptrdiff_t_h */ +#endif /* _BSD_PTRDIFF_T_ */ +#endif /* _PTRDIFF_T_ */ +#endif /* __PTRDIFF_T */ +#endif /* _T_PTRDIFF */ +#endif /* _T_PTRDIFF_ */ +#endif /* _PTRDIFF_T */ + +/* If this symbol has done its job, get rid of it. */ +#undef __need_ptrdiff_t + +#endif /* _STDDEF_H or __need_ptrdiff_t. */ + +/* Unsigned type of `sizeof' something. */ + +/* Define this type if we are doing the whole job, + or if we want this type in particular. */ +#if defined (_STDDEF_H) || defined (__need_size_t) +#ifndef __size_t__ /* BeOS */ +#ifndef __SIZE_T__ /* Cray Unicos/Mk */ +#ifndef _SIZE_T /* in case <sys/types.h> has defined it. 
*/ +#ifndef _SYS_SIZE_T_H +#ifndef _T_SIZE_ +#ifndef _T_SIZE +#ifndef __SIZE_T +#ifndef _SIZE_T_ +#ifndef _BSD_SIZE_T_ +#ifndef _SIZE_T_DEFINED_ +#ifndef _SIZE_T_DEFINED +#ifndef _BSD_SIZE_T_DEFINED_ /* Darwin */ +#ifndef _SIZE_T_DECLARED /* FreeBSD 5 */ +#ifndef ___int_size_t_h +#ifndef _GCC_SIZE_T +#ifndef _SIZET_ +#ifndef __size_t +#define __size_t__ /* BeOS */ +#define __SIZE_T__ /* Cray Unicos/Mk */ +#define _SIZE_T +#define _SYS_SIZE_T_H +#define _T_SIZE_ +#define _T_SIZE +#define __SIZE_T +#define _SIZE_T_ +#define _BSD_SIZE_T_ +#define _SIZE_T_DEFINED_ +#define _SIZE_T_DEFINED +#define _BSD_SIZE_T_DEFINED_ /* Darwin */ +#define _SIZE_T_DECLARED /* FreeBSD 5 */ +#define ___int_size_t_h +#define _GCC_SIZE_T +#define _SIZET_ +#if defined (__FreeBSD__) && (__FreeBSD__ >= 5) +/* __size_t is a typedef on FreeBSD 5!, must not trash it. */ +#else +#define __size_t +#endif +#ifndef __SIZE_TYPE__ +#define __SIZE_TYPE__ long unsigned int +#endif +#if !(defined (__GNUG__) && defined (size_t)) +typedef __SIZE_TYPE__ size_t; +#ifdef __BEOS__ +typedef long ssize_t; +#endif /* __BEOS__ */ +#endif /* !(defined (__GNUG__) && defined (size_t)) */ +#endif /* __size_t */ +#endif /* _SIZET_ */ +#endif /* _GCC_SIZE_T */ +#endif /* ___int_size_t_h */ +#endif /* _SIZE_T_DECLARED */ +#endif /* _BSD_SIZE_T_DEFINED_ */ +#endif /* _SIZE_T_DEFINED */ +#endif /* _SIZE_T_DEFINED_ */ +#endif /* _BSD_SIZE_T_ */ +#endif /* _SIZE_T_ */ +#endif /* __SIZE_T */ +#endif /* _T_SIZE */ +#endif /* _T_SIZE_ */ +#endif /* _SYS_SIZE_T_H */ +#endif /* _SIZE_T */ +#endif /* __SIZE_T__ */ +#endif /* __size_t__ */ +#undef __need_size_t +#endif /* _STDDEF_H or __need_size_t. */ + + +/* Wide character type. + Locale-writers should change this as necessary to + be big enough to hold unique values not between 0 and 127, + and not (wchar_t) -1, for each defined multibyte character. */ + +/* Define this type if we are doing the whole job, + or if we want this type in particular. */ +#if defined (_STDDEF_H) || defined (__need_wchar_t) +#ifndef __wchar_t__ /* BeOS */ +#ifndef __WCHAR_T__ /* Cray Unicos/Mk */ +#ifndef _WCHAR_T +#ifndef _T_WCHAR_ +#ifndef _T_WCHAR +#ifndef __WCHAR_T +#ifndef _WCHAR_T_ +#ifndef _BSD_WCHAR_T_ +#ifndef _BSD_WCHAR_T_DEFINED_ /* Darwin */ +#ifndef _BSD_RUNE_T_DEFINED_ /* Darwin */ +#ifndef _WCHAR_T_DECLARED /* FreeBSD 5 */ +#ifndef _WCHAR_T_DEFINED_ +#ifndef _WCHAR_T_DEFINED +#ifndef _WCHAR_T_H +#ifndef ___int_wchar_t_h +#ifndef __INT_WCHAR_T_H +#ifndef _GCC_WCHAR_T +#define __wchar_t__ /* BeOS */ +#define __WCHAR_T__ /* Cray Unicos/Mk */ +#define _WCHAR_T +#define _T_WCHAR_ +#define _T_WCHAR +#define __WCHAR_T +#define _WCHAR_T_ +#define _BSD_WCHAR_T_ +#define _WCHAR_T_DEFINED_ +#define _WCHAR_T_DEFINED +#define _WCHAR_T_H +#define ___int_wchar_t_h +#define __INT_WCHAR_T_H +#define _GCC_WCHAR_T +#define _WCHAR_T_DECLARED + +/* On BSD/386 1.1, at least, machine/ansi.h defines _BSD_WCHAR_T_ + instead of _WCHAR_T_, and _BSD_RUNE_T_ (which, unlike the other + symbols in the _FOO_T_ family, stays defined even after its + corresponding type is defined). If we define wchar_t, then we + must undef _WCHAR_T_; for BSD/386 1.1 (and perhaps others), if + we undef _WCHAR_T_, then we must also define rune_t, since + headers like runetype.h assume that if machine/ansi.h is included, + and _BSD_WCHAR_T_ is not defined, then rune_t is available. + machine/ansi.h says, "Note that _WCHAR_T_ and _RUNE_T_ must be of + the same type." 
*/ +#ifdef _BSD_WCHAR_T_ +#undef _BSD_WCHAR_T_ +#ifdef _BSD_RUNE_T_ +#if !defined (_ANSI_SOURCE) && !defined (_POSIX_SOURCE) +typedef _BSD_RUNE_T_ rune_t; +#define _BSD_WCHAR_T_DEFINED_ +#define _BSD_RUNE_T_DEFINED_ /* Darwin */ +#if defined (__FreeBSD__) && (__FreeBSD__ < 5) +/* Why is this file so hard to maintain properly? In contrast to + the comment above regarding BSD/386 1.1, on FreeBSD for as long + as the symbol has existed, _BSD_RUNE_T_ must not stay defined or + redundant typedefs will occur when stdlib.h is included after this file. */ +#undef _BSD_RUNE_T_ +#endif +#endif +#endif +#endif +/* FreeBSD 5 can't be handled well using "traditional" logic above + since it no longer defines _BSD_RUNE_T_ yet still desires to export + rune_t in some cases... */ +#if defined (__FreeBSD__) && (__FreeBSD__ >= 5) +#if !defined (_ANSI_SOURCE) && !defined (_POSIX_SOURCE) +#if __BSD_VISIBLE +#ifndef _RUNE_T_DECLARED +typedef __rune_t rune_t; +#define _RUNE_T_DECLARED +#endif +#endif +#endif +#endif + +#ifndef __WCHAR_TYPE__ +#define __WCHAR_TYPE__ int +#endif +#ifndef __cplusplus +typedef __WCHAR_TYPE__ wchar_t; +#endif +#endif +#endif +#endif +#endif +#endif +#endif +#endif /* _WCHAR_T_DECLARED */ +#endif /* _BSD_RUNE_T_DEFINED_ */ +#endif +#endif +#endif +#endif +#endif +#endif +#endif +#endif /* __WCHAR_T__ */ +#endif /* __wchar_t__ */ +#undef __need_wchar_t +#endif /* _STDDEF_H or __need_wchar_t. */ + +#if defined (__need_wint_t) +#ifndef _WINT_T +#define _WINT_T + +#ifndef __WINT_TYPE__ +#define __WINT_TYPE__ unsigned int +#endif +typedef __WINT_TYPE__ wint_t; +#endif +#undef __need_wint_t +#endif + +/* In 4.3bsd-net2, leave these undefined to indicate that size_t, etc. + are already defined. */ +/* BSD/OS 3.1 and FreeBSD [23].x require the MACHINE_ANSI_H check here. */ +/* NetBSD 5 requires the I386_ANSI_H and X86_64_ANSI_H checks here. */ +#if defined(_ANSI_H_) || defined(_MACHINE_ANSI_H_) || defined(_X86_64_ANSI_H_) || defined(_I386_ANSI_H_) +/* The references to _GCC_PTRDIFF_T_, _GCC_SIZE_T_, and _GCC_WCHAR_T_ + are probably typos and should be removed before 2.8 is released. */ +#ifdef _GCC_PTRDIFF_T_ +#undef _PTRDIFF_T_ +#undef _BSD_PTRDIFF_T_ +#endif +#ifdef _GCC_SIZE_T_ +#undef _SIZE_T_ +#undef _BSD_SIZE_T_ +#endif +#ifdef _GCC_WCHAR_T_ +#undef _WCHAR_T_ +#undef _BSD_WCHAR_T_ +#endif +/* The following ones are the real ones. */ +#ifdef _GCC_PTRDIFF_T +#undef _PTRDIFF_T_ +#undef _BSD_PTRDIFF_T_ +#endif +#ifdef _GCC_SIZE_T +#undef _SIZE_T_ +#undef _BSD_SIZE_T_ +#endif +#ifdef _GCC_WCHAR_T +#undef _WCHAR_T_ +#undef _BSD_WCHAR_T_ +#endif +#endif /* _ANSI_H_ || _MACHINE_ANSI_H_ || _X86_64_ANSI_H_ || _I386_ANSI_H_ */ + +#endif /* __sys_stdtypes_h */ + +/* A null pointer constant. */ + +#if defined (_STDDEF_H) || defined (__need_NULL) +#undef NULL /* in case <stdio.h> has defined it. */ +#ifdef __GNUG__ +#define NULL __null +#else /* G++ */ +#ifndef __cplusplus +#define NULL ((void *)0) +#else /* C++ */ +#define NULL 0 +#endif /* C++ */ +#endif /* G++ */ +#endif /* NULL not defined and <stddef.h> or need NULL. */ +#undef __need_NULL + +#ifdef _STDDEF_H + +/* Offset of member MEMBER in a struct of type TYPE. 
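(An editor's illustration follows; it is not part of the original
+   header.)  */
+
+/* Sketch: __builtin_offsetof, which the offsetof definition below
+   wraps, yields an integer constant expression, so struct layout can
+   be checked at compile time.  The struct and typedef names are
+   hypothetical.  */
+struct __example_pair { char __c; long __l; };
+typedef char __example_offsetof_check
+  [__builtin_offsetof (struct __example_pair, __l) >= 1 ? 1 : -1];
+
+/* Offset of member MEMBER in a struct of type TYPE.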
*/ +#define offsetof(TYPE, MEMBER) __builtin_offsetof (TYPE, MEMBER) + +#endif /* _STDDEF_H was defined this time */ + +#endif /* !_STDDEF_H && !_STDDEF_H_ && !_ANSI_STDDEF_H && !__STDDEF_H__ + || __need_XXX was not defined before */ diff --git a/lib/gcc/i686-linux-android/4.6/include/stdfix.h b/lib/gcc/i686-linux-android/4.6/include/stdfix.h new file mode 100644 index 0000000..3c3ec00 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/stdfix.h @@ -0,0 +1,204 @@ +/* Copyright (C) 2007, 2009 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +<http://www.gnu.org/licenses/>. */ + +/* ISO/IEC JTC1 SC22 WG14 N1169 + * Date: 2006-04-04 + * ISO/IEC TR 18037 + * Programming languages - C - Extensions to support embedded processors + */ + +#ifndef _STDFIX_H +#define _STDFIX_H + +/* 7.18a.1 Introduction. */ + +#undef fract +#undef accum +#undef sat +#define fract _Fract +#define accum _Accum +#define sat _Sat + +/* 7.18a.3 Precision macros. */ + +#undef SFRACT_FBIT +#undef SFRACT_MIN +#undef SFRACT_MAX +#undef SFRACT_EPSILON +#define SFRACT_FBIT __SFRACT_FBIT__ +#define SFRACT_MIN __SFRACT_MIN__ +#define SFRACT_MAX __SFRACT_MAX__ +#define SFRACT_EPSILON __SFRACT_EPSILON__ + +#undef USFRACT_FBIT +#undef USFRACT_MIN +#undef USFRACT_MAX +#undef USFRACT_EPSILON +#define USFRACT_FBIT __USFRACT_FBIT__ +#define USFRACT_MIN __USFRACT_MIN__ /* GCC extension. */ +#define USFRACT_MAX __USFRACT_MAX__ +#define USFRACT_EPSILON __USFRACT_EPSILON__ + +#undef FRACT_FBIT +#undef FRACT_MIN +#undef FRACT_MAX +#undef FRACT_EPSILON +#define FRACT_FBIT __FRACT_FBIT__ +#define FRACT_MIN __FRACT_MIN__ +#define FRACT_MAX __FRACT_MAX__ +#define FRACT_EPSILON __FRACT_EPSILON__ + +#undef UFRACT_FBIT +#undef UFRACT_MIN +#undef UFRACT_MAX +#undef UFRACT_EPSILON +#define UFRACT_FBIT __UFRACT_FBIT__ +#define UFRACT_MIN __UFRACT_MIN__ /* GCC extension. */ +#define UFRACT_MAX __UFRACT_MAX__ +#define UFRACT_EPSILON __UFRACT_EPSILON__ + +#undef LFRACT_FBIT +#undef LFRACT_MIN +#undef LFRACT_MAX +#undef LFRACT_EPSILON +#define LFRACT_FBIT __LFRACT_FBIT__ +#define LFRACT_MIN __LFRACT_MIN__ +#define LFRACT_MAX __LFRACT_MAX__ +#define LFRACT_EPSILON __LFRACT_EPSILON__ + +#undef ULFRACT_FBIT +#undef ULFRACT_MIN +#undef ULFRACT_MAX +#undef ULFRACT_EPSILON +#define ULFRACT_FBIT __ULFRACT_FBIT__ +#define ULFRACT_MIN __ULFRACT_MIN__ /* GCC extension. */ +#define ULFRACT_MAX __ULFRACT_MAX__ +#define ULFRACT_EPSILON __ULFRACT_EPSILON__ + +#undef LLFRACT_FBIT +#undef LLFRACT_MIN +#undef LLFRACT_MAX +#undef LLFRACT_EPSILON +#define LLFRACT_FBIT __LLFRACT_FBIT__ /* GCC extension. */ +#define LLFRACT_MIN __LLFRACT_MIN__ /* GCC extension. */ +#define LLFRACT_MAX __LLFRACT_MAX__ /* GCC extension. 
*/ +#define LLFRACT_EPSILON __LLFRACT_EPSILON__ /* GCC extension. */ + +#undef ULLFRACT_FBIT +#undef ULLFRACT_MIN +#undef ULLFRACT_MAX +#undef ULLFRACT_EPSILON +#define ULLFRACT_FBIT __ULLFRACT_FBIT__ /* GCC extension. */ +#define ULLFRACT_MIN __ULLFRACT_MIN__ /* GCC extension. */ +#define ULLFRACT_MAX __ULLFRACT_MAX__ /* GCC extension. */ +#define ULLFRACT_EPSILON __ULLFRACT_EPSILON__ /* GCC extension. */ + +#undef SACCUM_FBIT +#undef SACCUM_IBIT +#undef SACCUM_MIN +#undef SACCUM_MAX +#undef SACCUM_EPSILON +#define SACCUM_FBIT __SACCUM_FBIT__ +#define SACCUM_IBIT __SACCUM_IBIT__ +#define SACCUM_MIN __SACCUM_MIN__ +#define SACCUM_MAX __SACCUM_MAX__ +#define SACCUM_EPSILON __SACCUM_EPSILON__ + +#undef USACCUM_FBIT +#undef USACCUM_IBIT +#undef USACCUM_MIN +#undef USACCUM_MAX +#undef USACCUM_EPSILON +#define USACCUM_FBIT __USACCUM_FBIT__ +#define USACCUM_IBIT __USACCUM_IBIT__ +#define USACCUM_MIN __USACCUM_MIN__ /* GCC extension. */ +#define USACCUM_MAX __USACCUM_MAX__ +#define USACCUM_EPSILON __USACCUM_EPSILON__ + +#undef ACCUM_FBIT +#undef ACCUM_IBIT +#undef ACCUM_MIN +#undef ACCUM_MAX +#undef ACCUM_EPSILON +#define ACCUM_FBIT __ACCUM_FBIT__ +#define ACCUM_IBIT __ACCUM_IBIT__ +#define ACCUM_MIN __ACCUM_MIN__ +#define ACCUM_MAX __ACCUM_MAX__ +#define ACCUM_EPSILON __ACCUM_EPSILON__ + +#undef UACCUM_FBIT +#undef UACCUM_IBIT +#undef UACCUM_MIN +#undef UACCUM_MAX +#undef UACCUM_EPSILON +#define UACCUM_FBIT __UACCUM_FBIT__ +#define UACCUM_IBIT __UACCUM_IBIT__ +#define UACCUM_MIN __UACCUM_MIN__ /* GCC extension. */ +#define UACCUM_MAX __UACCUM_MAX__ +#define UACCUM_EPSILON __UACCUM_EPSILON__ + +#undef LACCUM_FBIT +#undef LACCUM_IBIT +#undef LACCUM_MIN +#undef LACCUM_MAX +#undef LACCUM_EPSILON +#define LACCUM_FBIT __LACCUM_FBIT__ +#define LACCUM_IBIT __LACCUM_IBIT__ +#define LACCUM_MIN __LACCUM_MIN__ +#define LACCUM_MAX __LACCUM_MAX__ +#define LACCUM_EPSILON __LACCUM_EPSILON__ + +#undef ULACCUM_FBIT +#undef ULACCUM_IBIT +#undef ULACCUM_MIN +#undef ULACCUM_MAX +#undef ULACCUM_EPSILON +#define ULACCUM_FBIT __ULACCUM_FBIT__ +#define ULACCUM_IBIT __ULACCUM_IBIT__ +#define ULACCUM_MIN __ULACCUM_MIN__ /* GCC extension. */ +#define ULACCUM_MAX __ULACCUM_MAX__ +#define ULACCUM_EPSILON __ULACCUM_EPSILON__ + +#undef LLACCUM_FBIT +#undef LLACCUM_IBIT +#undef LLACCUM_MIN +#undef LLACCUM_MAX +#undef LLACCUM_EPSILON +#define LLACCUM_FBIT __LLACCUM_FBIT__ /* GCC extension. */ +#define LLACCUM_IBIT __LLACCUM_IBIT__ /* GCC extension. */ +#define LLACCUM_MIN __LLACCUM_MIN__ /* GCC extension. */ +#define LLACCUM_MAX __LLACCUM_MAX__ /* GCC extension. */ +#define LLACCUM_EPSILON __LLACCUM_EPSILON__ /* GCC extension. */ + +#undef ULLACCUM_FBIT +#undef ULLACCUM_IBIT +#undef ULLACCUM_MIN +#undef ULLACCUM_MAX +#undef ULLACCUM_EPSILON +#define ULLACCUM_FBIT __ULLACCUM_FBIT__ /* GCC extension. */ +#define ULLACCUM_IBIT __ULLACCUM_IBIT__ /* GCC extension. */ +#define ULLACCUM_MIN __ULLACCUM_MIN__ /* GCC extension. */ +#define ULLACCUM_MAX __ULLACCUM_MAX__ /* GCC extension. */ +#define ULLACCUM_EPSILON __ULLACCUM_EPSILON__ /* GCC extension. */ + +#endif /* _STDFIX_H */ diff --git a/lib/gcc/i686-linux-android/4.6/include/stdint-gcc.h b/lib/gcc/i686-linux-android/4.6/include/stdint-gcc.h new file mode 100644 index 0000000..22780a1 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/stdint-gcc.h @@ -0,0 +1,259 @@ +/* Copyright (C) 2008, 2009 Free Software Foundation, Inc. + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see +<http://www.gnu.org/licenses/>.  */ + +/* + * ISO C Standard:  7.18  Integer types <stdint.h> + */ + +#ifndef _GCC_STDINT_H +#define _GCC_STDINT_H + +/* 7.18.1.1 Exact-width integer types */ + +#ifdef __INT8_TYPE__ +typedef __INT8_TYPE__ int8_t; +#endif +#ifdef __INT16_TYPE__ +typedef __INT16_TYPE__ int16_t; +#endif +#ifdef __INT32_TYPE__ +typedef __INT32_TYPE__ int32_t; +#endif +#ifdef __INT64_TYPE__ +typedef __INT64_TYPE__ int64_t; +#endif +#ifdef __UINT8_TYPE__ +typedef __UINT8_TYPE__ uint8_t; +#endif +#ifdef __UINT16_TYPE__ +typedef __UINT16_TYPE__ uint16_t; +#endif +#ifdef __UINT32_TYPE__ +typedef __UINT32_TYPE__ uint32_t; +#endif +#ifdef __UINT64_TYPE__ +typedef __UINT64_TYPE__ uint64_t; +#endif + +/* 7.18.1.2 Minimum-width integer types */ + +typedef __INT_LEAST8_TYPE__ int_least8_t; +typedef __INT_LEAST16_TYPE__ int_least16_t; +typedef __INT_LEAST32_TYPE__ int_least32_t; +typedef __INT_LEAST64_TYPE__ int_least64_t; +typedef __UINT_LEAST8_TYPE__ uint_least8_t; +typedef __UINT_LEAST16_TYPE__ uint_least16_t; +typedef __UINT_LEAST32_TYPE__ uint_least32_t; +typedef __UINT_LEAST64_TYPE__ uint_least64_t; + +/* 7.18.1.3 Fastest minimum-width integer types */ + +typedef __INT_FAST8_TYPE__ int_fast8_t; +typedef __INT_FAST16_TYPE__ int_fast16_t; +typedef __INT_FAST32_TYPE__ int_fast32_t; +typedef __INT_FAST64_TYPE__ int_fast64_t; +typedef __UINT_FAST8_TYPE__ uint_fast8_t; +typedef __UINT_FAST16_TYPE__ uint_fast16_t; +typedef __UINT_FAST32_TYPE__ uint_fast32_t; +typedef __UINT_FAST64_TYPE__ uint_fast64_t; + +/* 7.18.1.4 Integer types capable of holding object pointers */ + +#ifdef __INTPTR_TYPE__ +typedef __INTPTR_TYPE__ intptr_t; +#endif +#ifdef __UINTPTR_TYPE__ +typedef __UINTPTR_TYPE__ uintptr_t; +#endif + +/* 7.18.1.5 Greatest-width integer types */ + +typedef __INTMAX_TYPE__ intmax_t; +typedef __UINTMAX_TYPE__ uintmax_t; + +#if !defined __cplusplus || defined __STDC_LIMIT_MACROS + +/* 7.18.2 Limits of specified-width integer types */ + +#ifdef __INT8_MAX__ +# undef INT8_MAX +# define INT8_MAX __INT8_MAX__ +# undef INT8_MIN +# define INT8_MIN (-INT8_MAX - 1) +#endif +#ifdef __UINT8_MAX__ +# undef UINT8_MAX +# define UINT8_MAX __UINT8_MAX__ +#endif +#ifdef __INT16_MAX__ +# undef INT16_MAX +# define INT16_MAX __INT16_MAX__ +# undef INT16_MIN +# define INT16_MIN (-INT16_MAX - 1) +#endif +#ifdef __UINT16_MAX__ +# undef UINT16_MAX +# define UINT16_MAX __UINT16_MAX__ +#endif +#ifdef __INT32_MAX__ +# undef INT32_MAX +# define INT32_MAX __INT32_MAX__ +# undef INT32_MIN +# define INT32_MIN (-INT32_MAX - 1) +#endif +#ifdef __UINT32_MAX__ +# undef UINT32_MAX +# define UINT32_MAX __UINT32_MAX__ +#endif +#ifdef __INT64_MAX__ +# undef INT64_MAX +# define INT64_MAX
__INT64_MAX__ +# undef INT64_MIN +# define INT64_MIN (-INT64_MAX - 1) +#endif +#ifdef __UINT64_MAX__ +# undef UINT64_MAX +# define UINT64_MAX __UINT64_MAX__ +#endif + +#undef INT_LEAST8_MAX +#define INT_LEAST8_MAX __INT_LEAST8_MAX__ +#undef INT_LEAST8_MIN +#define INT_LEAST8_MIN (-INT_LEAST8_MAX - 1) +#undef UINT_LEAST8_MAX +#define UINT_LEAST8_MAX __UINT_LEAST8_MAX__ +#undef INT_LEAST16_MAX +#define INT_LEAST16_MAX __INT_LEAST16_MAX__ +#undef INT_LEAST16_MIN +#define INT_LEAST16_MIN (-INT_LEAST16_MAX - 1) +#undef UINT_LEAST16_MAX +#define UINT_LEAST16_MAX __UINT_LEAST16_MAX__ +#undef INT_LEAST32_MAX +#define INT_LEAST32_MAX __INT_LEAST32_MAX__ +#undef INT_LEAST32_MIN +#define INT_LEAST32_MIN (-INT_LEAST32_MAX - 1) +#undef UINT_LEAST32_MAX +#define UINT_LEAST32_MAX __UINT_LEAST32_MAX__ +#undef INT_LEAST64_MAX +#define INT_LEAST64_MAX __INT_LEAST64_MAX__ +#undef INT_LEAST64_MIN +#define INT_LEAST64_MIN (-INT_LEAST64_MAX - 1) +#undef UINT_LEAST64_MAX +#define UINT_LEAST64_MAX __UINT_LEAST64_MAX__ + +#undef INT_FAST8_MAX +#define INT_FAST8_MAX __INT_FAST8_MAX__ +#undef INT_FAST8_MIN +#define INT_FAST8_MIN (-INT_FAST8_MAX - 1) +#undef UINT_FAST8_MAX +#define UINT_FAST8_MAX __UINT_FAST8_MAX__ +#undef INT_FAST16_MAX +#define INT_FAST16_MAX __INT_FAST16_MAX__ +#undef INT_FAST16_MIN +#define INT_FAST16_MIN (-INT_FAST16_MAX - 1) +#undef UINT_FAST16_MAX +#define UINT_FAST16_MAX __UINT_FAST16_MAX__ +#undef INT_FAST32_MAX +#define INT_FAST32_MAX __INT_FAST32_MAX__ +#undef INT_FAST32_MIN +#define INT_FAST32_MIN (-INT_FAST32_MAX - 1) +#undef UINT_FAST32_MAX +#define UINT_FAST32_MAX __UINT_FAST32_MAX__ +#undef INT_FAST64_MAX +#define INT_FAST64_MAX __INT_FAST64_MAX__ +#undef INT_FAST64_MIN +#define INT_FAST64_MIN (-INT_FAST64_MAX - 1) +#undef UINT_FAST64_MAX +#define UINT_FAST64_MAX __UINT_FAST64_MAX__ + +#ifdef __INTPTR_MAX__ +# undef INTPTR_MAX +# define INTPTR_MAX __INTPTR_MAX__ +# undef INTPTR_MIN +# define INTPTR_MIN (-INTPTR_MAX - 1) +#endif +#ifdef __UINTPTR_MAX__ +# undef UINTPTR_MAX +# define UINTPTR_MAX __UINTPTR_MAX__ +#endif + +#undef INTMAX_MAX +#define INTMAX_MAX __INTMAX_MAX__ +#undef INTMAX_MIN +#define INTMAX_MIN (-INTMAX_MAX - 1) +#undef UINTMAX_MAX +#define UINTMAX_MAX __UINTMAX_MAX__ + +/* 7.18.3 Limits of other integer types */ + +#undef PTRDIFF_MAX +#define PTRDIFF_MAX __PTRDIFF_MAX__ +#undef PTRDIFF_MIN +#define PTRDIFF_MIN (-PTRDIFF_MAX - 1) + +#undef SIG_ATOMIC_MAX +#define SIG_ATOMIC_MAX __SIG_ATOMIC_MAX__ +#undef SIG_ATOMIC_MIN +#define SIG_ATOMIC_MIN __SIG_ATOMIC_MIN__ + +#undef SIZE_MAX +#define SIZE_MAX __SIZE_MAX__ + +#undef WCHAR_MAX +#define WCHAR_MAX __WCHAR_MAX__ +#undef WCHAR_MIN +#define WCHAR_MIN __WCHAR_MIN__ + +#undef WINT_MAX +#define WINT_MAX __WINT_MAX__ +#undef WINT_MIN +#define WINT_MIN __WINT_MIN__ + +#endif /* !defined __cplusplus || defined __STDC_LIMIT_MACROS */ + +#if !defined __cplusplus || defined __STDC_CONSTANT_MACROS + +#undef INT8_C +#define INT8_C(c) __INT8_C(c) +#undef INT16_C +#define INT16_C(c) __INT16_C(c) +#undef INT32_C +#define INT32_C(c) __INT32_C(c) +#undef INT64_C +#define INT64_C(c) __INT64_C(c) +#undef UINT8_C +#define UINT8_C(c) __UINT8_C(c) +#undef UINT16_C +#define UINT16_C(c) __UINT16_C(c) +#undef UINT32_C +#define UINT32_C(c) __UINT32_C(c) +#undef UINT64_C +#define UINT64_C(c) __UINT64_C(c) +#undef INTMAX_C +#define INTMAX_C(c) __INTMAX_C(c) +#undef UINTMAX_C +#define UINTMAX_C(c) __UINTMAX_C(c) + +#endif /* !defined __cplusplus || defined __STDC_CONSTANT_MACROS */ + +#endif /* _GCC_STDINT_H */ diff --git 
a/lib/gcc/i686-linux-android/4.6/include/stdint.h b/lib/gcc/i686-linux-android/4.6/include/stdint.h new file mode 100644 index 0000000..e45f819 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/stdint.h @@ -0,0 +1,8 @@ +#ifndef _GCC_WRAP_STDINT_H +#if __STDC_HOSTED__ +# include_next <stdint.h> +#else +# include "stdint-gcc.h" +#endif +#define _GCC_WRAP_STDINT_H +#endif diff --git a/lib/gcc/i686-linux-android/4.6/include/tbmintrin.h b/lib/gcc/i686-linux-android/4.6/include/tbmintrin.h new file mode 100644 index 0000000..8d2431d --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/tbmintrin.h @@ -0,0 +1,191 @@ +/* Copyright (C) 2010 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _X86INTRIN_H_INCLUDED +# error "Never use <tbmintrin.h> directly; include <x86intrin.h> instead." +#endif + +#ifndef __TBM__ +# error "TBM instruction set not enabled" +#endif /* __TBM__ */ + +#ifndef _TBMINTRIN_H_INCLUDED +#define _TBMINTRIN_H_INCLUDED + +#ifdef __OPTIMIZE__ +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__bextri_u32 (unsigned int __X, const unsigned int __I) +{ + return __builtin_ia32_bextri_u32 (__X, __I); +} +#else +#define __bextri_u32(X, I) \ + ((unsigned int)__builtin_ia32_bextri_u32 ((unsigned int)(X), \ + (unsigned int)(I))) +#endif /*__OPTIMIZE__ */ + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blcfill_u32 (unsigned int __X) +{ + unsigned int tmp = (__X) & ((__X) + 1); + return tmp; +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blci_u32 (unsigned int __X) +{ + unsigned int tmp = (__X) | (~((__X) + 1)); + return tmp; +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blcic_u32 (unsigned int __X) +{ + unsigned int tmp = (~(__X)) & ((__X) + 1); + return tmp; +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blcmsk_u32 (unsigned int __X) +{ + unsigned int tmp = (__X) ^ ((__X) + 1); + return tmp; +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blcs_u32 (unsigned int __X) +{ + unsigned int tmp = (__X) | ((__X) + 1); + return tmp; +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blsfill_u32 (unsigned int __X) +{ + unsigned int tmp = (__X) | ((__X) - 1); + return tmp; +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blsic_u32 (unsigned int 
__X) +{ + unsigned int tmp = (~(__X)) | ((__X) - 1); + return tmp; +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__t1mskc_u32 (unsigned int __X) +{ + unsigned int tmp = (~(__X)) | ((__X) + 1); + return tmp; +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__tzmsk_u32 (unsigned int __X) +{ + unsigned int tmp = (~(__X)) & ((__X) - 1); + return tmp; +} + + + +#ifdef __x86_64__ +#ifdef __OPTIMIZE__ +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__bextri_u64 (unsigned long long __X, const unsigned int __I) +{ + return __builtin_ia32_bextri_u64 (__X, __I); +} +#else +#define __bextri_u64(X, I) \ + ((unsigned long long)__builtin_ia32_bextri_u64 ((unsigned long long)(X), \ + (unsigned long long)(I))) +#endif /*__OPTIMIZE__ */ + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blcfill_u64 (unsigned long long __X) +{ + unsigned long long tmp = (__X) & ((__X) + 1); + return tmp; +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blci_u64 (unsigned long long __X) +{ + unsigned long long tmp = (__X) | (~((__X) + 1)); + return tmp; +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blcic_u64 (unsigned long long __X) +{ + unsigned long long tmp = (~(__X)) & ((__X) + 1); + return tmp; +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blcmsk_u64 (unsigned long long __X) +{ + unsigned long long tmp = (__X) ^ ((__X) + 1); + return tmp; +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blcs_u64 (unsigned long long __X) +{ + unsigned long long tmp = (__X) | ((__X) + 1); + return tmp; +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blsfill_u64 (unsigned long long __X) +{ + unsigned long long tmp = (__X) | ((__X) - 1); + return tmp; +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blsic_u64 (unsigned long long __X) +{ + unsigned long long tmp = (~(__X)) | ((__X) - 1); + return tmp; +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__t1mskc_u64 (unsigned long long __X) +{ + unsigned long long tmp = (~(__X)) | ((__X) + 1); + return tmp; +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__tzmsk_u64 (unsigned long long __X) +{ + unsigned long long tmp = (~(__X)) & ((__X) - 1); + return tmp; +} + + +#endif /* __x86_64__ */ +#endif /* _TBMINTRIN_H_INCLUDED */ + diff --git a/lib/gcc/i686-linux-android/4.6/include/tmmintrin.h b/lib/gcc/i686-linux-android/4.6/include/tmmintrin.h new file mode 100644 index 0000000..9835669 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/tmmintrin.h @@ -0,0 +1,244 @@ +/* Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 9.1. */ + +#ifndef _TMMINTRIN_H_INCLUDED +#define _TMMINTRIN_H_INCLUDED + +#ifndef __SSSE3__ +# error "SSSE3 instruction set not enabled" +#else + +/* We need definitions from the SSE3, SSE2 and SSE header files*/ +#include <pmmintrin.h> + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadd_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phaddw128 ((__v8hi)__X, (__v8hi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadd_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phaddd128 ((__v4si)__X, (__v4si)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadds_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phaddsw128 ((__v8hi)__X, (__v8hi)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadd_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phaddw ((__v4hi)__X, (__v4hi)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadd_pi32 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phaddd ((__v2si)__X, (__v2si)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadds_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phaddsw ((__v4hi)__X, (__v4hi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsub_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phsubw128 ((__v8hi)__X, (__v8hi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsub_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phsubd128 ((__v4si)__X, (__v4si)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsubs_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phsubsw128 ((__v8hi)__X, (__v8hi)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsub_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phsubw ((__v4hi)__X, (__v4hi)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsub_pi32 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phsubd ((__v2si)__X, (__v2si)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsubs_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phsubsw ((__v4hi)__X, (__v4hi)__Y); +} + +extern 
__inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maddubs_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaddubsw128 ((__v16qi)__X, (__v16qi)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maddubs_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_pmaddubsw ((__v8qi)__X, (__v8qi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mulhrs_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmulhrsw128 ((__v8hi)__X, (__v8hi)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mulhrs_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_pmulhrsw ((__v4hi)__X, (__v4hi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shuffle_epi8 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pshufb128 ((__v16qi)__X, (__v16qi)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shuffle_pi8 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_pshufb ((__v8qi)__X, (__v8qi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sign_epi8 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psignb128 ((__v16qi)__X, (__v16qi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sign_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psignw128 ((__v8hi)__X, (__v8hi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sign_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psignd128 ((__v4si)__X, (__v4si)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sign_pi8 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_psignb ((__v8qi)__X, (__v8qi)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sign_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_psignw ((__v4hi)__X, (__v4hi)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sign_pi32 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_psignd ((__v2si)__X, (__v2si)__Y); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_alignr_epi8(__m128i __X, __m128i __Y, const int __N) +{ + return (__m128i) __builtin_ia32_palignr128 ((__v2di)__X, + (__v2di)__Y, __N * 8); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_alignr_pi8(__m64 __X, __m64 __Y, const int __N) +{ + return (__m64) __builtin_ia32_palignr ((__v1di)__X, + (__v1di)__Y, __N * 8); +} +#else +#define _mm_alignr_epi8(X, Y, N) \ + ((__m128i) __builtin_ia32_palignr128 ((__v2di)(__m128i)(X), \ + (__v2di)(__m128i)(Y), \ + (int)(N) * 8)) +#define _mm_alignr_pi8(X, Y, N) \ + ((__m64) __builtin_ia32_palignr ((__v1di)(__m64)(X), \ + (__v1di)(__m64)(Y), \ + (int)(N) * 8)) +#endif + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_abs_epi8 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pabsb128 ((__v16qi)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_abs_epi16 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pabsw128 ((__v8hi)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_abs_epi32 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pabsd128 ((__v4si)__X); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_abs_pi8 (__m64 __X) +{ + return (__m64) __builtin_ia32_pabsb ((__v8qi)__X); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_abs_pi16 (__m64 __X) +{ + return (__m64) __builtin_ia32_pabsw ((__v4hi)__X); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_abs_pi32 (__m64 __X) +{ + return (__m64) __builtin_ia32_pabsd ((__v2si)__X); +} + +#endif /* __SSSE3__ */ + +#endif /* _TMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-linux-android/4.6/include/unwind.h b/lib/gcc/i686-linux-android/4.6/include/unwind.h new file mode 100644 index 0000000..4ff9017 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/unwind.h @@ -0,0 +1,276 @@ +/* Exception handling and frame unwind runtime interface routines. + Copyright (C) 2001, 2003, 2004, 2006, 2008, 2009 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public + License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see + <http://www.gnu.org/licenses/>.  */ + +/* This is derived from the C++ ABI for IA-64.  Points where we diverge + for cross-architecture compatibility are noted with "@@@". */ + +#ifndef _UNWIND_H +#define _UNWIND_H + +#ifndef HIDE_EXPORTS +#pragma GCC visibility push(default) +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* Level 1: Base ABI  */ + +/* @@@ The IA-64 ABI uses uint64 throughout.  In most places this is + inefficient for 32-bit and smaller machines.  */ +typedef unsigned _Unwind_Word __attribute__((__mode__(__unwind_word__))); +typedef signed _Unwind_Sword __attribute__((__mode__(__unwind_word__))); +#if defined(__ia64__) && defined(__hpux__) +typedef unsigned _Unwind_Ptr __attribute__((__mode__(__word__))); +#else +typedef unsigned _Unwind_Ptr __attribute__((__mode__(__pointer__))); +#endif +typedef unsigned _Unwind_Internal_Ptr __attribute__((__mode__(__pointer__))); + +/* @@@ The IA-64 ABI uses a 64-bit word to identify the producer and + consumer of an exception.  We'll go along with this for now even on + 32-bit machines.  We'll need to provide some other option for + 16-bit machines and for machines with > 8 bits per byte.  */ +typedef unsigned _Unwind_Exception_Class __attribute__((__mode__(__DI__))); + +/* The unwind interface uses reason codes in several contexts to + identify the reasons for failures or other actions.
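+   For instance (an editor's sketch, not part of the upstream header), a
+   stack-walking callback of the _Unwind_Trace_Fn type declared further
+   down returns one of these codes to drive _Unwind_Backtrace:
+
+     static _Unwind_Reason_Code
+     count_frames (struct _Unwind_Context *ctx, void *arg)
+     {
+       (void) ctx;
+       ++*(int *) arg;
+       return _URC_NO_REASON;   continue to the next frame
+     }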
*/ +typedef enum +{ + _URC_NO_REASON = 0, + _URC_FOREIGN_EXCEPTION_CAUGHT = 1, + _URC_FATAL_PHASE2_ERROR = 2, + _URC_FATAL_PHASE1_ERROR = 3, + _URC_NORMAL_STOP = 4, + _URC_END_OF_STACK = 5, + _URC_HANDLER_FOUND = 6, + _URC_INSTALL_CONTEXT = 7, + _URC_CONTINUE_UNWIND = 8 +} _Unwind_Reason_Code; + + +/* The unwind interface uses a pointer to an exception header object + as its representation of an exception being thrown. In general, the + full representation of an exception object is language- and + implementation-specific, but it will be prefixed by a header + understood by the unwind interface. */ + +struct _Unwind_Exception; + +typedef void (*_Unwind_Exception_Cleanup_Fn) (_Unwind_Reason_Code, + struct _Unwind_Exception *); + +struct _Unwind_Exception +{ + _Unwind_Exception_Class exception_class; + _Unwind_Exception_Cleanup_Fn exception_cleanup; + _Unwind_Word private_1; + _Unwind_Word private_2; + + /* @@@ The IA-64 ABI says that this structure must be double-word aligned. + Taking that literally does not make much sense generically. Instead we + provide the maximum alignment required by any type for the machine. */ +} __attribute__((__aligned__)); + + +/* The ACTIONS argument to the personality routine is a bitwise OR of one + or more of the following constants. */ +typedef int _Unwind_Action; + +#define _UA_SEARCH_PHASE 1 +#define _UA_CLEANUP_PHASE 2 +#define _UA_HANDLER_FRAME 4 +#define _UA_FORCE_UNWIND 8 +#define _UA_END_OF_STACK 16 + +/* The target can override this macro to define any back-end-specific + attributes required for the lowest-level stack frame. */ +#ifndef LIBGCC2_UNWIND_ATTRIBUTE +#define LIBGCC2_UNWIND_ATTRIBUTE +#endif + +/* This is an opaque type used to refer to a system-specific data + structure used by the system unwinder. This context is created and + destroyed by the system, and passed to the personality routine + during unwinding. */ +struct _Unwind_Context; + +/* Raise an exception, passing along the given exception object. */ +extern _Unwind_Reason_Code LIBGCC2_UNWIND_ATTRIBUTE +_Unwind_RaiseException (struct _Unwind_Exception *); + +/* Raise an exception for forced unwinding. */ + +typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn) + (int, _Unwind_Action, _Unwind_Exception_Class, + struct _Unwind_Exception *, struct _Unwind_Context *, void *); + +extern _Unwind_Reason_Code LIBGCC2_UNWIND_ATTRIBUTE +_Unwind_ForcedUnwind (struct _Unwind_Exception *, _Unwind_Stop_Fn, void *); + +/* Helper to invoke the exception_cleanup routine. */ +extern void _Unwind_DeleteException (struct _Unwind_Exception *); + +/* Resume propagation of an existing exception. This is used after + e.g. executing cleanup code, and not to implement rethrowing. */ +extern void LIBGCC2_UNWIND_ATTRIBUTE +_Unwind_Resume (struct _Unwind_Exception *); + +/* @@@ Resume propagation of a FORCE_UNWIND exception, or to rethrow + a normal exception that was handled. */ +extern _Unwind_Reason_Code LIBGCC2_UNWIND_ATTRIBUTE +_Unwind_Resume_or_Rethrow (struct _Unwind_Exception *); + +/* @@@ Use unwind data to perform a stack backtrace. The trace callback + is called for every stack frame in the call chain, but no cleanup + actions are performed. */ +typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn) + (struct _Unwind_Context *, void *); + +extern _Unwind_Reason_Code LIBGCC2_UNWIND_ATTRIBUTE +_Unwind_Backtrace (_Unwind_Trace_Fn, void *); + +/* These functions are used for communicating information about the unwind + context (i.e. 
the unwind descriptors and the user register state) between + the unwind library and the personality routine and landing pad. Only + selected registers may be manipulated. */ + +extern _Unwind_Word _Unwind_GetGR (struct _Unwind_Context *, int); +extern void _Unwind_SetGR (struct _Unwind_Context *, int, _Unwind_Word); + +extern _Unwind_Ptr _Unwind_GetIP (struct _Unwind_Context *); +extern _Unwind_Ptr _Unwind_GetIPInfo (struct _Unwind_Context *, int *); +extern void _Unwind_SetIP (struct _Unwind_Context *, _Unwind_Ptr); + +/* @@@ Retrieve the CFA of the given context. */ +extern _Unwind_Word _Unwind_GetCFA (struct _Unwind_Context *); + +extern void *_Unwind_GetLanguageSpecificData (struct _Unwind_Context *); + +extern _Unwind_Ptr _Unwind_GetRegionStart (struct _Unwind_Context *); + + +/* The personality routine is the function in the C++ (or other language) + runtime library which serves as an interface between the system unwind + library and language-specific exception handling semantics. It is + specific to the code fragment described by an unwind info block, and + it is always referenced via the pointer in the unwind info block, and + hence it has no ABI-specified name. + + Note that this implies that two different C++ implementations can + use different names, and have different contents in the language + specific data area. Moreover, that the language specific data + area contains no version info because name of the function invoked + provides more effective versioning by detecting at link time the + lack of code to handle the different data format. */ + +typedef _Unwind_Reason_Code (*_Unwind_Personality_Fn) + (int, _Unwind_Action, _Unwind_Exception_Class, + struct _Unwind_Exception *, struct _Unwind_Context *); + +/* @@@ The following alternate entry points are for setjmp/longjmp + based unwinding. */ + +struct SjLj_Function_Context; +extern void _Unwind_SjLj_Register (struct SjLj_Function_Context *); +extern void _Unwind_SjLj_Unregister (struct SjLj_Function_Context *); + +extern _Unwind_Reason_Code LIBGCC2_UNWIND_ATTRIBUTE +_Unwind_SjLj_RaiseException (struct _Unwind_Exception *); +extern _Unwind_Reason_Code LIBGCC2_UNWIND_ATTRIBUTE +_Unwind_SjLj_ForcedUnwind (struct _Unwind_Exception *, _Unwind_Stop_Fn, void *); +extern void LIBGCC2_UNWIND_ATTRIBUTE +_Unwind_SjLj_Resume (struct _Unwind_Exception *); +extern _Unwind_Reason_Code LIBGCC2_UNWIND_ATTRIBUTE +_Unwind_SjLj_Resume_or_Rethrow (struct _Unwind_Exception *); + +/* @@@ The following provide access to the base addresses for text + and data-relative addressing in the LDSA. In order to stay link + compatible with the standard ABI for IA-64, we inline these. */ + +#ifdef __ia64__ +#include <stdlib.h> + +static inline _Unwind_Ptr +_Unwind_GetDataRelBase (struct _Unwind_Context *_C) +{ + /* The GP is stored in R1. */ + return _Unwind_GetGR (_C, 1); +} + +static inline _Unwind_Ptr +_Unwind_GetTextRelBase (struct _Unwind_Context *_C __attribute__ ((__unused__))) +{ + abort (); + return 0; +} + +/* @@@ Retrieve the Backing Store Pointer of the given context. */ +extern _Unwind_Word _Unwind_GetBSP (struct _Unwind_Context *); +#else +extern _Unwind_Ptr _Unwind_GetDataRelBase (struct _Unwind_Context *); +extern _Unwind_Ptr _Unwind_GetTextRelBase (struct _Unwind_Context *); +#endif + +/* @@@ Given an address, return the entry point of the function that + contains it. 
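+   e.g. (an editor's sketch, valid only where unwind data covers the
+   caller):
+
+     void *entry =
+       _Unwind_FindEnclosingFunction (__builtin_return_address (0));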
*/ +extern void * _Unwind_FindEnclosingFunction (void *pc); + +#ifndef __SIZEOF_LONG__ + #error "__SIZEOF_LONG__ macro not defined" +#endif + +#ifndef __SIZEOF_POINTER__ + #error "__SIZEOF_POINTER__ macro not defined" +#endif + + +/* leb128 type numbers have a potentially unlimited size. + The target of the following definitions of _sleb128_t and _uleb128_t + is to have efficient data types large enough to hold the leb128 type + numbers used in the unwind code. + Mostly these types will simply be defined as long and unsigned long, + except when an unsigned long data type on the target machine is not + capable of storing a pointer.  */ + +#if __SIZEOF_LONG__ >= __SIZEOF_POINTER__ + typedef long _sleb128_t; + typedef unsigned long _uleb128_t; +#elif __SIZEOF_LONG_LONG__ >= __SIZEOF_POINTER__ + typedef long long _sleb128_t; + typedef unsigned long long _uleb128_t; +#else +# error "What type shall we use for _sleb128_t?" +#endif + +#ifdef __cplusplus +} +#endif + +#ifndef HIDE_EXPORTS +#pragma GCC visibility pop +#endif + +#endif /* unwind.h */ diff --git a/lib/gcc/i686-linux-android/4.6/include/varargs.h b/lib/gcc/i686-linux-android/4.6/include/varargs.h new file mode 100644 index 0000000..4b9803e --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/varargs.h @@ -0,0 +1,7 @@ +#ifndef _VARARGS_H +#define _VARARGS_H + +#error "GCC no longer implements <varargs.h>." +#error "Revise your code to use <stdarg.h>." + +#endif diff --git a/lib/gcc/i686-linux-android/4.6/include/wmmintrin.h b/lib/gcc/i686-linux-android/4.6/include/wmmintrin.h new file mode 100644 index 0000000..2c4bdc9 --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/wmmintrin.h @@ -0,0 +1,120 @@ +/* Copyright (C) 2008, 2009 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see + <http://www.gnu.org/licenses/>.  */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 10.1.  */ + +#ifndef _WMMINTRIN_H_INCLUDED +#define _WMMINTRIN_H_INCLUDED + +/* We need definitions from the SSE2 header file.  */ +#include <emmintrin.h> + +#if !defined (__AES__) && !defined (__PCLMUL__) +# error "AES/PCLMUL instructions not enabled" +#else + +/* AES */ + +#ifdef __AES__ +/* Performs 1 round of AES decryption of the first m128i using + the second m128i as a round key.  */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_aesdec_si128 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_aesdec128 ((__v2di)__X, (__v2di)__Y); +} + +/* Performs the last round of AES decryption of the first m128i + using the second m128i as a round key.
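+   In a complete AES-128 decryption this is the final step (an editor's
+   sketch; the inverse key schedule ks[0..10] is assumed and is not part
+   of this header):
+
+     __m128i s = _mm_xor_si128 (block, ks[0]);
+     for (int i = 1; i < 10; ++i)
+       s = _mm_aesdec_si128 (s, ks[i]);
+     s = _mm_aesdeclast_si128 (s, ks[10]);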
*/ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_aesdeclast_si128 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_aesdeclast128 ((__v2di)__X, + (__v2di)__Y); +} + +/* Performs 1 round of AES encryption of the first m128i using + the second m128i as a round key.  */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_aesenc_si128 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_aesenc128 ((__v2di)__X, (__v2di)__Y); +} + +/* Performs the last round of AES encryption of the first m128i + using the second m128i as a round key.  */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_aesenclast_si128 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_aesenclast128 ((__v2di)__X, (__v2di)__Y); +} + +/* Performs the InverseMixColumn operation on the source m128i + and stores the result into the m128i destination.  */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_aesimc_si128 (__m128i __X) +{ + return (__m128i) __builtin_ia32_aesimc128 ((__v2di)__X); +} + +/* Generates an m128i round key for the input m128i AES cipher key and + byte round constant.  The second parameter must be a compile-time + constant.  */ +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_aeskeygenassist_si128 (__m128i __X, const int __C) +{ + return (__m128i) __builtin_ia32_aeskeygenassist128 ((__v2di)__X, __C); +} +#else +#define _mm_aeskeygenassist_si128(X, C) \ + ((__m128i) __builtin_ia32_aeskeygenassist128 ((__v2di)(__m128i)(X), \ + (int)(C))) +#endif +#endif /* __AES__ */ + +/* PCLMUL */ + +#ifdef __PCLMUL__ +/* Performs carry-less integer multiplication of 64-bit halves of + 128-bit input operands.  The third parameter indicates which 64-bit + halves of the input parameters __X and __Y should be used.  It must + be a compile-time constant.  */ +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_clmulepi64_si128 (__m128i __X, __m128i __Y, const int __I) +{ + return (__m128i) __builtin_ia32_pclmulqdq128 ((__v2di)__X, + (__v2di)__Y, __I); +} +#else +#define _mm_clmulepi64_si128(X, Y, I) \ + ((__m128i) __builtin_ia32_pclmulqdq128 ((__v2di)(__m128i)(X), \ + (__v2di)(__m128i)(Y), (int)(I))) +#endif +#endif /* __PCLMUL__ */ + +#endif /* __AES__/__PCLMUL__ */ + +#endif /* _WMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-linux-android/4.6/include/x86intrin.h b/lib/gcc/i686-linux-android/4.6/include/x86intrin.h new file mode 100644 index 0000000..36b43df --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/x86intrin.h @@ -0,0 +1,96 @@ +/* Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation.
+ + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _X86INTRIN_H_INCLUDED +#define _X86INTRIN_H_INCLUDED + +#include <ia32intrin.h> + +#ifdef __MMX__ +#include <mmintrin.h> +#endif + +#ifdef __SSE__ +#include <xmmintrin.h> +#endif + +#ifdef __SSE2__ +#include <emmintrin.h> +#endif + +#ifdef __SSE3__ +#include <pmmintrin.h> +#endif + +#ifdef __SSSE3__ +#include <tmmintrin.h> +#endif + +#ifdef __SSE4A__ +#include <ammintrin.h> +#endif + +#if defined (__SSE4_2__) || defined (__SSE4_1__) +#include <smmintrin.h> +#endif + +#if defined (__AES__) || defined (__PCLMUL__) +#include <wmmintrin.h> +#endif + +/* For including AVX instructions */ +#include <immintrin.h> + +#ifdef __3dNOW__ +#include <mm3dnow.h> +#endif + +#ifdef __FMA4__ +#include <fma4intrin.h> +#endif + +#ifdef __XOP__ +#include <xopintrin.h> +#endif + +#ifdef __LWP__ +#include <lwpintrin.h> +#endif + +#ifdef __ABM__ +#include <abmintrin.h> +#endif + +#ifdef __BMI__ +#include <bmiintrin.h> +#endif + +#ifdef __TBM__ +#include <tbmintrin.h> +#endif + +#ifdef __POPCNT__ +#include <popcntintrin.h> +#endif + +#endif /* _X86INTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-linux-android/4.6/include/xmmintrin.h b/lib/gcc/i686-linux-android/4.6/include/xmmintrin.h new file mode 100644 index 0000000..5aefa9d --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/xmmintrin.h @@ -0,0 +1,1251 @@ +/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 + Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 9.0. */ + +#ifndef _XMMINTRIN_H_INCLUDED +#define _XMMINTRIN_H_INCLUDED + +#ifndef __SSE__ +# error "SSE instruction set not enabled" +#else + +/* We need type definitions from the MMX header file. */ +#include <mmintrin.h> + +/* Get _mm_malloc () and _mm_free (). */ +#include <mm_malloc.h> + +/* The Intel API is flexible enough that we must allow aliasing with other + vector types, and their scalar components. */ +typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__)); + +/* Internal data types for implementing the intrinsics. */ +typedef float __v4sf __attribute__ ((__vector_size__ (16))); + +/* Create a selector for use with the SHUFPS instruction. */ +#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \ + (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0)) + +/* Constants for use with _mm_prefetch. 
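+   e.g. (an editor's sketch) _mm_prefetch ((const char *) p, _MM_HINT_T0)
+   requests the cache line containing *p at all cache levels, while
+   _MM_HINT_NTA asks for a non-temporal fetch that minimizes cache
+   pollution.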
*/ +enum _mm_hint +{ + _MM_HINT_T0 = 3, + _MM_HINT_T1 = 2, + _MM_HINT_T2 = 1, + _MM_HINT_NTA = 0 +}; + +/* Bits in the MXCSR. */ +#define _MM_EXCEPT_MASK 0x003f +#define _MM_EXCEPT_INVALID 0x0001 +#define _MM_EXCEPT_DENORM 0x0002 +#define _MM_EXCEPT_DIV_ZERO 0x0004 +#define _MM_EXCEPT_OVERFLOW 0x0008 +#define _MM_EXCEPT_UNDERFLOW 0x0010 +#define _MM_EXCEPT_INEXACT 0x0020 + +#define _MM_MASK_MASK 0x1f80 +#define _MM_MASK_INVALID 0x0080 +#define _MM_MASK_DENORM 0x0100 +#define _MM_MASK_DIV_ZERO 0x0200 +#define _MM_MASK_OVERFLOW 0x0400 +#define _MM_MASK_UNDERFLOW 0x0800 +#define _MM_MASK_INEXACT 0x1000 + +#define _MM_ROUND_MASK 0x6000 +#define _MM_ROUND_NEAREST 0x0000 +#define _MM_ROUND_DOWN 0x2000 +#define _MM_ROUND_UP 0x4000 +#define _MM_ROUND_TOWARD_ZERO 0x6000 + +#define _MM_FLUSH_ZERO_MASK 0x8000 +#define _MM_FLUSH_ZERO_ON 0x8000 +#define _MM_FLUSH_ZERO_OFF 0x0000 + +/* Create a vector of zeros. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setzero_ps (void) +{ + return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f }; +} + +/* Perform the respective operation on the lower SPFP (single-precision + floating-point) values of A and B; the upper three SPFP values are + passed through from A. */ + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_div_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sqrt_ss (__m128 __A) +{ + return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rcp_ss (__m128 __A) +{ + return (__m128) __builtin_ia32_rcpss ((__v4sf)__A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rsqrt_ss (__m128 __A) +{ + return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B); +} + +/* Perform the respective operation on the four SPFP values in A and B. 
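+   e.g. (an editor's sketch, using _mm_set_ps from later in this header):
+
+     __m128 a = _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f);
+     __m128 s = _mm_add_ps (a, a);   lanes become {2, 4, 6, 8}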
*/ + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_div_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sqrt_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rcp_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_rcpps ((__v4sf)__A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rsqrt_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B); +} + +/* Perform logical bit-wise operations on 128-bit values. */ + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_and_ps (__m128 __A, __m128 __B) +{ + return __builtin_ia32_andps (__A, __B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_andnot_ps (__m128 __A, __m128 __B) +{ + return __builtin_ia32_andnps (__A, __B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_or_ps (__m128 __A, __m128 __B) +{ + return __builtin_ia32_orps (__A, __B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_xor_ps (__m128 __A, __m128 __B) +{ + return __builtin_ia32_xorps (__A, __B); +} + +/* Perform a comparison on the lower SPFP values of A and B. If the + comparison is true, place a mask of all ones in the result, otherwise a + mask of zeros. The upper three SPFP values are passed through from A. 
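+   e.g. (an editor's sketch) after
+
+     __m128 m = _mm_cmplt_ss (a, b);
+
+   lane 0 of m is all ones iff the lowest element of a is less than that
+   of b, and lanes 1-3 are copied unchanged from a.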
*/ + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmple_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movss ((__v4sf) __A, + (__v4sf) + __builtin_ia32_cmpltss ((__v4sf) __B, + (__v4sf) + __A)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpge_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movss ((__v4sf) __A, + (__v4sf) + __builtin_ia32_cmpless ((__v4sf) __B, + (__v4sf) + __A)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpneq_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnlt_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnle_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpngt_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movss ((__v4sf) __A, + (__v4sf) + __builtin_ia32_cmpnltss ((__v4sf) __B, + (__v4sf) + __A)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnge_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movss ((__v4sf) __A, + (__v4sf) + __builtin_ia32_cmpnless ((__v4sf) __B, + (__v4sf) + __A)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpord_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpunord_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B); +} + +/* Perform a comparison on the four SPFP values of A and B. For each + element, if the comparison is true, place a mask of all ones in the + result, otherwise a mask of zeros. 
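+   These masks combine with the logical operations above into branchless
+   selects (an editor's sketch):
+
+     __m128 m   = _mm_cmplt_ps (a, b);               per-lane a < b
+     __m128 min = _mm_or_ps (_mm_and_ps (m, a),      a where the mask is set
+                             _mm_andnot_ps (m, b));  b elsewhere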
*/ + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmple_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpge_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpneq_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnlt_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnle_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpngt_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnge_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpord_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpunord_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B); +} + +/* Compare the lower SPFP values of A and B and return 1 if true + and 0 if false. 
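+   Unlike the _mm_cmp*_ss forms above, these return a plain int that can
+   drive a branch directly (an editor's sketch; handle_less is a
+   hypothetical helper):
+
+     if (_mm_comilt_ss (a, b))
+       handle_less ();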
*/ + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comieq_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comilt_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comile_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comigt_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comige_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comineq_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomieq_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomilt_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomile_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomigt_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomige_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomineq_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B); +} + +/* Convert the lower SPFP value to a 32-bit integer according to the current + rounding mode. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtss_si32 (__m128 __A) +{ + return __builtin_ia32_cvtss2si ((__v4sf) __A); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_ss2si (__m128 __A) +{ + return _mm_cvtss_si32 (__A); +} + +#ifdef __x86_64__ +/* Convert the lower SPFP value to a 32-bit integer according to the + current rounding mode. */ + +/* Intel intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtss_si64 (__m128 __A) +{ + return __builtin_ia32_cvtss2si64 ((__v4sf) __A); +} + +/* Microsoft intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtss_si64x (__m128 __A) +{ + return __builtin_ia32_cvtss2si64 ((__v4sf) __A); +} +#endif + +/* Convert the two lower SPFP values to 32-bit integers according to the + current rounding mode. Return the integers in packed form. 
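+
+   Illustrative sketch (not part of this header), contrasting the
+   rounding conversions with the truncating _mm_cvtt* forms defined just
+   below; under the default round-to-nearest-even mode:
+
+     static inline void cvt_demo (int __out[4])
+     {
+       __out[0] = _mm_cvtss_si32  (_mm_set_ss (2.5f)); // 2 (nearest even)
+       __out[1] = _mm_cvttss_si32 (_mm_set_ss (2.5f)); // 2 (truncated)
+       __out[2] = _mm_cvtss_si32  (_mm_set_ss (3.5f)); // 4 (nearest even)
+       __out[3] = _mm_cvttss_si32 (_mm_set_ss (3.5f)); // 3 (truncated)
+     }
+
+   (_mm_set_ss is defined later in this file.)
+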
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtps_pi32 (__m128 __A) +{ + return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_ps2pi (__m128 __A) +{ + return _mm_cvtps_pi32 (__A); +} + +/* Truncate the lower SPFP value to a 32-bit integer. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttss_si32 (__m128 __A) +{ + return __builtin_ia32_cvttss2si ((__v4sf) __A); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_ss2si (__m128 __A) +{ + return _mm_cvttss_si32 (__A); +} + +#ifdef __x86_64__ +/* Truncate the lower SPFP value to a 32-bit integer. */ + +/* Intel intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttss_si64 (__m128 __A) +{ + return __builtin_ia32_cvttss2si64 ((__v4sf) __A); +} + +/* Microsoft intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttss_si64x (__m128 __A) +{ + return __builtin_ia32_cvttss2si64 ((__v4sf) __A); +} +#endif + +/* Truncate the two lower SPFP values to 32-bit integers. Return the + integers in packed form. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttps_pi32 (__m128 __A) +{ + return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_ps2pi (__m128 __A) +{ + return _mm_cvttps_pi32 (__A); +} + +/* Convert B to a SPFP value and insert it as element zero in A. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi32_ss (__m128 __A, int __B) +{ + return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_si2ss (__m128 __A, int __B) +{ + return _mm_cvtsi32_ss (__A, __B); +} + +#ifdef __x86_64__ +/* Convert B to a SPFP value and insert it as element zero in A. */ + +/* Intel intrinsic. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64_ss (__m128 __A, long long __B) +{ + return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B); +} + +/* Microsoft intrinsic. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64x_ss (__m128 __A, long long __B) +{ + return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B); +} +#endif + +/* Convert the two 32-bit values in B to SPFP form and insert them + as the two lower elements in A. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpi32_ps (__m128 __A, __m64 __B) +{ + return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_pi2ps (__m128 __A, __m64 __B) +{ + return _mm_cvtpi32_ps (__A, __B); +} + +/* Convert the four signed 16-bit values in A to SPFP form. 
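+
+   A usage sketch (illustrative, not part of this header).  These MMX
+   conversions touch the shared x87/MMX register state, so code using
+   them should finish with _mm_empty () from <mmintrin.h>:
+
+     static inline void widen4 (short __s0, short __s1, short __s2,
+                                short __s3, float *__dst)
+     {
+       __m64 __v = _mm_set_pi16 (__s3, __s2, __s1, __s0);
+       _mm_storeu_ps (__dst, _mm_cvtpi16_ps (__v)); // dst[i] = (float) si
+       _mm_empty ();                                // leave MMX mode
+     }
+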
*/ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpi16_ps (__m64 __A) +{ + __v4hi __sign; + __v2si __hisi, __losi; + __v4sf __zero, __ra, __rb; + + /* This comparison against zero gives us a mask that can be used to + fill in the missing sign bits in the unpack operations below, so + that we get signed values after unpacking. */ + __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A); + + /* Convert the four words to doublewords. */ + __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign); + __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign); + + /* Convert the doublewords to floating point two at a time. */ + __zero = (__v4sf) _mm_setzero_ps (); + __ra = __builtin_ia32_cvtpi2ps (__zero, __losi); + __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi); + + return (__m128) __builtin_ia32_movlhps (__ra, __rb); +} + +/* Convert the four unsigned 16-bit values in A to SPFP form. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpu16_ps (__m64 __A) +{ + __v2si __hisi, __losi; + __v4sf __zero, __ra, __rb; + + /* Convert the four words to doublewords. */ + __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL); + __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL); + + /* Convert the doublewords to floating point two at a time. */ + __zero = (__v4sf) _mm_setzero_ps (); + __ra = __builtin_ia32_cvtpi2ps (__zero, __losi); + __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi); + + return (__m128) __builtin_ia32_movlhps (__ra, __rb); +} + +/* Convert the low four signed 8-bit values in A to SPFP form. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpi8_ps (__m64 __A) +{ + __v8qi __sign; + + /* This comparison against zero gives us a mask that can be used to + fill in the missing sign bits in the unpack operations below, so + that we get signed values after unpacking. */ + __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A); + + /* Convert the four low bytes to words. */ + __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign); + + return _mm_cvtpi16_ps(__A); +} + +/* Convert the low four unsigned 8-bit values in A to SPFP form. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpu8_ps(__m64 __A) +{ + __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL); + return _mm_cvtpu16_ps(__A); +} + +/* Convert the four signed 32-bit values in A and B to SPFP form. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpi32x2_ps(__m64 __A, __m64 __B) +{ + __v4sf __zero = (__v4sf) _mm_setzero_ps (); + __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A); + __v4sf __sfb = __builtin_ia32_cvtpi2ps (__sfa, (__v2si)__B); + return (__m128) __builtin_ia32_movlhps (__sfa, __sfb); +} + +/* Convert the four SPFP values in A to four signed 16-bit integers. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtps_pi16(__m128 __A) +{ + __v4sf __hisf = (__v4sf)__A; + __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf); + __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf); + __v2si __losi = __builtin_ia32_cvtps2pi (__losf); + return (__m64) __builtin_ia32_packssdw (__hisi, __losi); +} + +/* Convert the four SPFP values in A to four signed 8-bit integers. 
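+
+   For illustration (a sketch, not part of this header): the conversion
+   below goes through packssdw/packsswb, so out-of-range values saturate
+   to [-128, 127].  Using _mm_cvtsi64_si32 and _mm_empty from
+   <mmintrin.h>:
+
+     static inline void quantize4 (const float *__src, int *__dst)
+     {
+       __m64 __p = _mm_cvtps_pi8 (_mm_loadu_ps (__src));
+       *__dst = _mm_cvtsi64_si32 (__p); // four saturated bytes, low 32 bits
+       _mm_empty ();
+     }
+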
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtps_pi8(__m128 __A) +{ + __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A); + return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL); +} + +/* Selects four specific SPFP values from A and B based on MASK. */ +#ifdef __OPTIMIZE__ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask) +{ + return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask); +} +#else +#define _mm_shuffle_ps(A, B, MASK) \ + ((__m128) __builtin_ia32_shufps ((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(MASK))) +#endif + +/* Selects and interleaves the upper two SPFP values from A and B. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B); +} + +/* Selects and interleaves the lower two SPFP values from A and B. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B); +} + +/* Sets the upper two SPFP values with 64-bits of data loaded from P; + the lower two values are passed through from A. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadh_pi (__m128 __A, __m64 const *__P) +{ + return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (const __v2sf *)__P); +} + +/* Stores the upper two SPFP values of A into P. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeh_pi (__m64 *__P, __m128 __A) +{ + __builtin_ia32_storehps ((__v2sf *)__P, (__v4sf)__A); +} + +/* Moves the upper two values of B into the lower two values of A. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movehl_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B); +} + +/* Moves the lower two values of B into the upper two values of A. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movelh_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B); +} + +/* Sets the lower two SPFP values with 64-bits of data loaded from P; + the upper two values are passed through from A. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadl_pi (__m128 __A, __m64 const *__P) +{ + return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (const __v2sf *)__P); +} + +/* Stores the lower two SPFP values of A into P. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storel_pi (__m64 *__P, __m128 __A) +{ + __builtin_ia32_storelps ((__v2sf *)__P, (__v4sf)__A); +} + +/* Creates a 4-bit mask from the most significant bits of the SPFP values. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movemask_ps (__m128 __A) +{ + return __builtin_ia32_movmskps ((__v4sf)__A); +} + +/* Return the contents of the control register. */ +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_getcsr (void) +{ + return __builtin_ia32_stmxcsr (); +} + +/* Read exception bits from the control register. 
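+
+   Sketch for illustration (not part of this header): the exception bits
+   are sticky, recording every masked exception since they were last
+   cleared.  With the _MM_EXCEPT_* masks defined earlier in this file:
+
+     static inline int had_div_by_zero (void)
+     {
+       return (_MM_GET_EXCEPTION_STATE () & _MM_EXCEPT_DIV_ZERO) != 0;
+     }
+
+     static inline void clear_fp_flags (void)
+     {
+       _MM_SET_EXCEPTION_STATE (0); // reset the sticky flags
+     }
+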
*/ +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_MM_GET_EXCEPTION_STATE (void) +{ + return _mm_getcsr() & _MM_EXCEPT_MASK; +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_MM_GET_EXCEPTION_MASK (void) +{ + return _mm_getcsr() & _MM_MASK_MASK; +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_MM_GET_ROUNDING_MODE (void) +{ + return _mm_getcsr() & _MM_ROUND_MASK; +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_MM_GET_FLUSH_ZERO_MODE (void) +{ + return _mm_getcsr() & _MM_FLUSH_ZERO_MASK; +} + +/* Set the control register to I. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setcsr (unsigned int __I) +{ + __builtin_ia32_ldmxcsr (__I); +} + +/* Set exception bits in the control register. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_MM_SET_EXCEPTION_STATE(unsigned int __mask) +{ + _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_MM_SET_EXCEPTION_MASK (unsigned int __mask) +{ + _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_MM_SET_ROUNDING_MODE (unsigned int __mode) +{ + _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode) +{ + _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode); +} + +/* Create a vector with element 0 as F and the rest zero. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_ss (float __F) +{ + return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f }; +} + +/* Create a vector with all four elements equal to F. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_ps (float __F) +{ + return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F }; +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_ps1 (float __F) +{ + return _mm_set1_ps (__F); +} + +/* Create a vector with element 0 as *P and the rest zero. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_ss (float const *__P) +{ + return _mm_set_ss (*__P); +} + +/* Create a vector with all four elements equal to *P. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load1_ps (float const *__P) +{ + return _mm_set1_ps (*__P); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_ps1 (float const *__P) +{ + return _mm_load1_ps (__P); +} + +/* Load four SPFP values from P. The address must be 16-byte aligned. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_ps (float const *__P) +{ + return (__m128) *(__v4sf *)__P; +} + +/* Load four SPFP values from P. The address need not be 16-byte aligned. 
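+
+   Illustrative sketch (not part of this header): _mm_load_ps above
+   faults on a misaligned pointer, while the movups-based form below
+   accepts any address, possibly more slowly on older processors:
+
+     static inline __m128 load_any (const float *__p)
+     {
+       return _mm_loadu_ps (__p); // safe for unaligned __p
+     }
+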
*/ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadu_ps (float const *__P) +{ + return (__m128) __builtin_ia32_loadups (__P); +} + +/* Load four SPFP values in reverse order. The address must be aligned. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadr_ps (float const *__P) +{ + __v4sf __tmp = *(__v4sf *)__P; + return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3)); +} + +/* Create the vector [Z Y X W]. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W) +{ + return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z }; +} + +/* Create the vector [W X Y Z]. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_ps (float __Z, float __Y, float __X, float __W) +{ + return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W }; +} + +/* Stores the lower SPFP value. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_ss (float *__P, __m128 __A) +{ + *__P = __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0); +} + +extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtss_f32 (__m128 __A) +{ + return __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0); +} + +/* Store four SPFP values. The address must be 16-byte aligned. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_ps (float *__P, __m128 __A) +{ + *(__v4sf *)__P = (__v4sf)__A; +} + +/* Store four SPFP values. The address need not be 16-byte aligned. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeu_ps (float *__P, __m128 __A) +{ + __builtin_ia32_storeups (__P, (__v4sf)__A); +} + +/* Store the lower SPFP value across four words. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store1_ps (float *__P, __m128 __A) +{ + __v4sf __va = (__v4sf)__A; + __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0)); + _mm_storeu_ps (__P, __tmp); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_ps1 (float *__P, __m128 __A) +{ + _mm_store1_ps (__P, __A); +} + +/* Store four SPFP values in reverse order. The address must be aligned. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storer_ps (float *__P, __m128 __A) +{ + __v4sf __va = (__v4sf)__A; + __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3)); + _mm_store_ps (__P, __tmp); +} + +/* Sets the low SPFP value of A from the low value of B. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_move_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B); +} + +/* Extracts one of the four words of A. The selector N must be immediate. 
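+
+   A sketch for illustration (not part of this header).  The selector
+   must be a compile-time constant, which is why a macro form is
+   provided when the inline functions cannot be folded:
+
+     static inline int third_word (__m64 __v)
+     {
+       int __w = _mm_extract_pi16 (__v, 2); // words are numbered 0..3
+       _mm_empty ();                        // from <mmintrin.h>
+       return __w;
+     }
+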
*/ +#ifdef __OPTIMIZE__ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_extract_pi16 (__m64 const __A, int const __N) +{ + return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pextrw (__m64 const __A, int const __N) +{ + return _mm_extract_pi16 (__A, __N); +} +#else +#define _mm_extract_pi16(A, N) \ + ((int) __builtin_ia32_vec_ext_v4hi ((__v4hi)(__m64)(A), (int)(N))) + +#define _m_pextrw(A, N) _mm_extract_pi16(A, N) +#endif + +/* Inserts word D into one of four words of A. The selector N must be + immediate. */ +#ifdef __OPTIMIZE__ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_insert_pi16 (__m64 const __A, int const __D, int const __N) +{ + return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pinsrw (__m64 const __A, int const __D, int const __N) +{ + return _mm_insert_pi16 (__A, __D, __N); +} +#else +#define _mm_insert_pi16(A, D, N) \ + ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(__m64)(A), \ + (int)(D), (int)(N))) + +#define _m_pinsrw(A, D, N) _mm_insert_pi16(A, D, N) +#endif + +/* Compute the element-wise maximum of signed 16-bit values. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_pi16 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pmaxsw (__m64 __A, __m64 __B) +{ + return _mm_max_pi16 (__A, __B); +} + +/* Compute the element-wise maximum of unsigned 8-bit values. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_pu8 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pmaxub (__m64 __A, __m64 __B) +{ + return _mm_max_pu8 (__A, __B); +} + +/* Compute the element-wise minimum of signed 16-bit values. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_pi16 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pminsw (__m64 __A, __m64 __B) +{ + return _mm_min_pi16 (__A, __B); +} + +/* Compute the element-wise minimum of unsigned 8-bit values. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_pu8 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pminub (__m64 __A, __m64 __B) +{ + return _mm_min_pu8 (__A, __B); +} + +/* Create an 8-bit mask of the signs of 8-bit values. 
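+
+   Illustrative sketch (not part of this header): bit i of the result is
+   the sign bit of byte i, so a zero result means no negative bytes:
+
+     static inline int any_negative_byte (__m64 __v)
+     {
+       int __m = _mm_movemask_pi8 (__v);
+       _mm_empty ();
+       return __m != 0;
+     }
+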
*/ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movemask_pi8 (__m64 __A) +{ + return __builtin_ia32_pmovmskb ((__v8qi)__A); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pmovmskb (__m64 __A) +{ + return _mm_movemask_pi8 (__A); +} + +/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values + in B and produce the high 16 bits of the 32-bit results. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mulhi_pu16 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pmulhuw (__m64 __A, __m64 __B) +{ + return _mm_mulhi_pu16 (__A, __B); +} + +/* Return a combination of the four 16-bit values in A. The selector + must be an immediate. */ +#ifdef __OPTIMIZE__ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shuffle_pi16 (__m64 __A, int const __N) +{ + return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pshufw (__m64 __A, int const __N) +{ + return _mm_shuffle_pi16 (__A, __N); +} +#else +#define _mm_shuffle_pi16(A, N) \ + ((__m64) __builtin_ia32_pshufw ((__v4hi)(__m64)(A), (int)(N))) + +#define _m_pshufw(A, N) _mm_shuffle_pi16 (A, N) +#endif + +/* Conditionally store byte elements of A into P. The high bit of each + byte in the selector N determines whether the corresponding byte from + A is stored. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P) +{ + __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_maskmovq (__m64 __A, __m64 __N, char *__P) +{ + _mm_maskmove_si64 (__A, __N, __P); +} + +/* Compute the rounded averages of the unsigned 8-bit values in A and B. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_avg_pu8 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pavgb (__m64 __A, __m64 __B) +{ + return _mm_avg_pu8 (__A, __B); +} + +/* Compute the rounded averages of the unsigned 16-bit values in A and B. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_avg_pu16 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pavgw (__m64 __A, __m64 __B) +{ + return _mm_avg_pu16 (__A, __B); +} + +/* Compute the sum of the absolute differences of the unsigned 8-bit + values in A and B. Return the value in the lower 16-bit word; the + upper words are cleared. 
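+
+   A sketch for illustration (not part of this header): because the
+   upper words are cleared, the low 32 bits already hold the full sum
+   (at most 8 * 255 = 2040).  Using _mm_cvtsi64_si32 from <mmintrin.h>:
+
+     static inline unsigned sad8 (__m64 __a, __m64 __b)
+     {
+       unsigned __s = (unsigned) _mm_cvtsi64_si32 (_mm_sad_pu8 (__a, __b));
+       _mm_empty ();
+       return __s;
+     }
+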
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sad_pu8 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psadbw (__m64 __A, __m64 __B) +{ + return _mm_sad_pu8 (__A, __B); +} + +/* Loads one cache line from address P to a location "closer" to the + processor. The selector I specifies the type of prefetch operation. */ +#ifdef __OPTIMIZE__ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_prefetch (const void *__P, enum _mm_hint __I) +{ + __builtin_prefetch (__P, 0, __I); +} +#else +#define _mm_prefetch(P, I) \ + __builtin_prefetch ((P), 0, (I)) +#endif + +/* Stores the data in A to the address P without polluting the caches. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_pi (__m64 *__P, __m64 __A) +{ + __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A); +} + +/* Likewise. The address must be 16-byte aligned. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_ps (float *__P, __m128 __A) +{ + __builtin_ia32_movntps (__P, (__v4sf)__A); +} + +/* Guarantees that every preceding store is globally visible before + any subsequent store. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sfence (void) +{ + __builtin_ia32_sfence (); +} + +/* The execution of the next instruction is delayed by an implementation + specific amount of time. The instruction does not modify the + architectural state. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_pause (void) +{ + __asm__ __volatile__ ("rep; nop" : : ); +} + +/* Transpose the 4x4 matrix composed of row[0-3]. */ +#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \ +do { \ + __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \ + __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1); \ + __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3); \ + __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1); \ + __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3); \ + (row0) = __builtin_ia32_movlhps (__t0, __t1); \ + (row1) = __builtin_ia32_movhlps (__t1, __t0); \ + (row2) = __builtin_ia32_movlhps (__t2, __t3); \ + (row3) = __builtin_ia32_movhlps (__t3, __t2); \ +} while (0) + +/* For backward source compatibility. */ +#ifdef __SSE2__ +# include <emmintrin.h> +#endif + +#endif /* __SSE__ */ +#endif /* _XMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-linux-android/4.6/include/xopintrin.h b/lib/gcc/i686-linux-android/4.6/include/xopintrin.h new file mode 100644 index 0000000..3ebcb4b --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/include/xopintrin.h @@ -0,0 +1,835 @@ +/* Copyright (C) 2007, 2008, 2009, 2010 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>. */
+
+#ifndef _X86INTRIN_H_INCLUDED
+# error "Never use <xopintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef _XOPMMINTRIN_H_INCLUDED
+#define _XOPMMINTRIN_H_INCLUDED
+
+#ifndef __XOP__
+# error "XOP instruction set not enabled"
+#else
+
+#include <fma4intrin.h>
+
+/* Integer multiply/add instructions. */
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i) __builtin_ia32_vpmacssww ((__v8hi)__A,(__v8hi)__B, (__v8hi)__C);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_macc_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i) __builtin_ia32_vpmacsww ((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maccsd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i) __builtin_ia32_vpmacsswd ((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maccd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i) __builtin_ia32_vpmacswd ((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maccs_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i) __builtin_ia32_vpmacssdd ((__v4si)__A, (__v4si)__B, (__v4si)__C);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_macc_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i) __builtin_ia32_vpmacsdd ((__v4si)__A, (__v4si)__B, (__v4si)__C);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maccslo_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i) __builtin_ia32_vpmacssdql ((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_macclo_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i) __builtin_ia32_vpmacsdql ((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maccshi_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i) __builtin_ia32_vpmacssdqh ((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_macchi_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i) __builtin_ia32_vpmacsdqh ((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maddsd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i) __builtin_ia32_vpmadcsswd ((__v8hi)__A,(__v8hi)__B,(__v4si)__C);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maddd_epi16(__m128i __A, __m128i __B,
__m128i __C) +{ + return (__m128i) __builtin_ia32_vpmadcswd ((__v8hi)__A,(__v8hi)__B,(__v4si)__C); +} + +/* Packed Integer Horizontal Add and Subtract */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddw_epi8(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphaddbw ((__v16qi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddd_epi8(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphaddbd ((__v16qi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddq_epi8(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphaddbq ((__v16qi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddd_epi16(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphaddwd ((__v8hi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddq_epi16(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphaddwq ((__v8hi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddq_epi32(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphadddq ((__v4si)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddw_epu8(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphaddubw ((__v16qi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddd_epu8(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphaddubd ((__v16qi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddq_epu8(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphaddubq ((__v16qi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddd_epu16(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphadduwd ((__v8hi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddq_epu16(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphadduwq ((__v8hi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddq_epu32(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphaddudq ((__v4si)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsubw_epi8(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphsubbw ((__v16qi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsubd_epi16(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphsubwd ((__v8hi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsubq_epi32(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphsubdq ((__v4si)__A); +} + +/* Vector conditional move and permute */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmov_si128(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpcmov (__A, __B, __C); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_perm_epi8(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpperm ((__v16qi)__A, (__v16qi)__B, (__v16qi)__C); +} + +/* Packed Integer Rotates and Shifts + Rotates - Non-Immediate form */ + 
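+/* A sketch for illustration (not part of this header): the non-immediate
+   rotates below take a per-element count, where, per AMD's XOP
+   specification (stated here as an assumption), positive counts rotate
+   left and negative counts rotate right:
+
+     static inline __m128i rot_each (__m128i __v, __m128i __counts)
+     {
+       return _mm_rot_epi32 (__v, __counts); // lane i rotated by counts[i]
+     }
+
+   Code using XOP intrinsics must be built with -mxop and runs only on
+   processors implementing AMD's XOP extension.  */
+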
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rot_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vprotb ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rot_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vprotw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rot_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vprotd ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rot_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vprotq ((__v2di)__A, (__v2di)__B); +} + +/* Rotates - Immediate form */ + +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_roti_epi8(__m128i __A, const int __B) +{ + return (__m128i) __builtin_ia32_vprotbi ((__v16qi)__A, __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_roti_epi16(__m128i __A, const int __B) +{ + return (__m128i) __builtin_ia32_vprotwi ((__v8hi)__A, __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_roti_epi32(__m128i __A, const int __B) +{ + return (__m128i) __builtin_ia32_vprotdi ((__v4si)__A, __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_roti_epi64(__m128i __A, const int __B) +{ + return (__m128i) __builtin_ia32_vprotqi ((__v2di)__A, __B); +} +#else +#define _mm_roti_epi8(A, N) \ + ((__m128i) __builtin_ia32_vprotbi ((__v16qi)(__m128i)(A), (int)(N))) +#define _mm_roti_epi16(A, N) \ + ((__m128i) __builtin_ia32_vprotwi ((__v8hi)(__m128i)(A), (int)(N))) +#define _mm_roti_epi32(A, N) \ + ((__m128i) __builtin_ia32_vprotdi ((__v4si)(__m128i)(A), (int)(N))) +#define _mm_roti_epi64(A, N) \ + ((__m128i) __builtin_ia32_vprotqi ((__v2di)(__m128i)(A), (int)(N))) +#endif + +/* Shifts */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shl_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpshlb ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shl_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpshlw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shl_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpshld ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shl_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpshlq ((__v2di)__A, (__v2di)__B); +} + + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sha_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpshab ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sha_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpshaw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sha_epi32(__m128i __A, __m128i 
__B)
+{
+  return (__m128i) __builtin_ia32_vpshad ((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sha_epi64(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpshaq ((__v2di)__A, (__v2di)__B);
+}
+
+/* Compare and Predicate Generation
+   pcom (integer, unsigned bytes) */
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comlt_epu8(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomltub ((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comle_epu8(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomleub ((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comgt_epu8(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomgtub ((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comge_epu8(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomgeub ((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comeq_epu8(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomequb ((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comneq_epu8(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomnequb ((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comfalse_epu8(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomfalseub ((__v16qi)__A, (__v16qi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comtrue_epu8(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomtrueub ((__v16qi)__A, (__v16qi)__B);
+}
+
+/* pcom (integer, unsigned words) */
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comlt_epu16(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomltuw ((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comle_epu16(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomleuw ((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comgt_epu16(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomgtuw ((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comge_epu16(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomgeuw ((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comeq_epu16(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomequw ((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comneq_epu16(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomnequw ((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comfalse_epu16(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomfalseuw ((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comtrue_epu16(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomtrueuw ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* pcom (integer, unsigned double words) */
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comlt_epu32(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomltud ((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comle_epu32(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomleud ((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comgt_epu32(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomgtud ((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comge_epu32(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomgeud ((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comeq_epu32(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomequd ((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comneq_epu32(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomnequd ((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comfalse_epu32(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomfalseud ((__v4si)__A, (__v4si)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comtrue_epu32(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomtrueud ((__v4si)__A, (__v4si)__B);
+}
+
+/* pcom (integer, unsigned quad words) */
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comlt_epu64(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomltuq ((__v2di)__A, (__v2di)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comle_epu64(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomleuq ((__v2di)__A, (__v2di)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comgt_epu64(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomgtuq ((__v2di)__A, (__v2di)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comge_epu64(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomgeuq ((__v2di)__A, (__v2di)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comeq_epu64(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomequq ((__v2di)__A, (__v2di)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comneq_epu64(__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpcomnequq ((__v2di)__A, (__v2di)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_comfalse_epu64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomfalseuq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comtrue_epu64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomtrueuq ((__v2di)__A, (__v2di)__B); +} + +/*pcom (integer, signed bytes) */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comlt_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomltb ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comle_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomleb ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comgt_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgtb ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comge_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgeb ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comeq_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomeqb ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comneq_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomneqb ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comfalse_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomfalseb ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comtrue_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomtrueb ((__v16qi)__A, (__v16qi)__B); +} + +/*pcom (integer, signed words) */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comlt_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomltw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comle_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomlew ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comgt_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgtw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comge_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgew ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comeq_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomeqw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comneq_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomneqw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_comfalse_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomfalsew ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comtrue_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomtruew ((__v8hi)__A, (__v8hi)__B); +} + +/*pcom (integer, signed double words) */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comlt_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomltd ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comle_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomled ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comgt_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgtd ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comge_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomged ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comeq_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomeqd ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comneq_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomneqd ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comfalse_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomfalsed ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comtrue_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomtrued ((__v4si)__A, (__v4si)__B); +} + +/*pcom (integer, signed quad words) */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comlt_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomltq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comle_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomleq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comgt_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgtq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comge_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgeq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comeq_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomeqq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comneq_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomneqq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_comfalse_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomfalseq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comtrue_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomtrueq ((__v2di)__A, (__v2di)__B); +} + +/* FRCZ */ + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_frcz_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_vfrczps ((__v4sf)__A); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_frcz_pd (__m128d __A) +{ + return (__m128d) __builtin_ia32_vfrczpd ((__v2df)__A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_frcz_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_vfrczss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_frcz_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_vfrczsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_frcz_ps (__m256 __A) +{ + return (__m256) __builtin_ia32_vfrczps256 ((__v8sf)__A); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_frcz_pd (__m256d __A) +{ + return (__m256d) __builtin_ia32_vfrczpd256 ((__v4df)__A); +} + +/* PERMIL2 */ + +#ifdef __OPTIMIZE__ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permute2_pd (__m128d __X, __m128d __Y, __m128i __C, const int __I) +{ + return (__m128d) __builtin_ia32_vpermil2pd ((__v2df)__X, + (__v2df)__Y, + (__v2di)__C, + __I); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permute2_pd (__m256d __X, __m256d __Y, __m256i __C, const int __I) +{ + return (__m256d) __builtin_ia32_vpermil2pd256 ((__v4df)__X, + (__v4df)__Y, + (__v4di)__C, + __I); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permute2_ps (__m128 __X, __m128 __Y, __m128i __C, const int __I) +{ + return (__m128) __builtin_ia32_vpermil2ps ((__v4sf)__X, + (__v4sf)__Y, + (__v4si)__C, + __I); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permute2_ps (__m256 __X, __m256 __Y, __m256i __C, const int __I) +{ + return (__m256) __builtin_ia32_vpermil2ps256 ((__v8sf)__X, + (__v8sf)__Y, + (__v8si)__C, + __I); +} +#else +#define _mm_permute2_pd(X, Y, C, I) \ + ((__m128d) __builtin_ia32_vpermil2pd ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), \ + (__v2di)(__m128d)(C), \ + (int)(I))) + +#define _mm256_permute2_pd(X, Y, C, I) \ + ((__m256d) __builtin_ia32_vpermil2pd256 ((__v4df)(__m256d)(X), \ + (__v4df)(__m256d)(Y), \ + (__v4di)(__m256d)(C), \ + (int)(I))) + +#define _mm_permute2_ps(X, Y, C, I) \ + ((__m128) __builtin_ia32_vpermil2ps ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), \ + (__v4si)(__m128)(C), \ + (int)(I))) + +#define _mm256_permute2_ps(X, Y, C, I) \ + ((__m256) __builtin_ia32_vpermil2ps256 ((__v8sf)(__m256)(X), \ + (__v8sf)(__m256)(Y), \ + (__v8si)(__m256)(C), \ + (int)(I))) +#endif /* __OPTIMIZE__ */ + +#endif /* __XOP__ */ + +#endif /* _XOPMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-linux-android/4.6/libgcc.a b/lib/gcc/i686-linux-android/4.6/libgcc.a Binary files differnew file mode 100644 index 
0000000..3287f3f --- /dev/null +++ b/lib/gcc/i686-linux-android/4.6/libgcc.a diff --git a/lib/gcc/i686-linux-android/4.6/libgcov.a b/lib/gcc/i686-linux-android/4.6/libgcov.a Binary files differnew file mode 100644 index 0000000..b9eab40