Diffstat (limited to 'src/cmd/compile/internal')
-rw-r--r--  src/cmd/compile/internal/abi/abiutils.go  386
-rw-r--r--  src/cmd/compile/internal/amd64/ssa.go  21
-rw-r--r--  src/cmd/compile/internal/arm/galign.go  2
-rw-r--r--  src/cmd/compile/internal/arm/ssa.go  4
-rw-r--r--  src/cmd/compile/internal/arm64/ssa.go  4
-rw-r--r--  src/cmd/compile/internal/base/base.go  53
-rw-r--r--  src/cmd/compile/internal/base/debug.go  13
-rw-r--r--  src/cmd/compile/internal/base/flag.go  64
-rw-r--r--  src/cmd/compile/internal/base/hashdebug.go  32
-rw-r--r--  src/cmd/compile/internal/base/mapfile_mmap.go  2
-rw-r--r--  src/cmd/compile/internal/base/mapfile_read.go  2
-rw-r--r--  src/cmd/compile/internal/base/print.go  5
-rw-r--r--  src/cmd/compile/internal/compare/compare.go  45
-rw-r--r--  src/cmd/compile/internal/compare/compare_test.go  155
-rw-r--r--  src/cmd/compile/internal/coverage/cover.go  45
-rw-r--r--  src/cmd/compile/internal/deadcode/deadcode.go  247
-rw-r--r--  src/cmd/compile/internal/devirtualize/devirtualize.go  66
-rw-r--r--  src/cmd/compile/internal/devirtualize/pgo.go  514
-rw-r--r--  src/cmd/compile/internal/devirtualize/pgo_test.go  217
-rw-r--r--  src/cmd/compile/internal/dwarfgen/dwarf.go  23
-rw-r--r--  src/cmd/compile/internal/dwarfgen/dwinl.go  25
-rw-r--r--  src/cmd/compile/internal/dwarfgen/scope_test.go  5
-rw-r--r--  src/cmd/compile/internal/escape/assign.go  14
-rw-r--r--  src/cmd/compile/internal/escape/call.go  380
-rw-r--r--  src/cmd/compile/internal/escape/desugar.go  37
-rw-r--r--  src/cmd/compile/internal/escape/escape.go  115
-rw-r--r--  src/cmd/compile/internal/escape/expr.go  10
-rw-r--r--  src/cmd/compile/internal/escape/graph.go  94
-rw-r--r--  src/cmd/compile/internal/escape/leaks.go  54
-rw-r--r--  src/cmd/compile/internal/escape/solve.go  132
-rw-r--r--  src/cmd/compile/internal/escape/stmt.go  11
-rw-r--r--  src/cmd/compile/internal/escape/utils.go  2
-rw-r--r--  src/cmd/compile/internal/gc/compile.go  14
-rw-r--r--  src/cmd/compile/internal/gc/export.go  4
-rw-r--r--  src/cmd/compile/internal/gc/main.go  160
-rw-r--r--  src/cmd/compile/internal/gc/obj.go  72
-rw-r--r--  src/cmd/compile/internal/gc/util.go  55
-rw-r--r--  src/cmd/compile/internal/importer/gcimporter.go  64
-rw-r--r--  src/cmd/compile/internal/importer/gcimporter_test.go  10
-rw-r--r--  src/cmd/compile/internal/importer/iimport.go  6
-rw-r--r--  src/cmd/compile/internal/inline/inl.go  679
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/actualexprpropbits_string.go  58
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/analyze.go  370
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/analyze_func_callsites.go  413
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go  356
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/analyze_func_params.go  355
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/analyze_func_returns.go  277
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/callsite.go  149
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/cspropbits_string.go  56
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/debugflags_test.go  65
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/dumpscores_test.go  109
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/eclassify.go  247
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/funcprop_string.go  44
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/funcpropbits_string.go  58
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/funcprops_test.go  530
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/function_properties.go  98
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/names.go  129
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/parampropbits_string.go  70
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/pstate_string.go  30
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/resultpropbits_string.go  68
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/score_callresult_uses.go  413
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/scoreadjusttyp_string.go  80
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/scoring.go  751
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/serialize.go  80
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/testdata/dumpscores.go  45
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/testdata/props/README.txt  77
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/testdata/props/acrosscall.go  214
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/testdata/props/calls.go  240
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/testdata/props/funcflags.go  341
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/testdata/props/params.go  367
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/testdata/props/returns.go  370
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/testdata/props/returns2.go  231
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/texpr_classify_test.go  217
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/trace_off.go  18
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/trace_on.go  40
-rw-r--r--  src/cmd/compile/internal/inline/inlheur/tserial_test.go  65
-rw-r--r--  src/cmd/compile/internal/inline/interleaved/interleaved.go  132
-rw-r--r--  src/cmd/compile/internal/ir/abi.go  3
-rw-r--r--  src/cmd/compile/internal/ir/check_reassign_no.go  9
-rw-r--r--  src/cmd/compile/internal/ir/check_reassign_yes.go  9
-rw-r--r--  src/cmd/compile/internal/ir/const.go  67
-rw-r--r--  src/cmd/compile/internal/ir/copy.go  61
-rw-r--r--  src/cmd/compile/internal/ir/expr.go  266
-rw-r--r--  src/cmd/compile/internal/ir/fmt.go  108
-rw-r--r--  src/cmd/compile/internal/ir/func.go  367
-rw-r--r--  src/cmd/compile/internal/ir/func_test.go  82
-rw-r--r--  src/cmd/compile/internal/ir/mknode.go  4
-rw-r--r--  src/cmd/compile/internal/ir/name.go  209
-rw-r--r--  src/cmd/compile/internal/ir/node.go  65
-rw-r--r--  src/cmd/compile/internal/ir/node_gen.go  112
-rw-r--r--  src/cmd/compile/internal/ir/op_string.go  224
-rw-r--r--  src/cmd/compile/internal/ir/package.go  23
-rw-r--r--  src/cmd/compile/internal/ir/reassign_consistency_check.go  46
-rw-r--r--  src/cmd/compile/internal/ir/reassignment.go  205
-rw-r--r--  src/cmd/compile/internal/ir/scc.go  9
-rw-r--r--  src/cmd/compile/internal/ir/sizeof_test.go  4
-rw-r--r--  src/cmd/compile/internal/ir/stmt.go  52
-rw-r--r--  src/cmd/compile/internal/ir/symtab.go  9
-rw-r--r--  src/cmd/compile/internal/ir/type.go  40
-rw-r--r--  src/cmd/compile/internal/ir/val.go  17
-rw-r--r--  src/cmd/compile/internal/liveness/arg.go  6
-rw-r--r--  src/cmd/compile/internal/liveness/plive.go  139
-rw-r--r--  src/cmd/compile/internal/logopt/logopt_test.go  8
-rw-r--r--  src/cmd/compile/internal/loong64/galign.go  2
-rw-r--r--  src/cmd/compile/internal/loong64/ggen.go  23
-rw-r--r--  src/cmd/compile/internal/loong64/ssa.go  210
-rw-r--r--  src/cmd/compile/internal/loopvar/loopvar.go  29
-rw-r--r--  src/cmd/compile/internal/loopvar/loopvar_test.go  135
-rw-r--r--  src/cmd/compile/internal/loopvar/testdata/opt-121.go  43
-rw-r--r--  src/cmd/compile/internal/loopvar/testdata/opt-122.go  43
-rw-r--r--  src/cmd/compile/internal/loopvar/testdata/opt.go  1
-rw-r--r--  src/cmd/compile/internal/mips/ggen.go  6
-rw-r--r--  src/cmd/compile/internal/mips64/ggen.go  6
-rw-r--r--  src/cmd/compile/internal/noder/codes.go  6
-rw-r--r--  src/cmd/compile/internal/noder/decl.go  18
-rw-r--r--  src/cmd/compile/internal/noder/expr.go  55
-rw-r--r--  src/cmd/compile/internal/noder/helpers.go  156
-rw-r--r--  src/cmd/compile/internal/noder/import.go  5
-rw-r--r--  src/cmd/compile/internal/noder/irgen.go  89
-rw-r--r--  src/cmd/compile/internal/noder/linker.go  15
-rw-r--r--  src/cmd/compile/internal/noder/noder.go  3
-rw-r--r--  src/cmd/compile/internal/noder/quirks.go  2
-rw-r--r--  src/cmd/compile/internal/noder/reader.go  878
-rw-r--r--  src/cmd/compile/internal/noder/sizes.go  185
-rw-r--r--  src/cmd/compile/internal/noder/stmt.go  30
-rw-r--r--  src/cmd/compile/internal/noder/types.go  2
-rw-r--r--  src/cmd/compile/internal/noder/unified.go  152
-rw-r--r--  src/cmd/compile/internal/noder/writer.go  398
-rw-r--r--  src/cmd/compile/internal/objw/objw.go  17
-rw-r--r--  src/cmd/compile/internal/objw/prog.go  48
-rw-r--r--  src/cmd/compile/internal/pgo/internal/graph/graph.go  10
-rw-r--r--  src/cmd/compile/internal/pgo/irgraph.go  339
-rw-r--r--  src/cmd/compile/internal/pkginit/init.go  112
-rw-r--r--  src/cmd/compile/internal/pkginit/initAsanGlobals.go  15
-rw-r--r--  src/cmd/compile/internal/pkginit/initorder.go  369
-rw-r--r--  src/cmd/compile/internal/ppc64/ssa.go  65
-rw-r--r--  src/cmd/compile/internal/rangefunc/rangefunc_test.go  1297
-rw-r--r--  src/cmd/compile/internal/rangefunc/rewrite.go  1334
-rw-r--r--  src/cmd/compile/internal/reflectdata/alg.go  84
-rw-r--r--  src/cmd/compile/internal/reflectdata/helpers.go  14
-rw-r--r--  src/cmd/compile/internal/reflectdata/reflect.go  698
-rw-r--r--  src/cmd/compile/internal/riscv64/ssa.go  16
-rw-r--r--  src/cmd/compile/internal/rttype/rttype.go  283
-rw-r--r--  src/cmd/compile/internal/ssa/_gen/AMD64.rules  140
-rw-r--r--  src/cmd/compile/internal/ssa/_gen/AMD64Ops.go  67
-rw-r--r--  src/cmd/compile/internal/ssa/_gen/ARM.rules  58
-rw-r--r--  src/cmd/compile/internal/ssa/_gen/ARM64.rules  176
-rw-r--r--  src/cmd/compile/internal/ssa/_gen/ARM64Ops.go  9
-rw-r--r--  src/cmd/compile/internal/ssa/_gen/LOONG64.rules  88
-rw-r--r--  src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go  74
-rw-r--r--  src/cmd/compile/internal/ssa/_gen/MIPS64.rules  12
-rw-r--r--  src/cmd/compile/internal/ssa/_gen/PPC64.rules  16
-rw-r--r--  src/cmd/compile/internal/ssa/_gen/PPC64Ops.go  88
-rw-r--r--  src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules  36
-rw-r--r--  src/cmd/compile/internal/ssa/_gen/RISCV64.rules  253
-rw-r--r--  src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go  31
-rw-r--r--  src/cmd/compile/internal/ssa/_gen/S390X.rules  8
-rw-r--r--  src/cmd/compile/internal/ssa/_gen/allocators.go  4
-rwxr-xr-x  src/cmd/compile/internal/ssa/_gen/cover.bash  2
-rw-r--r--  src/cmd/compile/internal/ssa/_gen/dec.rules  108
-rw-r--r--  src/cmd/compile/internal/ssa/_gen/generic.rules  45
-rw-r--r--  src/cmd/compile/internal/ssa/_gen/genericOps.go  10
-rw-r--r--  src/cmd/compile/internal/ssa/_gen/rulegen.go  2
-rw-r--r--  src/cmd/compile/internal/ssa/addressingmodes.go  11
-rw-r--r--  src/cmd/compile/internal/ssa/bench_test.go  18
-rw-r--r--  src/cmd/compile/internal/ssa/block.go  31
-rw-r--r--  src/cmd/compile/internal/ssa/check.go  23
-rw-r--r--  src/cmd/compile/internal/ssa/compile.go  8
-rw-r--r--  src/cmd/compile/internal/ssa/config.go  19
-rw-r--r--  src/cmd/compile/internal/ssa/cse_test.go  5
-rw-r--r--  src/cmd/compile/internal/ssa/deadcode.go  9
-rw-r--r--  src/cmd/compile/internal/ssa/deadstore.go  66
-rw-r--r--  src/cmd/compile/internal/ssa/debug.go  21
-rw-r--r--  src/cmd/compile/internal/ssa/debug_lines_test.go  2
-rw-r--r--  src/cmd/compile/internal/ssa/expand_calls.go  2411
-rw-r--r--  src/cmd/compile/internal/ssa/export_test.go  36
-rw-r--r--  src/cmd/compile/internal/ssa/func.go  43
-rw-r--r--  src/cmd/compile/internal/ssa/func_test.go  4
-rw-r--r--  src/cmd/compile/internal/ssa/fuse.go  2
-rw-r--r--  src/cmd/compile/internal/ssa/fuse_test.go  2
-rw-r--r--  src/cmd/compile/internal/ssa/html.go  4
-rw-r--r--  src/cmd/compile/internal/ssa/lca.go  2
-rw-r--r--  src/cmd/compile/internal/ssa/loopbce.go  15
-rw-r--r--  src/cmd/compile/internal/ssa/loopreschedchecks.go  2
-rw-r--r--  src/cmd/compile/internal/ssa/magic.go  2
-rw-r--r--  src/cmd/compile/internal/ssa/memcombine.go  87
-rw-r--r--  src/cmd/compile/internal/ssa/nilcheck.go  42
-rw-r--r--  src/cmd/compile/internal/ssa/nilcheck_test.go  4
-rw-r--r--  src/cmd/compile/internal/ssa/numberlines.go  2
-rw-r--r--  src/cmd/compile/internal/ssa/op.go  8
-rw-r--r--  src/cmd/compile/internal/ssa/opGen.go  1187
-rw-r--r--  src/cmd/compile/internal/ssa/poset.go  4
-rw-r--r--  src/cmd/compile/internal/ssa/prove.go  176
-rw-r--r--  src/cmd/compile/internal/ssa/regalloc.go  4
-rw-r--r--  src/cmd/compile/internal/ssa/regalloc_test.go  15
-rw-r--r--  src/cmd/compile/internal/ssa/rewrite.go  92
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteAMD64.go  994
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteARM.go  124
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteARM64.go  968
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteLOONG64.go  217
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteMIPS64.go  98
-rw-r--r--  src/cmd/compile/internal/ssa/rewritePPC64.go  380
-rw-r--r--  src/cmd/compile/internal/ssa/rewritePPC64latelower.go  531
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteRISCV64.go  878
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteS390X.go  32
-rw-r--r--  src/cmd/compile/internal/ssa/rewritedec.go  666
-rw-r--r--  src/cmd/compile/internal/ssa/rewritegeneric.go  169
-rw-r--r--  src/cmd/compile/internal/ssa/sccp.go  585
-rw-r--r--  src/cmd/compile/internal/ssa/sccp_test.go  95
-rw-r--r--  src/cmd/compile/internal/ssa/schedule.go  23
-rw-r--r--  src/cmd/compile/internal/ssa/stackalloc.go  2
-rw-r--r--  src/cmd/compile/internal/ssa/stackframe.go  10
-rw-r--r--  src/cmd/compile/internal/ssa/value.go  39
-rw-r--r--  src/cmd/compile/internal/ssa/writebarrier.go  25
-rw-r--r--  src/cmd/compile/internal/ssagen/abi.go  31
-rw-r--r--  src/cmd/compile/internal/ssagen/nowb.go  23
-rw-r--r--  src/cmd/compile/internal/ssagen/pgen.go  51
-rw-r--r--  src/cmd/compile/internal/ssagen/ssa.go  840
-rw-r--r--  src/cmd/compile/internal/staticdata/data.go  27
-rw-r--r--  src/cmd/compile/internal/staticinit/sched.go  239
-rw-r--r--  src/cmd/compile/internal/syntax/nodes.go  11
-rw-r--r--  src/cmd/compile/internal/syntax/nodes_test.go  5
-rw-r--r--  src/cmd/compile/internal/syntax/parser.go  68
-rw-r--r--  src/cmd/compile/internal/syntax/parser_test.go  19
-rw-r--r--  src/cmd/compile/internal/syntax/printer.go  2
-rw-r--r--  src/cmd/compile/internal/syntax/printer_test.go  17
-rw-r--r--  src/cmd/compile/internal/syntax/testdata/issue23434.go  4
-rw-r--r--  src/cmd/compile/internal/syntax/testdata/issue31092.go  2
-rw-r--r--  src/cmd/compile/internal/syntax/testdata/issue43527.go  14
-rw-r--r--  src/cmd/compile/internal/syntax/testdata/issue63835.go  9
-rw-r--r--  src/cmd/compile/internal/syntax/testdata/map2.go  12
-rw-r--r--  src/cmd/compile/internal/syntax/testdata/tparams.go  15
-rw-r--r--  src/cmd/compile/internal/syntax/testdata/typeset.go  18
-rw-r--r--  src/cmd/compile/internal/syntax/tokens.go  4
-rw-r--r--  src/cmd/compile/internal/syntax/type.go  36
-rw-r--r--  src/cmd/compile/internal/test/abiutils_test.go  41
-rw-r--r--  src/cmd/compile/internal/test/abiutilsaux_test.go  7
-rw-r--r--  src/cmd/compile/internal/test/iface_test.go  12
-rw-r--r--  src/cmd/compile/internal/test/inl_test.go  17
-rw-r--r--  src/cmd/compile/internal/test/logic_test.go  4
-rw-r--r--  src/cmd/compile/internal/test/math_test.go  4
-rw-r--r--  src/cmd/compile/internal/test/memcombine_test.go  126
-rw-r--r--  src/cmd/compile/internal/test/pgo_devirtualize_test.go  181
-rw-r--r--  src/cmd/compile/internal/test/pgo_inl_test.go  101
-rw-r--r--  src/cmd/compile/internal/test/ssa_test.go  2
-rw-r--r--  src/cmd/compile/internal/test/switch_test.go  159
-rw-r--r--  src/cmd/compile/internal/test/test.go  4
-rw-r--r--  src/cmd/compile/internal/test/testdata/arith_test.go  66
-rw-r--r--  src/cmd/compile/internal/test/testdata/ctl_test.go  2
-rw-r--r--  src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.go  207
-rw-r--r--  src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.pprof  bin 699 -> 1345 bytes
-rw-r--r--  src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt_test.go  50
-rw-r--r--  src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult.pkg/mult.go  72
-rw-r--r--  src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult/mult.go  32
-rw-r--r--  src/cmd/compile/internal/typebits/typebits.go  2
-rw-r--r--  src/cmd/compile/internal/typecheck/_builtin/coverage.go  1
-rw-r--r--  src/cmd/compile/internal/typecheck/_builtin/runtime.go  24
-rw-r--r--  src/cmd/compile/internal/typecheck/builtin.go  284
-rw-r--r--  src/cmd/compile/internal/typecheck/const.go  182
-rw-r--r--  src/cmd/compile/internal/typecheck/dcl.go  264
-rw-r--r--  src/cmd/compile/internal/typecheck/export.go  67
-rw-r--r--  src/cmd/compile/internal/typecheck/expr.go  114
-rw-r--r--  src/cmd/compile/internal/typecheck/func.go  152
-rw-r--r--  src/cmd/compile/internal/typecheck/iexport.go  136
-rw-r--r--  src/cmd/compile/internal/typecheck/stmt.go  328
-rw-r--r--  src/cmd/compile/internal/typecheck/subr.go  157
-rw-r--r--  src/cmd/compile/internal/typecheck/syms.go  30
-rw-r--r--  src/cmd/compile/internal/typecheck/typecheck.go  447
-rw-r--r--  src/cmd/compile/internal/typecheck/universe.go  45
-rw-r--r--  src/cmd/compile/internal/types/alg.go  4
-rw-r--r--  src/cmd/compile/internal/types/fmt.go  155
-rw-r--r--  src/cmd/compile/internal/types/goversion.go  6
-rw-r--r--  src/cmd/compile/internal/types/identity.go  28
-rw-r--r--  src/cmd/compile/internal/types/pkg.go  37
-rw-r--r--  src/cmd/compile/internal/types/scope.go  11
-rw-r--r--  src/cmd/compile/internal/types/size.go  236
-rw-r--r--  src/cmd/compile/internal/types/sizeof_test.go  4
-rw-r--r--  src/cmd/compile/internal/types/type.go  446
-rw-r--r--  src/cmd/compile/internal/types2/alias.go  88
-rw-r--r--  src/cmd/compile/internal/types2/api.go  110
-rw-r--r--  src/cmd/compile/internal/types2/api_predicates.go  84
-rw-r--r--  src/cmd/compile/internal/types2/api_test.go  234
-rw-r--r--  src/cmd/compile/internal/types2/assignments.go  57
-rw-r--r--  src/cmd/compile/internal/types2/builtins.go  16
-rw-r--r--  src/cmd/compile/internal/types2/call.go  37
-rw-r--r--  src/cmd/compile/internal/types2/check.go  113
-rw-r--r--  src/cmd/compile/internal/types2/check_test.go  74
-rw-r--r--  src/cmd/compile/internal/types2/context.go  2
-rw-r--r--  src/cmd/compile/internal/types2/conversions.go  15
-rw-r--r--  src/cmd/compile/internal/types2/decl.go  82
-rw-r--r--  src/cmd/compile/internal/types2/errorcalls_test.go  2
-rw-r--r--  src/cmd/compile/internal/types2/errors.go  4
-rw-r--r--  src/cmd/compile/internal/types2/expr.go  67
-rw-r--r--  src/cmd/compile/internal/types2/gcsizes.go  170
-rw-r--r--  src/cmd/compile/internal/types2/index.go  4
-rw-r--r--  src/cmd/compile/internal/types2/infer.go  38
-rw-r--r--  src/cmd/compile/internal/types2/instantiate.go  39
-rw-r--r--  src/cmd/compile/internal/types2/interface.go  9
-rw-r--r--  src/cmd/compile/internal/types2/issues_test.go  129
-rw-r--r--  src/cmd/compile/internal/types2/lookup.go  38
-rw-r--r--  src/cmd/compile/internal/types2/mono.go  2
-rw-r--r--  src/cmd/compile/internal/types2/named.go  26
-rw-r--r--  src/cmd/compile/internal/types2/object.go  8
-rw-r--r--  src/cmd/compile/internal/types2/operand.go  4
-rw-r--r--  src/cmd/compile/internal/types2/predicates.go  40
-rw-r--r--  src/cmd/compile/internal/types2/resolver.go  61
-rw-r--r--  src/cmd/compile/internal/types2/return.go  2
-rw-r--r--  src/cmd/compile/internal/types2/selection.go  38
-rw-r--r--  src/cmd/compile/internal/types2/signature.go  35
-rw-r--r--  src/cmd/compile/internal/types2/sizes.go  23
-rw-r--r--  src/cmd/compile/internal/types2/sizes_test.go  59
-rw-r--r--  src/cmd/compile/internal/types2/stdlib_test.go  7
-rw-r--r--  src/cmd/compile/internal/types2/stmt.go  205
-rw-r--r--  src/cmd/compile/internal/types2/struct.go  2
-rw-r--r--  src/cmd/compile/internal/types2/subst.go  7
-rw-r--r--  src/cmd/compile/internal/types2/typeparam.go  8
-rw-r--r--  src/cmd/compile/internal/types2/typeset.go  5
-rw-r--r--  src/cmd/compile/internal/types2/typestring.go  11
-rw-r--r--  src/cmd/compile/internal/types2/typexpr.go  84
-rw-r--r--  src/cmd/compile/internal/types2/under.go  2
-rw-r--r--  src/cmd/compile/internal/types2/unify.go  29
-rw-r--r--  src/cmd/compile/internal/types2/union.go  6
-rw-r--r--  src/cmd/compile/internal/types2/universe.go  2
-rw-r--r--  src/cmd/compile/internal/types2/util_test.go  7
-rw-r--r--  src/cmd/compile/internal/types2/validtype.go  4
-rw-r--r--  src/cmd/compile/internal/types2/version.go  129
-rw-r--r--  src/cmd/compile/internal/types2/version_test.go  24
-rw-r--r--  src/cmd/compile/internal/walk/assign.go  48
-rw-r--r--  src/cmd/compile/internal/walk/builtin.go  105
-rw-r--r--  src/cmd/compile/internal/walk/closure.go  15
-rw-r--r--  src/cmd/compile/internal/walk/complit.go  16
-rw-r--r--  src/cmd/compile/internal/walk/convert.go  87
-rw-r--r--  src/cmd/compile/internal/walk/expr.go  190
-rw-r--r--  src/cmd/compile/internal/walk/order.go  116
-rw-r--r--  src/cmd/compile/internal/walk/race.go  34
-rw-r--r--  src/cmd/compile/internal/walk/range.go  77
-rw-r--r--  src/cmd/compile/internal/walk/select.go  27
-rw-r--r--  src/cmd/compile/internal/walk/stmt.go  15
-rw-r--r--  src/cmd/compile/internal/walk/switch.go  479
-rw-r--r--  src/cmd/compile/internal/walk/temp.go  4
-rw-r--r--  src/cmd/compile/internal/walk/walk.go  34
341 files changed, 27516 insertions, 13430 deletions
diff --git a/src/cmd/compile/internal/abi/abiutils.go b/src/cmd/compile/internal/abi/abiutils.go
index 71fbb88888..607d462493 100644
--- a/src/cmd/compile/internal/abi/abiutils.go
+++ b/src/cmd/compile/internal/abi/abiutils.go
@@ -8,8 +8,10 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
+ "cmd/internal/obj"
"cmd/internal/src"
"fmt"
+ "math"
"sync"
)
@@ -96,7 +98,7 @@ type RegIndex uint8
// (as described above), not architected registers.
type ABIParamAssignment struct {
Type *types.Type
- Name types.Object // should always be *ir.Name, used to match with a particular ssa.OpArg.
+ Name *ir.Name
Registers []RegIndex
offset int32
}
@@ -173,7 +175,7 @@ func appendParamTypes(rts []*types.Type, t *types.Type) []*types.Type {
rts = appendParamTypes(rts, t.Elem())
}
case types.TSTRUCT:
- for _, f := range t.FieldSlice() {
+ for _, f := range t.Fields() {
if f.Type.Size() > 0 { // embedded zero-width types receive no registers
rts = appendParamTypes(rts, f.Type)
}
@@ -212,7 +214,7 @@ func appendParamOffsets(offsets []int64, at int64, t *types.Type) ([]int64, int6
offsets, at = appendParamOffsets(offsets, at, t.Elem())
}
case types.TSTRUCT:
- for i, f := range t.FieldSlice() {
+ for i, f := range t.Fields() {
offsets, at = appendParamOffsets(offsets, at, f.Type)
if f.Type.Size() == 0 && i == t.NumFields()-1 {
at++ // last field has zero width
@@ -258,166 +260,131 @@ type RegAmounts struct {
// by the ABI rules for parameter passing and result returning.
type ABIConfig struct {
// Do we need anything more than this?
- offsetForLocals int64 // e.g., obj.(*Link).Arch.FixedFrameSize -- extra linkage information on some architectures.
- regAmounts RegAmounts
- regsForTypeCache map[*types.Type]int
+ offsetForLocals int64 // e.g., obj.(*Link).Arch.FixedFrameSize -- extra linkage information on some architectures.
+ regAmounts RegAmounts
+ which obj.ABI
}
// NewABIConfig returns a new ABI configuration for an architecture with
// iRegsCount integer/pointer registers and fRegsCount floating point registers.
-func NewABIConfig(iRegsCount, fRegsCount int, offsetForLocals int64) *ABIConfig {
- return &ABIConfig{offsetForLocals: offsetForLocals, regAmounts: RegAmounts{iRegsCount, fRegsCount}, regsForTypeCache: make(map[*types.Type]int)}
+func NewABIConfig(iRegsCount, fRegsCount int, offsetForLocals int64, which uint8) *ABIConfig {
+ return &ABIConfig{offsetForLocals: offsetForLocals, regAmounts: RegAmounts{iRegsCount, fRegsCount}, which: obj.ABI(which)}
}
-// Copy returns a copy of an ABIConfig for use in a function's compilation so that access to the cache does not need to be protected with a mutex.
-func (a *ABIConfig) Copy() *ABIConfig {
- b := *a
- b.regsForTypeCache = make(map[*types.Type]int)
- return &b
+// Copy returns config.
+//
+// TODO(mdempsky): Remove.
+func (config *ABIConfig) Copy() *ABIConfig {
+ return config
+}
+
+// Which returns the ABI number
+func (config *ABIConfig) Which() obj.ABI {
+ return config.which
}
// LocalsOffset returns the architecture-dependent offset from SP for args and results.
// In theory this is only used for debugging; it ought to already be incorporated into
// results from the ABI-related methods
-func (a *ABIConfig) LocalsOffset() int64 {
- return a.offsetForLocals
+func (config *ABIConfig) LocalsOffset() int64 {
+ return config.offsetForLocals
}
// FloatIndexFor translates r into an index in the floating point parameter
// registers. If the result is negative, the input index was actually for the
// integer parameter registers.
-func (a *ABIConfig) FloatIndexFor(r RegIndex) int64 {
- return int64(r) - int64(a.regAmounts.intRegs)
-}
-
-// NumParamRegs returns the number of parameter registers used for a given type,
-// without regard for the number available.
-func (a *ABIConfig) NumParamRegs(t *types.Type) int {
- var n int
- if n, ok := a.regsForTypeCache[t]; ok {
- return n
- }
-
- if t.IsScalar() || t.IsPtrShaped() {
- if t.IsComplex() {
- n = 2
- } else {
- n = (int(t.Size()) + types.RegSize - 1) / types.RegSize
- }
- } else {
- typ := t.Kind()
- switch typ {
- case types.TARRAY:
- n = a.NumParamRegs(t.Elem()) * int(t.NumElem())
- case types.TSTRUCT:
- for _, f := range t.FieldSlice() {
- n += a.NumParamRegs(f.Type)
- }
- case types.TSLICE:
- n = a.NumParamRegs(synthSlice)
- case types.TSTRING:
- n = a.NumParamRegs(synthString)
- case types.TINTER:
- n = a.NumParamRegs(synthIface)
- }
- }
- a.regsForTypeCache[t] = n
-
- return n
+func (config *ABIConfig) FloatIndexFor(r RegIndex) int64 {
+ return int64(r) - int64(config.regAmounts.intRegs)
}
-// preAllocateParams gets the slice sizes right for inputs and outputs.
-func (a *ABIParamResultInfo) preAllocateParams(hasRcvr bool, nIns, nOuts int) {
- if hasRcvr {
- nIns++
+// NumParamRegs returns the total number of registers used to
+// represent a parameter of the given type, which must be register
+// assignable.
+func (config *ABIConfig) NumParamRegs(typ *types.Type) int {
+ intRegs, floatRegs := typ.Registers()
+ if intRegs == math.MaxUint8 && floatRegs == math.MaxUint8 {
+ base.Fatalf("cannot represent parameters of type %v in registers", typ)
}
- a.inparams = make([]ABIParamAssignment, 0, nIns)
- a.outparams = make([]ABIParamAssignment, 0, nOuts)
+ return int(intRegs) + int(floatRegs)
}
-// ABIAnalyzeTypes takes an optional receiver type, arrays of ins and outs, and returns an ABIParamResultInfo,
+// ABIAnalyzeTypes takes slices of parameter and result types, and returns an ABIParamResultInfo,
// based on the given configuration. This is the same result computed by config.ABIAnalyze applied to the
// corresponding method/function type, except that all the embedded parameter names are nil.
// This is intended for use by ssagen/ssa.go:(*state).rtcall, for runtime functions that lack a parsed function type.
-func (config *ABIConfig) ABIAnalyzeTypes(rcvr *types.Type, ins, outs []*types.Type) *ABIParamResultInfo {
+func (config *ABIConfig) ABIAnalyzeTypes(params, results []*types.Type) *ABIParamResultInfo {
setup()
s := assignState{
stackOffset: config.offsetForLocals,
rTotal: config.regAmounts,
}
- result := &ABIParamResultInfo{config: config}
- result.preAllocateParams(rcvr != nil, len(ins), len(outs))
- // Receiver
- if rcvr != nil {
- result.inparams = append(result.inparams,
- s.assignParamOrReturn(rcvr, nil, false))
+ assignParams := func(params []*types.Type, isResult bool) []ABIParamAssignment {
+ res := make([]ABIParamAssignment, len(params))
+ for i, param := range params {
+ res[i] = s.assignParam(param, nil, isResult)
+ }
+ return res
}
+ info := &ABIParamResultInfo{config: config}
+
// Inputs
- for _, t := range ins {
- result.inparams = append(result.inparams,
- s.assignParamOrReturn(t, nil, false))
- }
+ info.inparams = assignParams(params, false)
s.stackOffset = types.RoundUp(s.stackOffset, int64(types.RegSize))
- result.inRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
+ info.inRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
// Outputs
s.rUsed = RegAmounts{}
- for _, t := range outs {
- result.outparams = append(result.outparams, s.assignParamOrReturn(t, nil, true))
- }
+ info.outparams = assignParams(results, true)
// The spill area is at a register-aligned offset and its size is rounded up to a register alignment.
// TODO in theory could align offset only to minimum required by spilled data types.
- result.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize)
- result.spillAreaSize = alignTo(s.spillOffset, types.RegSize)
- result.outRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
+ info.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize)
+ info.spillAreaSize = alignTo(s.spillOffset, types.RegSize)
+ info.outRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
- return result
+ return info
}
// ABIAnalyzeFuncType takes a function type 'ft' and an ABI rules description
// 'config' and analyzes the function to determine how its parameters
// and results will be passed (in registers or on the stack), returning
// an ABIParamResultInfo object that holds the results of the analysis.
-func (config *ABIConfig) ABIAnalyzeFuncType(ft *types.Func) *ABIParamResultInfo {
+func (config *ABIConfig) ABIAnalyzeFuncType(ft *types.Type) *ABIParamResultInfo {
setup()
s := assignState{
stackOffset: config.offsetForLocals,
rTotal: config.regAmounts,
}
- result := &ABIParamResultInfo{config: config}
- result.preAllocateParams(ft.Receiver != nil, ft.Params.NumFields(), ft.Results.NumFields())
- // Receiver
- // TODO(register args) ? seems like "struct" and "fields" is not right anymore for describing function parameters
- if ft.Receiver != nil && ft.Receiver.NumFields() != 0 {
- r := ft.Receiver.FieldSlice()[0]
- result.inparams = append(result.inparams,
- s.assignParamOrReturn(r.Type, r.Nname, false))
+ assignParams := func(params []*types.Field, isResult bool) []ABIParamAssignment {
+ res := make([]ABIParamAssignment, len(params))
+ for i, param := range params {
+ var name *ir.Name
+ if param.Nname != nil {
+ name = param.Nname.(*ir.Name)
+ }
+ res[i] = s.assignParam(param.Type, name, isResult)
+ }
+ return res
}
+ info := &ABIParamResultInfo{config: config}
+
// Inputs
- ifsl := ft.Params.FieldSlice()
- for _, f := range ifsl {
- result.inparams = append(result.inparams,
- s.assignParamOrReturn(f.Type, f.Nname, false))
- }
+ info.inparams = assignParams(ft.RecvParams(), false)
s.stackOffset = types.RoundUp(s.stackOffset, int64(types.RegSize))
- result.inRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
+ info.inRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
// Outputs
s.rUsed = RegAmounts{}
- ofsl := ft.Results.FieldSlice()
- for _, f := range ofsl {
- result.outparams = append(result.outparams, s.assignParamOrReturn(f.Type, f.Nname, true))
- }
+ info.outparams = assignParams(ft.Results(), true)
// The spill area is at a register-aligned offset and its size is rounded up to a register alignment.
// TODO in theory could align offset only to minimum required by spilled data types.
- result.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize)
- result.spillAreaSize = alignTo(s.spillOffset, types.RegSize)
- result.outRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
- return result
+ info.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize)
+ info.spillAreaSize = alignTo(s.spillOffset, types.RegSize)
+ info.outRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
+ return info
}
// ABIAnalyze returns the same result as ABIAnalyzeFuncType, but also
@@ -428,38 +395,31 @@ func (config *ABIConfig) ABIAnalyzeFuncType(ft *types.Func) *ABIParamResultInfo
// outputs because their frame location transitions from BOGUS_FUNARG_OFFSET
// to zero to an as-if-AUTO offset that has no use for callers.
func (config *ABIConfig) ABIAnalyze(t *types.Type, setNname bool) *ABIParamResultInfo {
- ft := t.FuncType()
- result := config.ABIAnalyzeFuncType(ft)
+ result := config.ABIAnalyzeFuncType(t)
// Fill in the frame offsets for receiver, inputs, results
- k := 0
- if t.NumRecvs() != 0 {
- config.updateOffset(result, ft.Receiver.FieldSlice()[0], result.inparams[0], false, setNname)
- k++
- }
- for i, f := range ft.Params.FieldSlice() {
- config.updateOffset(result, f, result.inparams[k+i], false, setNname)
+ for i, f := range t.RecvParams() {
+ config.updateOffset(result, f, result.inparams[i], false, setNname)
}
- for i, f := range ft.Results.FieldSlice() {
+ for i, f := range t.Results() {
config.updateOffset(result, f, result.outparams[i], true, setNname)
}
return result
}
-func (config *ABIConfig) updateOffset(result *ABIParamResultInfo, f *types.Field, a ABIParamAssignment, isReturn, setNname bool) {
+func (config *ABIConfig) updateOffset(result *ABIParamResultInfo, f *types.Field, a ABIParamAssignment, isResult, setNname bool) {
+ if f.Offset != types.BADWIDTH {
+ base.Fatalf("field offset for %s at %s has been set to %d", f.Sym, base.FmtPos(f.Pos), f.Offset)
+ }
+
// Everything except return values in registers has either a frame home (if not in a register) or a frame spill location.
- if !isReturn || len(a.Registers) == 0 {
+ if !isResult || len(a.Registers) == 0 {
// The type frame offset DOES NOT show effects of minimum frame size.
// Getting this wrong breaks stackmaps, see liveness/plive.go:WriteFuncMap and typebits/typebits.go:Set
off := a.FrameOffset(result)
- fOffset := f.Offset
- if fOffset == types.BOGUS_FUNARG_OFFSET {
- if setNname && f.Nname != nil {
- f.Nname.(*ir.Name).SetFrameOffset(off)
- f.Nname.(*ir.Name).SetIsOutputParamInRegisters(false)
- }
- } else {
- base.Fatalf("field offset for %s at %s has been set to %d", f.Sym.Name, base.FmtPos(f.Pos), fOffset)
+ if setNname && f.Nname != nil {
+ f.Nname.(*ir.Name).SetFrameOffset(off)
+ f.Nname.(*ir.Name).SetIsOutputParamInRegisters(false)
}
} else {
if setNname && f.Nname != nil {
@@ -524,7 +484,6 @@ func (ri *ABIParamResultInfo) String() string {
type assignState struct {
rTotal RegAmounts // total reg amounts from ABI rules
rUsed RegAmounts // regs used by params completely assigned so far
- pUsed RegAmounts // regs used by the current param (or pieces therein)
stackOffset int64 // current stack offset
spillOffset int64 // current spill offset
}
@@ -542,12 +501,11 @@ func alignTo(a int64, t int) int64 {
return types.RoundUp(a, int64(t))
}
-// stackSlot returns a stack offset for a param or result of the
-// specified type.
-func (state *assignState) stackSlot(t *types.Type) int64 {
- rv := align(state.stackOffset, t)
- state.stackOffset = rv + t.Size()
- return rv
+// nextSlot allocates the next available slot for typ.
+func nextSlot(offsetp *int64, typ *types.Type) int64 {
+ offset := align(*offsetp, typ)
+ *offsetp = offset + typ.Size()
+ return offset
}
// allocateRegs returns an ordered list of register indices for a parameter or result
@@ -585,7 +543,7 @@ func (state *assignState) allocateRegs(regs []RegIndex, t *types.Type) []RegInde
}
return regs
case types.TSTRUCT:
- for _, f := range t.FieldSlice() {
+ for _, f := range t.Fields() {
regs = state.allocateRegs(regs, f.Type)
}
return regs
@@ -601,105 +559,6 @@ func (state *assignState) allocateRegs(regs []RegIndex, t *types.Type) []RegInde
panic("unreachable")
}
-// regAllocate creates a register ABIParamAssignment object for a param
-// or result with the specified type, as a final step (this assumes
-// that all of the safety/suitability analysis is complete).
-func (state *assignState) regAllocate(t *types.Type, name types.Object, isReturn bool) ABIParamAssignment {
- spillLoc := int64(-1)
- if !isReturn {
- // Spill for register-resident t must be aligned for storage of a t.
- spillLoc = align(state.spillOffset, t)
- state.spillOffset = spillLoc + t.Size()
- }
- return ABIParamAssignment{
- Type: t,
- Name: name,
- Registers: state.allocateRegs([]RegIndex{}, t),
- offset: int32(spillLoc),
- }
-}
-
-// stackAllocate creates a stack memory ABIParamAssignment object for
-// a param or result with the specified type, as a final step (this
-// assumes that all of the safety/suitability analysis is complete).
-func (state *assignState) stackAllocate(t *types.Type, name types.Object) ABIParamAssignment {
- return ABIParamAssignment{
- Type: t,
- Name: name,
- offset: int32(state.stackSlot(t)),
- }
-}
-
-// intUsed returns the number of integer registers consumed
-// at a given point within an assignment stage.
-func (state *assignState) intUsed() int {
- return state.rUsed.intRegs + state.pUsed.intRegs
-}
-
-// floatUsed returns the number of floating point registers consumed at
-// a given point within an assignment stage.
-func (state *assignState) floatUsed() int {
- return state.rUsed.floatRegs + state.pUsed.floatRegs
-}
-
-// regassignIntegral examines a param/result of integral type 't' to
-// determines whether it can be register-assigned. Returns TRUE if we
-// can register allocate, FALSE otherwise (and updates state
-// accordingly).
-func (state *assignState) regassignIntegral(t *types.Type) bool {
- regsNeeded := int(types.RoundUp(t.Size(), int64(types.PtrSize)) / int64(types.PtrSize))
- if t.IsComplex() {
- regsNeeded = 2
- }
-
- // Floating point and complex.
- if t.IsFloat() || t.IsComplex() {
- if regsNeeded+state.floatUsed() > state.rTotal.floatRegs {
- // not enough regs
- return false
- }
- state.pUsed.floatRegs += regsNeeded
- return true
- }
-
- // Non-floating point
- if regsNeeded+state.intUsed() > state.rTotal.intRegs {
- // not enough regs
- return false
- }
- state.pUsed.intRegs += regsNeeded
- return true
-}
-
-// regassignArray processes an array type (or array component within some
-// other enclosing type) to determine if it can be register assigned.
-// Returns TRUE if we can register allocate, FALSE otherwise.
-func (state *assignState) regassignArray(t *types.Type) bool {
-
- nel := t.NumElem()
- if nel == 0 {
- return true
- }
- if nel > 1 {
- // Not an array of length 1: stack assign
- return false
- }
- // Visit element
- return state.regassign(t.Elem())
-}
-
-// regassignStruct processes a struct type (or struct component within
-// some other enclosing type) to determine if it can be register
-// assigned. Returns TRUE if we can register allocate, FALSE otherwise.
-func (state *assignState) regassignStruct(t *types.Type) bool {
- for _, field := range t.FieldSlice() {
- if !state.regassign(field.Type) {
- return false
- }
- }
- return true
-}
-
// synthOnce ensures that we only create the synth* fake types once.
var synthOnce sync.Once
@@ -737,47 +596,42 @@ func setup() {
})
}
-// regassign examines a given param type (or component within some
-// composite) to determine if it can be register assigned. Returns
-// TRUE if we can register allocate, FALSE otherwise.
-func (state *assignState) regassign(pt *types.Type) bool {
- typ := pt.Kind()
- if pt.IsScalar() || pt.IsPtrShaped() {
- return state.regassignIntegral(pt)
- }
- switch typ {
- case types.TARRAY:
- return state.regassignArray(pt)
- case types.TSTRUCT:
- return state.regassignStruct(pt)
- case types.TSLICE:
- return state.regassignStruct(synthSlice)
- case types.TSTRING:
- return state.regassignStruct(synthString)
- case types.TINTER:
- return state.regassignStruct(synthIface)
- default:
- base.Fatalf("not expected")
- panic("unreachable")
- }
-}
-
-// assignParamOrReturn processes a given receiver, param, or result
+// assignParam processes a given receiver, param, or result
// of field f to determine whether it can be register assigned.
// The result of the analysis is recorded in the result
// ABIParamResultInfo held in 'state'.
-func (state *assignState) assignParamOrReturn(pt *types.Type, n types.Object, isReturn bool) ABIParamAssignment {
- state.pUsed = RegAmounts{}
- if pt.Size() == types.BADWIDTH {
- base.Fatalf("should never happen")
- panic("unreachable")
- } else if pt.Size() == 0 {
- return state.stackAllocate(pt, n)
- } else if state.regassign(pt) {
- return state.regAllocate(pt, n, isReturn)
- } else {
- return state.stackAllocate(pt, n)
+func (state *assignState) assignParam(typ *types.Type, name *ir.Name, isResult bool) ABIParamAssignment {
+ registers := state.tryAllocRegs(typ)
+
+ var offset int64 = -1
+ if registers == nil { // stack allocated; needs stack slot
+ offset = nextSlot(&state.stackOffset, typ)
+ } else if !isResult { // register-allocated param; needs spill slot
+ offset = nextSlot(&state.spillOffset, typ)
}
+
+ return ABIParamAssignment{
+ Type: typ,
+ Name: name,
+ Registers: registers,
+ offset: int32(offset),
+ }
+}
+
+// tryAllocRegs attempts to allocate registers to represent a
+// parameter of the given type. If unsuccessful, it returns nil.
+func (state *assignState) tryAllocRegs(typ *types.Type) []RegIndex {
+ if typ.Size() == 0 {
+ return nil // zero-size parameters are defined as being stack allocated
+ }
+
+ intRegs, floatRegs := typ.Registers()
+ if int(intRegs) > state.rTotal.intRegs-state.rUsed.intRegs || int(floatRegs) > state.rTotal.floatRegs-state.rUsed.floatRegs {
+ return nil // too few available registers
+ }
+
+ regs := make([]RegIndex, 0, int(intRegs)+int(floatRegs))
+ return state.allocateRegs(regs, typ)
}
// ComputePadding returns a list of "post element" padding values in
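For context on the abiutils change above: the new assignParam/tryAllocRegs pair replaces the old regassign* family with a simpler flow — ask the type how many registers it needs, hand out registers only if enough remain, and otherwise fall back to a stack slot (register-assigned inputs still reserve a spill slot). The standalone sketch below illustrates only that control flow; types, names, and sizes are invented, and alignment and per-field recursion are deliberately omitted.

// Minimal sketch (not the compiler's API): greedy register assignment with
// stack fallback, mirroring the shape of assignParam/tryAllocRegs above.
package main

import "fmt"

type param struct {
	name      string
	intRegs   int // registers the type would need, as typ.Registers() reports
	floatRegs int
	size      int64
}

type assigner struct {
	intAvail, floatAvail int   // remaining registers
	stackOffset          int64 // next free stack slot (alignment omitted)
	spillOffset          int64 // next free spill slot
}

// assign places p in registers when enough remain; otherwise it falls back to
// the stack. Register-assigned inputs still reserve a spill slot; results do not.
func (a *assigner) assign(p param, isResult bool) string {
	if p.size == 0 || p.intRegs > a.intAvail || p.floatRegs > a.floatAvail {
		off := a.stackOffset
		a.stackOffset += p.size
		return fmt.Sprintf("%s: stack@%d", p.name, off)
	}
	a.intAvail -= p.intRegs
	a.floatAvail -= p.floatRegs
	if isResult {
		return fmt.Sprintf("%s: %d int/%d float regs", p.name, p.intRegs, p.floatRegs)
	}
	off := a.spillOffset
	a.spillOffset += p.size
	return fmt.Sprintf("%s: %d int/%d float regs, spill@%d", p.name, p.intRegs, p.floatRegs, off)
}

func main() {
	a := assigner{intAvail: 2, floatAvail: 2}
	for _, p := range []param{
		{name: "x", intRegs: 1, size: 8},
		{name: "y", intRegs: 2, size: 16}, // needs 2 int regs, only 1 left: stack
		{name: "z", intRegs: 1, size: 8},  // still fits in the last register
	} {
		fmt.Println(a.assign(p, false))
	}
}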
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index 113875861c..ab762c24f6 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -252,7 +252,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
ssa.OpAMD64RORQ, ssa.OpAMD64RORL, ssa.OpAMD64RORW, ssa.OpAMD64RORB,
ssa.OpAMD64ADDSS, ssa.OpAMD64ADDSD, ssa.OpAMD64SUBSS, ssa.OpAMD64SUBSD,
ssa.OpAMD64MULSS, ssa.OpAMD64MULSD, ssa.OpAMD64DIVSS, ssa.OpAMD64DIVSD,
- ssa.OpAMD64PXOR,
+ ssa.OpAMD64MINSS, ssa.OpAMD64MINSD,
+ ssa.OpAMD64POR, ssa.OpAMD64PXOR,
ssa.OpAMD64BTSL, ssa.OpAMD64BTSQ,
ssa.OpAMD64BTCL, ssa.OpAMD64BTCQ,
ssa.OpAMD64BTRL, ssa.OpAMD64BTRQ:
@@ -713,9 +714,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Offset = v.AuxInt
case ssa.OpAMD64BTLconst, ssa.OpAMD64BTQconst,
ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst,
- ssa.OpAMD64BTSLconst, ssa.OpAMD64BTSQconst,
- ssa.OpAMD64BTCLconst, ssa.OpAMD64BTCQconst,
- ssa.OpAMD64BTRLconst, ssa.OpAMD64BTRQconst:
+ ssa.OpAMD64BTSQconst,
+ ssa.OpAMD64BTCQconst,
+ ssa.OpAMD64BTRQconst:
op := v.Op
if op == ssa.OpAMD64BTQconst && v.AuxInt < 32 {
// Emit 32-bit version because it's shorter
@@ -850,7 +851,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
}
fallthrough
case ssa.OpAMD64ANDQconstmodify, ssa.OpAMD64ANDLconstmodify, ssa.OpAMD64ORQconstmodify, ssa.OpAMD64ORLconstmodify,
- ssa.OpAMD64XORQconstmodify, ssa.OpAMD64XORLconstmodify:
+ ssa.OpAMD64XORQconstmodify, ssa.OpAMD64XORLconstmodify,
+ ssa.OpAMD64BTSQconstmodify, ssa.OpAMD64BTRQconstmodify, ssa.OpAMD64BTCQconstmodify:
sc := v.AuxValAndOff()
off := sc.Off64()
val := sc.Val64()
@@ -1187,6 +1189,15 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.To, v)
+ case ssa.OpAMD64SETEQstoreidx1, ssa.OpAMD64SETNEstoreidx1,
+ ssa.OpAMD64SETLstoreidx1, ssa.OpAMD64SETLEstoreidx1,
+ ssa.OpAMD64SETGstoreidx1, ssa.OpAMD64SETGEstoreidx1,
+ ssa.OpAMD64SETBstoreidx1, ssa.OpAMD64SETBEstoreidx1,
+ ssa.OpAMD64SETAstoreidx1, ssa.OpAMD64SETAEstoreidx1:
+ p := s.Prog(v.Op.Asm())
+ memIdx(&p.To, v)
+ ssagen.AddAux(&p.To, v)
+
case ssa.OpAMD64SETNEF:
t := v.RegTmp()
p := s.Prog(v.Op.Asm())
diff --git a/src/cmd/compile/internal/arm/galign.go b/src/cmd/compile/internal/arm/galign.go
index 23e52bacbf..43d811832e 100644
--- a/src/cmd/compile/internal/arm/galign.go
+++ b/src/cmd/compile/internal/arm/galign.go
@@ -15,7 +15,7 @@ func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &arm.Linkarm
arch.REGSP = arm.REGSP
arch.MAXWIDTH = (1 << 32) - 1
- arch.SoftFloat = buildcfg.GOARM == 5
+ arch.SoftFloat = buildcfg.GOARM.SoftFloat
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go
index 7fcbb4d024..638ed3ed4e 100644
--- a/src/cmd/compile/internal/arm/ssa.go
+++ b/src/cmd/compile/internal/arm/ssa.go
@@ -289,7 +289,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
case ssa.OpARMANDconst, ssa.OpARMBICconst:
// try to optimize ANDconst and BICconst to BFC, which saves bytes and ticks
// BFC is only available on ARMv7, and its result and source are in the same register
- if buildcfg.GOARM == 7 && v.Reg() == v.Args[0].Reg() {
+ if buildcfg.GOARM.Version == 7 && v.Reg() == v.Args[0].Reg() {
var val uint32
if v.Op == ssa.OpARMANDconst {
val = ^uint32(v.AuxInt)
@@ -646,7 +646,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
default:
}
}
- if buildcfg.GOARM >= 6 {
+ if buildcfg.GOARM.Version >= 6 {
// generate more efficient "MOVB/MOVBU/MOVH/MOVHU Reg@>0, Reg" on ARMv6 & ARMv7
genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, 0)
return
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
index a0b432bd97..27b4e881c0 100644
--- a/src/cmd/compile/internal/arm64/ssa.go
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -215,6 +215,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
ssa.OpARM64FNMULD,
ssa.OpARM64FDIVS,
ssa.OpARM64FDIVD,
+ ssa.OpARM64FMINS,
+ ssa.OpARM64FMIND,
+ ssa.OpARM64FMAXS,
+ ssa.OpARM64FMAXD,
ssa.OpARM64ROR,
ssa.OpARM64RORW:
r := v.Reg()
diff --git a/src/cmd/compile/internal/base/base.go b/src/cmd/compile/internal/base/base.go
index 8caeb41d2f..ee3772c5ca 100644
--- a/src/cmd/compile/internal/base/base.go
+++ b/src/cmd/compile/internal/base/base.go
@@ -107,7 +107,7 @@ func AdjustStartingHeap(requestedHeapGoal uint64) {
// interface. Instead, live is estimated by knowing the adjusted value of
// GOGC and the new heap goal following a GC (this requires knowing that
// at least one GC has occurred):
- // estLive = 100 * newGoal / (100 + currentGogc)]
+ // estLive = 100 * newGoal / (100 + currentGogc)
// this new value of GOGC
// newGogc = 100*requestedHeapGoal/estLive - 100
// will result in the desired goal. The logging code checks that the
@@ -219,54 +219,3 @@ func AdjustStartingHeap(requestedHeapGoal uint64) {
forEachGC(adjustFunc)
}
-
-func Compiling(pkgs []string) bool {
- if Ctxt.Pkgpath != "" {
- for _, p := range pkgs {
- if Ctxt.Pkgpath == p {
- return true
- }
- }
- }
-
- return false
-}
-
-// The racewalk pass is currently handled in three parts.
-//
-// First, for flag_race, it inserts calls to racefuncenter and
-// racefuncexit at the start and end (respectively) of each
-// function. This is handled below.
-//
-// Second, during buildssa, it inserts appropriate instrumentation
-// calls immediately before each memory load or store. This is handled
-// by the (*state).instrument method in ssa.go, so here we just set
-// the Func.InstrumentBody flag as needed. For background on why this
-// is done during SSA construction rather than a separate SSA pass,
-// see issue #19054.
-//
-// Third we remove calls to racefuncenter and racefuncexit, for leaf
-// functions without instrumented operations. This is done as part of
-// ssa opt pass via special rule.
-
-// TODO(dvyukov): do not instrument initialization as writes:
-// a := make([]int, 10)
-
-// Do not instrument the following packages at all,
-// at best instrumentation would cause infinite recursion.
-var NoInstrumentPkgs = []string{
- "runtime/internal/atomic",
- "runtime/internal/math",
- "runtime/internal/sys",
- "runtime/internal/syscall",
- "runtime",
- "runtime/race",
- "runtime/msan",
- "runtime/asan",
- "internal/cpu",
- "internal/abi",
-}
-
-// Don't insert racefuncenter/racefuncexit into the following packages.
-// Memory accesses in the packages are either uninteresting or will cause false positives.
-var NoRacePkgs = []string{"sync", "sync/atomic"}
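The comment corrected in the base.go hunk above describes how AdjustStartingHeap estimates live data from the post-GC heap goal and re-derives GOGC. A short worked example of that arithmetic, with invented numbers, follows; it only illustrates the two formulas and is not compiler code.

// Worked example of estLive = 100*newGoal/(100+currentGogc) and
// newGogc = 100*requestedHeapGoal/estLive - 100 (illustrative numbers only).
package main

import "fmt"

func main() {
	requestedHeapGoal := 512.0 // goal passed to AdjustStartingHeap (say, MB)
	currentGogc := 100.0       // GOGC in effect when the GC ran
	newGoal := 128.0           // heap goal reported after that GC

	estLive := 100 * newGoal / (100 + currentGogc) // 64: estimated live data
	newGogc := 100*requestedHeapGoal/estLive - 100 // 700: GOGC that yields the requested goal

	// With GOGC=700, the next goal is estLive*(1+700/100) = 64*8 = 512, as requested.
	fmt.Printf("estLive=%.0f newGogc=%.0f\n", estLive, newGogc)
}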
diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go
index 1f05ed9831..420ad1305e 100644
--- a/src/cmd/compile/internal/base/debug.go
+++ b/src/cmd/compile/internal/base/debug.go
@@ -21,8 +21,13 @@ type DebugFlags struct {
Closure int `help:"print information about closure compilation"`
Defer int `help:"print information about defer compilation"`
DisableNil int `help:"disable nil checks" concurrent:"ok"`
+ DumpInlFuncProps string `help:"dump function properties from inl heuristics to specified file"`
+ DumpInlCallSiteScores int `help:"dump scored callsites during inlining"`
+ InlScoreAdj string `help:"set inliner score adjustments (ex: -d=inlscoreadj=panicPathAdj:10/passConstToNestedIfAdj:-90)"`
+ InlBudgetSlack int `help:"amount to expand the initial inline budget when new inliner enabled. Defaults to 80 if option not set." concurrent:"ok"`
DumpPtrs int `help:"show Node pointers values in dump output"`
DwarfInl int `help:"print information about DWARF inlined function creation"`
+ EscapeMutationsCalls int `help:"print extra escape analysis diagnostics about mutations and calls" concurrent:"ok"`
Export int `help:"print export data"`
Fmahash string `help:"hash value for use in debugging platform-dependent multiply-add use" concurrent:"ok"`
GCAdjust int `help:"log adjustments to GOGC" concurrent:"ok"`
@@ -31,11 +36,11 @@ type DebugFlags struct {
Gossahash string `help:"hash value for use in debugging the compiler"`
InlFuncsWithClosures int `help:"allow functions with closures to be inlined" concurrent:"ok"`
InlStaticInit int `help:"allow static initialization of inlined calls" concurrent:"ok"`
- InterfaceCycles int `help:"allow anonymous interface cycles"`
Libfuzzer int `help:"enable coverage instrumentation for libfuzzer"`
LoopVar int `help:"shared (0, default), 1 (private loop variables), 2, private + log"`
LoopVarHash string `help:"for debugging changes in loop behavior. Overrides experiment and loopvar flag."`
LocationLists int `help:"print information about DWARF location list creation"`
+ MaxShapeLen int `help:"hash shape names longer than this threshold (default 500)" concurrent:"ok"`
Nil int `help:"print information about nil checks"`
NoOpenDefer int `help:"disable open-coded defers" concurrent:"ok"`
NoRefName int `help:"do not include referenced symbol names in object file" concurrent:"ok"`
@@ -45,18 +50,22 @@ type DebugFlags struct {
Shapify int `help:"print information about shaping recursive types"`
Slice int `help:"print information about slice compilation"`
SoftFloat int `help:"force compiler to emit soft-float code" concurrent:"ok"`
+ StaticCopy int `help:"print information about missed static copies" concurrent:"ok"`
SyncFrames int `help:"how many writer stack frames to include at sync points in unified export data"`
TypeAssert int `help:"print information about type assertion inlining"`
WB int `help:"print information about write barriers"`
ABIWrap int `help:"print information about ABI wrapper generation"`
MayMoreStack string `help:"call named function before all stack growth checks" concurrent:"ok"`
PGODebug int `help:"debug profile-guided optimizations"`
+ PGOHash string `help:"hash value for debugging profile-guided optimizations" concurrent:"ok"`
PGOInline int `help:"enable profile-guided inlining" concurrent:"ok"`
PGOInlineCDFThreshold string `help:"cumulative threshold percentage for determining call sites as hot candidates for inlining" concurrent:"ok"`
PGOInlineBudget int `help:"inline budget for hot functions" concurrent:"ok"`
- PGODevirtualize int `help:"enable profile-guided devirtualization" concurrent:"ok"`
+ PGODevirtualize int `help:"enable profile-guided devirtualization; 0 to disable, 1 to enable interface devirtualization, 2 to enable function devirtualization" concurrent:"ok"`
+ RangeFuncCheck int `help:"insert code to check behavior of range iterator functions" concurrent:"ok"`
WrapGlobalMapDbg int `help:"debug trace output for global map init wrapping"`
WrapGlobalMapCtl int `help:"global map init wrap control (0 => default, 1 => off, 2 => stress mode, no size cutoff)"`
+ ZeroCopy int `help:"enable zero-copy string->[]byte conversions" concurrent:"ok"`
ConcurrentOk bool // true if only concurrentOk flags seen
}
diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go
index 753a60ae1e..a3144f8fb4 100644
--- a/src/cmd/compile/internal/base/flag.go
+++ b/src/cmd/compile/internal/base/flag.go
@@ -5,11 +5,11 @@
package base
import (
+ "cmd/internal/cov/covcmd"
"encoding/json"
"flag"
"fmt"
"internal/buildcfg"
- "internal/coverage"
"internal/platform"
"log"
"os"
@@ -98,6 +98,7 @@ type CmdFlags struct {
DwarfLocationLists *bool "help:\"add location lists to DWARF in optimized mode\"" // &Ctxt.Flag_locationlists, set below
Dynlink *bool "help:\"support references to Go symbols defined in other shared libraries\"" // &Ctxt.Flag_dynlink, set below
EmbedCfg func(string) "help:\"read go:embed configuration from `file`\""
+ Env func(string) "help:\"add `definition` of the form key=value to environment\""
GenDwarfInl int "help:\"generate DWARF inline info records\"" // 0=disabled, 1=funcs, 2=funcs+formals/locals
GoVersion string "help:\"required version of the runtime\""
ImportCfg func(string) "help:\"read import configuration from `file`\""
@@ -132,17 +133,25 @@ type CmdFlags struct {
Patterns map[string][]string
Files map[string]string
}
- ImportDirs []string // appended to by -I
- ImportMap map[string]string // set by -importcfg
- PackageFile map[string]string // set by -importcfg; nil means not in use
- CoverageInfo *coverage.CoverFixupConfig // set by -coveragecfg
- SpectreIndex bool // set by -spectre=index or -spectre=all
+ ImportDirs []string // appended to by -I
+ ImportMap map[string]string // set by -importcfg
+ PackageFile map[string]string // set by -importcfg; nil means not in use
+ CoverageInfo *covcmd.CoverFixupConfig // set by -coveragecfg
+ SpectreIndex bool // set by -spectre=index or -spectre=all
// Whether we are adding any sort of code instrumentation, such as
// when the race detector is enabled.
Instrumenting bool
}
}
+func addEnv(s string) {
+ i := strings.Index(s, "=")
+ if i < 0 {
+ log.Fatal("-env argument must be of the form key=value")
+ }
+ os.Setenv(s[:i], s[i+1:])
+}
+
// ParseFlags parses the command-line flags into Flag.
func ParseFlags() {
Flag.I = addImportDir
@@ -158,6 +167,7 @@ func ParseFlags() {
*Flag.DwarfLocationLists = true
Flag.Dynlink = &Ctxt.Flag_dynlink
Flag.EmbedCfg = readEmbedCfg
+ Flag.Env = addEnv
Flag.GenDwarfInl = 2
Flag.ImportCfg = readImportCfg
Flag.CoverageCfg = readCoverageCfg
@@ -166,11 +176,14 @@ func ParseFlags() {
Flag.WB = true
Debug.ConcurrentOk = true
+ Debug.MaxShapeLen = 500
Debug.InlFuncsWithClosures = 1
Debug.InlStaticInit = 1
Debug.PGOInline = 1
- Debug.PGODevirtualize = 1
+ Debug.PGODevirtualize = 2
Debug.SyncFrames = -1 // disable sync markers by default
+ Debug.ZeroCopy = 1
+ Debug.RangeFuncCheck = 1
Debug.Checkptr = -1 // so we can tell whether it is set explicitly
@@ -190,6 +203,12 @@ func ParseFlags() {
hashDebug = NewHashDebug("gossahash", Debug.Gossahash, nil)
}
+ // Compute whether we're compiling the runtime from the package path. Test
+ // code can also use the flag to set this explicitly.
+ if Flag.Std && objabi.LookupPkgSpecial(Ctxt.Pkgpath).Runtime {
+ Flag.CompilingRuntime = true
+ }
+
// Three inputs govern loop iteration variable rewriting, hash, experiment, flag.
// The loop variable rewriting is:
// IF non-empty hash, then hash determines behavior (function+line match) (*)
@@ -238,6 +257,9 @@ func ParseFlags() {
if Debug.Fmahash != "" {
FmaHash = NewHashDebug("fmahash", Debug.Fmahash, nil)
}
+ if Debug.PGOHash != "" {
+ PGOHash = NewHashDebug("pgohash", Debug.PGOHash, nil)
+ }
if Flag.MSan && !platform.MSanSupported(buildcfg.GOOS, buildcfg.GOARCH) {
log.Fatalf("%s/%s does not support -msan", buildcfg.GOOS, buildcfg.GOARCH)
@@ -306,9 +328,6 @@ func ParseFlags() {
}
}
- if Flag.CompilingRuntime && Flag.N != 0 {
- log.Fatal("cannot disable optimizations while compiling runtime")
- }
if Flag.LowerC < 1 {
log.Fatalf("-c must be at least 1, got %d", Flag.LowerC)
}
@@ -317,6 +336,11 @@ func ParseFlags() {
}
if Flag.CompilingRuntime {
+ // It is not possible to build the runtime with no optimizations,
+ // because the compiler cannot eliminate enough write barriers.
+ Flag.N = 0
+ Ctxt.Flag_optimize = true
+
// Runtime can't use -d=checkptr, at least not yet.
Debug.Checkptr = 0
@@ -470,26 +494,22 @@ func readImportCfg(file string) {
continue
}
- var verb, args string
- if i := strings.Index(line, " "); i < 0 {
- verb = line
- } else {
- verb, args = line[:i], strings.TrimSpace(line[i+1:])
- }
- var before, after string
- if i := strings.Index(args, "="); i >= 0 {
- before, after = args[:i], args[i+1:]
+ verb, args, found := strings.Cut(line, " ")
+ if found {
+ args = strings.TrimSpace(args)
}
+ before, after, hasEq := strings.Cut(args, "=")
+
switch verb {
default:
log.Fatalf("%s:%d: unknown directive %q", file, lineNum, verb)
case "importmap":
- if before == "" || after == "" {
+ if !hasEq || before == "" || after == "" {
log.Fatalf(`%s:%d: invalid importmap: syntax is "importmap old=new"`, file, lineNum)
}
Flag.Cfg.ImportMap[before] = after
case "packagefile":
- if before == "" || after == "" {
+ if !hasEq || before == "" || after == "" {
log.Fatalf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum)
}
Flag.Cfg.PackageFile[before] = after
@@ -498,7 +518,7 @@ func readImportCfg(file string) {
}
func readCoverageCfg(file string) {
- var cfg coverage.CoverFixupConfig
+ var cfg covcmd.CoverFixupConfig
data, err := os.ReadFile(file)
if err != nil {
log.Fatalf("-coveragecfg: %v", err)
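For the readImportCfg rewrite above: strings.Cut replaces the manual strings.Index bookkeeping, and the new hasEq result lets importmap/packagefile lines without any '=' be rejected cleanly. Below is a small standalone sketch of that parsing pattern; the sample input lines are invented, and the real function of course records the mappings instead of printing them.

// Standalone illustration of the strings.Cut-based "verb args" / "before=after"
// parsing used by the updated readImportCfg (sample lines are invented).
package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, line := range []string{
		"importmap old/path=new/path",
		"packagefile fmt=/tmp/fmt.a",
		"packagefile fmt", // no '=': now rejected via hasEq
	} {
		verb, args, found := strings.Cut(line, " ")
		if found {
			args = strings.TrimSpace(args)
		}
		before, after, hasEq := strings.Cut(args, "=")
		fmt.Printf("verb=%q before=%q after=%q hasEq=%v\n", verb, before, after, hasEq)
	}
}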
diff --git a/src/cmd/compile/internal/base/hashdebug.go b/src/cmd/compile/internal/base/hashdebug.go
index 167b0df4f0..8342a5b9d9 100644
--- a/src/cmd/compile/internal/base/hashdebug.go
+++ b/src/cmd/compile/internal/base/hashdebug.go
@@ -55,6 +55,7 @@ var hashDebug *HashDebug
var FmaHash *HashDebug // for debugging fused-multiply-add floating point changes
var LoopVarHash *HashDebug // for debugging shared/private loop variable changes
+var PGOHash *HashDebug // for debugging PGO optimization decisions
// DebugHashMatchPkgFunc reports whether debug variable Gossahash
//
@@ -203,7 +204,6 @@ func NewHashDebug(ev, s string, file io.Writer) *HashDebug {
i++
}
return hd
-
}
// TODO: Delete when we switch to bisect-only.
@@ -274,8 +274,36 @@ func (d *HashDebug) MatchPos(pos src.XPos, desc func() string) bool {
}
func (d *HashDebug) matchPos(ctxt *obj.Link, pos src.XPos, note func() string) bool {
+ return d.matchPosWithInfo(ctxt, pos, nil, note)
+}
+
+func (d *HashDebug) matchPosWithInfo(ctxt *obj.Link, pos src.XPos, info any, note func() string) bool {
hash := d.hashPos(ctxt, pos)
- return d.matchAndLog(hash, func() string { return d.fmtPos(ctxt, pos) }, note)
+ if info != nil {
+ hash = bisect.Hash(hash, info)
+ }
+ return d.matchAndLog(hash,
+ func() string {
+ r := d.fmtPos(ctxt, pos)
+ if info != nil {
+ r += fmt.Sprintf(" (%v)", info)
+ }
+ return r
+ },
+ note)
+}
+
+// MatchPosWithInfo is similar to MatchPos, but with additional information
+// that is included for hash computation, so it can distinguish multiple
+// matches on the same source location.
+// Note that the default answer for no environment variable (d == nil)
+// is "yes", do the thing.
+func (d *HashDebug) MatchPosWithInfo(pos src.XPos, info any, desc func() string) bool {
+ if d == nil {
+ return true
+ }
+ // Written this way to make inlining likely.
+ return d.matchPosWithInfo(Ctxt, pos, info, desc)
}
// matchAndLog is the core matcher. It reports whether the hash matches the pattern.
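A conceptual sketch of why matchPosWithInfo mixes the extra info into the position hash: two independent decisions at the same source line must hash to different values so a bisection run can enable one without the other. The sketch below uses hash/fnv purely for illustration; the compiler combines hashes through the internal bisect package, not this code.

package main

import (
	"fmt"
	"hash/fnv"
)

// hashPosWithInfo mimics the idea of hashing a source position plus an
// optional label; it is not the compiler's implementation.
func hashPosWithInfo(pos string, info any) uint64 {
	h := fnv.New64a()
	h.Write([]byte(pos))
	if info != nil {
		fmt.Fprintf(h, "/%v", info)
	}
	return h.Sum64()
}

func main() {
	pos := "pgo.go:137"
	fmt.Println(hashPosWithInfo(pos, nil))      // plain position hash
	fmt.Println(hashPosWithInfo(pos, "devirt")) // distinct hash for the devirt decision
}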
diff --git a/src/cmd/compile/internal/base/mapfile_mmap.go b/src/cmd/compile/internal/base/mapfile_mmap.go
index bbcfda244f..b66c9eb260 100644
--- a/src/cmd/compile/internal/base/mapfile_mmap.go
+++ b/src/cmd/compile/internal/base/mapfile_mmap.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || (solaris && go1.20)
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
package base
diff --git a/src/cmd/compile/internal/base/mapfile_read.go b/src/cmd/compile/internal/base/mapfile_read.go
index c1b84db96f..783f8c4602 100644
--- a/src/cmd/compile/internal/base/mapfile_read.go
+++ b/src/cmd/compile/internal/base/mapfile_read.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !(solaris && go1.20)
+//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris
package base
diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go
index efd70f7cc5..cc36acec4b 100644
--- a/src/cmd/compile/internal/base/print.go
+++ b/src/cmd/compile/internal/base/print.go
@@ -146,11 +146,6 @@ func ErrorfAt(pos src.XPos, code errors.Code, format string, args ...interface{}
}
}
-// ErrorfVers reports that a language feature (format, args) requires a later version of Go.
-func ErrorfVers(lang string, format string, args ...interface{}) {
- Errorf("%s requires %s or later (-lang was set to %s; check go.mod)", fmt.Sprintf(format, args...), lang, Flag.Lang)
-}
-
// UpdateErrorDot is a clumsy hack that rewrites the last error,
// if it was "LINE: undefined: NAME", to be "LINE: undefined: NAME in EXPR".
// It is used to give better error messages for dot (selector) expressions.
diff --git a/src/cmd/compile/internal/compare/compare.go b/src/cmd/compile/internal/compare/compare.go
index 1674065556..e165cd67db 100644
--- a/src/cmd/compile/internal/compare/compare.go
+++ b/src/cmd/compile/internal/compare/compare.go
@@ -70,7 +70,7 @@ func EqCanPanic(t *types.Type) bool {
case types.TARRAY:
return EqCanPanic(t.Elem())
case types.TSTRUCT:
- for _, f := range t.FieldSlice() {
+ for _, f := range t.Fields() {
if !f.Sym.IsBlank() && EqCanPanic(f.Type) {
return true
}
@@ -87,7 +87,7 @@ func EqCanPanic(t *types.Type) bool {
func EqStructCost(t *types.Type) int64 {
cost := int64(0)
- for i, fields := 0, t.FieldSlice(); i < len(fields); {
+ for i, fields := 0, t.Fields(); i < len(fields); {
f := fields[i]
// Skip blank-named fields.
@@ -181,7 +181,7 @@ func EqStruct(t *types.Type, np, nq ir.Node) ([]ir.Node, bool) {
// Walk the struct using memequal for runs of AMEM
// and calling specific equality tests for the others.
- for i, fields := 0, t.FieldSlice(); i < len(fields); {
+ for i, fields := 0, t.Fields(); i < len(fields); {
f := fields[i]
// Skip blank-named fields.
@@ -198,15 +198,15 @@ func EqStruct(t *types.Type, np, nq ir.Node) ([]ir.Node, bool) {
// Enforce ordering by starting a new set of reorderable conditions.
conds = append(conds, []ir.Node{})
}
- p := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym)
- q := ir.NewSelectorExpr(base.Pos, ir.OXDOT, nq, f.Sym)
switch {
case f.Type.IsString():
+ p := typecheck.DotField(base.Pos, typecheck.Expr(np), i)
+ q := typecheck.DotField(base.Pos, typecheck.Expr(nq), i)
eqlen, eqmem := EqString(p, q)
and(eqlen)
and(eqmem)
default:
- and(ir.NewBinaryExpr(base.Pos, ir.OEQ, p, q))
+ and(eqfield(np, nq, i))
}
if typeCanPanic {
// Also enforce ordering after something that can panic.
@@ -219,13 +219,12 @@ func EqStruct(t *types.Type, np, nq ir.Node) ([]ir.Node, bool) {
cost, size, next := eqStructFieldCost(t, i)
if cost <= 4 {
// Cost of 4 or less: use plain field equality.
- s := fields[i:next]
- for _, f := range s {
- and(eqfield(np, nq, ir.OEQ, f.Sym))
+ for j := i; j < next; j++ {
+ and(eqfield(np, nq, j))
}
} else {
// Higher cost: use memequal.
- cc := eqmem(np, nq, f.Sym, size)
+ cc := eqmem(np, nq, i, size)
and(cc)
}
i = next
@@ -295,8 +294,7 @@ func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
cmplen = tlen
}
- fn := typecheck.LookupRuntime("memequal")
- fn = typecheck.SubstArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8])
+ fn := typecheck.LookupRuntime("memequal", types.Types[types.TUINT8], types.Types[types.TUINT8])
call := typecheck.Call(base.Pos, fn, []ir.Node{sptr, tptr, ir.Copy(cmplen)}, false).(*ir.CallExpr)
cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, slen, tlen)
@@ -348,19 +346,18 @@ func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
// eqfield returns the node
//
// p.field == q.field
-func eqfield(p ir.Node, q ir.Node, op ir.Op, field *types.Sym) ir.Node {
- nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)
- ny := ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)
- ne := ir.NewBinaryExpr(base.Pos, op, nx, ny)
- return ne
+func eqfield(p, q ir.Node, field int) ir.Node {
+ nx := typecheck.DotField(base.Pos, typecheck.Expr(p), field)
+ ny := typecheck.DotField(base.Pos, typecheck.Expr(q), field)
+ return typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.OEQ, nx, ny))
}
// eqmem returns the node
//
// memequal(&p.field, &q.field, size)
-func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
- nx := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)))
- ny := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)))
+func eqmem(p, q ir.Node, field int, size int64) ir.Node {
+ nx := typecheck.Expr(typecheck.NodAddr(typecheck.DotField(base.Pos, p, field)))
+ ny := typecheck.Expr(typecheck.NodAddr(typecheck.DotField(base.Pos, q, field)))
fn, needsize := eqmemfunc(size, nx.Type().Elem())
call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
@@ -375,14 +372,10 @@ func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
func eqmemfunc(size int64, t *types.Type) (fn *ir.Name, needsize bool) {
switch size {
- default:
- fn = typecheck.LookupRuntime("memequal")
- needsize = true
case 1, 2, 4, 8, 16:
buf := fmt.Sprintf("memequal%d", int(size)*8)
- fn = typecheck.LookupRuntime(buf)
+ return typecheck.LookupRuntime(buf, t, t), false
}
- fn = typecheck.SubstArgTypes(fn, t, t)
- return fn, needsize
+ return typecheck.LookupRuntime("memequal", t, t), true
}
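A source-level approximation of the comparison EqStruct expands for a small struct, using a hypothetical type T: adjacent small fields are compared as a cheap run (memequal, or plain == below), while a string field gets a length check plus a data comparison via EqString. This Go sketch only mirrors the shape of the generated code; the real output calls runtime memequal helpers.

package main

import "fmt"

type T struct {
	A, B int32  // adjacent small fields: cheap enough for plain field equality
	S    string // string field: length check plus memory comparison
}

// eqT approximates the comparison the compiler generates for T.
func eqT(p, q T) bool {
	return p.A == q.A && p.B == q.B &&
		len(p.S) == len(q.S) && p.S == q.S
}

func main() {
	fmt.Println(eqT(T{1, 2, "x"}, T{1, 2, "x"})) // true
	fmt.Println(eqT(T{1, 2, "x"}, T{1, 3, "x"})) // false
}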
diff --git a/src/cmd/compile/internal/compare/compare_test.go b/src/cmd/compile/internal/compare/compare_test.go
index c65537f64c..2f76165509 100644
--- a/src/cmd/compile/internal/compare/compare_test.go
+++ b/src/cmd/compile/internal/compare/compare_test.go
@@ -28,154 +28,73 @@ func init() {
}
func TestEqStructCost(t *testing.T) {
- newByteField := func(parent *types.Type, offset int64) *types.Field {
- f := types.NewField(src.XPos{}, parent.Sym(), types.ByteType)
- f.Offset = offset
- return f
- }
- newArrayField := func(parent *types.Type, offset int64, len int64, kind types.Kind) *types.Field {
- f := types.NewField(src.XPos{}, parent.Sym(), types.NewArray(types.Types[kind], len))
- // Call Type.Size here to force the size calculation to be done. If not done here the size returned later is incorrect.
- f.Type.Size()
- f.Offset = offset
- return f
- }
- newField := func(parent *types.Type, offset int64, kind types.Kind) *types.Field {
- f := types.NewField(src.XPos{}, parent.Sym(), types.Types[kind])
- f.Offset = offset
- return f
+ repeat := func(n int, typ *types.Type) []*types.Type {
+ typs := make([]*types.Type, n)
+ for i := range typs {
+ typs[i] = typ
+ }
+ return typs
}
+
tt := []struct {
name string
cost int64
nonMergeLoadCost int64
- tfn typefn
+ fieldTypes []*types.Type
}{
- {"struct without fields", 0, 0,
- func() *types.Type {
- return types.NewStruct([]*types.Field{})
- }},
- {"struct with 1 byte field", 1, 1,
- func() *types.Type {
- parent := types.NewStruct([]*types.Field{})
- fields := []*types.Field{
- newByteField(parent, 0),
- }
- parent.SetFields(fields)
- return parent
- },
- },
- {"struct with 8 byte fields", 1, 8,
- func() *types.Type {
- parent := types.NewStruct([]*types.Field{})
- fields := make([]*types.Field, 8)
- for i := range fields {
- fields[i] = newByteField(parent, int64(i))
- }
- parent.SetFields(fields)
- return parent
- },
- },
- {"struct with 16 byte fields", 2, 16,
- func() *types.Type {
- parent := types.NewStruct([]*types.Field{})
- fields := make([]*types.Field, 16)
- for i := range fields {
- fields[i] = newByteField(parent, int64(i))
- }
- parent.SetFields(fields)
- return parent
- },
- },
- {"struct with 32 byte fields", 4, 32,
- func() *types.Type {
- parent := types.NewStruct([]*types.Field{})
- fields := make([]*types.Field, 32)
- for i := range fields {
- fields[i] = newByteField(parent, int64(i))
- }
- parent.SetFields(fields)
- return parent
- },
- },
- {"struct with 2 int32 fields", 1, 2,
- func() *types.Type {
- parent := types.NewStruct([]*types.Field{})
- fields := make([]*types.Field, 2)
- for i := range fields {
- fields[i] = newField(parent, int64(i*4), types.TINT32)
- }
- parent.SetFields(fields)
- return parent
- },
- },
+ {"struct without fields", 0, 0, nil},
+ {"struct with 1 byte field", 1, 1, repeat(1, types.ByteType)},
+ {"struct with 8 byte fields", 1, 8, repeat(8, types.ByteType)},
+ {"struct with 16 byte fields", 2, 16, repeat(16, types.ByteType)},
+ {"struct with 32 byte fields", 4, 32, repeat(32, types.ByteType)},
+ {"struct with 2 int32 fields", 1, 2, repeat(2, types.Types[types.TINT32])},
{"struct with 2 int32 fields and 1 int64", 2, 3,
- func() *types.Type {
- parent := types.NewStruct([]*types.Field{})
- fields := make([]*types.Field, 3)
- fields[0] = newField(parent, int64(0), types.TINT32)
- fields[1] = newField(parent, int64(4), types.TINT32)
- fields[2] = newField(parent, int64(8), types.TINT64)
- parent.SetFields(fields)
- return parent
+ []*types.Type{
+ types.Types[types.TINT32],
+ types.Types[types.TINT32],
+ types.Types[types.TINT64],
},
},
{"struct with 1 int field and 1 string", 3, 3,
- func() *types.Type {
- parent := types.NewStruct([]*types.Field{})
- fields := make([]*types.Field, 2)
- fields[0] = newField(parent, int64(0), types.TINT64)
- fields[1] = newField(parent, int64(8), types.TSTRING)
- parent.SetFields(fields)
- return parent
- },
- },
- {"struct with 2 strings", 4, 4,
- func() *types.Type {
- parent := types.NewStruct([]*types.Field{})
- fields := make([]*types.Field, 2)
- fields[0] = newField(parent, int64(0), types.TSTRING)
- fields[1] = newField(parent, int64(8), types.TSTRING)
- parent.SetFields(fields)
- return parent
+ []*types.Type{
+ types.Types[types.TINT64],
+ types.Types[types.TSTRING],
},
},
+ {"struct with 2 strings", 4, 4, repeat(2, types.Types[types.TSTRING])},
{"struct with 1 large byte array field", 26, 101,
- func() *types.Type {
- parent := types.NewStruct([]*types.Field{})
- fields := []*types.Field{
- newArrayField(parent, 0, 101, types.TUINT16),
- }
- parent.SetFields(fields)
- return parent
+ []*types.Type{
+ types.NewArray(types.Types[types.TUINT16], 101),
},
},
{"struct with string array field", 4, 4,
- func() *types.Type {
- parent := types.NewStruct([]*types.Field{})
- fields := []*types.Field{
- newArrayField(parent, 0, 2, types.TSTRING),
- }
- parent.SetFields(fields)
- return parent
+ []*types.Type{
+ types.NewArray(types.Types[types.TSTRING], 2),
},
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
+ fields := make([]*types.Field, len(tc.fieldTypes))
+ for i, ftyp := range tc.fieldTypes {
+ fields[i] = types.NewField(src.NoXPos, typecheck.LookupNum("f", i), ftyp)
+ }
+ typ := types.NewStruct(fields)
+ types.CalcSize(typ)
+
want := tc.cost
base.Ctxt.Arch.CanMergeLoads = true
- actual := EqStructCost(tc.tfn())
+ actual := EqStructCost(typ)
if actual != want {
- t.Errorf("CanMergeLoads=true EqStructCost(%v) = %d, want %d", tc.tfn, actual, want)
+ t.Errorf("CanMergeLoads=true EqStructCost(%v) = %d, want %d", typ, actual, want)
}
base.Ctxt.Arch.CanMergeLoads = false
want = tc.nonMergeLoadCost
- actual = EqStructCost(tc.tfn())
+ actual = EqStructCost(typ)
if actual != want {
- t.Errorf("CanMergeLoads=false EqStructCost(%v) = %d, want %d", tc.tfn, actual, want)
+ t.Errorf("CanMergeLoads=false EqStructCost(%v) = %d, want %d", typ, actual, want)
}
})
}
diff --git a/src/cmd/compile/internal/coverage/cover.go b/src/cmd/compile/internal/coverage/cover.go
index 3e0350b51a..5320f004da 100644
--- a/src/cmd/compile/internal/coverage/cover.go
+++ b/src/cmd/compile/internal/coverage/cover.go
@@ -22,9 +22,9 @@ import (
"strings"
)
-// Names records state information collected in the first fixup
+// names records state information collected in the first fixup
// phase so that it can be passed to the second fixup phase.
-type Names struct {
+type names struct {
MetaVar *ir.Name
PkgIdVar *ir.Name
InitFn *ir.Func
@@ -32,13 +32,17 @@ type Names struct {
CounterGran coverage.CounterGranularity
}
-// FixupVars is the first of two entry points for coverage compiler
-// fixup. It collects and returns the package ID and meta-data
-// variables being used for this "-cover" build, along with the
-// coverage counter mode and granularity. It also reclassifies selected
-// variables (for example, tagging coverage counter variables with
-// flags so that they can be handled properly downstream).
-func FixupVars() Names {
+// Fixup adds calls to the pkg init function as appropriate to
+// register coverage-related variables with the runtime.
+//
+// It also reclassifies selected variables (for example, tagging
+// coverage counter variables with flags so that they can be handled
+// properly downstream).
+func Fixup() {
+ if base.Flag.Cfg.CoverageInfo == nil {
+ return // not using coverage
+ }
+
metaVarName := base.Flag.Cfg.CoverageInfo.MetaVar
pkgIdVarName := base.Flag.Cfg.CoverageInfo.PkgIdVar
counterMode := base.Flag.Cfg.CoverageInfo.CounterMode
@@ -53,15 +57,7 @@ func FixupVars() Names {
}
}
- for _, n := range typecheck.Target.Decls {
- as, ok := n.(*ir.AssignStmt)
- if !ok {
- continue
- }
- nm, ok := as.X.(*ir.Name)
- if !ok {
- continue
- }
+ for _, nm := range typecheck.Target.Externs {
s := nm.Sym()
switch s.Name {
case metaVarName:
@@ -100,20 +96,15 @@ func FixupVars() Names {
counterGran)
}
- return Names{
+ cnames := names{
MetaVar: metavar,
PkgIdVar: pkgidvar,
CounterMode: cm,
CounterGran: cg,
}
-}
-// FixupInit is the second main entry point for coverage compiler
-// fixup. It adds calls to the pkg init function as appropriate to
-// register coverage-related variables with the runtime.
-func FixupInit(cnames Names) {
- for _, n := range typecheck.Target.Decls {
- if fn, ok := n.(*ir.Func); ok && ir.FuncName(fn) == "init" {
+ for _, fn := range typecheck.Target.Funcs {
+ if ir.FuncName(fn) == "init" {
cnames.InitFn = fn
break
}
@@ -152,7 +143,7 @@ func metaHashAndLen() ([16]byte, int) {
return hv, base.Flag.Cfg.CoverageInfo.MetaLen
}
-func registerMeta(cnames Names, hashv [16]byte, mdlen int) {
+func registerMeta(cnames names, hashv [16]byte, mdlen int) {
// Materialize expression for hash (an array literal)
pos := cnames.InitFn.Pos()
elist := make([]ir.Node, 0, 16)
diff --git a/src/cmd/compile/internal/deadcode/deadcode.go b/src/cmd/compile/internal/deadcode/deadcode.go
deleted file mode 100644
index 46a2239f48..0000000000
--- a/src/cmd/compile/internal/deadcode/deadcode.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package deadcode
-
-import (
- "go/constant"
- "go/token"
-
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
-)
-
-func Func(fn *ir.Func) {
- stmts(&fn.Body)
-
- if len(fn.Body) == 0 {
- return
- }
-
- for _, n := range fn.Body {
- if len(n.Init()) > 0 {
- return
- }
- switch n.Op() {
- case ir.OIF:
- n := n.(*ir.IfStmt)
- if !ir.IsConst(n.Cond, constant.Bool) || len(n.Body) > 0 || len(n.Else) > 0 {
- return
- }
- case ir.OFOR:
- n := n.(*ir.ForStmt)
- if !ir.IsConst(n.Cond, constant.Bool) || ir.BoolVal(n.Cond) {
- return
- }
- default:
- return
- }
- }
-
- ir.VisitList(fn.Body, markHiddenClosureDead)
- fn.Body = []ir.Node{ir.NewBlockStmt(base.Pos, nil)}
-}
-
-func stmts(nn *ir.Nodes) {
- var lastLabel = -1
- for i, n := range *nn {
- if n != nil && n.Op() == ir.OLABEL {
- lastLabel = i
- }
- }
- for i, n := range *nn {
- // Cut is set to true when all nodes after i'th position
- // should be removed.
- // In other words, it marks whole slice "tail" as dead.
- cut := false
- if n == nil {
- continue
- }
- if n.Op() == ir.OIF {
- n := n.(*ir.IfStmt)
- n.Cond = expr(n.Cond)
- if ir.IsConst(n.Cond, constant.Bool) {
- var body ir.Nodes
- if ir.BoolVal(n.Cond) {
- ir.VisitList(n.Else, markHiddenClosureDead)
- n.Else = ir.Nodes{}
- body = n.Body
- } else {
- ir.VisitList(n.Body, markHiddenClosureDead)
- n.Body = ir.Nodes{}
- body = n.Else
- }
- // If "then" or "else" branch ends with panic or return statement,
- // it is safe to remove all statements after this node.
- // isterminating is not used to avoid goto-related complications.
- // We must be careful not to deadcode-remove labels, as they
- // might be the target of a goto. See issue 28616.
- if body := body; len(body) != 0 {
- switch body[(len(body) - 1)].Op() {
- case ir.ORETURN, ir.OTAILCALL, ir.OPANIC:
- if i > lastLabel {
- cut = true
- }
- }
- }
- }
- }
- if n.Op() == ir.OSWITCH {
- n := n.(*ir.SwitchStmt)
- // Use a closure wrapper here so we can use "return" to abort the analysis.
- func() {
- if n.Tag != nil && n.Tag.Op() == ir.OTYPESW {
- return // no special type-switch case yet.
- }
- var x constant.Value // value we're switching on
- if n.Tag != nil {
- if ir.ConstType(n.Tag) == constant.Unknown {
- return
- }
- x = n.Tag.Val()
- } else {
- x = constant.MakeBool(true) // switch { ... } => switch true { ... }
- }
- var def *ir.CaseClause
- for _, cas := range n.Cases {
- if len(cas.List) == 0 { // default case
- def = cas
- continue
- }
- for _, c := range cas.List {
- if ir.ConstType(c) == constant.Unknown {
- return // can't statically tell if it matches or not - give up.
- }
- if constant.Compare(x, token.EQL, c.Val()) {
- for _, n := range cas.Body {
- if n.Op() == ir.OFALL {
- return // fallthrough makes it complicated - abort.
- }
- }
- // This switch entry is the one that always triggers.
- for _, cas2 := range n.Cases {
- for _, c2 := range cas2.List {
- ir.Visit(c2, markHiddenClosureDead)
- }
- if cas2 != cas {
- ir.VisitList(cas2.Body, markHiddenClosureDead)
- }
- }
-
- // Rewrite to switch { case true: ... }
- n.Tag = nil
- cas.List[0] = ir.NewBool(c.Pos(), true)
- cas.List = cas.List[:1]
- n.Cases[0] = cas
- n.Cases = n.Cases[:1]
- return
- }
- }
- }
- if def != nil {
- for _, n := range def.Body {
- if n.Op() == ir.OFALL {
- return // fallthrough makes it complicated - abort.
- }
- }
- for _, cas := range n.Cases {
- if cas != def {
- ir.VisitList(cas.List, markHiddenClosureDead)
- ir.VisitList(cas.Body, markHiddenClosureDead)
- }
- }
- n.Cases[0] = def
- n.Cases = n.Cases[:1]
- return
- }
-
- // TODO: handle case bodies ending with panic/return as we do in the IF case above.
-
- // entire switch is a nop - no case ever triggers
- for _, cas := range n.Cases {
- ir.VisitList(cas.List, markHiddenClosureDead)
- ir.VisitList(cas.Body, markHiddenClosureDead)
- }
- n.Cases = n.Cases[:0]
- }()
- }
-
- if len(n.Init()) != 0 {
- stmts(n.(ir.InitNode).PtrInit())
- }
- switch n.Op() {
- case ir.OBLOCK:
- n := n.(*ir.BlockStmt)
- stmts(&n.List)
- case ir.OFOR:
- n := n.(*ir.ForStmt)
- stmts(&n.Body)
- case ir.OIF:
- n := n.(*ir.IfStmt)
- stmts(&n.Body)
- stmts(&n.Else)
- case ir.ORANGE:
- n := n.(*ir.RangeStmt)
- stmts(&n.Body)
- case ir.OSELECT:
- n := n.(*ir.SelectStmt)
- for _, cas := range n.Cases {
- stmts(&cas.Body)
- }
- case ir.OSWITCH:
- n := n.(*ir.SwitchStmt)
- for _, cas := range n.Cases {
- stmts(&cas.Body)
- }
- }
-
- if cut {
- ir.VisitList((*nn)[i+1:len(*nn)], markHiddenClosureDead)
- *nn = (*nn)[:i+1]
- break
- }
- }
-}
-
-func expr(n ir.Node) ir.Node {
- // Perform dead-code elimination on short-circuited boolean
- // expressions involving constants with the intent of
- // producing a constant 'if' condition.
- switch n.Op() {
- case ir.OANDAND:
- n := n.(*ir.LogicalExpr)
- n.X = expr(n.X)
- n.Y = expr(n.Y)
- if ir.IsConst(n.X, constant.Bool) {
- if ir.BoolVal(n.X) {
- return n.Y // true && x => x
- } else {
- return n.X // false && x => false
- }
- }
- case ir.OOROR:
- n := n.(*ir.LogicalExpr)
- n.X = expr(n.X)
- n.Y = expr(n.Y)
- if ir.IsConst(n.X, constant.Bool) {
- if ir.BoolVal(n.X) {
- return n.X // true || x => true
- } else {
- return n.Y // false || x => x
- }
- }
- }
- return n
-}
-
-func markHiddenClosureDead(n ir.Node) {
- if n.Op() != ir.OCLOSURE {
- return
- }
- clo := n.(*ir.ClosureExpr)
- if clo.Func.IsHiddenClosure() {
- clo.Func.SetIsDeadcodeClosure(true)
- }
- ir.VisitList(clo.Func.Body, markHiddenClosureDead)
-}
diff --git a/src/cmd/compile/internal/devirtualize/devirtualize.go b/src/cmd/compile/internal/devirtualize/devirtualize.go
index b156b66312..5d1b952627 100644
--- a/src/cmd/compile/internal/devirtualize/devirtualize.go
+++ b/src/cmd/compile/internal/devirtualize/devirtualize.go
@@ -18,40 +18,28 @@ import (
"cmd/compile/internal/types"
)
-// Static devirtualizes calls within fn where possible when the concrete callee
+// StaticCall devirtualizes the given call if possible when the concrete callee
// is available statically.
-func Static(fn *ir.Func) {
- ir.CurFunc = fn
-
- // For promoted methods (including value-receiver methods promoted to pointer-receivers),
- // the interface method wrapper may contain expressions that can panic (e.g., ODEREF, ODOTPTR, ODOTINTER).
- // Devirtualization involves inlining these expressions (and possible panics) to the call site.
- // This normally isn't a problem, but for go/defer statements it can move the panic from when/where
- // the call executes to the go/defer statement itself, which is a visible change in semantics (e.g., #52072).
- // To prevent this, we skip devirtualizing calls within go/defer statements altogether.
- goDeferCall := make(map[*ir.CallExpr]bool)
- ir.VisitList(fn.Body, func(n ir.Node) {
- switch n := n.(type) {
- case *ir.GoDeferStmt:
- if call, ok := n.Call.(*ir.CallExpr); ok {
- goDeferCall[call] = true
- }
- return
- case *ir.CallExpr:
- if !goDeferCall[n] {
- staticCall(n)
- }
- }
- })
-}
+func StaticCall(call *ir.CallExpr) {
+ // For promoted methods (including value-receiver methods promoted
+ // to pointer-receivers), the interface method wrapper may contain
+ // expressions that can panic (e.g., ODEREF, ODOTPTR,
+ // ODOTINTER). Devirtualization involves inlining these expressions
+ // (and possible panics) to the call site. This normally isn't a
+ // problem, but for go/defer statements it can move the panic from
+ // when/where the call executes to the go/defer statement itself,
+ // which is a visible change in semantics (e.g., #52072). To prevent
+ // this, we skip devirtualizing calls within go/defer statements
+ // altogether.
+ if call.GoDefer {
+ return
+ }
-// staticCall devirtualizes the given call if possible when the concrete callee
-// is available statically.
-func staticCall(call *ir.CallExpr) {
if call.Op() != ir.OCALLINTER {
return
}
- sel := call.X.(*ir.SelectorExpr)
+
+ sel := call.Fun.(*ir.SelectorExpr)
r := ir.StaticValue(sel.X)
if r.Op() != ir.OCONVIFACE {
return
@@ -70,7 +58,7 @@ func staticCall(call *ir.CallExpr) {
return
}
- // If typ *has* a shape type, then it's an shaped, instantiated
+ // If typ *has* a shape type, then it's a shaped, instantiated
// type like T[go.shape.int], and its methods (may) have an extra
// dictionary parameter. We could devirtualize this call if we
// could derive an appropriate dictionary argument.
@@ -113,29 +101,23 @@ func staticCall(call *ir.CallExpr) {
dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil)
dt.SetType(typ)
- x := typecheck.Callee(ir.NewSelectorExpr(sel.Pos(), ir.OXDOT, dt, sel.Sel))
+ x := typecheck.XDotMethod(sel.Pos(), dt, sel.Sel, true)
switch x.Op() {
case ir.ODOTMETH:
- x := x.(*ir.SelectorExpr)
if base.Flag.LowerM != 0 {
base.WarnfAt(call.Pos(), "devirtualizing %v to %v", sel, typ)
}
call.SetOp(ir.OCALLMETH)
- call.X = x
+ call.Fun = x
case ir.ODOTINTER:
// Promoted method from embedded interface-typed field (#42279).
- x := x.(*ir.SelectorExpr)
if base.Flag.LowerM != 0 {
base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", sel, typ)
}
call.SetOp(ir.OCALLINTER)
- call.X = x
+ call.Fun = x
default:
- // TODO(mdempsky): Turn back into Fatalf after more testing.
- if base.Flag.LowerM != 0 {
- base.WarnfAt(call.Pos(), "failed to devirtualize %v (%v)", x, x.Op())
- }
- return
+ base.FatalfAt(call.Pos(), "failed to devirtualize %v (%v)", x, x.Op())
}
// Duplicated logic from typecheck for function call return
@@ -148,9 +130,9 @@ func staticCall(call *ir.CallExpr) {
switch ft := x.Type(); ft.NumResults() {
case 0:
case 1:
- call.SetType(ft.Results().Field(0).Type)
+ call.SetType(ft.Result(0).Type)
default:
- call.SetType(ft.Results())
+ call.SetType(ft.ResultsTuple())
}
// Desugar OCALLMETH, if we created one (#57309).
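A source-level illustration of the effect of StaticCall, with standard library types chosen purely for demonstration: when the concrete type behind an interface value is statically known, the interface call is rewritten into a direct method call on that type (done on the IR, not via a user-visible assertion), which then becomes a candidate for inlining.

package main

import (
	"bytes"
	"fmt"
)

func main() {
	var s fmt.Stringer = new(bytes.Buffer)

	// Before devirtualization: dispatched through the interface at runtime.
	_ = s.String()

	// After devirtualization the call behaves like a direct method call on
	// the known concrete type (shown here with an explicit assertion).
	_ = s.(*bytes.Buffer).String()
}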
diff --git a/src/cmd/compile/internal/devirtualize/pgo.go b/src/cmd/compile/internal/devirtualize/pgo.go
index 068e0ef8f2..170bf74673 100644
--- a/src/cmd/compile/internal/devirtualize/pgo.go
+++ b/src/cmd/compile/internal/devirtualize/pgo.go
@@ -12,6 +12,8 @@ import (
"cmd/compile/internal/pgo"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
"encoding/json"
"fmt"
"os"
@@ -53,8 +55,10 @@ type CallStat struct {
// ProfileGuided performs call devirtualization of indirect calls based on
// profile information.
//
-// Specifically, it performs conditional devirtualization of interface calls
-// for the hottest callee. That is, it performs a transformation like:
+// Specifically, it performs conditional devirtualization of interface calls or
+// function value calls for the hottest callee.
+//
+// That is, for interface calls it performs a transformation like:
//
// type Iface interface {
// Foo()
@@ -78,6 +82,24 @@ type CallStat struct {
// }
// }
//
+// For function value calls it performs a transformation like:
+//
+// func Concrete() {}
+//
+// func foo(fn func()) {
+// fn()
+// }
+//
+// to:
+//
+// func foo(fn func()) {
+// if internal/abi.FuncPCABIInternal(fn) == internal/abi.FuncPCABIInternal(Concrete) {
+// Concrete()
+// } else {
+// fn()
+// }
+// }
+//
// The primary benefit of this transformation is enabling inlining of the
// direct call.
func ProfileGuided(fn *ir.Func, p *pgo.Profile) {
@@ -85,9 +107,6 @@ func ProfileGuided(fn *ir.Func, p *pgo.Profile) {
name := ir.LinkFuncName(fn)
- // Can't devirtualize go/defer calls. See comment in Static.
- goDeferCall := make(map[*ir.CallExpr]bool)
-
var jsonW *json.Encoder
if base.Debug.PGODebug >= 3 {
jsonW = json.NewEncoder(os.Stdout)
@@ -99,12 +118,6 @@ func ProfileGuided(fn *ir.Func, p *pgo.Profile) {
return n
}
- if gds, ok := n.(*ir.GoDeferStmt); ok {
- if call, ok := gds.Call.(*ir.CallExpr); ok {
- goDeferCall[call] = true
- }
- }
-
ir.EditChildren(n, edit)
call, ok := n.(*ir.CallExpr)
@@ -125,7 +138,8 @@ func ProfileGuided(fn *ir.Func, p *pgo.Profile) {
}
}
- if call.Op() != ir.OCALLINTER {
+ op := call.Op()
+ if op != ir.OCALLFUNC && op != ir.OCALLINTER {
return n
}
@@ -133,25 +147,26 @@ func ProfileGuided(fn *ir.Func, p *pgo.Profile) {
fmt.Printf("%v: PGO devirtualize considering call %v\n", ir.Line(call), call)
}
- if goDeferCall[call] {
+ if call.GoDefer {
if base.Debug.PGODebug >= 2 {
fmt.Printf("%v: can't PGO devirtualize go/defer call %v\n", ir.Line(call), call)
}
return n
}
- // Bail if we do not have a hot callee.
- callee, weight := findHotConcreteCallee(p, fn, call)
- if callee == nil {
- return n
+ var newNode ir.Node
+ var callee *ir.Func
+ var weight int64
+ switch op {
+ case ir.OCALLFUNC:
+ newNode, callee, weight = maybeDevirtualizeFunctionCall(p, fn, call)
+ case ir.OCALLINTER:
+ newNode, callee, weight = maybeDevirtualizeInterfaceCall(p, fn, call)
+ default:
+ panic("unreachable")
}
- // Bail if we do not have a Type node for the hot callee.
- ctyp := methodRecvType(callee)
- if ctyp == nil {
- return n
- }
- // Bail if we know for sure it won't inline.
- if !shouldPGODevirt(callee) {
+
+ if newNode == nil {
return n
}
@@ -160,12 +175,126 @@ func ProfileGuided(fn *ir.Func, p *pgo.Profile) {
stat.DevirtualizedWeight = weight
}
- return rewriteCondCall(call, fn, callee, ctyp)
+ return newNode
}
ir.EditChildren(fn, edit)
}
+// Devirtualize an interface call if possible and eligible. Returns the new
+// ir.Node if call was devirtualized, and if so also the callee and weight of
+// the devirtualized edge.
+func maybeDevirtualizeInterfaceCall(p *pgo.Profile, fn *ir.Func, call *ir.CallExpr) (ir.Node, *ir.Func, int64) {
+ if base.Debug.PGODevirtualize < 1 {
+ return nil, nil, 0
+ }
+
+ // Bail if we do not have a hot callee.
+ callee, weight := findHotConcreteInterfaceCallee(p, fn, call)
+ if callee == nil {
+ return nil, nil, 0
+ }
+ // Bail if we do not have a Type node for the hot callee.
+ ctyp := methodRecvType(callee)
+ if ctyp == nil {
+ return nil, nil, 0
+ }
+ // Bail if we know for sure it won't inline.
+ if !shouldPGODevirt(callee) {
+ return nil, nil, 0
+ }
+ // Bail if de-selected by PGO Hash.
+ if !base.PGOHash.MatchPosWithInfo(call.Pos(), "devirt", nil) {
+ return nil, nil, 0
+ }
+
+ return rewriteInterfaceCall(call, fn, callee, ctyp), callee, weight
+}
+
+// Devirtualize an indirect function call if possible and eligible. Returns the new
+// ir.Node if call was devirtualized, and if so also the callee and weight of
+// the devirtualized edge.
+func maybeDevirtualizeFunctionCall(p *pgo.Profile, fn *ir.Func, call *ir.CallExpr) (ir.Node, *ir.Func, int64) {
+ if base.Debug.PGODevirtualize < 2 {
+ return nil, nil, 0
+ }
+
+ // Bail if this is a direct call; no devirtualization necessary.
+ callee := pgo.DirectCallee(call.Fun)
+ if callee != nil {
+ return nil, nil, 0
+ }
+
+ // Bail if we do not have a hot callee.
+ callee, weight := findHotConcreteFunctionCallee(p, fn, call)
+ if callee == nil {
+ return nil, nil, 0
+ }
+
+ // TODO(go.dev/issue/61577): Closures need the closure context passed
+ // via the context register. That requires extra plumbing that we
+ // haven't done yet.
+ if callee.OClosure != nil {
+ if base.Debug.PGODebug >= 3 {
+ fmt.Printf("callee %s is a closure, skipping\n", ir.FuncName(callee))
+ }
+ return nil, nil, 0
+ }
+ // runtime.memhash_varlen does not look like a closure, but it uses
+ // runtime.getclosureptr to access data encoded by callers, which are
+ // generated by cmd/compile/internal/reflectdata.genhash.
+ if callee.Sym().Pkg.Path == "runtime" && callee.Sym().Name == "memhash_varlen" {
+ if base.Debug.PGODebug >= 3 {
+ fmt.Printf("callee %s is a closure (runtime.memhash_varlen), skipping\n", ir.FuncName(callee))
+ }
+ return nil, nil, 0
+ }
+ // TODO(prattmic): We don't properly handle methods as callees in two
+ // different dimensions:
+ //
+ // 1. Method expressions. e.g.,
+ //
+ // var fn func(*os.File, []byte) (int, error) = (*os.File).Read
+ //
+ // In this case, typ will report *os.File as the receiver while
+ // ctyp reports it as the first argument. types.Identical ignores
+ // receiver parameters, so it treats these as different, even though
+ // they are still call compatible.
+ //
+ // 2. Method values. e.g.,
+ //
+ // var f *os.File
+ // var fn func([]byte) (int, error) = f.Read
+ //
+ // types.Identical will treat these as compatible (since receiver
+ // parameters are ignored). However, in this case, we do not call
+ // (*os.File).Read directly. Instead, f is stored in closure context
+ // and we call the wrapper (*os.File).Read-fm. However, runtime/pprof
+ // hides wrappers from profiles, making it appear that there is a call
+ // directly to the method. We could recognize this pattern and return the
+ // wrapper rather than the method.
+ //
+ // N.B. perf profiles will report wrapper symbols directly, so
+ // ideally we should support direct wrapper references as well.
+ if callee.Type().Recv() != nil {
+ if base.Debug.PGODebug >= 3 {
+ fmt.Printf("callee %s is a method, skipping\n", ir.FuncName(callee))
+ }
+ return nil, nil, 0
+ }
+
+ // Bail if we know for sure it won't inline.
+ if !shouldPGODevirt(callee) {
+ return nil, nil, 0
+ }
+ // Bail if de-selected by PGO Hash.
+ if !base.PGOHash.MatchPosWithInfo(call.Pos(), "devirt", nil) {
+ return nil, nil, 0
+ }
+
+ return rewriteFunctionCall(call, fn, callee), callee, weight
+}
+
// shouldPGODevirt checks if we should perform PGO devirtualization to the
// target function.
//
@@ -223,6 +352,18 @@ func constructCallStat(p *pgo.Profile, fn *ir.Func, name string, call *ir.CallEx
offset := pgo.NodeLineOffset(call, fn)
+ hotter := func(e *pgo.IREdge) bool {
+ if stat.Hottest == "" {
+ return true
+ }
+ if e.Weight != stat.HottestWeight {
+ return e.Weight > stat.HottestWeight
+ }
+ // If weight is the same, arbitrarily sort lexicographically, as
+ // findHotConcreteCallee does.
+ return e.Dst.Name() < stat.Hottest
+ }
+
// Sum of all edges from this callsite, regardless of callee.
// For direct calls, this should be the same as the single edge
// weight (except for multiple calls on one line, which we
@@ -233,7 +374,7 @@ func constructCallStat(p *pgo.Profile, fn *ir.Func, name string, call *ir.CallEx
continue
}
stat.Weight += edge.Weight
- if edge.Weight > stat.HottestWeight {
+ if hotter(edge) {
stat.HottestWeight = edge.Weight
stat.Hottest = edge.Dst.Name()
}
@@ -243,7 +384,7 @@ func constructCallStat(p *pgo.Profile, fn *ir.Func, name string, call *ir.CallEx
case ir.OCALLFUNC:
stat.Interface = false
- callee := pgo.DirectCallee(call.X)
+ callee := pgo.DirectCallee(call.Fun)
if callee != nil {
stat.Direct = true
if stat.Hottest == "" {
@@ -262,11 +403,90 @@ func constructCallStat(p *pgo.Profile, fn *ir.Func, name string, call *ir.CallEx
return &stat
}
-// rewriteCondCall devirtualizes the given call using a direct method call to
-// concretetyp.
-func rewriteCondCall(call *ir.CallExpr, curfn, callee *ir.Func, concretetyp *types.Type) ir.Node {
+// copyInputs copies the inputs to a call: the receiver (for interface calls)
+// or function value (for function value calls) and the arguments. These
+// expressions are evaluated once and assigned to temporaries.
+//
+// The assignment statement is added to init and the copied receiver/fn
+// expression and copied arguments expressions are returned.
+func copyInputs(curfn *ir.Func, pos src.XPos, recvOrFn ir.Node, args []ir.Node, init *ir.Nodes) (ir.Node, []ir.Node) {
+ // Evaluate receiver/fn and argument expressions. The receiver/fn is
+ // used twice but we don't want to cause side effects twice. The
+ // arguments are used in two different calls and we can't trivially
+ // copy them.
+ //
+ // recvOrFn must be first in the assignment list as its side effects
+ // must be ordered before argument side effects.
+ var lhs, rhs []ir.Node
+ newRecvOrFn := typecheck.TempAt(pos, curfn, recvOrFn.Type())
+ lhs = append(lhs, newRecvOrFn)
+ rhs = append(rhs, recvOrFn)
+
+ for _, arg := range args {
+ argvar := typecheck.TempAt(pos, curfn, arg.Type())
+
+ lhs = append(lhs, argvar)
+ rhs = append(rhs, arg)
+ }
+
+ asList := ir.NewAssignListStmt(pos, ir.OAS2, lhs, rhs)
+ init.Append(typecheck.Stmt(asList))
+
+ return newRecvOrFn, lhs[1:]
+}
+
+// retTemps returns a slice of temporaries to be used for storing result values from call.
+func retTemps(curfn *ir.Func, pos src.XPos, call *ir.CallExpr) []ir.Node {
+ sig := call.Fun.Type()
+ var retvars []ir.Node
+ for _, ret := range sig.Results() {
+ retvars = append(retvars, typecheck.TempAt(pos, curfn, ret.Type))
+ }
+ return retvars
+}
+
+// condCall returns an ir.InlinedCallExpr that performs a call to thenCall if
+// cond is true and elseCall if cond is false. The return variables of the
+// InlinedCallExpr evaluate to the return values from the call.
+func condCall(curfn *ir.Func, pos src.XPos, cond ir.Node, thenCall, elseCall *ir.CallExpr, init ir.Nodes) *ir.InlinedCallExpr {
+ // Doesn't matter whether we use thenCall or elseCall, they must have
+ // the same return types.
+ retvars := retTemps(curfn, pos, thenCall)
+
+ var thenBlock, elseBlock ir.Nodes
+ if len(retvars) == 0 {
+ thenBlock.Append(thenCall)
+ elseBlock.Append(elseCall)
+ } else {
+ // Copy slice so edits in one location don't affect another.
+ thenRet := append([]ir.Node(nil), retvars...)
+ thenAsList := ir.NewAssignListStmt(pos, ir.OAS2, thenRet, []ir.Node{thenCall})
+ thenBlock.Append(typecheck.Stmt(thenAsList))
+
+ elseRet := append([]ir.Node(nil), retvars...)
+ elseAsList := ir.NewAssignListStmt(pos, ir.OAS2, elseRet, []ir.Node{elseCall})
+ elseBlock.Append(typecheck.Stmt(elseAsList))
+ }
+
+ nif := ir.NewIfStmt(pos, cond, thenBlock, elseBlock)
+ nif.SetInit(init)
+ nif.Likely = true
+
+ body := []ir.Node{typecheck.Stmt(nif)}
+
+ // This isn't really an inlined call of course, but InlinedCallExpr
+ // makes handling reassignment of return values easier.
+ res := ir.NewInlinedCallExpr(pos, body, retvars)
+ res.SetType(thenCall.Type())
+ res.SetTypecheck(1)
+ return res
+}
+
+// rewriteInterfaceCall devirtualizes the given interface call using a direct
+// method call to concretetyp.
+func rewriteInterfaceCall(call *ir.CallExpr, curfn, callee *ir.Func, concretetyp *types.Type) ir.Node {
if base.Flag.LowerM != 0 {
- fmt.Printf("%v: PGO devirtualizing %v to %v\n", ir.Line(call), call.X, callee)
+ fmt.Printf("%v: PGO devirtualizing interface call %v to %v\n", ir.Line(call), call.Fun, callee)
}
// We generate an OINCALL of:
@@ -297,91 +517,106 @@ func rewriteCondCall(call *ir.CallExpr, curfn, callee *ir.Func, concretetyp *typ
// making it less like to inline. We may want to compensate for this
// somehow.
- var retvars []ir.Node
-
- sig := call.X.Type()
-
- for _, ret := range sig.Results().FieldSlice() {
- retvars = append(retvars, typecheck.Temp(ret.Type))
- }
-
- sel := call.X.(*ir.SelectorExpr)
+ sel := call.Fun.(*ir.SelectorExpr)
method := sel.Sel
pos := call.Pos()
init := ir.TakeInit(call)
- // Evaluate receiver and argument expressions. The receiver is used
- // twice but we don't want to cause side effects twice. The arguments
- // are used in two different calls and we can't trivially copy them.
- //
- // recv must be first in the assignment list as its side effects must
- // be ordered before argument side effects.
- var lhs, rhs []ir.Node
- recv := typecheck.Temp(sel.X.Type())
- lhs = append(lhs, recv)
- rhs = append(rhs, sel.X)
-
- // Move arguments to assignments prior to the if statement. We cannot
- // simply copy the args' IR, as some IR constructs cannot be copied,
- // such as labels (possible in InlinedCall nodes).
- args := call.Args.Take()
- for _, arg := range args {
- argvar := typecheck.Temp(arg.Type())
-
- lhs = append(lhs, argvar)
- rhs = append(rhs, arg)
- }
-
- asList := ir.NewAssignListStmt(pos, ir.OAS2, lhs, rhs)
- init.Append(typecheck.Stmt(asList))
+ recv, args := copyInputs(curfn, pos, sel.X, call.Args.Take(), &init)
// Copy slice so edits in one location don't affect another.
- argvars := append([]ir.Node(nil), lhs[1:]...)
+ argvars := append([]ir.Node(nil), args...)
call.Args = argvars
- tmpnode := typecheck.Temp(concretetyp)
- tmpok := typecheck.Temp(types.Types[types.TBOOL])
+ tmpnode := typecheck.TempAt(base.Pos, curfn, concretetyp)
+ tmpok := typecheck.TempAt(base.Pos, curfn, types.Types[types.TBOOL])
assert := ir.NewTypeAssertExpr(pos, recv, concretetyp)
assertAsList := ir.NewAssignListStmt(pos, ir.OAS2, []ir.Node{tmpnode, tmpok}, []ir.Node{typecheck.Expr(assert)})
init.Append(typecheck.Stmt(assertAsList))
- concreteCallee := typecheck.Callee(ir.NewSelectorExpr(pos, ir.OXDOT, tmpnode, method))
+ concreteCallee := typecheck.XDotMethod(pos, tmpnode, method, true)
// Copy slice so edits in one location don't affect another.
argvars = append([]ir.Node(nil), argvars...)
- concreteCall := typecheck.Call(pos, concreteCallee, argvars, call.IsDDD)
+ concreteCall := typecheck.Call(pos, concreteCallee, argvars, call.IsDDD).(*ir.CallExpr)
- var thenBlock, elseBlock ir.Nodes
- if len(retvars) == 0 {
- thenBlock.Append(concreteCall)
- elseBlock.Append(call)
- } else {
- // Copy slice so edits in one location don't affect another.
- thenRet := append([]ir.Node(nil), retvars...)
- thenAsList := ir.NewAssignListStmt(pos, ir.OAS2, thenRet, []ir.Node{concreteCall})
- thenBlock.Append(typecheck.Stmt(thenAsList))
+ res := condCall(curfn, pos, tmpok, concreteCall, call, init)
- elseRet := append([]ir.Node(nil), retvars...)
- elseAsList := ir.NewAssignListStmt(pos, ir.OAS2, elseRet, []ir.Node{call})
- elseBlock.Append(typecheck.Stmt(elseAsList))
+ if base.Debug.PGODebug >= 3 {
+ fmt.Printf("PGO devirtualizing interface call to %+v. After: %+v\n", concretetyp, res)
}
- cond := ir.NewIfStmt(pos, nil, nil, nil)
- cond.SetInit(init)
- cond.Cond = tmpok
- cond.Body = thenBlock
- cond.Else = elseBlock
- cond.Likely = true
+ return res
+}
- body := []ir.Node{typecheck.Stmt(cond)}
+// rewriteFunctionCall devirtualizes the given OCALLFUNC using a direct
+// function call to callee.
+func rewriteFunctionCall(call *ir.CallExpr, curfn, callee *ir.Func) ir.Node {
+ if base.Flag.LowerM != 0 {
+ fmt.Printf("%v: PGO devirtualizing function call %v to %v\n", ir.Line(call), call.Fun, callee)
+ }
- res := ir.NewInlinedCallExpr(pos, body, retvars)
- res.SetType(call.Type())
- res.SetTypecheck(1)
+ // We generate an OINCALL of:
+ //
+ // var fn FuncType
+ //
+ // var arg1 A1
+ // var argN AN
+ //
+ // var ret1 R1
+ // var retN RN
+ //
+ // fn, arg1, argN = fn expr, arg1 expr, argN expr
+ //
+ // fnPC := internal/abi.FuncPCABIInternal(fn)
+ // concretePC := internal/abi.FuncPCABIInternal(concrete)
+ //
+ // if fnPC == concretePC {
+ // ret1, retN = concrete(arg1, ... argN) // Same closure context passed (TODO)
+ // } else {
+ // ret1, retN = fn(arg1, ... argN)
+ // }
+ //
+ // OINCALL retvars: ret1, ... retN
+ //
+ // This isn't really an inlined call of course, but InlinedCallExpr
+ // makes handling reassignment of return values easier.
+
+ pos := call.Pos()
+ init := ir.TakeInit(call)
+
+ fn, args := copyInputs(curfn, pos, call.Fun, call.Args.Take(), &init)
+
+ // Copy slice so edits in one location don't affect another.
+ argvars := append([]ir.Node(nil), args...)
+ call.Args = argvars
+
+ // FuncPCABIInternal takes an interface{}, emulate that. This is needed
+ // to ensure we get the MAKEFACE we need for SSA.
+ fnIface := typecheck.Expr(ir.NewConvExpr(pos, ir.OCONV, types.Types[types.TINTER], fn))
+ calleeIface := typecheck.Expr(ir.NewConvExpr(pos, ir.OCONV, types.Types[types.TINTER], callee.Nname))
+
+ fnPC := ir.FuncPC(pos, fnIface, obj.ABIInternal)
+ concretePC := ir.FuncPC(pos, calleeIface, obj.ABIInternal)
+
+ pcEq := typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.OEQ, fnPC, concretePC))
+
+ // TODO(go.dev/issue/61577): Handle callees that are closures and need a
+ // copy of the closure context from call. For now, we skip callees that
+ // are closures in maybeDevirtualizeFunctionCall.
+ if callee.OClosure != nil {
+ base.Fatalf("Callee is a closure: %+v", callee)
+ }
+
+ // Copy slice so edits in one location don't affect another.
+ argvars = append([]ir.Node(nil), argvars...)
+ concreteCall := typecheck.Call(pos, callee.Nname, argvars, call.IsDDD).(*ir.CallExpr)
+
+ res := condCall(curfn, pos, pcEq, concreteCall, call, init)
if base.Debug.PGODebug >= 3 {
- fmt.Printf("PGO devirtualizing call to %+v. After: %+v\n", concretetyp, res)
+ fmt.Printf("PGO devirtualizing function call to %+v. After: %+v\n", ir.FuncName(callee), res)
}
return res
@@ -404,7 +639,7 @@ func interfaceCallRecvTypeAndMethod(call *ir.CallExpr) (*types.Type, *types.Sym)
base.Fatalf("Call isn't OCALLINTER: %+v", call)
}
- sel, ok := call.X.(*ir.SelectorExpr)
+ sel, ok := call.Fun.(*ir.SelectorExpr)
if !ok {
base.Fatalf("OCALLINTER doesn't contain SelectorExpr: %+v", call)
}
@@ -412,15 +647,15 @@ func interfaceCallRecvTypeAndMethod(call *ir.CallExpr) (*types.Type, *types.Sym)
return sel.X.Type(), sel.Sel
}
-// findHotConcreteCallee returns the *ir.Func of the hottest callee of an
-// indirect call, if available, and its edge weight.
-func findHotConcreteCallee(p *pgo.Profile, caller *ir.Func, call *ir.CallExpr) (*ir.Func, int64) {
+// findHotConcreteCallee returns the *ir.Func of the hottest callee of a call,
+// if available, and its edge weight. extraFn can perform additional
+// applicability checks on each candidate edge. If extraFn returns false,
+// candidate will not be considered a valid callee candidate.
+func findHotConcreteCallee(p *pgo.Profile, caller *ir.Func, call *ir.CallExpr, extraFn func(callerName string, callOffset int, candidate *pgo.IREdge) bool) (*ir.Func, int64) {
callerName := ir.LinkFuncName(caller)
callerNode := p.WeightedCG.IRNodes[callerName]
callOffset := pgo.NodeLineOffset(call, caller)
- inter, method := interfaceCallRecvTypeAndMethod(call)
-
var hottest *pgo.IREdge
// Returns true if e is hotter than hottest.
@@ -440,9 +675,12 @@ func findHotConcreteCallee(p *pgo.Profile, caller *ir.Func, call *ir.CallExpr) (
// Now e.Weight == hottest.Weight, we must select on other
// criteria.
- if hottest.Dst.AST == nil && e.Dst.AST != nil {
- // Prefer the edge with IR available.
- return true
+ // If only one edge has IR, prefer that one.
+ if (hottest.Dst.AST == nil) != (e.Dst.AST == nil) {
+ if e.Dst.AST != nil {
+ return true
+ }
+ return false
}
// Arbitrary, but the callee names will always differ. Select
@@ -484,6 +722,35 @@ func findHotConcreteCallee(p *pgo.Profile, caller *ir.Func, call *ir.CallExpr) (
continue
}
+ if extraFn != nil && !extraFn(callerName, callOffset, e) {
+ continue
+ }
+
+ if base.Debug.PGODebug >= 2 {
+ fmt.Printf("%v: edge %s:%d -> %s (weight %d): hottest so far\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight)
+ }
+ hottest = e
+ }
+
+ if hottest == nil {
+ if base.Debug.PGODebug >= 2 {
+ fmt.Printf("%v: call %s:%d: no hot callee\n", ir.Line(call), callerName, callOffset)
+ }
+ return nil, 0
+ }
+
+ if base.Debug.PGODebug >= 2 {
+ fmt.Printf("%v call %s:%d: hottest callee %s (weight %d)\n", ir.Line(call), callerName, callOffset, hottest.Dst.Name(), hottest.Weight)
+ }
+ return hottest.Dst.AST, hottest.Weight
+}
+
+// findHotConcreteInterfaceCallee returns the *ir.Func of the hottest callee of an
+// interface call, if available, and its edge weight.
+func findHotConcreteInterfaceCallee(p *pgo.Profile, caller *ir.Func, call *ir.CallExpr) (*ir.Func, int64) {
+ inter, method := interfaceCallRecvTypeAndMethod(call)
+
+ return findHotConcreteCallee(p, caller, call, func(callerName string, callOffset int, e *pgo.IREdge) bool {
ctyp := methodRecvType(e.Dst.AST)
if ctyp == nil {
// Not a method.
@@ -491,7 +758,7 @@ func findHotConcreteCallee(p *pgo.Profile, caller *ir.Func, call *ir.CallExpr) (
if base.Debug.PGODebug >= 2 {
fmt.Printf("%v: edge %s:%d -> %s (weight %d): callee not a method\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight)
}
- continue
+ return false
}
// If ctyp doesn't implement inter it is most likely from a
@@ -510,7 +777,7 @@ func findHotConcreteCallee(p *pgo.Profile, caller *ir.Func, call *ir.CallExpr) (
why := typecheck.ImplementsExplain(ctyp, inter)
fmt.Printf("%v: edge %s:%d -> %s (weight %d): %v doesn't implement %v (%s)\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight, ctyp, inter, why)
}
- continue
+ return false
}
// If the method name is different it is most likely from a
@@ -519,24 +786,35 @@ func findHotConcreteCallee(p *pgo.Profile, caller *ir.Func, call *ir.CallExpr) (
if base.Debug.PGODebug >= 2 {
fmt.Printf("%v: edge %s:%d -> %s (weight %d): callee is a different method\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight)
}
- continue
+ return false
}
- if base.Debug.PGODebug >= 2 {
- fmt.Printf("%v: edge %s:%d -> %s (weight %d): hottest so far\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight)
- }
- hottest = e
- }
+ return true
+ })
+}
- if hottest == nil {
- if base.Debug.PGODebug >= 2 {
- fmt.Printf("%v: call %s:%d: no hot callee\n", ir.Line(call), callerName, callOffset)
+// findHotConcreteFunctionCallee returns the *ir.Func of the hottest callee of an
+// indirect function call, if available, and its edge weight.
+func findHotConcreteFunctionCallee(p *pgo.Profile, caller *ir.Func, call *ir.CallExpr) (*ir.Func, int64) {
+ typ := call.Fun.Type().Underlying()
+
+ return findHotConcreteCallee(p, caller, call, func(callerName string, callOffset int, e *pgo.IREdge) bool {
+ ctyp := e.Dst.AST.Type().Underlying()
+
+ // If ctyp doesn't match typ it is most likely from a different
+ // call on the same line.
+ //
+ // Note that we are comparing underlying types, as different
+ // defined types are OK. e.g., a call to a value of type
+ // net/http.HandlerFunc can be devirtualized to a function with
+ // the same underlying type.
+ if !types.Identical(typ, ctyp) {
+ if base.Debug.PGODebug >= 2 {
+ fmt.Printf("%v: edge %s:%d -> %s (weight %d): %v doesn't match %v\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight, ctyp, typ)
+ }
+ return false
}
- return nil, 0
- }
- if base.Debug.PGODebug >= 2 {
- fmt.Printf("%v call %s:%d: hottest callee %s (weight %d)\n", ir.Line(call), callerName, callOffset, hottest.Dst.Name(), hottest.Weight)
- }
- return hottest.Dst.AST, hottest.Weight
+ return true
+ })
}
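A standalone sketch of the callee-selection order used by findHotConcreteCallee: highest edge weight wins, an edge with IR available beats an equally hot edge without it, and remaining ties fall back to the lexically smallest callee name. The edge type here is simplified and hypothetical, not the pgo.IREdge used by the compiler.

package main

import "fmt"

type edge struct {
	calleeName string
	weight     int64
	hasIR      bool
}

// hotter reports whether e should replace hottest under the ordering above.
func hotter(e, hottest *edge) bool {
	if hottest == nil {
		return true
	}
	if e.weight != hottest.weight {
		return e.weight > hottest.weight
	}
	if e.hasIR != hottest.hasIR {
		return e.hasIR // prefer the edge with IR available
	}
	return e.calleeName < hottest.calleeName // arbitrary but deterministic
}

func main() {
	edges := []edge{
		{"example.com/bar.Missing", 10, false},
		{"example.com/foo.Hot", 10, true},
		{"example.com/foo.Cold", 1, true},
	}
	var hottest *edge
	for i := range edges {
		if hotter(&edges[i], hottest) {
			hottest = &edges[i]
		}
	}
	fmt.Println(hottest.calleeName) // example.com/foo.Hot
}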
diff --git a/src/cmd/compile/internal/devirtualize/pgo_test.go b/src/cmd/compile/internal/devirtualize/pgo_test.go
new file mode 100644
index 0000000000..84c96df122
--- /dev/null
+++ b/src/cmd/compile/internal/devirtualize/pgo_test.go
@@ -0,0 +1,217 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package devirtualize
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/pgo"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "testing"
+)
+
+func init() {
+ // These are the few constants that need to be initialized in order to use
+ // the types package without using the typecheck package by calling
+ // typecheck.InitUniverse() (the normal way to initialize the types package).
+ types.PtrSize = 8
+ types.RegSize = 8
+ types.MaxWidth = 1 << 50
+ typecheck.InitUniverse()
+ base.Ctxt = &obj.Link{}
+ base.Debug.PGODebug = 3
+}
+
+func makePos(b *src.PosBase, line, col uint) src.XPos {
+ return base.Ctxt.PosTable.XPos(src.MakePos(b, line, col))
+}
+
+type profileBuilder struct {
+ p *pgo.Profile
+}
+
+func newProfileBuilder() *profileBuilder {
+ // findHotConcreteCallee only uses pgo.Profile.WeightedCG, so we're
+ // going to take a shortcut and only construct that.
+ return &profileBuilder{
+ p: &pgo.Profile{
+ WeightedCG: &pgo.IRGraph{
+ IRNodes: make(map[string]*pgo.IRNode),
+ },
+ },
+ }
+}
+
+// Profile returns the constructed profile.
+func (p *profileBuilder) Profile() *pgo.Profile {
+ return p.p
+}
+
+// NewNode creates a new IRNode and adds it to the profile.
+//
+// fn may be nil, in which case the node will set LinkerSymbolName.
+func (p *profileBuilder) NewNode(name string, fn *ir.Func) *pgo.IRNode {
+ n := &pgo.IRNode{
+ OutEdges: make(map[pgo.NamedCallEdge]*pgo.IREdge),
+ }
+ if fn != nil {
+ n.AST = fn
+ } else {
+ n.LinkerSymbolName = name
+ }
+ p.p.WeightedCG.IRNodes[name] = n
+ return n
+}
+
+// Add a new call edge from caller to callee.
+func addEdge(caller, callee *pgo.IRNode, offset int, weight int64) {
+ namedEdge := pgo.NamedCallEdge{
+ CallerName: caller.Name(),
+ CalleeName: callee.Name(),
+ CallSiteOffset: offset,
+ }
+ irEdge := &pgo.IREdge{
+ Src: caller,
+ Dst: callee,
+ CallSiteOffset: offset,
+ Weight: weight,
+ }
+ caller.OutEdges[namedEdge] = irEdge
+}
+
+// Create a new struct type named structName with a method named methName and
+// return the method.
+func makeStructWithMethod(pkg *types.Pkg, structName, methName string) *ir.Func {
+ // type structName struct{}
+ structType := types.NewStruct(nil)
+
+ // func (structName) methodName()
+ recv := types.NewField(src.NoXPos, typecheck.Lookup(structName), structType)
+ sig := types.NewSignature(recv, nil, nil)
+ fn := ir.NewFunc(src.NoXPos, src.NoXPos, pkg.Lookup(structName+"."+methName), sig)
+
+ // Add the method to the struct.
+ structType.SetMethods([]*types.Field{types.NewField(src.NoXPos, typecheck.Lookup(methName), sig)})
+
+ return fn
+}
+
+func TestFindHotConcreteInterfaceCallee(t *testing.T) {
+ p := newProfileBuilder()
+
+ pkgFoo := types.NewPkg("example.com/foo", "foo")
+ basePos := src.NewFileBase("foo.go", "/foo.go")
+
+ const (
+ // Caller start line.
+ callerStart = 42
+
+ // The line offset of the call we care about.
+ callOffset = 1
+
+ // The line offset of some other call we don't care about.
+ wrongCallOffset = 2
+ )
+
+ // type IFace interface {
+ // Foo()
+ // }
+ fooSig := types.NewSignature(types.FakeRecv(), nil, nil)
+ method := types.NewField(src.NoXPos, typecheck.Lookup("Foo"), fooSig)
+ iface := types.NewInterface([]*types.Field{method})
+
+ callerFn := ir.NewFunc(makePos(basePos, callerStart, 1), src.NoXPos, pkgFoo.Lookup("Caller"), types.NewSignature(nil, nil, nil))
+
+ hotCalleeFn := makeStructWithMethod(pkgFoo, "HotCallee", "Foo")
+ coldCalleeFn := makeStructWithMethod(pkgFoo, "ColdCallee", "Foo")
+ wrongLineCalleeFn := makeStructWithMethod(pkgFoo, "WrongLineCallee", "Foo")
+ wrongMethodCalleeFn := makeStructWithMethod(pkgFoo, "WrongMethodCallee", "Bar")
+
+ callerNode := p.NewNode("example.com/foo.Caller", callerFn)
+ hotCalleeNode := p.NewNode("example.com/foo.HotCallee.Foo", hotCalleeFn)
+ coldCalleeNode := p.NewNode("example.com/foo.ColdCallee.Foo", coldCalleeFn)
+ wrongLineCalleeNode := p.NewNode("example.com/foo.WrongCalleeLine.Foo", wrongLineCalleeFn)
+ wrongMethodCalleeNode := p.NewNode("example.com/foo.WrongCalleeMethod.Foo", wrongMethodCalleeFn)
+
+ hotMissingCalleeNode := p.NewNode("example.com/bar.HotMissingCallee.Foo", nil)
+
+ addEdge(callerNode, wrongLineCalleeNode, wrongCallOffset, 100) // Really hot, but wrong line.
+ addEdge(callerNode, wrongMethodCalleeNode, callOffset, 100) // Really hot, but wrong method type.
+ addEdge(callerNode, hotCalleeNode, callOffset, 10)
+ addEdge(callerNode, coldCalleeNode, callOffset, 1)
+
+ // Equal weight, but IR missing.
+ //
+ // N.B. example.com/bar sorts lexicographically before example.com/foo,
+ // so if the IR availability of hotCalleeNode doesn't get precedence,
+ // this would be mistakenly selected.
+ addEdge(callerNode, hotMissingCalleeNode, callOffset, 10)
+
+ // IFace.Foo()
+ sel := typecheck.NewMethodExpr(src.NoXPos, iface, typecheck.Lookup("Foo"))
+ call := ir.NewCallExpr(makePos(basePos, callerStart+callOffset, 1), ir.OCALLINTER, sel, nil)
+
+ gotFn, gotWeight := findHotConcreteInterfaceCallee(p.Profile(), callerFn, call)
+ if gotFn != hotCalleeFn {
+ t.Errorf("findHotConcreteInterfaceCallee func got %v want %v", gotFn, hotCalleeFn)
+ }
+ if gotWeight != 10 {
+ t.Errorf("findHotConcreteInterfaceCallee weight got %v want 10", gotWeight)
+ }
+}
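
The scenario encoded above corresponds roughly to this source shape
(illustrative only; Caller's body is never built by the test):

    package foo

    type IFace interface{ Foo() }

    func Caller(i IFace) { // starts at line 42 of the synthetic file base
            i.Foo() // line 43, offset 1: the profile marks HotCallee as the hot receiver
    }
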
+
+func TestFindHotConcreteFunctionCallee(t *testing.T) {
+ // TestFindHotConcreteInterfaceCallee already covered basic weight
+ // comparisons, which is shared logic. Here we just test type signature
+ // disambiguation.
+
+ p := newProfileBuilder()
+
+ pkgFoo := types.NewPkg("example.com/foo", "foo")
+ basePos := src.NewFileBase("foo.go", "/foo.go")
+
+ const (
+ // Caller start line.
+ callerStart = 42
+
+ // The line offset of the call we care about.
+ callOffset = 1
+ )
+
+ callerFn := ir.NewFunc(makePos(basePos, callerStart, 1), src.NoXPos, pkgFoo.Lookup("Caller"), types.NewSignature(nil, nil, nil))
+
+ // func HotCallee()
+ hotCalleeFn := ir.NewFunc(src.NoXPos, src.NoXPos, pkgFoo.Lookup("HotCallee"), types.NewSignature(nil, nil, nil))
+
+ // func WrongCallee() bool
+ wrongCalleeFn := ir.NewFunc(src.NoXPos, src.NoXPos, pkgFoo.Lookup("WrongCallee"), types.NewSignature(nil, nil,
+ []*types.Field{
+ types.NewField(src.NoXPos, nil, types.Types[types.TBOOL]),
+ },
+ ))
+
+ callerNode := p.NewNode("example.com/foo.Caller", callerFn)
+ hotCalleeNode := p.NewNode("example.com/foo.HotCallee", hotCalleeFn)
+ wrongCalleeNode := p.NewNode("example.com/foo.WrongCallee", wrongCalleeFn)
+
+ addEdge(callerNode, wrongCalleeNode, callOffset, 100) // Really hot, but wrong function type.
+ addEdge(callerNode, hotCalleeNode, callOffset, 10)
+
+ // var fn func()
+ name := ir.NewNameAt(src.NoXPos, typecheck.Lookup("fn"), types.NewSignature(nil, nil, nil))
+ // fn()
+ call := ir.NewCallExpr(makePos(basePos, callerStart+callOffset, 1), ir.OCALL, name, nil)
+
+ gotFn, gotWeight := findHotConcreteFunctionCallee(p.Profile(), callerFn, call)
+ if gotFn != hotCalleeFn {
+ t.Errorf("findHotConcreteFunctionCallee func got %v want %v", gotFn, hotCalleeFn)
+ }
+ if gotWeight != 10 {
+ t.Errorf("findHotConcreteFunctionCallee weight got %v want 10", gotWeight)
+ }
+}
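
The second test models an indirect call through a function value; a hedged
source-level sketch:

    package foo

    func Caller(fn func()) {
            fn() // candidate for PGO devirtualization to HotCallee; WrongCallee
            // returns a bool, so its mismatched signature must be rejected
    }

From a Go checkout, something like "go test cmd/compile/internal/devirtualize"
should exercise both new tests.
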
diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go
index 886250a62f..e9553d1185 100644
--- a/src/cmd/compile/internal/dwarfgen/dwarf.go
+++ b/src/cmd/compile/internal/dwarfgen/dwarf.go
@@ -23,7 +23,7 @@ import (
"cmd/internal/src"
)
-func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) (scopes []dwarf.Scope, inlcalls dwarf.InlCalls, startPos src.XPos) {
+func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn obj.Func) (scopes []dwarf.Scope, inlcalls dwarf.InlCalls) {
fn := curfn.(*ir.Func)
if fn.Nname != nil {
@@ -128,7 +128,7 @@ func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) (scopes []dwarf
if base.Flag.GenDwarfInl > 0 {
inlcalls = assembleInlines(fnsym, dwarfVars)
}
- return scopes, inlcalls, fn.Pos()
+ return scopes, inlcalls
}
func declPos(decl *ir.Name) src.XPos {
@@ -204,7 +204,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
if c == '.' || n.Type().IsUntyped() {
continue
}
- if n.Class == ir.PPARAM && !ssagen.TypeOK(n.Type()) {
+ if n.Class == ir.PPARAM && !ssa.CanSSA(n.Type()) {
// SSA-able args get location lists, and may move in and
// out of registers, so those are handled elsewhere.
// Autos and named output params seem to get handled
@@ -270,13 +270,10 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
func sortDeclsAndVars(fn *ir.Func, decls []*ir.Name, vars []*dwarf.Var) {
paramOrder := make(map[*ir.Name]int)
idx := 1
- for _, selfn := range types.RecvsParamsResults {
- fsl := selfn(fn.Type()).FieldSlice()
- for _, f := range fsl {
- if n, ok := f.Nname.(*ir.Name); ok {
- paramOrder[n] = idx
- idx++
- }
+ for _, f := range fn.Type().RecvParamsResults() {
+ if n, ok := f.Nname.(*ir.Name); ok {
+ paramOrder[n] = idx
+ idx++
}
}
sort.Stable(varsAndDecls{decls, vars, paramOrder})
@@ -326,7 +323,7 @@ func preInliningDcls(fnsym *obj.LSym) []*ir.Name {
c := n.Sym().Name[0]
// Avoid reporting "_" parameters, since if there are more than
// one, it can result in a collision later on, as in #23179.
- if unversion(n.Sym().Name) == "_" || c == '.' || n.Type().IsUntyped() {
+ if n.Sym().Name == "_" || c == '.' || n.Type().IsUntyped() {
continue
}
rdcl = append(rdcl, n)
@@ -527,9 +524,7 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var
// in the DWARF info.
func RecordFlags(flags ...string) {
if base.Ctxt.Pkgpath == "" {
- // We can't record the flags if we don't know what the
- // package name is.
- return
+ panic("missing pkgpath")
}
type BoolFlag interface {
diff --git a/src/cmd/compile/internal/dwarfgen/dwinl.go b/src/cmd/compile/internal/dwarfgen/dwinl.go
index 99e1ce9a81..655e7c66ac 100644
--- a/src/cmd/compile/internal/dwarfgen/dwinl.go
+++ b/src/cmd/compile/internal/dwarfgen/dwinl.go
@@ -124,18 +124,16 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
// caller.
synthCount := len(m)
for _, v := range sl {
- canonName := unversion(v.Name)
vp := varPos{
- DeclName: canonName,
+ DeclName: v.Name,
DeclFile: v.DeclFile,
DeclLine: v.DeclLine,
DeclCol: v.DeclCol,
}
- synthesized := strings.HasPrefix(v.Name, "~r") || canonName == "_" || strings.HasPrefix(v.Name, "~b")
+ synthesized := strings.HasPrefix(v.Name, "~") || v.Name == "_"
if idx, found := m[vp]; found {
v.ChildIndex = int32(idx)
v.IsInAbstract = !synthesized
- v.Name = canonName
} else {
// Variable can't be found in the pre-inline dcl list.
// In the top-level case (ii=0) this can happen
@@ -217,16 +215,7 @@ func AbstractFunc(fn *obj.LSym) {
if base.Debug.DwarfInl != 0 {
base.Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
}
- base.Ctxt.DwarfAbstractFunc(ifn, fn, base.Ctxt.Pkgpath)
-}
-
-// Undo any versioning performed when a name was written
-// out as part of export data.
-func unversion(name string) string {
- if i := strings.Index(name, "·"); i > 0 {
- name = name[:i]
- }
- return name
+ base.Ctxt.DwarfAbstractFunc(ifn, fn)
}
// Given a function that was inlined as part of the compilation, dig
@@ -241,7 +230,7 @@ func makePreinlineDclMap(fnsym *obj.LSym) map[varPos]int {
for i, n := range dcl {
pos := base.Ctxt.InnermostPos(n.Pos())
vp := varPos{
- DeclName: unversion(n.Sym().Name),
+ DeclName: n.Sym().Name,
DeclFile: pos.RelFilename(),
DeclLine: pos.RelLine(),
DeclCol: pos.RelCol(),
@@ -273,13 +262,11 @@ func insertInlCall(dwcalls *dwarf.InlCalls, inlIdx int, imap map[int]int) int {
// Create new entry for this inline
inlinedFn := base.Ctxt.InlTree.InlinedFunction(inlIdx)
callXPos := base.Ctxt.InlTree.CallPos(inlIdx)
- callPos := base.Ctxt.PosTable.Pos(callXPos)
- callFileSym := base.Ctxt.Lookup(callPos.Base().SymFilename())
+ callPos := base.Ctxt.InnermostPos(callXPos)
absFnSym := base.Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
ic := dwarf.InlCall{
InlIndex: inlIdx,
- CallFile: callFileSym,
- CallLine: uint32(callPos.RelLine()),
+ CallPos: callPos,
AbsFunSym: absFnSym,
Root: parCallIdx == -1,
}
diff --git a/src/cmd/compile/internal/dwarfgen/scope_test.go b/src/cmd/compile/internal/dwarfgen/scope_test.go
index ae4a87c52a..ee4170ef44 100644
--- a/src/cmd/compile/internal/dwarfgen/scope_test.go
+++ b/src/cmd/compile/internal/dwarfgen/scope_test.go
@@ -50,13 +50,14 @@ type testline struct {
var testfile = []testline{
{line: "package main"},
+ {line: "var sink any"},
{line: "func f1(x int) { }"},
{line: "func f2(x int) { }"},
{line: "func f3(x int) { }"},
{line: "func f4(x int) { }"},
{line: "func f5(x int) { }"},
{line: "func f6(x int) { }"},
- {line: "func fi(x interface{}) { if a, ok := x.(error); ok { a.Error() } }"},
+ {line: "func leak(x interface{}) { sink = x }"},
{line: "func gret1() int { return 2 }"},
{line: "func gretbool() bool { return true }"},
{line: "func gret3() (int, int, int) { return 0, 1, 2 }"},
@@ -177,7 +178,7 @@ var testfile = []testline{
{line: " b := 2", scopes: []int{1}, vars: []string{"var &b *int", "var p *int"}},
{line: " p := &b", scopes: []int{1}},
{line: " f1(a)", scopes: []int{1}},
- {line: " fi(p)", scopes: []int{1}},
+ {line: " leak(p)", scopes: []int{1}},
{line: " }"},
{line: "}"},
{line: "var fglob func() int"},
diff --git a/src/cmd/compile/internal/escape/assign.go b/src/cmd/compile/internal/escape/assign.go
index 80697bf37b..6af5388683 100644
--- a/src/cmd/compile/internal/escape/assign.go
+++ b/src/cmd/compile/internal/escape/assign.go
@@ -39,10 +39,14 @@ func (e *escape) addr(n ir.Node) hole {
if n.X.Type().IsArray() {
k = e.addr(n.X)
} else {
- e.discard(n.X)
+ e.mutate(n.X)
}
- case ir.ODEREF, ir.ODOTPTR:
- e.discard(n)
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ e.mutate(n.X)
+ case ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ e.mutate(n.X)
case ir.OINDEXMAP:
n := n.(*ir.IndexExpr)
e.discard(n.X)
@@ -52,6 +56,10 @@ func (e *escape) addr(n ir.Node) hole {
return k
}
+func (e *escape) mutate(n ir.Node) {
+ e.expr(e.mutatorHole(), n)
+}
+
func (e *escape) addrs(l ir.Nodes) []hole {
var ks []hole
for _, n := range l {
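
The new mutate helper routes the operand of an indirect store to the mutator
pseudo-location instead of discarding it, which is what later allows a
parameter to be tagged as mutated rather than leaking. A hedged illustration
(diagnostic wording approximated from the escape.go changes below, and only
printed when base.Debug.EscapeMutationsCalls is enabled):

    package p

    func set(p *int) { *p = 1 } // roughly: "mutates param: p derefs=0"

    var sink *int

    func keep(p *int) { sink = p } // still: "leaking param: p"
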
diff --git a/src/cmd/compile/internal/escape/call.go b/src/cmd/compile/internal/escape/call.go
index c69eca1998..4a3753ada9 100644
--- a/src/cmd/compile/internal/escape/call.go
+++ b/src/cmd/compile/internal/escape/call.go
@@ -16,38 +16,9 @@ import (
// should contain the holes representing where the function callee's
// results flows.
func (e *escape) call(ks []hole, call ir.Node) {
- var init ir.Nodes
- e.callCommon(ks, call, &init, nil)
- if len(init) != 0 {
- call.(ir.InitNode).PtrInit().Append(init...)
- }
-}
-
-func (e *escape) callCommon(ks []hole, call ir.Node, init *ir.Nodes, wrapper *ir.Func) {
-
- // argumentPragma handles escape analysis of argument *argp to the
- // given hole. If the function callee is known, pragma is the
- // function's pragma flags; otherwise 0.
- argumentFunc := func(fn *ir.Name, k hole, argp *ir.Node) {
- e.rewriteArgument(argp, init, call, fn, wrapper)
-
- e.expr(k.note(call, "call parameter"), *argp)
- }
-
- argument := func(k hole, argp *ir.Node) {
- argumentFunc(nil, k, argp)
- }
-
- argumentRType := func(rtypep *ir.Node) {
- rtype := *rtypep
- if rtype == nil {
- return
- }
- // common case: static rtype/itab argument, which can be evaluated within the wrapper instead.
- if addr, ok := rtype.(*ir.AddrExpr); ok && addr.Op() == ir.OADDR && addr.X.Op() == ir.OLINKSYMOFFSET {
- return
- }
- e.wrapExpr(rtype.Pos(), rtypep, init, call, wrapper)
+ argument := func(k hole, arg ir.Node) {
+ // TODO(mdempsky): Should be "call argument".
+ e.expr(k.note(call, "call parameter"), arg)
}
switch call.Op() {
@@ -55,7 +26,7 @@ func (e *escape) callCommon(ks []hole, call ir.Node, init *ir.Nodes, wrapper *ir
ir.Dump("esc", call)
base.Fatalf("unexpected call op: %v", call.Op())
- case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+ case ir.OCALLFUNC, ir.OCALLINTER:
call := call.(*ir.CallExpr)
typecheck.AssertFixedCall(call)
@@ -68,64 +39,62 @@ func (e *escape) callCommon(ks []hole, call ir.Node, init *ir.Nodes, wrapper *ir
var fn *ir.Name
switch call.Op() {
case ir.OCALLFUNC:
- // If we have a direct call to a closure (not just one we were
- // able to statically resolve with ir.StaticValue), mark it as
- // such so batch.outlives can optimize the flow results.
- if call.X.Op() == ir.OCLOSURE {
- call.X.(*ir.ClosureExpr).Func.SetClosureCalled(true)
- }
-
- switch v := ir.StaticValue(call.X); v.Op() {
- case ir.ONAME:
- if v := v.(*ir.Name); v.Class == ir.PFUNC {
- fn = v
- }
- case ir.OCLOSURE:
- fn = v.(*ir.ClosureExpr).Func.Nname
- case ir.OMETHEXPR:
- fn = ir.MethodExprName(v)
- }
- case ir.OCALLMETH:
- base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
+ v := ir.StaticValue(call.Fun)
+ fn = ir.StaticCalleeName(v)
}
- fntype := call.X.Type()
+ fntype := call.Fun.Type()
if fn != nil {
fntype = fn.Type()
}
if ks != nil && fn != nil && e.inMutualBatch(fn) {
- for i, result := range fn.Type().Results().FieldSlice() {
- e.expr(ks[i], ir.AsNode(result.Nname))
+ for i, result := range fn.Type().Results() {
+ e.expr(ks[i], result.Nname.(*ir.Name))
}
}
- var recvp *ir.Node
+ var recvArg ir.Node
if call.Op() == ir.OCALLFUNC {
// Evaluate callee function expression.
- //
- // Note: We use argument and not argumentFunc, because while
- // call.X here may be an argument to runtime.{new,defer}proc,
- // it's not an argument to fn itself.
- argument(e.discardHole(), &call.X)
+ calleeK := e.discardHole()
+ if fn == nil { // unknown callee
+ for _, k := range ks {
+ if k.dst != &e.blankLoc {
+ // The results flow somewhere, but we don't statically
+ // know the callee function. If a closure flows here, we
+ // need to conservatively assume its results might flow to
+ // the heap.
+ calleeK = e.calleeHole().note(call, "callee operand")
+ break
+ }
+ }
+ }
+ e.expr(calleeK, call.Fun)
} else {
- recvp = &call.X.(*ir.SelectorExpr).X
+ recvArg = call.Fun.(*ir.SelectorExpr).X
+ }
+
+ // argumentParam handles escape analysis of assigning a call
+ // argument to its corresponding parameter.
+ argumentParam := func(param *types.Field, arg ir.Node) {
+ e.rewriteArgument(arg, call, fn)
+ argument(e.tagHole(ks, fn, param), arg)
}
args := call.Args
- if recv := fntype.Recv(); recv != nil {
- if recvp == nil {
+ if recvParam := fntype.Recv(); recvParam != nil {
+ if recvArg == nil {
// Function call using method expression. Receiver argument is
// at the front of the regular arguments list.
- recvp = &args[0]
- args = args[1:]
+ recvArg, args = args[0], args[1:]
}
- argumentFunc(fn, e.tagHole(ks, fn, recv), recvp)
+ argumentParam(recvParam, recvArg)
}
- for i, param := range fntype.Params().FieldSlice() {
- argumentFunc(fn, e.tagHole(ks, fn, param), &args[i])
+ for i, param := range fntype.Params() {
+ argumentParam(param, args[i])
}
case ir.OINLCALL:
@@ -147,84 +116,83 @@ func (e *escape) callCommon(ks []hole, call ir.Node, init *ir.Nodes, wrapper *ir
// it has enough capacity. Alternatively, a new heap
// slice might be allocated, and all slice elements
// might flow to heap.
- appendeeK := ks[0]
+ appendeeK := e.teeHole(ks[0], e.mutatorHole())
if args[0].Type().Elem().HasPointers() {
appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
}
- argument(appendeeK, &args[0])
+ argument(appendeeK, args[0])
if call.IsDDD {
appendedK := e.discardHole()
if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
appendedK = e.heapHole().deref(call, "appended slice...")
}
- argument(appendedK, &args[1])
+ argument(appendedK, args[1])
} else {
for i := 1; i < len(args); i++ {
- argument(e.heapHole(), &args[i])
+ argument(e.heapHole(), args[i])
}
}
- argumentRType(&call.RType)
+ e.discard(call.RType)
case ir.OCOPY:
call := call.(*ir.BinaryExpr)
- argument(e.discardHole(), &call.X)
+ argument(e.mutatorHole(), call.X)
copiedK := e.discardHole()
if call.Y.Type().IsSlice() && call.Y.Type().Elem().HasPointers() {
copiedK = e.heapHole().deref(call, "copied slice")
}
- argument(copiedK, &call.Y)
- argumentRType(&call.RType)
+ argument(copiedK, call.Y)
+ e.discard(call.RType)
case ir.OPANIC:
call := call.(*ir.UnaryExpr)
- argument(e.heapHole(), &call.X)
+ argument(e.heapHole(), call.X)
case ir.OCOMPLEX:
call := call.(*ir.BinaryExpr)
- argument(e.discardHole(), &call.X)
- argument(e.discardHole(), &call.Y)
+ e.discard(call.X)
+ e.discard(call.Y)
+
+ case ir.ODELETE, ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP:
+ call := call.(*ir.CallExpr)
+ for _, arg := range call.Args {
+ e.discard(arg)
+ }
+ e.discard(call.RType)
- case ir.ODELETE, ir.OMAX, ir.OMIN, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+ case ir.OMIN, ir.OMAX:
call := call.(*ir.CallExpr)
- fixRecoverCall(call)
- for i := range call.Args {
- argument(e.discardHole(), &call.Args[i])
+ for _, arg := range call.Args {
+ argument(ks[0], arg)
}
- argumentRType(&call.RType)
+ e.discard(call.RType)
+
+ case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
+ call := call.(*ir.UnaryExpr)
+ e.discard(call.X)
- case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE, ir.OCLEAR:
+ case ir.OCLEAR:
call := call.(*ir.UnaryExpr)
- argument(e.discardHole(), &call.X)
+ argument(e.mutatorHole(), call.X)
case ir.OUNSAFESTRINGDATA, ir.OUNSAFESLICEDATA:
call := call.(*ir.UnaryExpr)
- argument(ks[0], &call.X)
+ argument(ks[0], call.X)
case ir.OUNSAFEADD, ir.OUNSAFESLICE, ir.OUNSAFESTRING:
call := call.(*ir.BinaryExpr)
- argument(ks[0], &call.X)
- argument(e.discardHole(), &call.Y)
- argumentRType(&call.RType)
+ argument(ks[0], call.X)
+ e.discard(call.Y)
+ e.discard(call.RType)
}
}
// goDeferStmt analyzes a "go" or "defer" statement.
-//
-// In the process, it also normalizes the statement to always use a
-// simple function call with no arguments and no results. For example,
-// it rewrites:
-//
-// defer f(x, y)
-//
-// into:
-//
-// x1, y1 := x, y
-// defer func() { f(x1, y1) }()
func (e *escape) goDeferStmt(n *ir.GoDeferStmt) {
k := e.heapHole()
- if n.Op() == ir.ODEFER && e.loopDepth == 1 {
+ if n.Op() == ir.ODEFER && e.loopDepth == 1 && n.DeferAt == nil {
// Top-level defer arguments don't escape to the heap,
// but they do need to last until they're invoked.
k = e.later(e.discardHole())
@@ -234,145 +202,75 @@ func (e *escape) goDeferStmt(n *ir.GoDeferStmt) {
n.SetEsc(ir.EscNever)
}
- call := n.Call
-
- init := n.PtrInit()
- init.Append(ir.TakeInit(call)...)
- e.stmts(*init)
-
// If the function is already a zero argument/result function call,
// just escape analyze it normally.
//
// Note that the runtime is aware of this optimization for
// "go" statements that start in reflect.makeFuncStub or
// reflect.methodValueCall.
- if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC {
- if sig := call.X.Type(); sig.NumParams()+sig.NumResults() == 0 {
- if clo, ok := call.X.(*ir.ClosureExpr); ok && n.Op() == ir.OGO {
- clo.IsGoWrap = true
- }
- e.expr(k, call.X)
- return
- }
- }
- // Create a new no-argument function that we'll hand off to defer.
- fn := ir.NewClosureFunc(n.Pos(), true)
- fn.SetWrapper(true)
- fn.Nname.SetType(types.NewSignature(nil, nil, nil))
- fn.Body = []ir.Node{call}
- if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC {
- // If the callee is a named function, link to the original callee.
- x := call.X
- if x.Op() == ir.ONAME && x.(*ir.Name).Class == ir.PFUNC {
- fn.WrappedFunc = call.X.(*ir.Name).Func
- } else if x.Op() == ir.OMETHEXPR && ir.MethodExprFunc(x).Nname != nil {
- fn.WrappedFunc = ir.MethodExprName(x).Func
- }
+ call, ok := n.Call.(*ir.CallExpr)
+ if !ok || call.Op() != ir.OCALLFUNC {
+ base.FatalfAt(n.Pos(), "expected function call: %v", n.Call)
+ }
+ if sig := call.Fun.Type(); sig.NumParams()+sig.NumResults() != 0 {
+ base.FatalfAt(n.Pos(), "expected signature without parameters or results: %v", sig)
}
- clo := fn.OClosure
- if n.Op() == ir.OGO {
+ if clo, ok := call.Fun.(*ir.ClosureExpr); ok && n.Op() == ir.OGO {
clo.IsGoWrap = true
}
- e.callCommon(nil, call, init, fn)
- e.closures = append(e.closures, closure{e.spill(k, clo), clo})
-
- // Create new top level call to closure.
- n.Call = ir.NewCallExpr(call.Pos(), ir.OCALL, clo, nil)
- ir.WithFunc(e.curfn, func() {
- typecheck.Stmt(n.Call)
- })
+ e.expr(k, call.Fun)
}
-// rewriteArgument rewrites the argument *argp of the given call expression.
+// rewriteArgument rewrites the argument arg of the given call expression.
// fn is the static callee function, if known.
-// wrapper is the go/defer wrapper function for call, if any.
-func (e *escape) rewriteArgument(argp *ir.Node, init *ir.Nodes, call ir.Node, fn *ir.Name, wrapper *ir.Func) {
- var pragma ir.PragmaFlag
- if fn != nil && fn.Func != nil {
- pragma = fn.Func.Pragma
+func (e *escape) rewriteArgument(arg ir.Node, call *ir.CallExpr, fn *ir.Name) {
+ if fn == nil || fn.Func == nil {
+ return
+ }
+ pragma := fn.Func.Pragma
+ if pragma&(ir.UintptrKeepAlive|ir.UintptrEscapes) == 0 {
+ return
}
// unsafeUintptr rewrites "uintptr(ptr)" arguments to syscall-like
// functions, so that ptr is kept alive and/or escaped as
// appropriate. unsafeUintptr also reports whether it modified arg0.
- unsafeUintptr := func(arg0 ir.Node) bool {
- if pragma&(ir.UintptrKeepAlive|ir.UintptrEscapes) == 0 {
- return false
- }
-
+ unsafeUintptr := func(arg ir.Node) {
// If the argument is really a pointer being converted to uintptr,
- // arrange for the pointer to be kept alive until the call returns,
- // by copying it into a temp and marking that temp
- // still alive when we pop the temp stack.
- if arg0.Op() != ir.OCONVNOP || !arg0.Type().IsUintptr() {
- return false
+ // arrange for the pointer to be kept alive until the call
+ // returns, by copying it into a temp and marking that temp still
+ // alive when we pop the temp stack.
+ conv, ok := arg.(*ir.ConvExpr)
+ if !ok || conv.Op() != ir.OCONVNOP {
+ return // not a conversion
}
- arg := arg0.(*ir.ConvExpr)
-
- if !arg.X.Type().IsUnsafePtr() {
- return false
+ if !conv.X.Type().IsUnsafePtr() || !conv.Type().IsUintptr() {
+ return // not an unsafe.Pointer->uintptr conversion
}
// Create and declare a new pointer-typed temp variable.
- tmp := e.wrapExpr(arg.Pos(), &arg.X, init, call, wrapper)
+ //
+ // TODO(mdempsky): This potentially violates the Go spec's order
+ // of evaluations, by evaluating arg.X before any other
+ // operands.
+ tmp := e.copyExpr(conv.Pos(), conv.X, call.PtrInit())
+ conv.X = tmp
+ k := e.mutatorHole()
if pragma&ir.UintptrEscapes != 0 {
- e.flow(e.heapHole().note(arg, "//go:uintptrescapes"), e.oldLoc(tmp))
+ k = e.heapHole().note(conv, "//go:uintptrescapes")
}
+ e.flow(k, e.oldLoc(tmp))
if pragma&ir.UintptrKeepAlive != 0 {
- call := call.(*ir.CallExpr)
-
- // SSA implements CallExpr.KeepAlive using OpVarLive, which
- // doesn't support PAUTOHEAP variables. I tried changing it to
- // use OpKeepAlive, but that ran into issues of its own.
- // For now, the easy solution is to explicitly copy to (yet
- // another) new temporary variable.
- keep := tmp
- if keep.Class == ir.PAUTOHEAP {
- keep = e.copyExpr(arg.Pos(), tmp, call.PtrInit(), wrapper, false)
- }
-
- keep.SetAddrtaken(true) // ensure SSA keeps the tmp variable
- call.KeepAlive = append(call.KeepAlive, keep)
- }
-
- return true
- }
-
- visit := func(pos src.XPos, argp *ir.Node) {
- // Optimize a few common constant expressions. By leaving these
- // untouched in the call expression, we let the wrapper handle
- // evaluating them, rather than taking up closure context space.
- switch arg := *argp; arg.Op() {
- case ir.OLITERAL, ir.ONIL, ir.OMETHEXPR:
- return
- case ir.ONAME:
- if arg.(*ir.Name).Class == ir.PFUNC {
- return
- }
- }
-
- if unsafeUintptr(*argp) {
- return
- }
-
- if wrapper != nil {
- e.wrapExpr(pos, argp, init, call, wrapper)
+ tmp.SetAddrtaken(true) // ensure SSA keeps the tmp variable
+ call.KeepAlive = append(call.KeepAlive, tmp)
}
}
- // Peel away any slice literals for better escape analyze
- // them. For example:
- //
- // go F([]int{a, b})
- //
- // If F doesn't escape its arguments, then the slice can
- // be allocated on the new goroutine's stack.
- //
// For variadic functions, the compiler has already rewritten:
//
// f(a, b, c)
@@ -382,54 +280,29 @@ func (e *escape) rewriteArgument(argp *ir.Node, init *ir.Nodes, call ir.Node, fn
// f([]T{a, b, c}...)
//
// So we need to look into slice elements to handle uintptr(ptr)
- // arguments to syscall-like functions correctly.
- if arg := *argp; arg.Op() == ir.OSLICELIT {
+ // arguments to variadic syscall-like functions correctly.
+ if arg.Op() == ir.OSLICELIT {
list := arg.(*ir.CompLitExpr).List
- for i := range list {
- el := &list[i]
- if list[i].Op() == ir.OKEY {
- el = &list[i].(*ir.KeyExpr).Value
+ for _, el := range list {
+ if el.Op() == ir.OKEY {
+ el = el.(*ir.KeyExpr).Value
}
- visit(arg.Pos(), el)
+ unsafeUintptr(el)
}
} else {
- visit(call.Pos(), argp)
- }
-}
-
-// wrapExpr replaces *exprp with a temporary variable copy. If wrapper
-// is non-nil, the variable will be captured for use within that
-// function.
-func (e *escape) wrapExpr(pos src.XPos, exprp *ir.Node, init *ir.Nodes, call ir.Node, wrapper *ir.Func) *ir.Name {
- tmp := e.copyExpr(pos, *exprp, init, e.curfn, true)
-
- if wrapper != nil {
- // Currently for "defer i.M()" if i is nil it panics at the point
- // of defer statement, not when deferred function is called. We
- // need to do the nil check outside of the wrapper.
- if call.Op() == ir.OCALLINTER && exprp == &call.(*ir.CallExpr).X.(*ir.SelectorExpr).X {
- check := ir.NewUnaryExpr(pos, ir.OCHECKNIL, ir.NewUnaryExpr(pos, ir.OITAB, tmp))
- init.Append(typecheck.Stmt(check))
- }
-
- e.oldLoc(tmp).captured = true
-
- tmp = ir.NewClosureVar(pos, wrapper, tmp)
+ unsafeUintptr(arg)
}
-
- *exprp = tmp
- return tmp
}
// copyExpr creates and returns a new temporary variable within fn;
// appends statements to init to declare and initialize it to expr;
-// and escape analyzes the data flow if analyze is true.
-func (e *escape) copyExpr(pos src.XPos, expr ir.Node, init *ir.Nodes, fn *ir.Func, analyze bool) *ir.Name {
+// and escape analyzes the data flow.
+func (e *escape) copyExpr(pos src.XPos, expr ir.Node, init *ir.Nodes) *ir.Name {
if ir.HasUniquePos(expr) {
pos = expr.Pos()
}
- tmp := typecheck.TempAt(pos, fn, expr.Type())
+ tmp := typecheck.TempAt(pos, e.curfn, expr.Type())
stmts := []ir.Node{
ir.NewDecl(pos, ir.ODCL, tmp),
@@ -438,10 +311,8 @@ func (e *escape) copyExpr(pos src.XPos, expr ir.Node, init *ir.Nodes, fn *ir.Fun
typecheck.Stmts(stmts)
init.Append(stmts...)
- if analyze {
- e.newLoc(tmp, false)
- e.stmts(stmts)
- }
+ e.newLoc(tmp, true)
+ e.stmts(stmts)
return tmp
}
@@ -457,17 +328,26 @@ func (e *escape) tagHole(ks []hole, fn *ir.Name, param *types.Field) hole {
}
if e.inMutualBatch(fn) {
- return e.addr(ir.AsNode(param.Nname))
+ if param.Nname == nil {
+ return e.discardHole()
+ }
+ return e.addr(param.Nname.(*ir.Name))
}
// Call to previously tagged function.
var tagKs []hole
-
esc := parseLeaks(param.Note)
+
if x := esc.Heap(); x >= 0 {
tagKs = append(tagKs, e.heapHole().shift(x))
}
+ if x := esc.Mutator(); x >= 0 {
+ tagKs = append(tagKs, e.mutatorHole().shift(x))
+ }
+ if x := esc.Callee(); x >= 0 {
+ tagKs = append(tagKs, e.calleeHole().shift(x))
+ }
if ks != nil {
for i := 0; i < numEscResults; i++ {
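
With the wrapper-building machinery gone, rewriteArgument only has to recognize
the uintptr(ptr) pattern for callees marked //go:uintptrkeepalive or
//go:uintptrescapes. A hedged sketch of that pattern (the callee below is made
up; real users are syscall-style wrappers):

    package p

    import "unsafe"

    //go:uintptrescapes
    func lowLevelCall(trap, arg uintptr) uintptr { return 0 } // stand-in for a syscall wrapper

    func read(p *byte) uintptr {
            // The pointer is converted to uintptr at the call site; the pragma tells
            // escape analysis to force it to escape (uintptrkeepalive would instead
            // keep it alive for the duration of the call).
            return lowLevelCall(1, uintptr(unsafe.Pointer(p)))
    }
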
diff --git a/src/cmd/compile/internal/escape/desugar.go b/src/cmd/compile/internal/escape/desugar.go
deleted file mode 100644
index b2c42947dd..0000000000
--- a/src/cmd/compile/internal/escape/desugar.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package escape
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/typecheck"
- "cmd/compile/internal/types"
-)
-
-// TODO(mdempsky): Desugaring doesn't belong during escape analysis,
-// but for now it's the most convenient place for some rewrites.
-
-// fixRecoverCall rewrites an ORECOVER call into ORECOVERFP,
-// adding an explicit frame pointer argument.
-// If call is not an ORECOVER call, it's left unmodified.
-func fixRecoverCall(call *ir.CallExpr) {
- if call.Op() != ir.ORECOVER {
- return
- }
-
- pos := call.Pos()
-
- // FP is equal to caller's SP plus FixedFrameSize.
- var fp ir.Node = ir.NewCallExpr(pos, ir.OGETCALLERSP, nil, nil)
- if off := base.Ctxt.Arch.FixedFrameSize; off != 0 {
- fp = ir.NewBinaryExpr(fp.Pos(), ir.OADD, fp, ir.NewInt(base.Pos, off))
- }
- // TODO(mdempsky): Replace *int32 with unsafe.Pointer, without upsetting checkptr.
- fp = ir.NewConvExpr(pos, ir.OCONVNOP, types.NewPtr(types.Types[types.TINT32]), fp)
-
- call.SetOp(ir.ORECOVERFP)
- call.Args = []ir.Node{typecheck.Expr(fp)}
-}
diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go
index f17ac13fe8..7df367caf7 100644
--- a/src/cmd/compile/internal/escape/escape.go
+++ b/src/cmd/compile/internal/escape/escape.go
@@ -12,6 +12,7 @@ import (
"cmd/compile/internal/logopt"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
+ "cmd/internal/src"
)
// Escape analysis.
@@ -88,8 +89,10 @@ type batch struct {
allLocs []*location
closures []closure
- heapLoc location
- blankLoc location
+ heapLoc location
+ mutatorLoc location
+ calleeLoc location
+ blankLoc location
}
// A closure holds a closure expression and its spill hole (i.e.,
@@ -115,21 +118,17 @@ type escape struct {
loopDepth int
}
-func Funcs(all []ir.Node) {
+func Funcs(all []*ir.Func) {
ir.VisitFuncsBottomUp(all, Batch)
}
// Batch performs escape analysis on a minimal batch of
// functions.
func Batch(fns []*ir.Func, recursive bool) {
- for _, fn := range fns {
- if fn.Op() != ir.ODCLFUNC {
- base.Fatalf("unexpected node: %v", fn)
- }
- }
-
var b batch
- b.heapLoc.escapes = true
+ b.heapLoc.attrs = attrEscapes | attrPersists | attrMutates | attrCalls
+ b.mutatorLoc.attrs = attrMutates
+ b.calleeLoc.attrs = attrCalls
// Construct data-flow graph from syntax trees.
for _, fn := range fns {
@@ -184,19 +183,19 @@ func (b *batch) initFunc(fn *ir.Func) {
// Allocate locations for local variables.
for _, n := range fn.Dcl {
- e.newLoc(n, false)
+ e.newLoc(n, true)
}
// Also for hidden parameters (e.g., the ".this" parameter to a
// method value wrapper).
if fn.OClosure == nil {
for _, n := range fn.ClosureVars {
- e.newLoc(n.Canonical(), false)
+ e.newLoc(n.Canonical(), true)
}
}
// Initialize resultIndex for result parameters.
- for i, f := range fn.Type().Results().FieldSlice() {
+ for i, f := range fn.Type().Results() {
e.oldLoc(f.Nname.(*ir.Name)).resultIndex = 1 + i
}
}
@@ -274,12 +273,8 @@ func (b *batch) finish(fns []*ir.Func) {
for _, fn := range fns {
fn.SetEsc(escFuncTagged)
- narg := 0
- for _, fs := range &types.RecvsParams {
- for _, f := range fs(fn.Type()).Fields().Slice() {
- narg++
- f.Note = b.paramTag(fn, narg, f)
- }
+ for i, param := range fn.Type().RecvParams() {
+ param.Note = b.paramTag(fn, 1+i, param)
}
}
@@ -288,6 +283,7 @@ func (b *batch) finish(fns []*ir.Func) {
if n == nil {
continue
}
+
if n.Op() == ir.ONAME {
n := n.(*ir.Name)
n.Opt = nil
@@ -300,7 +296,7 @@ func (b *batch) finish(fns []*ir.Func) {
// TODO(mdempsky): Update tests to expect this.
goDeferWrapper := n.Op() == ir.OCLOSURE && n.(*ir.ClosureExpr).Func.Wrapper()
- if loc.escapes {
+ if loc.hasAttr(attrEscapes) {
if n.Op() == ir.ONAME {
if base.Flag.CompilingRuntime {
base.ErrorfAt(n.Pos(), 0, "%v escapes to heap, not allowed in runtime", n)
@@ -323,7 +319,7 @@ func (b *batch) finish(fns []*ir.Func) {
base.WarnfAt(n.Pos(), "%v does not escape", n)
}
n.SetEsc(ir.EscNone)
- if loc.transient {
+ if !loc.hasAttr(attrPersists) {
switch n.Op() {
case ir.OCLOSURE:
n := n.(*ir.ClosureExpr)
@@ -337,6 +333,17 @@ func (b *batch) finish(fns []*ir.Func) {
}
}
}
+
+ // If the result of a string->[]byte conversion is never mutated,
+ // then it can simply reuse the string's memory directly.
+ if base.Debug.ZeroCopy != 0 {
+ if n, ok := n.(*ir.ConvExpr); ok && n.Op() == ir.OSTR2BYTES && !loc.hasAttr(attrMutates) {
+ if base.Flag.LowerM >= 1 {
+ base.WarnfAt(n.Pos(), "zero-copy string->[]byte conversion")
+ }
+ n.SetOp(ir.OSTR2BYTESTMP)
+ }
+ }
}
}
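
The zero-copy rewrite above applies when the []byte produced by a string
conversion is provably never written to. A hedged example of code that could
benefit, subject to the ZeroCopy debug flag checked above:

    package p

    func countSpaces(s string) int {
            b := []byte(s) // read-only use: may alias s's memory instead of copying
            n := 0
            for _, c := range b {
                    if c == ' ' {
                            n++
                    }
            }
            return n
    }
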
@@ -345,10 +352,10 @@ func (b *batch) finish(fns []*ir.Func) {
// fn has not yet been analyzed, so its parameters and results
// should be incorporated directly into the flow graph instead of
// relying on its escape analysis tagging.
-func (e *escape) inMutualBatch(fn *ir.Name) bool {
+func (b *batch) inMutualBatch(fn *ir.Name) bool {
if fn.Defn != nil && fn.Defn.Esc() < escFuncTagged {
if fn.Defn.Esc() == escFuncUnknown {
- base.Fatalf("graph inconsistency: %v", fn)
+ base.FatalfAt(fn.Pos(), "graph inconsistency: %v", fn)
}
return true
}
@@ -372,8 +379,8 @@ const (
func (b *batch) paramTag(fn *ir.Func, narg int, f *types.Field) string {
name := func() string {
- if f.Sym != nil {
- return f.Sym.Name
+ if f.Nname != nil {
+ return f.Nname.Sym().Name
}
return fmt.Sprintf("arg#%d", narg)
}
@@ -411,6 +418,8 @@ func (b *batch) paramTag(fn *ir.Func, narg int, f *types.Field) string {
if diagnose && f.Sym != nil {
base.WarnfAt(f.Pos, "%v does not escape", name())
}
+ esc.AddMutator(0)
+ esc.AddCallee(0)
} else {
if diagnose && f.Sym != nil {
base.WarnfAt(f.Pos, "leaking param: %v", name())
@@ -452,25 +461,49 @@ func (b *batch) paramTag(fn *ir.Func, narg int, f *types.Field) string {
esc := loc.paramEsc
esc.Optimize()
- if diagnose && !loc.escapes {
- if esc.Empty() {
- base.WarnfAt(f.Pos, "%v does not escape", name())
+ if diagnose && !loc.hasAttr(attrEscapes) {
+ b.reportLeaks(f.Pos, name(), esc, fn.Type())
+ }
+
+ return esc.Encode()
+}
+
+func (b *batch) reportLeaks(pos src.XPos, name string, esc leaks, sig *types.Type) {
+ warned := false
+ if x := esc.Heap(); x >= 0 {
+ if x == 0 {
+ base.WarnfAt(pos, "leaking param: %v", name)
+ } else {
+ // TODO(mdempsky): Mention level=x like below?
+ base.WarnfAt(pos, "leaking param content: %v", name)
}
- if x := esc.Heap(); x >= 0 {
- if x == 0 {
- base.WarnfAt(f.Pos, "leaking param: %v", name())
- } else {
- // TODO(mdempsky): Mention level=x like below?
- base.WarnfAt(f.Pos, "leaking param content: %v", name())
- }
+ warned = true
+ }
+ for i := 0; i < numEscResults; i++ {
+ if x := esc.Result(i); x >= 0 {
+ res := sig.Result(i).Nname.Sym().Name
+ base.WarnfAt(pos, "leaking param: %v to result %v level=%d", name, res, x)
+ warned = true
}
- for i := 0; i < numEscResults; i++ {
- if x := esc.Result(i); x >= 0 {
- res := fn.Type().Results().Field(i).Sym
- base.WarnfAt(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x)
- }
+ }
+
+ if base.Debug.EscapeMutationsCalls <= 0 {
+ if !warned {
+ base.WarnfAt(pos, "%v does not escape", name)
}
+ return
}
- return esc.Encode()
+ if x := esc.Mutator(); x >= 0 {
+ base.WarnfAt(pos, "mutates param: %v derefs=%v", name, x)
+ warned = true
+ }
+ if x := esc.Callee(); x >= 0 {
+ base.WarnfAt(pos, "calls param: %v derefs=%v", name, x)
+ warned = true
+ }
+
+ if !warned {
+ base.WarnfAt(pos, "%v does not escape, mutate, or call", name)
+ }
}
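
reportLeaks now distinguishes parameters that are merely called from ones that
leak. A hedged example of a function the new analysis can describe as calling,
but not leaking, its parameter (diagnostic wording approximate, and shown only
when base.Debug.EscapeMutationsCalls is enabled):

    package p

    func apply(f func() int) int {
            return f() // roughly: "calls param: f derefs=0"
    }
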
diff --git a/src/cmd/compile/internal/escape/expr.go b/src/cmd/compile/internal/escape/expr.go
index e5f590ddcb..6aa5ad7413 100644
--- a/src/cmd/compile/internal/escape/expr.go
+++ b/src/cmd/compile/internal/escape/expr.go
@@ -113,13 +113,13 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) {
} else {
e.expr(k, n.X)
}
- case ir.OCONVIFACE, ir.OCONVIDATA:
+ case ir.OCONVIFACE:
n := n.(*ir.ConvExpr)
if !n.X.Type().IsInterface() && !types.IsDirectIface(n.X.Type()) {
k = e.spill(k, n)
}
e.expr(k.note(n, "interface-converted"), n.X)
- case ir.OEFACE:
+ case ir.OMAKEFACE:
n := n.(*ir.BinaryExpr)
// Note: n.X is not needed because it can never point to memory that might escape.
e.expr(k, n.Y)
@@ -139,7 +139,7 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) {
e.discard(n.X)
case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OINLCALL,
- ir.OLEN, ir.OCAP, ir.OMIN, ir.OMAX, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY, ir.ORECOVER,
+ ir.OLEN, ir.OCAP, ir.OMIN, ir.OMAX, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY, ir.ORECOVERFP,
ir.OUNSAFEADD, ir.OUNSAFESLICE, ir.OUNSAFESTRING, ir.OUNSAFESTRINGDATA, ir.OUNSAFESLICEDATA:
e.call([]hole{k}, n)
@@ -250,7 +250,7 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) {
// analysis (happens for escape analysis called
// from reflectdata.methodWrapper)
if n.Op() == ir.ONAME && n.Opt == nil {
- e.with(fn).newLoc(n, false)
+ e.with(fn).newLoc(n, true)
}
}
e.walkFunc(fn)
@@ -335,7 +335,7 @@ func (e *escape) discards(l ir.Nodes) {
// its address to k, and returns a hole that flows values to it. It's
// intended for use with most expressions that allocate storage.
func (e *escape) spill(k hole, n ir.Node) hole {
- loc := e.newLoc(n, true)
+ loc := e.newLoc(n, false)
e.flow(k.addr(n, "spill"), loc)
return loc.asHole()
}
diff --git a/src/cmd/compile/internal/escape/graph.go b/src/cmd/compile/internal/escape/graph.go
index fc18f7715f..75e2546a7b 100644
--- a/src/cmd/compile/internal/escape/graph.go
+++ b/src/cmd/compile/internal/escape/graph.go
@@ -38,7 +38,7 @@ import (
// e.value(k, n.Left)
// }
-// An location represents an abstract location that stores a Go
+// A location represents an abstract location that stores a Go
// variable.
type location struct {
n ir.Node // represented variable or expression, if any
@@ -66,15 +66,8 @@ type location struct {
// in the walk queue.
queued bool
- // escapes reports whether the represented variable's address
- // escapes; that is, whether the variable must be heap
- // allocated.
- escapes bool
-
- // transient reports whether the represented expression's
- // address does not outlive the statement; that is, whether
- // its storage can be immediately reused.
- transient bool
+ // attrs is a bitset of location attributes.
+ attrs locAttr
// paramEsc records the represented parameter's leak set.
paramEsc leaks
@@ -84,6 +77,32 @@ type location struct {
addrtaken bool // has this variable's address been taken?
}
+type locAttr uint8
+
+const (
+ // attrEscapes indicates whether the represented variable's address
+ // escapes; that is, whether the variable must be heap allocated.
+ attrEscapes locAttr = 1 << iota
+
+ // attrPersists indicates whether the represented expression's
+ // address outlives the statement; that is, whether its storage
+ // cannot be immediately reused.
+ attrPersists
+
+ // attrMutates indicates whether pointers that are reachable from
+ // this location may have their addressed memory mutated. This is
+ // used to detect string->[]byte conversions that can be safely
+ // optimized away.
+ attrMutates
+
+ // attrCalls indicates whether closures that are reachable from this
+ // location may be called without tracking their results. This is
+ // used to better optimize indirect closure calls.
+ attrCalls
+)
+
+func (l *location) hasAttr(attr locAttr) bool { return l.attrs&attr != 0 }
+
// An edge represents an assignment edge between two Go variables.
type edge struct {
src *location
@@ -100,7 +119,7 @@ func (l *location) leakTo(sink *location, derefs int) {
// If sink is a result parameter that doesn't escape (#44614)
// and we can fit return bits into the escape analysis tag,
// then record as a result leak.
- if !sink.escapes && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
+ if !sink.hasAttr(attrEscapes) && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
ri := sink.resultIndex - 1
if ri < numEscResults {
// Leak to result parameter.
@@ -113,6 +132,35 @@ func (l *location) leakTo(sink *location, derefs int) {
l.paramEsc.AddHeap(derefs)
}
+// leakTo records that parameter l leaks to sink.
+func (b *batch) leakTo(l, sink *location, derefs int) {
+ if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.hasAttr(attrEscapes) {
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(sink), derefs)
+ }
+ explanation := b.explainPath(sink, l)
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
+ fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(sink), derefs), explanation)
+ }
+ }
+
+ // If sink is a result parameter that doesn't escape (#44614)
+ // and we can fit return bits into the escape analysis tag,
+ // then record as a result leak.
+ if !sink.hasAttr(attrEscapes) && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
+ if ri := sink.resultIndex - 1; ri < numEscResults {
+ // Leak to result parameter.
+ l.paramEsc.AddResult(ri, derefs)
+ return
+ }
+ }
+
+ // Otherwise, record as heap leak.
+ l.paramEsc.AddHeap(derefs)
+}
+
func (l *location) isName(c ir.Class) bool {
return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class == c
}
@@ -182,7 +230,7 @@ func (b *batch) flow(k hole, src *location) {
if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ...
return
}
- if dst.escapes && k.derefs < 0 { // dst = &src
+ if dst.hasAttr(attrEscapes) && k.derefs < 0 { // dst = &src
if base.Flag.LowerM >= 2 || logopt.Enabled() {
pos := base.FmtPos(src.n.Pos())
if base.Flag.LowerM >= 2 {
@@ -195,7 +243,7 @@ func (b *batch) flow(k hole, src *location) {
}
}
- src.escapes = true
+ src.attrs |= attrEscapes | attrPersists | attrMutates | attrCalls
return
}
@@ -204,16 +252,18 @@ func (b *batch) flow(k hole, src *location) {
}
func (b *batch) heapHole() hole { return b.heapLoc.asHole() }
+func (b *batch) mutatorHole() hole { return b.mutatorLoc.asHole() }
+func (b *batch) calleeHole() hole { return b.calleeLoc.asHole() }
func (b *batch) discardHole() hole { return b.blankLoc.asHole() }
func (b *batch) oldLoc(n *ir.Name) *location {
if n.Canonical().Opt == nil {
- base.Fatalf("%v has no location", n)
+ base.FatalfAt(n.Pos(), "%v has no location", n)
}
return n.Canonical().Opt.(*location)
}
-func (e *escape) newLoc(n ir.Node, transient bool) *location {
+func (e *escape) newLoc(n ir.Node, persists bool) *location {
if e.curfn == nil {
base.Fatalf("e.curfn isn't set")
}
@@ -223,14 +273,16 @@ func (e *escape) newLoc(n ir.Node, transient bool) *location {
if n != nil && n.Op() == ir.ONAME {
if canon := n.(*ir.Name).Canonical(); n != canon {
- base.Fatalf("newLoc on non-canonical %v (canonical is %v)", n, canon)
+ base.FatalfAt(n.Pos(), "newLoc on non-canonical %v (canonical is %v)", n, canon)
}
}
loc := &location{
n: n,
curfn: e.curfn,
loopDepth: e.loopDepth,
- transient: transient,
+ }
+ if persists {
+ loc.attrs |= attrPersists
}
e.allLocs = append(e.allLocs, loc)
if n != nil {
@@ -239,11 +291,11 @@ func (e *escape) newLoc(n ir.Node, transient bool) *location {
if n.Class == ir.PPARAM && n.Curfn == nil {
// ok; hidden parameter
} else if n.Curfn != e.curfn {
- base.Fatalf("curfn mismatch: %v != %v for %v", n.Curfn, e.curfn, n)
+ base.FatalfAt(n.Pos(), "curfn mismatch: %v != %v for %v", n.Curfn, e.curfn, n)
}
if n.Opt != nil {
- base.Fatalf("%v already has a location", n)
+ base.FatalfAt(n.Pos(), "%v already has a location", n)
}
n.Opt = loc
}
@@ -265,7 +317,7 @@ func (e *escape) teeHole(ks ...hole) hole {
// Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a
// new temporary location ltmp, wire it into place, and return
// a hole for "ltmp = _".
- loc := e.newLoc(nil, true)
+ loc := e.newLoc(nil, false)
for _, k := range ks {
// N.B., "p = &q" and "p = &tmp; tmp = q" are not
// semantically equivalent. To combine holes like "l1
@@ -285,7 +337,7 @@ func (e *escape) teeHole(ks ...hole) hole {
// Its main effect is to prevent immediate reuse of temporary
// variables introduced during Order.
func (e *escape) later(k hole) hole {
- loc := e.newLoc(nil, false)
+ loc := e.newLoc(nil, true)
e.flow(k, loc)
return loc.asHole()
}
diff --git a/src/cmd/compile/internal/escape/leaks.go b/src/cmd/compile/internal/escape/leaks.go
index 1432607c2d..942f87d2a2 100644
--- a/src/cmd/compile/internal/escape/leaks.go
+++ b/src/cmd/compile/internal/escape/leaks.go
@@ -10,33 +10,53 @@ import (
"strings"
)
-const numEscResults = 7
-
-// An leaks represents a set of assignment flows from a parameter
-// to the heap or to any of its function's (first numEscResults)
-// result parameters.
-type leaks [1 + numEscResults]uint8
+// A leaks represents a set of assignment flows from a parameter to
+// the heap, mutator, callee, or to any of its function's (first
+// numEscResults) result parameters.
+type leaks [8]uint8
+
+const (
+ leakHeap = iota
+ leakMutator
+ leakCallee
+ leakResult0
+)
-// Empty reports whether l is an empty set (i.e., no assignment flows).
-func (l leaks) Empty() bool { return l == leaks{} }
+const numEscResults = len(leaks{}) - leakResult0
// Heap returns the minimum deref count of any assignment flow from l
// to the heap. If no such flows exist, Heap returns -1.
-func (l leaks) Heap() int { return l.get(0) }
+func (l leaks) Heap() int { return l.get(leakHeap) }
+
+// Mutator returns the minimum deref count of any assignment flow from
+// l to the pointer operand of an indirect assignment statement. If no
+// such flows exist, Mutator returns -1.
+func (l leaks) Mutator() int { return l.get(leakMutator) }
+
+// Callee returns the minimum deref count of any assignment flow from
+// l to the callee operand of a call expression. If no such flows exist,
+// Callee returns -1.
+func (l leaks) Callee() int { return l.get(leakCallee) }
// Result returns the minimum deref count of any assignment flow from
// l to its function's i'th result parameter. If no such flows exist,
// Result returns -1.
-func (l leaks) Result(i int) int { return l.get(1 + i) }
+func (l leaks) Result(i int) int { return l.get(leakResult0 + i) }
// AddHeap adds an assignment flow from l to the heap.
-func (l *leaks) AddHeap(derefs int) { l.add(0, derefs) }
+func (l *leaks) AddHeap(derefs int) { l.add(leakHeap, derefs) }
+
+// AddMutator adds a flow from l to the mutator (i.e., a pointer
+// operand of an indirect assignment statement).
+func (l *leaks) AddMutator(derefs int) { l.add(leakMutator, derefs) }
+
+// AddCallee adds an assignment flow from l to the callee operand of a
+// call expression.
+func (l *leaks) AddCallee(derefs int) { l.add(leakCallee, derefs) }
// AddResult adds an assignment flow from l to its function's i'th
// result parameter.
-func (l *leaks) AddResult(i, derefs int) { l.add(1+i, derefs) }
-
-func (l *leaks) setResult(i, derefs int) { l.set(1+i, derefs) }
+func (l *leaks) AddResult(i, derefs int) { l.add(leakResult0+i, derefs) }
func (l leaks) get(i int) int { return int(l[i]) - 1 }
@@ -64,9 +84,9 @@ func (l *leaks) Optimize() {
// If we have a path to the heap, then there's no use in
// keeping equal or longer paths elsewhere.
if x := l.Heap(); x >= 0 {
- for i := 0; i < numEscResults; i++ {
- if l.Result(i) >= x {
- l.setResult(i, -1)
+ for i := 1; i < len(*l); i++ {
+ if l.get(i) >= x {
+ l.set(i, -1)
}
}
}
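
A minimal sketch (not the compiler's actual code) of the tag encoding used by
leaks: each of the 8 slots stores derefs+1, so a zero byte means "no flow" and
get returns -1. Slot 0 is the heap, 1 the mutator, 2 the callee, and the
remaining 5 slots cover the first result parameters.

    package p

    type leaks [8]uint8

    func (l leaks) get(i int) int { return int(l[i]) - 1 }

    func (l *leaks) add(i, derefs int) {
            if old := l.get(i); old < 0 || derefs < old {
                    l[i] = uint8(derefs + 1) // the real code also clamps large values
            }
    }

Under this encoding Optimize's rule reads naturally: once a parameter leaks to
the heap at some depth, any equal-or-deeper flow recorded elsewhere adds no
information and can be dropped.
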
diff --git a/src/cmd/compile/internal/escape/solve.go b/src/cmd/compile/internal/escape/solve.go
index a2d3b6d2fd..2675a16a24 100644
--- a/src/cmd/compile/internal/escape/solve.go
+++ b/src/cmd/compile/internal/escape/solve.go
@@ -21,7 +21,7 @@ func (b *batch) walkAll() {
//
// We walk once from each location (including the heap), and
// then re-enqueue each location on its transition from
- // transient->!transient and !escapes->escapes, which can each
+ // !persists->persists and !escapes->escapes, which can each
// happen at most once. So we take Θ(len(e.allLocs)) walks.
// LIFO queue, has enough room for e.allLocs and e.heapLoc.
@@ -36,6 +36,8 @@ func (b *batch) walkAll() {
for _, loc := range b.allLocs {
enqueue(loc)
}
+ enqueue(&b.mutatorLoc)
+ enqueue(&b.calleeLoc)
enqueue(&b.heapLoc)
var walkgen uint32
@@ -61,12 +63,27 @@ func (b *batch) walkOne(root *location, walkgen uint32, enqueue func(*location))
root.derefs = 0
root.dst = nil
+ if root.hasAttr(attrCalls) {
+ if clo, ok := root.n.(*ir.ClosureExpr); ok {
+ if fn := clo.Func; b.inMutualBatch(fn.Nname) && !fn.ClosureResultsLost() {
+ fn.SetClosureResultsLost(true)
+
+ // Re-flow from the closure's results, now that we're aware
+ // we lost track of them.
+ for _, result := range fn.Type().Results() {
+ enqueue(b.oldLoc(result.Nname.(*ir.Name)))
+ }
+ }
+ }
+ }
+
todo := []*location{root} // LIFO queue
for len(todo) > 0 {
l := todo[len(todo)-1]
todo = todo[:len(todo)-1]
derefs := l.derefs
+ var newAttrs locAttr
// If l.derefs < 0, then l's address flows to root.
addressOf := derefs < 0
@@ -77,58 +94,71 @@ func (b *batch) walkOne(root *location, walkgen uint32, enqueue func(*location))
// derefs at 0.
derefs = 0
- // If l's address flows to a non-transient
- // location, then l can't be transiently
+ // If l's address flows somewhere that
+ // outlives it, then l needs to be heap
// allocated.
- if !root.transient && l.transient {
- l.transient = false
- enqueue(l)
- }
- }
-
- if b.outlives(root, l) {
- // l's value flows to root. If l is a function
- // parameter and root is the heap or a
- // corresponding result parameter, then record
- // that value flow for tagging the function
- // later.
- if l.isName(ir.PPARAM) {
- if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.escapes {
+ if b.outlives(root, l) {
+ if !l.hasAttr(attrEscapes) && (logopt.Enabled() || base.Flag.LowerM >= 2) {
if base.Flag.LowerM >= 2 {
- fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(root), derefs)
+ fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
}
explanation := b.explainPath(root, l)
if logopt.Enabled() {
var e_curfn *ir.Func // TODO(mdempsky): Fix.
- logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
- fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(root), derefs), explanation)
+ logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
}
}
- l.leakTo(root, derefs)
+ newAttrs |= attrEscapes | attrPersists | attrMutates | attrCalls
+ } else
+ // If l's address flows to a persistent location, then l needs
+ // to persist too.
+ if root.hasAttr(attrPersists) {
+ newAttrs |= attrPersists
}
+ }
- // If l's address flows somewhere that
- // outlives it, then l needs to be heap
- // allocated.
- if addressOf && !l.escapes {
- if logopt.Enabled() || base.Flag.LowerM >= 2 {
+ if derefs == 0 {
+ newAttrs |= root.attrs & (attrMutates | attrCalls)
+ }
+
+ // l's value flows to root. If l is a function
+ // parameter and root is the heap or a
+ // corresponding result parameter, then record
+ // that value flow for tagging the function
+ // later.
+ if l.isName(ir.PPARAM) {
+ if b.outlives(root, l) {
+ if !l.hasAttr(attrEscapes) && (logopt.Enabled() || base.Flag.LowerM >= 2) {
if base.Flag.LowerM >= 2 {
- fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
+ fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(root), derefs)
}
explanation := b.explainPath(root, l)
if logopt.Enabled() {
var e_curfn *ir.Func // TODO(mdempsky): Fix.
- logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
+ logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
+ fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(root), derefs), explanation)
}
}
- l.escapes = true
- enqueue(l)
+ l.leakTo(root, derefs)
+ }
+ if root.hasAttr(attrMutates) {
+ l.paramEsc.AddMutator(derefs)
+ }
+ if root.hasAttr(attrCalls) {
+ l.paramEsc.AddCallee(derefs)
+ }
+ }
+
+ if newAttrs&^l.attrs != 0 {
+ l.attrs |= newAttrs
+ enqueue(l)
+ if l.attrs&attrEscapes != 0 {
continue
}
}
for i, edge := range l.edges {
- if edge.src.escapes {
+ if edge.src.hasAttr(attrEscapes) {
continue
}
d := derefs + edge.derefs
@@ -228,21 +258,27 @@ func (b *batch) explainLoc(l *location) string {
// other's lifetime if stack allocated.
func (b *batch) outlives(l, other *location) bool {
// The heap outlives everything.
- if l.escapes {
+ if l.hasAttr(attrEscapes) {
return true
}
+ // Pseudo-locations that don't really exist.
+ if l == &b.mutatorLoc || l == &b.calleeLoc {
+ return false
+ }
+
// We don't know what callers do with returned values, so
// pessimistically we need to assume they flow to the heap and
// outlive everything too.
if l.isName(ir.PPARAMOUT) {
- // Exception: Directly called closures can return
- // locations allocated outside of them without forcing
- // them to the heap. For example:
+ // Exception: Closures can return locations allocated outside of
+ // them without forcing them to the heap, if we can statically
+ // identify all call sites. For example:
//
- // var u int // okay to stack allocate
- // *(func() *int { return &u }()) = 42
- if containsClosure(other.curfn, l.curfn) && l.curfn.ClosureCalled() {
+ // var u int // okay to stack allocate
+	//	fn := func() *int { return &u }
+ // *fn() = 42
+ if containsClosure(other.curfn, l.curfn) && !l.curfn.ClosureResultsLost() {
return false
}
@@ -253,10 +289,10 @@ func (b *batch) outlives(l, other *location) bool {
// outlives other if it was declared outside other's loop
// scope. For example:
//
- // var l *int
- // for {
- // l = new(int)
- // }
+ // var l *int
+ // for {
+ // l = new(int) // must heap allocate: outlives for loop
+ // }
if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
return true
}
@@ -264,10 +300,10 @@ func (b *batch) outlives(l, other *location) bool {
// If other is declared within a child closure of where l is
// declared, then l outlives it. For example:
//
- // var l *int
- // func() {
- // l = new(int)
- // }
+ // var l *int
+ // func() {
+ // l = new(int) // must heap allocate: outlives call frame (if not inlined)
+ // }()
if containsClosure(l.curfn, other.curfn) {
return true
}
@@ -277,8 +313,8 @@ func (b *batch) outlives(l, other *location) bool {
// containsClosure reports whether c is a closure contained within f.
func containsClosure(f, c *ir.Func) bool {
- // Common case.
- if f == c {
+ // Common cases.
+ if f == c || c.OClosure == nil {
return false
}
diff --git a/src/cmd/compile/internal/escape/stmt.go b/src/cmd/compile/internal/escape/stmt.go
index 5ae78e35fc..b766864a30 100644
--- a/src/cmd/compile/internal/escape/stmt.go
+++ b/src/cmd/compile/internal/escape/stmt.go
@@ -31,7 +31,7 @@ func (e *escape) stmt(n ir.Node) {
default:
base.Fatalf("unexpected stmt: %v", n)
- case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL, ir.OINLMARK:
+ case ir.OFALL, ir.OINLMARK:
// nop
case ir.OBREAK, ir.OCONTINUE, ir.OGOTO:
@@ -92,8 +92,9 @@ func (e *escape) stmt(n ir.Node) {
n := n.(*ir.RangeStmt)
base.Assert(!n.DistinctVars) // Should all be rewritten before escape analysis
- // X is evaluated outside the loop.
- tmp := e.newLoc(nil, false)
+ // X is evaluated outside the loop and persists until the loop
+ // terminates.
+ tmp := e.newLoc(nil, true)
e.expr(tmp.asHole(), n.X)
e.loopDepth++
@@ -176,13 +177,13 @@ func (e *escape) stmt(n ir.Node) {
e.reassigned(ks, n)
case ir.ORETURN:
n := n.(*ir.ReturnStmt)
- results := e.curfn.Type().Results().FieldSlice()
+ results := e.curfn.Type().Results()
dsts := make([]ir.Node, len(results))
for i, res := range results {
dsts[i] = res.Nname.(*ir.Name)
}
e.assignList(dsts, n.Results, "return", n)
- case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OINLCALL, ir.OCLEAR, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OINLCALL, ir.OCLEAR, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP:
e.call(nil, n)
case ir.OGO, ir.ODEFER:
n := n.(*ir.GoDeferStmt)
diff --git a/src/cmd/compile/internal/escape/utils.go b/src/cmd/compile/internal/escape/utils.go
index b481d8e4b6..bd1d2c22a2 100644
--- a/src/cmd/compile/internal/escape/utils.go
+++ b/src/cmd/compile/internal/escape/utils.go
@@ -151,7 +151,7 @@ func mayAffectMemory(n ir.Node) bool {
n := n.(*ir.ConvExpr)
return mayAffectMemory(n.X)
- case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+ case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG:
n := n.(*ir.UnaryExpr)
return mayAffectMemory(n.X)
diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go
index 4795297e7e..0f57f8ca82 100644
--- a/src/cmd/compile/internal/gc/compile.go
+++ b/src/cmd/compile/internal/gc/compile.go
@@ -16,7 +16,6 @@ import (
"cmd/compile/internal/objw"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/staticinit"
- "cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/compile/internal/walk"
"cmd/internal/obj"
@@ -39,6 +38,11 @@ func enqueueFunc(fn *ir.Func) {
return
}
+ // Don't try compiling dead hidden closure.
+ if fn.IsDeadcodeClosure() {
+ return
+ }
+
if clo := fn.OClosure; clo != nil && !ir.IsTrivialClosure(clo) {
return // we'll get this as part of its enclosing function
}
@@ -52,7 +56,7 @@ func enqueueFunc(fn *ir.Func) {
ir.InitLSym(fn, false)
types.CalcSize(fn.Type())
a := ssagen.AbiForBodylessFuncStackMap(fn)
- abiInfo := a.ABIAnalyzeFuncType(fn.Type().FuncType()) // abiInfo has spill/home locations for wrapper
+ abiInfo := a.ABIAnalyzeFuncType(fn.Type()) // abiInfo has spill/home locations for wrapper
liveness.WriteFuncMap(fn, abiInfo)
if fn.ABI == obj.ABI0 {
x := ssagen.EmitArgInfo(fn, abiInfo)
@@ -100,21 +104,15 @@ func prepareFunc(fn *ir.Func) {
// Calculate parameter offsets.
types.CalcSize(fn.Type())
- typecheck.DeclContext = ir.PAUTO
ir.CurFunc = fn
walk.Walk(fn)
ir.CurFunc = nil // enforce no further uses of CurFunc
- typecheck.DeclContext = ir.PEXTERN
}
// compileFunctions compiles all functions in compilequeue.
// It fans out nBackendWorkers to do the work
// and waits for them to complete.
func compileFunctions() {
- if len(compilequeue) == 0 {
- return
- }
-
if race.Enabled {
// Randomize compilation order to try to shake out races.
tmp := make([]*ir.Func, len(compilequeue))
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
index c9acfc1710..c93f008ba2 100644
--- a/src/cmd/compile/internal/gc/export.go
+++ b/src/cmd/compile/internal/gc/export.go
@@ -21,7 +21,7 @@ func dumpasmhdr() {
base.Fatalf("%v", err)
}
fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", types.LocalPkg.Name)
- for _, n := range typecheck.Target.Asms {
+ for _, n := range typecheck.Target.AsmHdrDecls {
if n.Sym().IsBlank() {
continue
}
@@ -39,7 +39,7 @@ func dumpasmhdr() {
break
}
fmt.Fprintf(b, "#define %s__size %d\n", n.Sym().Name, int(t.Size()))
- for _, f := range t.Fields().Slice() {
+ for _, f := range t.Fields() {
if !f.Sym.IsBlank() {
fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym().Name, f.Sym.Name, int(f.Offset))
}
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index 937d1c4751..7e5069fced 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -9,11 +9,10 @@ import (
"bytes"
"cmd/compile/internal/base"
"cmd/compile/internal/coverage"
- "cmd/compile/internal/deadcode"
- "cmd/compile/internal/devirtualize"
"cmd/compile/internal/dwarfgen"
"cmd/compile/internal/escape"
"cmd/compile/internal/inline"
+ "cmd/compile/internal/inline/interleaved"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/loopvar"
@@ -21,6 +20,7 @@ import (
"cmd/compile/internal/pgo"
"cmd/compile/internal/pkginit"
"cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/rttype"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/staticinit"
@@ -153,7 +153,7 @@ func Main(archInit func(*ssagen.ArchInfo)) {
symABIs.ReadSymABIs(base.Flag.SymABIs)
}
- if base.Compiling(base.NoInstrumentPkgs) {
+ if objabi.LookupPkgSpecial(base.Ctxt.Pkgpath).NoInstrument {
base.Flag.Race = false
base.Flag.MSan = false
base.Flag.ASan = false
@@ -187,12 +187,11 @@ func Main(archInit func(*ssagen.ArchInfo)) {
typecheck.Target = new(ir.Package)
- typecheck.NeedRuntimeType = reflectdata.NeedRuntimeType // TODO(rsc): TypeSym for lock?
-
base.AutogeneratedPos = makePos(src.NewFileBase("<autogenerated>", "<autogenerated>"), 1, 0)
typecheck.InitUniverse()
typecheck.InitRuntime()
+ rttype.Init()
// Parse and typecheck input.
noder.LoadPackage(flag.Args())
@@ -208,49 +207,11 @@ func Main(archInit func(*ssagen.ArchInfo)) {
dwarfgen.RecordPackageName()
- // Prepare for backend processing. This must happen before pkginit,
- // because it generates itabs for initializing global variables.
+ // Prepare for backend processing.
ssagen.InitConfig()
- // First part of coverage fixup (if applicable).
- var cnames coverage.Names
- if base.Flag.Cfg.CoverageInfo != nil {
- cnames = coverage.FixupVars()
- }
-
- // Create "init" function for package-scope variable initialization
- // statements, if any.
- //
- // Note: This needs to happen early, before any optimizations. The
- // Go spec defines a precise order than initialization should be
- // carried out in, and even mundane optimizations like dead code
- // removal can skew the results (e.g., #43444).
- pkginit.MakeInit()
-
- // Second part of code coverage fixup (init func modification),
- // if applicable.
- if base.Flag.Cfg.CoverageInfo != nil {
- coverage.FixupInit(cnames)
- }
-
- // Eliminate some obviously dead code.
- // Must happen after typechecking.
- for _, n := range typecheck.Target.Decls {
- if n.Op() == ir.ODCLFUNC {
- deadcode.Func(n.(*ir.Func))
- }
- }
-
- // Compute Addrtaken for names.
- // We need to wait until typechecking is done so that when we see &x[i]
- // we know that x has its address taken if x is an array, but not if x is a slice.
- // We compute Addrtaken in bulk here.
- // After this phase, we maintain Addrtaken incrementally.
- if typecheck.DirtyAddrtaken {
- typecheck.ComputeAddrtaken(typecheck.Target.Decls)
- typecheck.DirtyAddrtaken = false
- }
- typecheck.IncrementalAddrtaken = true
+ // Apply coverage fixups, if applicable.
+ coverage.Fixup()
// Read profile file and build profile-graph and weighted-call-graph.
base.Timer.Start("fe", "pgo-load-profile")
@@ -263,40 +224,21 @@ func Main(archInit func(*ssagen.ArchInfo)) {
}
}
- base.Timer.Start("fe", "pgo-devirtualization")
- if profile != nil && base.Debug.PGODevirtualize > 0 {
- // TODO(prattmic): No need to use bottom-up visit order. This
- // is mirroring the PGO IRGraph visit order, which also need
- // not be bottom-up.
- ir.VisitFuncsBottomUp(typecheck.Target.Decls, func(list []*ir.Func, recursive bool) {
- for _, fn := range list {
- devirtualize.ProfileGuided(fn, profile)
- }
- })
- ir.CurFunc = nil
- }
+ // Interleaved devirtualization and inlining.
+ base.Timer.Start("fe", "devirtualize-and-inline")
+ interleaved.DevirtualizeAndInlinePackage(typecheck.Target, profile)
- // Inlining
- base.Timer.Start("fe", "inlining")
- if base.Flag.LowerL != 0 {
- inline.InlinePackage(profile)
- }
noder.MakeWrappers(typecheck.Target) // must happen after inlining
- // Devirtualize and get variable capture right in for loops
+ // Get variable capture right in for loops.
var transformed []loopvar.VarAndLoop
- for _, n := range typecheck.Target.Decls {
- if n.Op() == ir.ODCLFUNC {
- devirtualize.Static(n.(*ir.Func))
- transformed = append(transformed, loopvar.ForCapture(n.(*ir.Func))...)
- }
+ for _, fn := range typecheck.Target.Funcs {
+ transformed = append(transformed, loopvar.ForCapture(fn)...)
}
ir.CurFunc = nil
// Build init task, if needed.
- if initTask := pkginit.Task(); initTask != nil {
- typecheck.Export(initTask)
- }
+ pkginit.MakeTask()
// Generate ABI wrappers. Must happen before escape analysis
// and doesn't benefit from dead-coding or inlining.
@@ -311,7 +253,7 @@ func Main(archInit func(*ssagen.ArchInfo)) {
// Large values are also moved off stack in escape analysis;
// because large values may contain pointers, it must happen early.
base.Timer.Start("fe", "escapes")
- escape.Funcs(typecheck.Target.Decls)
+ escape.Funcs(typecheck.Target.Funcs)
loopvar.LogTransformations(transformed)
@@ -325,23 +267,62 @@ func Main(archInit func(*ssagen.ArchInfo)) {
ir.CurFunc = nil
- // Compile top level functions.
- // Don't use range--walk can add functions to Target.Decls.
+ reflectdata.WriteBasicTypes()
+
+ // Compile top-level declarations.
+ //
+ // There are cyclic dependencies between all of these phases, so we
+ // need to iterate all of them until we reach a fixed point.
base.Timer.Start("be", "compilefuncs")
- fcount := int64(0)
- for i := 0; i < len(typecheck.Target.Decls); i++ {
- if fn, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
- // Don't try compiling dead hidden closure.
- if fn.IsDeadcodeClosure() {
- continue
+ for nextFunc, nextExtern := 0, 0; ; {
+ reflectdata.WriteRuntimeTypes()
+
+ if nextExtern < len(typecheck.Target.Externs) {
+ switch n := typecheck.Target.Externs[nextExtern]; n.Op() {
+ case ir.ONAME:
+ dumpGlobal(n)
+ case ir.OLITERAL:
+ dumpGlobalConst(n)
+ case ir.OTYPE:
+ reflectdata.NeedRuntimeType(n.Type())
}
- enqueueFunc(fn)
- fcount++
+ nextExtern++
+ continue
+ }
+
+ if nextFunc < len(typecheck.Target.Funcs) {
+ enqueueFunc(typecheck.Target.Funcs[nextFunc])
+ nextFunc++
+ continue
+ }
+
+ // The SSA backend supports using multiple goroutines, so keep it
+ // as late as possible to maximize how much work we can batch and
+ // process concurrently.
+ if len(compilequeue) != 0 {
+ compileFunctions()
+ continue
}
+
+ // Finalize DWARF inline routine DIEs, then explicitly turn off
+ // further DWARF inlining generation to avoid problems with
+ // generated method wrappers.
+ //
+ // Note: The DWARF fixup code for inlined calls currently doesn't
+ // allow multiple invocations, so we intentionally run it just
+ // once after everything else. Worst case, some generated
+ // functions have slightly larger DWARF DIEs.
+ if base.Ctxt.DwFixups != nil {
+ base.Ctxt.DwFixups.Finalize(base.Ctxt.Pkgpath, base.Debug.DwarfInl != 0)
+ base.Ctxt.DwFixups = nil
+ base.Flag.GenDwarfInl = 0
+ continue // may have called reflectdata.TypeLinksym (#62156)
+ }
+
+ break
}
- base.Timer.AddEvent(fcount, "funcs")
- compileFunctions()
+ base.Timer.AddEvent(int64(len(typecheck.Target.Funcs)), "funcs")
if base.Flag.CompilingRuntime {
// Write barriers are now known. Check the call graph.
@@ -353,15 +334,6 @@ func Main(archInit func(*ssagen.ArchInfo)) {
staticinit.AddKeepRelocations()
}
- // Finalize DWARF inline routine DIEs, then explicitly turn off
- // DWARF inlining gen so as to avoid problems with generated
- // method wrappers.
- if base.Ctxt.DwFixups != nil {
- base.Ctxt.DwFixups.Finalize(base.Ctxt.Pkgpath, base.Debug.DwarfInl != 0)
- base.Ctxt.DwFixups = nil
- base.Flag.GenDwarfInl = 0
- }
-
// Write object data to disk.
base.Timer.Start("be", "dumpobj")
dumpdata()
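The loop added above replaces the old two-stage scheme in obj.go: it drains pending externs, then functions, then the compile queue, and finally the DWARF inline fixups, repeating until no phase produces new work. Below is a minimal, self-contained sketch of that fixed-point driver pattern; the work lists and helpers are hypothetical stand-ins, not the compiler's real data structures:

package main

import "fmt"

func main() {
	externs := []string{"var A", "const B"}
	funcs := []string{"f", "g"}
	var queue []string

	emitExtern := func(s string) { fmt.Println("extern:", s) }
	enqueue := func(s string) { queue = append(queue, s) }
	compileAll := func() {
		for _, s := range queue {
			fmt.Println("compile:", s)
		}
		queue = queue[:0]
	}

	for nextFunc, nextExtern := 0, 0; ; {
		if nextExtern < len(externs) {
			emitExtern(externs[nextExtern])
			nextExtern++
			continue // emitting data may append new externs or funcs
		}
		if nextFunc < len(funcs) {
			enqueue(funcs[nextFunc])
			nextFunc++
			continue
		}
		if len(queue) != 0 {
			compileAll() // batched as late as possible, like the SSA backend
			continue
		}
		break // nothing produced new work: fixed point reached
	}
}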
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
index e895c452f2..e090cafb61 100644
--- a/src/cmd/compile/internal/gc/obj.go
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -110,43 +110,10 @@ func dumpCompilerObj(bout *bio.Writer) {
}
func dumpdata() {
- numExterns := len(typecheck.Target.Externs)
- numDecls := len(typecheck.Target.Decls)
- dumpglobls(typecheck.Target.Externs)
- reflectdata.CollectPTabs()
- numExports := len(typecheck.Target.Exports)
- addsignats(typecheck.Target.Externs)
- reflectdata.WriteRuntimeTypes()
- reflectdata.WriteTabs()
- numPTabs := reflectdata.CountPTabs()
- reflectdata.WriteImportStrings()
- reflectdata.WriteBasicTypes()
+ reflectdata.WriteGCSymbols()
+ reflectdata.WritePluginTable()
dumpembeds()
- // Calls to WriteRuntimeTypes can generate functions,
- // like method wrappers and hash and equality routines.
- // Compile any generated functions, process any new resulting types, repeat.
- // This can't loop forever, because there is no way to generate an infinite
- // number of types in a finite amount of code.
- // In the typical case, we loop 0 or 1 times.
- // It was not until issue 24761 that we found any code that required a loop at all.
- for {
- for i := numDecls; i < len(typecheck.Target.Decls); i++ {
- if n, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
- enqueueFunc(n)
- }
- }
- numDecls = len(typecheck.Target.Decls)
- compileFunctions()
- reflectdata.WriteRuntimeTypes()
- if numDecls == len(typecheck.Target.Decls) {
- break
- }
- }
-
- // Dump extra globals.
- dumpglobls(typecheck.Target.Externs[numExterns:])
-
if reflectdata.ZeroSize > 0 {
zero := base.PkgLinksym("go:map", "zero", obj.ABI0)
objw.Global(zero, int32(reflectdata.ZeroSize), obj.DUPOK|obj.RODATA)
@@ -155,14 +122,6 @@ func dumpdata() {
staticdata.WriteFuncSyms()
addGCLocals()
-
- if numExports != len(typecheck.Target.Exports) {
- base.Fatalf("Target.Exports changed after compile functions loop")
- }
- newNumPTabs := reflectdata.CountPTabs()
- if newNumPTabs != numPTabs {
- base.Fatalf("ptabs changed after compile functions loop")
- }
}
func dumpLinkerObj(bout *bio.Writer) {
@@ -198,10 +157,10 @@ func dumpGlobal(n *ir.Name) {
if n.CoverageCounter() || n.CoverageAuxVar() || n.Linksym().Static() {
return
}
- base.Ctxt.DwarfGlobal(base.Ctxt.Pkgpath, types.TypeSymName(n.Type()), n.Linksym())
+ base.Ctxt.DwarfGlobal(types.TypeSymName(n.Type()), n.Linksym())
}
-func dumpGlobalConst(n ir.Node) {
+func dumpGlobalConst(n *ir.Name) {
// only export typed constants
t := n.Type()
if t == nil {
@@ -226,19 +185,7 @@ func dumpGlobalConst(n ir.Node) {
// that type so the linker knows about it. See issue 51245.
_ = reflectdata.TypeLinksym(t)
}
- base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, types.TypeSymName(t), ir.IntVal(t, v))
-}
-
-func dumpglobls(externs []ir.Node) {
- // add globals
- for _, n := range externs {
- switch n.Op() {
- case ir.ONAME:
- dumpGlobal(n.(*ir.Name))
- case ir.OLITERAL:
- dumpGlobalConst(n)
- }
- }
+ base.Ctxt.DwarfIntConst(n.Sym().Name, types.TypeSymName(t), ir.IntVal(t, v))
}
// addGCLocals adds gcargs, gclocals, gcregs, and stack object symbols to Ctxt.Data.
@@ -335,12 +282,3 @@ func dumpembeds() {
staticdata.WriteEmbed(v)
}
}
-
-func addsignats(dcls []ir.Node) {
- // copy types from dcl list to signatset
- for _, n := range dcls {
- if n.Op() == ir.OTYPE {
- reflectdata.NeedRuntimeType(n.Type())
- }
- }
-}
diff --git a/src/cmd/compile/internal/gc/util.go b/src/cmd/compile/internal/gc/util.go
index dcac0ce79a..b82a983d9f 100644
--- a/src/cmd/compile/internal/gc/util.go
+++ b/src/cmd/compile/internal/gc/util.go
@@ -5,17 +5,34 @@
package gc
import (
+ "net/url"
"os"
+ "path/filepath"
"runtime"
"runtime/pprof"
tracepkg "runtime/trace"
+ "strings"
"cmd/compile/internal/base"
)
+func profileName(fn, suffix string) string {
+ if strings.HasSuffix(fn, string(os.PathSeparator)) {
+ err := os.MkdirAll(fn, 0755)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ }
+ if fi, statErr := os.Stat(fn); statErr == nil && fi.IsDir() {
+ fn = filepath.Join(fn, url.PathEscape(base.Ctxt.Pkgpath)+suffix)
+ }
+ return fn
+}
+
func startProfile() {
if base.Flag.CPUProfile != "" {
- f, err := os.Create(base.Flag.CPUProfile)
+ fn := profileName(base.Flag.CPUProfile, ".cpuprof")
+ f, err := os.Create(fn)
if err != nil {
base.Fatalf("%v", err)
}
@@ -28,18 +45,36 @@ func startProfile() {
if base.Flag.MemProfileRate != 0 {
runtime.MemProfileRate = base.Flag.MemProfileRate
}
- f, err := os.Create(base.Flag.MemProfile)
+ const (
+ gzipFormat = 0
+ textFormat = 1
+ )
+ // compilebench parses the memory profile to extract memstats,
+ // which are only written in the legacy (text) pprof format.
+ // See golang.org/issue/18641 and runtime/pprof/pprof.go:writeHeap.
+ // gzipFormat is what most people want; textFormat stays the default so
+ // an explicitly named output file remains parseable by compilebench.
+ var format = textFormat
+ fn := base.Flag.MemProfile
+ if strings.HasSuffix(fn, string(os.PathSeparator)) {
+ err := os.MkdirAll(fn, 0755)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ }
+ if fi, statErr := os.Stat(fn); statErr == nil && fi.IsDir() {
+ fn = filepath.Join(fn, url.PathEscape(base.Ctxt.Pkgpath)+".memprof")
+ format = gzipFormat
+ }
+
+ f, err := os.Create(fn)
+
if err != nil {
base.Fatalf("%v", err)
}
base.AtExit(func() {
// Profile all outstanding allocations.
runtime.GC()
- // compilebench parses the memory profile to extract memstats,
- // which are only written in the legacy pprof format.
- // See golang.org/issue/18641 and runtime/pprof/pprof.go:writeHeap.
- const writeLegacyFormat = 1
- if err := pprof.Lookup("heap").WriteTo(f, writeLegacyFormat); err != nil {
+ if err := pprof.Lookup("heap").WriteTo(f, format); err != nil {
base.Fatalf("%v", err)
}
})
@@ -48,7 +83,7 @@ func startProfile() {
runtime.MemProfileRate = 0
}
if base.Flag.BlockProfile != "" {
- f, err := os.Create(base.Flag.BlockProfile)
+ f, err := os.Create(profileName(base.Flag.BlockProfile, ".blockprof"))
if err != nil {
base.Fatalf("%v", err)
}
@@ -59,7 +94,7 @@ func startProfile() {
})
}
if base.Flag.MutexProfile != "" {
- f, err := os.Create(base.Flag.MutexProfile)
+ f, err := os.Create(profileName(base.Flag.MutexProfile, ".mutexprof"))
if err != nil {
base.Fatalf("%v", err)
}
@@ -70,7 +105,7 @@ func startProfile() {
})
}
if base.Flag.TraceProfile != "" {
- f, err := os.Create(base.Flag.TraceProfile)
+ f, err := os.Create(profileName(base.Flag.TraceProfile, ".trace"))
if err != nil {
base.Fatalf("%v", err)
}
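The profileName helper added above maps a flag value that ends in a path separator to a per-package file inside that directory, escaping the package path so it is safe as a file name. A standalone sketch of that mapping follows; the package path is passed explicitly here, whereas the compiler reads it from base.Ctxt.Pkgpath:

package main

import (
	"fmt"
	"net/url"
	"os"
	"path/filepath"
	"strings"
)

func profileName(fn, pkgpath, suffix string) (string, error) {
	if strings.HasSuffix(fn, string(os.PathSeparator)) {
		if err := os.MkdirAll(fn, 0755); err != nil {
			return "", err
		}
	}
	if fi, err := os.Stat(fn); err == nil && fi.IsDir() {
		fn = filepath.Join(fn, url.PathEscape(pkgpath)+suffix)
	}
	return fn, nil
}

func main() {
	dir := os.TempDir() + string(os.PathSeparator)
	name, err := profileName(dir, "example.com/some pkg", ".cpuprof")
	fmt.Println(name, err) // e.g. /tmp/example.com%2Fsome%20pkg.cpuprof
}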
diff --git a/src/cmd/compile/internal/importer/gcimporter.go b/src/cmd/compile/internal/importer/gcimporter.go
index 490cdf94df..1f7b49c8c3 100644
--- a/src/cmd/compile/internal/importer/gcimporter.go
+++ b/src/cmd/compile/internal/importer/gcimporter.go
@@ -8,6 +8,7 @@ package importer
import (
"bufio"
"bytes"
+ "errors"
"fmt"
"go/build"
"internal/pkgbits"
@@ -21,7 +22,7 @@ import (
"cmd/compile/internal/types2"
)
-var exportMap sync.Map // package dir → func() (string, bool)
+var exportMap sync.Map // package dir → func() (string, error)
// lookupGorootExport returns the location of the export data
// (normally found in the build cache, but located in GOROOT/pkg
@@ -30,37 +31,42 @@ var exportMap sync.Map // package dir → func() (string, bool)
// (We use the package's directory instead of its import path
// mainly to simplify handling of the packages in src/vendor
// and cmd/vendor.)
-func lookupGorootExport(pkgDir string) (string, bool) {
+func lookupGorootExport(pkgDir string) (string, error) {
f, ok := exportMap.Load(pkgDir)
if !ok {
var (
listOnce sync.Once
exportPath string
+ err error
)
- f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) {
+ f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) {
listOnce.Do(func() {
cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir)
cmd.Dir = build.Default.GOROOT
cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT)
var output []byte
- output, err := cmd.Output()
+ output, err = cmd.Output()
if err != nil {
+ if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+ err = errors.New(string(ee.Stderr))
+ }
return
}
exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
if len(exports) != 1 {
+ err = fmt.Errorf("go list reported %d exports; expected 1", len(exports))
return
}
exportPath = exports[0]
})
- return exportPath, exportPath != ""
+ return exportPath, err
})
}
- return f.(func() (string, bool))()
+ return f.(func() (string, error))()
}
var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension
@@ -69,10 +75,9 @@ var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have n
// path based on package information provided by build.Import (using
// the build.Default build.Context). A relative srcDir is interpreted
// relative to the current working directory.
-// If no file was found, an empty filename is returned.
-func FindPkg(path, srcDir string) (filename, id string) {
+func FindPkg(path, srcDir string) (filename, id string, err error) {
if path == "" {
- return
+ return "", "", errors.New("path is empty")
}
var noext string
@@ -83,16 +88,19 @@ func FindPkg(path, srcDir string) (filename, id string) {
if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
srcDir = abs
}
- bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
+ var bp *build.Package
+ bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
if bp.PkgObj == "" {
- var ok bool
if bp.Goroot && bp.Dir != "" {
- filename, ok = lookupGorootExport(bp.Dir)
- }
- if !ok {
- id = path // make sure we have an id to print in error message
- return
+ filename, err = lookupGorootExport(bp.Dir)
+ if err == nil {
+ _, err = os.Stat(filename)
+ }
+ if err == nil {
+ return filename, bp.ImportPath, nil
+ }
}
+ goto notfound
} else {
noext = strings.TrimSuffix(bp.PkgObj, ".a")
}
@@ -117,21 +125,23 @@ func FindPkg(path, srcDir string) (filename, id string) {
}
}
- if filename != "" {
- if f, err := os.Stat(filename); err == nil && !f.IsDir() {
- return
- }
- }
// try extensions
for _, ext := range pkgExts {
filename = noext + ext
- if f, err := os.Stat(filename); err == nil && !f.IsDir() {
- return
+ f, statErr := os.Stat(filename)
+ if statErr == nil && !f.IsDir() {
+ return filename, id, nil
+ }
+ if err == nil {
+ err = statErr
}
}
- filename = "" // not found
- return
+notfound:
+ if err == nil {
+ return "", path, fmt.Errorf("can't find import: %q", path)
+ }
+ return "", path, fmt.Errorf("can't find import: %q: %w", path, err)
}
// Import imports a gc-generated package given its import path and srcDir, adds
@@ -159,12 +169,12 @@ func Import(packages map[string]*types2.Package, path, srcDir string, lookup fun
rc = f
} else {
var filename string
- filename, id = FindPkg(path, srcDir)
+ filename, id, err = FindPkg(path, srcDir)
if filename == "" {
if path == "unsafe" {
return types2.Unsafe, nil
}
- return nil, fmt.Errorf("can't find import: %q", id)
+ return nil, err
}
// no need to re-import if the package was imported completely before
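The lookupGorootExport rewrite above keeps the same memoization shape while threading errors through: a sync.Map keyed by directory stores a closure whose body is guarded by sync.Once, so concurrent importers share a single `go list -export` invocation and its (path, error) result. A generic sketch of that pattern, with an illustrative compute function in place of the subprocess call:

package main

import (
	"fmt"
	"sync"
)

var cache sync.Map // key → func() (string, error)

func lookupOnce(key string, compute func(string) (string, error)) (string, error) {
	f, ok := cache.Load(key)
	if !ok {
		var (
			once   sync.Once
			result string
			err    error
		)
		f, _ = cache.LoadOrStore(key, func() (string, error) {
			once.Do(func() { result, err = compute(key) })
			return result, err
		})
	}
	return f.(func() (string, error))()
}

func main() {
	calls := 0
	compute := func(k string) (string, error) {
		calls++
		return "export data for " + k, nil
	}
	for i := 0; i < 3; i++ {
		v, err := lookupOnce("fmt", compute)
		fmt.Println(v, err)
	}
	fmt.Println("compute ran", calls, "time(s)") // 1: later calls reuse the memoized closure
}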
diff --git a/src/cmd/compile/internal/importer/gcimporter_test.go b/src/cmd/compile/internal/importer/gcimporter_test.go
index 96c5f69e64..7fe4445dad 100644
--- a/src/cmd/compile/internal/importer/gcimporter_test.go
+++ b/src/cmd/compile/internal/importer/gcimporter_test.go
@@ -105,9 +105,9 @@ func TestImportTestdata(t *testing.T) {
importMap := map[string]string{}
for _, pkg := range wantImports {
- export, _ := FindPkg(pkg, "testdata")
+ export, _, err := FindPkg(pkg, "testdata")
if export == "" {
- t.Fatalf("no export data found for %s", pkg)
+ t.Fatalf("no export data found for %s: %v", pkg, err)
}
importMap[pkg] = export
}
@@ -268,7 +268,7 @@ var importedObjectTests = []struct {
{"math.Pi", "const Pi untyped float"},
{"math.Sin", "func Sin(x float64) float64"},
{"go/ast.NotNilFilter", "func NotNilFilter(_ string, v reflect.Value) bool"},
- {"go/internal/gcimporter.FindPkg", "func FindPkg(path string, srcDir string) (filename string, id string)"},
+ {"go/internal/gcimporter.FindPkg", "func FindPkg(path string, srcDir string) (filename string, id string, err error)"},
// interfaces
{"context.Context", "type Context interface{Deadline() (deadline time.Time, ok bool); Done() <-chan struct{}; Err() error; Value(key any) any}"},
@@ -437,9 +437,9 @@ func TestIssue13566(t *testing.T) {
t.Fatal(err)
}
- jsonExport, _ := FindPkg("encoding/json", "testdata")
+ jsonExport, _, err := FindPkg("encoding/json", "testdata")
if jsonExport == "" {
- t.Fatalf("no export data found for encoding/json")
+ t.Fatalf("no export data found for encoding/json: %v", err)
}
compile(t, "testdata", "a.go", testoutdir, map[string]string{"encoding/json": jsonExport})
diff --git a/src/cmd/compile/internal/importer/iimport.go b/src/cmd/compile/internal/importer/iimport.go
index 24d3d4b6e7..498134755d 100644
--- a/src/cmd/compile/internal/importer/iimport.go
+++ b/src/cmd/compile/internal/importer/iimport.go
@@ -77,8 +77,6 @@ const (
unionType
)
-const io_SeekCurrent = 1 // io.SeekCurrent (not defined in Go 1.4)
-
// ImportData imports a package from the serialized package data
// and returns the number of bytes consumed and a reference to the package.
// If the export data version is not recognized or the format is otherwise
@@ -108,10 +106,10 @@ func ImportData(imports map[string]*types2.Package, data, path string) (pkg *typ
sLen := int64(r.uint64())
dLen := int64(r.uint64())
- whence, _ := r.Seek(0, io_SeekCurrent)
+ whence, _ := r.Seek(0, io.SeekCurrent)
stringData := data[whence : whence+sLen]
declData := data[whence+sLen : whence+sLen+dLen]
- r.Seek(sLen+dLen, io_SeekCurrent)
+ r.Seek(sLen+dLen, io.SeekCurrent)
p := iimporter{
exportVersion: version,
diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go
index 4ae7fa95d2..b365008c76 100644
--- a/src/cmd/compile/internal/inline/inl.go
+++ b/src/cmd/compile/internal/inline/inl.go
@@ -29,10 +29,11 @@ package inline
import (
"fmt"
"go/constant"
- "sort"
+ "internal/buildcfg"
"strconv"
"cmd/compile/internal/base"
+ "cmd/compile/internal/inline/inlheur"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/pgo"
@@ -76,8 +77,8 @@ var (
inlineHotMaxBudget int32 = 2000
)
-// pgoInlinePrologue records the hot callsites from ir-graph.
-func pgoInlinePrologue(p *pgo.Profile, decls []ir.Node) {
+// PGOInlinePrologue records the hot callsites from ir-graph.
+func PGOInlinePrologue(p *pgo.Profile, funcs []*ir.Func) {
if base.Debug.PGOInlineCDFThreshold != "" {
if s, err := strconv.ParseFloat(base.Debug.PGOInlineCDFThreshold, 64); err == nil && s >= 0 && s <= 100 {
inlineCDFHotCallSiteThresholdPercent = s
@@ -85,7 +86,7 @@ func pgoInlinePrologue(p *pgo.Profile, decls []ir.Node) {
base.Fatalf("invalid PGOInlineCDFThreshold, must be between 0 and 100")
}
}
- var hotCallsites []pgo.NodeMapKey
+ var hotCallsites []pgo.NamedCallEdge
inlineHotCallSiteThresholdPercent, hotCallsites = hotNodesFromCDF(p)
if base.Debug.PGODebug > 0 {
fmt.Printf("hot-callsite-thres-from-CDF=%v\n", inlineHotCallSiteThresholdPercent)
@@ -119,101 +120,67 @@ func pgoInlinePrologue(p *pgo.Profile, decls []ir.Node) {
// (currently only used in debug prints) (in case of equal weights,
// comparing with the threshold may not accurately reflect which nodes are
// considered hot).
-func hotNodesFromCDF(p *pgo.Profile) (float64, []pgo.NodeMapKey) {
- nodes := make([]pgo.NodeMapKey, len(p.NodeMap))
- i := 0
- for n := range p.NodeMap {
- nodes[i] = n
- i++
- }
- sort.Slice(nodes, func(i, j int) bool {
- ni, nj := nodes[i], nodes[j]
- if wi, wj := p.NodeMap[ni].EWeight, p.NodeMap[nj].EWeight; wi != wj {
- return wi > wj // want larger weight first
- }
- // same weight, order by name/line number
- if ni.CallerName != nj.CallerName {
- return ni.CallerName < nj.CallerName
- }
- if ni.CalleeName != nj.CalleeName {
- return ni.CalleeName < nj.CalleeName
- }
- return ni.CallSiteOffset < nj.CallSiteOffset
- })
+func hotNodesFromCDF(p *pgo.Profile) (float64, []pgo.NamedCallEdge) {
cum := int64(0)
- for i, n := range nodes {
- w := p.NodeMap[n].EWeight
+ for i, n := range p.NamedEdgeMap.ByWeight {
+ w := p.NamedEdgeMap.Weight[n]
cum += w
- if pgo.WeightInPercentage(cum, p.TotalEdgeWeight) > inlineCDFHotCallSiteThresholdPercent {
+ if pgo.WeightInPercentage(cum, p.TotalWeight) > inlineCDFHotCallSiteThresholdPercent {
// nodes[:i+1] to include the very last node that makes it to go over the threshold.
// (Say, if the CDF threshold is 50% and one hot node takes 60% of weight, we want to
// include that node instead of excluding it.)
- return pgo.WeightInPercentage(w, p.TotalEdgeWeight), nodes[:i+1]
+ return pgo.WeightInPercentage(w, p.TotalWeight), p.NamedEdgeMap.ByWeight[:i+1]
}
}
- return 0, nodes
+ return 0, p.NamedEdgeMap.ByWeight
}
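hotNodesFromCDF now walks call edges that the profile has already sorted by descending weight, accumulating weight until the cumulative share crosses the CDF threshold, and reports the weight percentage of the last edge it includes. A toy standalone version of that cutoff arithmetic, using plain int64 weights instead of pgo.NamedCallEdge:

package main

import "fmt"

func hotThreshold(weights []int64, total int64, cdfPercent float64) (float64, []int64) {
	cum := int64(0)
	for i, w := range weights { // weights sorted in descending order
		cum += w
		if float64(cum)/float64(total)*100 > cdfPercent {
			// include the edge that pushes us over the threshold
			return float64(w) / float64(total) * 100, weights[:i+1]
		}
	}
	return 0, weights
}

func main() {
	weights := []int64{60, 20, 10, 5, 5}
	thres, hot := hotThreshold(weights, 100, 50)
	fmt.Println(thres, hot) // 60 [60]: a single edge already covers >50% of weight
}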
-// InlinePackage finds functions that can be inlined and clones them before walk expands them.
-func InlinePackage(p *pgo.Profile) {
- if base.Debug.PGOInline == 0 {
- p = nil
+// CanInlineFuncs computes whether a batch of functions are inlinable.
+func CanInlineFuncs(funcs []*ir.Func, profile *pgo.Profile) {
+ if profile != nil {
+ PGOInlinePrologue(profile, funcs)
}
- InlineDecls(p, typecheck.Target.Decls, true)
-
- // Perform a garbage collection of hidden closures functions that
- // are no longer reachable from top-level functions following
- // inlining. See #59404 and #59638 for more context.
- garbageCollectUnreferencedHiddenClosures()
+ ir.VisitFuncsBottomUp(funcs, func(list []*ir.Func, recursive bool) {
+ CanInlineSCC(list, recursive, profile)
+ })
}
-// InlineDecls applies inlining to the given batch of declarations.
-func InlineDecls(p *pgo.Profile, decls []ir.Node, doInline bool) {
- if p != nil {
- pgoInlinePrologue(p, decls)
+// CanInlineSCC computes the inlinability of functions within an SCC
+// (strongly connected component).
+//
+// CanInlineSCC is designed to be used by ir.VisitFuncsBottomUp
+// callbacks.
+func CanInlineSCC(funcs []*ir.Func, recursive bool, profile *pgo.Profile) {
+ if base.Flag.LowerL == 0 {
+ return
}
- doCanInline := func(n *ir.Func, recursive bool, numfns int) {
+ numfns := numNonClosures(funcs)
+
+ for _, fn := range funcs {
if !recursive || numfns > 1 {
// We allow inlining if there is no
// recursion, or the recursion cycle is
// across more than one function.
- CanInline(n, p)
+ CanInline(fn, profile)
} else {
- if base.Flag.LowerM > 1 && n.OClosure == nil {
- fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Nname)
+ if base.Flag.LowerM > 1 && fn.OClosure == nil {
+ fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(fn), fn.Nname)
}
}
- }
-
- ir.VisitFuncsBottomUp(decls, func(list []*ir.Func, recursive bool) {
- numfns := numNonClosures(list)
- // We visit functions within an SCC in fairly arbitrary order,
- // so by computing inlinability for all functions in the SCC
- // before performing any inlining, the results are less
- // sensitive to the order within the SCC (see #58905 for an
- // example).
-
- // First compute inlinability for all functions in the SCC ...
- for _, n := range list {
- doCanInline(n, recursive, numfns)
- }
- // ... then make a second pass to do inlining of calls.
- if doInline {
- for _, n := range list {
- InlineCalls(n, p)
- }
+ if inlheur.Enabled() {
+ analyzeFuncProps(fn, profile)
}
- })
+ }
}
-// garbageCollectUnreferencedHiddenClosures makes a pass over all the
+// GarbageCollectUnreferencedHiddenClosures makes a pass over all the
// top-level (non-hidden-closure) functions looking for nested closure
// functions that are reachable, then sweeps through the Target.Decls
// list and marks any non-reachable hidden closure function as dead.
// See issues #59404 and #59638 for more context.
-func garbageCollectUnreferencedHiddenClosures() {
+func GarbageCollectUnreferencedHiddenClosures() {
liveFuncs := make(map[*ir.Func]bool)
@@ -230,35 +197,59 @@ func garbageCollectUnreferencedHiddenClosures() {
})
}
- for i := 0; i < len(typecheck.Target.Decls); i++ {
- if fn, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
- if fn.IsHiddenClosure() {
- continue
- }
- markLiveFuncs(fn)
+ for i := 0; i < len(typecheck.Target.Funcs); i++ {
+ fn := typecheck.Target.Funcs[i]
+ if fn.IsHiddenClosure() {
+ continue
}
+ markLiveFuncs(fn)
}
- for i := 0; i < len(typecheck.Target.Decls); i++ {
- if fn, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
- if !fn.IsHiddenClosure() {
- continue
- }
- if fn.IsDeadcodeClosure() {
- continue
- }
- if liveFuncs[fn] {
- continue
- }
- fn.SetIsDeadcodeClosure(true)
- if base.Flag.LowerM > 2 {
- fmt.Printf("%v: unreferenced closure %v marked as dead\n", ir.Line(fn), fn)
- }
- if fn.Inl != nil && fn.LSym == nil {
- ir.InitLSym(fn, true)
+ for i := 0; i < len(typecheck.Target.Funcs); i++ {
+ fn := typecheck.Target.Funcs[i]
+ if !fn.IsHiddenClosure() {
+ continue
+ }
+ if fn.IsDeadcodeClosure() {
+ continue
+ }
+ if liveFuncs[fn] {
+ continue
+ }
+ fn.SetIsDeadcodeClosure(true)
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v: unreferenced closure %v marked as dead\n", ir.Line(fn), fn)
+ }
+ if fn.Inl != nil && fn.LSym == nil {
+ ir.InitLSym(fn, true)
+ }
+ }
+}
+
+// inlineBudget determines the max budget for function 'fn' prior to
+// analyzing the hairyness of the body of 'fn'. We pass in the pgo
+// profile if available (which can change the budget), also a
+// 'relaxed' flag, which expands the budget slightly to allow for the
+// possibility that a call to the function might have its score
+// adjusted downwards. If 'verbose' is set, then print a remark where
+// we boost the budget due to PGO.
+func inlineBudget(fn *ir.Func, profile *pgo.Profile, relaxed bool, verbose bool) int32 {
+ // Update the budget for profile-guided inlining.
+ budget := int32(inlineMaxBudget)
+ if profile != nil {
+ if n, ok := profile.WeightedCG.IRNodes[ir.LinkFuncName(fn)]; ok {
+ if _, ok := candHotCalleeMap[n]; ok {
+ budget = int32(inlineHotMaxBudget)
+ if verbose {
+ fmt.Printf("hot-node enabled increased budget=%v for func=%v\n", budget, ir.PkgFuncName(fn))
+ }
}
}
}
+ if relaxed {
+ budget += inlheur.BudgetExpansion(inlineMaxBudget)
+ }
+ return budget
}
// CanInline determines whether fn is inlineable.
@@ -302,18 +293,11 @@ func CanInline(fn *ir.Func, profile *pgo.Profile) {
cc = 1 // this appears to yield better performance than 0.
}
- // Update the budget for profile-guided inlining.
- budget := int32(inlineMaxBudget)
- if profile != nil {
- if n, ok := profile.WeightedCG.IRNodes[ir.LinkFuncName(fn)]; ok {
- if _, ok := candHotCalleeMap[n]; ok {
- budget = int32(inlineHotMaxBudget)
- if base.Debug.PGODebug > 0 {
- fmt.Printf("hot-node enabled increased budget=%v for func=%v\n", budget, ir.PkgFuncName(fn))
- }
- }
- }
- }
+ // Use a "relaxed" inline budget if the new inliner is enabled.
+ relaxed := inlheur.Enabled()
+
+ // Compute the inline budget for this func.
+ budget := inlineBudget(fn, profile, relaxed, base.Debug.PGODebug > 0)
// At this point in the game the function we're looking at may
// have "stale" autos, vars that still appear in the Dcl list, but
@@ -322,10 +306,11 @@ func CanInline(fn *ir.Func, profile *pgo.Profile) {
// when creating the "Inline.Dcl" field below; to accomplish this,
// the hairyVisitor below builds up a map of used/referenced
// locals, and we use this map to produce a pruned Inline.Dcl
- // list. See issue 25249 for more context.
+ // list. See issue 25459 for more context.
visitor := hairyVisitor{
curFunc: fn,
+ isBigFunc: IsBigFunc(fn),
budget: budget,
maxBudget: budget,
extraCallCost: cc,
@@ -337,20 +322,28 @@ func CanInline(fn *ir.Func, profile *pgo.Profile) {
}
n.Func.Inl = &ir.Inline{
- Cost: budget - visitor.budget,
- Dcl: pruneUnusedAutos(n.Defn.(*ir.Func).Dcl, &visitor),
- Body: inlcopylist(fn.Body),
+ Cost: budget - visitor.budget,
+ Dcl: pruneUnusedAutos(n.Func.Dcl, &visitor),
+ HaveDcl: true,
CanDelayResults: canDelayResults(fn),
}
+ if base.Flag.LowerM != 0 || logopt.Enabled() {
+ noteInlinableFunc(n, fn, budget-visitor.budget)
+ }
+}
+// noteInlinableFunc issues a message to the user that the specified
+// function is inlinable.
+func noteInlinableFunc(n *ir.Name, fn *ir.Func, cost int32) {
if base.Flag.LowerM > 1 {
- fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, budget-visitor.budget, fn.Type(), ir.Nodes(n.Func.Inl.Body))
+ fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, cost, fn.Type(), ir.Nodes(fn.Body))
} else if base.Flag.LowerM != 0 {
fmt.Printf("%v: can inline %v\n", ir.Line(fn), n)
}
+ // JSON optimization log output.
if logopt.Enabled() {
- logopt.LogOpt(fn.Pos(), "canInlineFunction", "inline", ir.FuncName(fn), fmt.Sprintf("cost: %d", budget-visitor.budget))
+ logopt.LogOpt(fn.Pos(), "canInlineFunction", "inline", ir.FuncName(fn), fmt.Sprintf("cost: %d", cost))
}
}
@@ -421,13 +414,6 @@ func InlineImpossible(fn *ir.Func) string {
return reason
}
- // If fn is synthetic hash or eq function, cannot inline it.
- // The function is not generated in Unified IR frontend at this moment.
- if ir.IsEqOrHashFunc(fn) {
- reason = "type eq/hash function"
- return reason
- }
-
return ""
}
@@ -454,8 +440,8 @@ func canDelayResults(fn *ir.Func) bool {
}
// temporaries for return values.
- for _, param := range fn.Type().Results().FieldSlice() {
- if sym := types.OrigSym(param.Sym); sym != nil && !sym.IsBlank() {
+ for _, param := range fn.Type().Results() {
+ if sym := param.Sym; sym != nil && !sym.IsBlank() {
return false // found a named result parameter (case 3)
}
}
@@ -468,6 +454,7 @@ func canDelayResults(fn *ir.Func) bool {
type hairyVisitor struct {
// This is needed to access the current caller in the doNode function.
curFunc *ir.Func
+ isBigFunc bool
budget int32
maxBudget int32
reason string
@@ -495,6 +482,7 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
if n == nil {
return false
}
+opSwitch:
switch n.Op() {
// Call is okay if inlinable and we have the budget for the body.
case ir.OCALLFUNC:
@@ -504,24 +492,23 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
//
// runtime.throw is a "cheap call" like panic in normal code.
var cheap bool
- if n.X.Op() == ir.ONAME {
- name := n.X.(*ir.Name)
- if name.Class == ir.PFUNC && types.IsRuntimePkg(name.Sym().Pkg) {
- fn := name.Sym().Name
- if fn == "getcallerpc" || fn == "getcallersp" {
+ if n.Fun.Op() == ir.ONAME {
+ name := n.Fun.(*ir.Name)
+ if name.Class == ir.PFUNC {
+ switch fn := types.RuntimeSymName(name.Sym()); fn {
+ case "getcallerpc", "getcallersp":
v.reason = "call to " + fn
return true
- }
- if fn == "throw" {
+ case "throw":
v.budget -= inlineExtraThrowCost
- break
+ break opSwitch
+ case "panicrangeexit":
+ cheap = true
}
- }
- // Special case for reflect.noescpae. It does just type
- // conversions to appease the escape analysis, and doesn't
- // generate code.
- if name.Class == ir.PFUNC && types.IsReflectPkg(name.Sym().Pkg) {
- if name.Sym().Name == "noescape" {
+ // Special case for reflect.noescape. It does just type
+ // conversions to appease the escape analysis, and doesn't
+ // generate code.
+ if types.ReflectSymName(name.Sym()) == "noescape" {
cheap = true
}
}
@@ -539,11 +526,11 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
return false
}
}
- if n.X.Op() == ir.OMETHEXPR {
- if meth := ir.MethodExprName(n.X); meth != nil {
+ if n.Fun.Op() == ir.OMETHEXPR {
+ if meth := ir.MethodExprName(n.Fun); meth != nil {
if fn := meth.Func; fn != nil {
s := fn.Sym()
- if types.IsRuntimePkg(s.Pkg) && s.Name == "heapBits.nextArena" {
+ if types.RuntimeSymName(s) == "heapBits.nextArena" {
// Special case: explicitly allow mid-stack inlining of
// runtime.heapBits.next even though it calls slow-path
// runtime.heapBits.nextArena.
@@ -571,27 +558,30 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
break // treat like any other node, that is, cost of 1
}
- // Determine if the callee edge is for an inlinable hot callee or not.
- if v.profile != nil && v.curFunc != nil {
- if fn := inlCallee(n.X, v.profile); fn != nil && typecheck.HaveInlineBody(fn) {
- lineOffset := pgo.NodeLineOffset(n, fn)
- csi := pgo.CallSiteInfo{LineOffset: lineOffset, Caller: v.curFunc}
- if _, o := candHotEdgeMap[csi]; o {
- if base.Debug.PGODebug > 0 {
- fmt.Printf("hot-callsite identified at line=%v for func=%v\n", ir.Line(n), ir.PkgFuncName(v.curFunc))
- }
- }
- }
- }
-
if ir.IsIntrinsicCall(n) {
// Treat like any other node.
break
}
- if fn := inlCallee(n.X, v.profile); fn != nil && typecheck.HaveInlineBody(fn) {
- v.budget -= fn.Inl.Cost
- break
+ if callee := inlCallee(v.curFunc, n.Fun, v.profile); callee != nil && typecheck.HaveInlineBody(callee) {
+ // Check whether we'd actually inline this call. Set
+ // log == false since we aren't actually doing inlining
+ // yet.
+ if ok, _ := canInlineCallExpr(v.curFunc, n, callee, v.isBigFunc, false); ok {
+ // mkinlcall would inline this call [1], so use
+ // the cost of the inline body as the cost of
+ // the call, as that is what will actually
+ // appear in the code.
+ //
+ // [1] This is almost a perfect match to the
+ // mkinlcall logic, except that
+ // canInlineCallExpr considers inlining cycles
+ // by looking at what has already been inlined.
+ // Since we haven't done any inlining yet we
+ // will miss those.
+ v.budget -= callee.Inl.Cost
+ break
+ }
}
// Call cost for non-leaf inlining.
@@ -616,6 +606,8 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
v.budget -= inlineExtraPanicCost
case ir.ORECOVER:
+ base.FatalfAt(n.Pos(), "ORECOVER missed typecheck")
+ case ir.ORECOVERFP:
// recover matches the argument frame pointer to find
// the right panic value, so it needs an argument frame.
v.reason = "call to recover"
@@ -635,10 +627,7 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
// should try to account for that if we're going to account for captures.
v.budget -= 15
- case ir.OGO,
- ir.ODEFER,
- ir.ODCLTYPE, // can't print yet
- ir.OTAILCALL:
+ case ir.OGO, ir.ODEFER, ir.OTAILCALL:
v.reason = "unhandled op " + n.Op().String()
return true
@@ -670,7 +659,7 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
// This doesn't produce code, but the children might.
v.budget++ // undo default cost
- case ir.ODCLCONST, ir.OFALL, ir.OTYPE:
+ case ir.OFALL, ir.OTYPE:
// These nodes don't produce code; omit from inlining budget.
return false
@@ -725,14 +714,16 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
// particular, to avoid breaking the existing inlinability regress
// tests), we need to compensate for this here.
//
- // See also identical logic in isBigFunc.
- if init := n.Rhs[0].Init(); len(init) == 1 {
- if _, ok := init[0].(*ir.AssignListStmt); ok {
- // 4 for each value, because each temporary variable now
- // appears 3 times (DCL, LHS, RHS), plus an extra DCL node.
- //
- // 1 for the extra "tmp1, tmp2 = f()" assignment statement.
- v.budget += 4*int32(len(n.Lhs)) + 1
+ // See also identical logic in IsBigFunc.
+ if len(n.Rhs) > 0 {
+ if init := n.Rhs[0].Init(); len(init) == 1 {
+ if _, ok := init[0].(*ir.AssignListStmt); ok {
+ // 4 for each value, because each temporary variable now
+ // appears 3 times (DCL, LHS, RHS), plus an extra DCL node.
+ //
+ // 1 for the extra "tmp1, tmp2 = f()" assignment statement.
+ v.budget += 4*int32(len(n.Lhs)) + 1
+ }
}
}
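The compensation above is easiest to see with a concrete two-value assignment; the sketch below spells out, under an assumed desugaring, why each left-hand value costs roughly four extra IR nodes plus one extra assignment statement:

package main

import "fmt"

func f() (int, int) { return 1, 2 }

func main() {
	var a, b int

	// As written by the user:
	a, b = f()

	// Roughly what the unified frontend produces instead: each temporary
	// appears three times (its DCL, the LHS of the new call assignment,
	// and the RHS of the final assignment) and brings one extra DCL node,
	// for four nodes per value; the inserted assignment statement adds one
	// more, hence the 4*len(n.Lhs)+1 credit applied above.
	var tmp1, tmp2 int
	tmp1, tmp2 = f()
	a, b = tmp1, tmp2

	fmt.Println(a, b)
}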
@@ -764,12 +755,15 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
return ir.DoChildren(n, v.do)
}
-func isBigFunc(fn *ir.Func) bool {
+// IsBigFunc reports whether fn is a "big" function.
+//
+// Note: The criteria for "big" is heuristic and subject to change.
+func IsBigFunc(fn *ir.Func) bool {
budget := inlineBigFunctionNodes
return ir.Any(fn, func(n ir.Node) bool {
// See logic in hairyVisitor.doNode, explaining unified IR's
// handling of "a, b = f()" assignments.
- if n, ok := n.(*ir.AssignListStmt); ok && n.Op() == ir.OAS2 {
+ if n, ok := n.(*ir.AssignListStmt); ok && n.Op() == ir.OAS2 && len(n.Rhs) > 0 {
if init := n.Rhs[0].Init(); len(init) == 1 {
if _, ok := init[0].(*ir.AssignListStmt); ok {
budget += 4*len(n.Lhs) + 1
@@ -782,165 +776,45 @@ func isBigFunc(fn *ir.Func) bool {
})
}
-// inlcopylist (together with inlcopy) recursively copies a list of nodes, except
-// that it keeps the same ONAME, OTYPE, and OLITERAL nodes. It is used for copying
-// the body and dcls of an inlineable function.
-func inlcopylist(ll []ir.Node) []ir.Node {
- s := make([]ir.Node, len(ll))
- for i, n := range ll {
- s[i] = inlcopy(n)
+// TryInlineCall returns an inlined call expression for call, or nil
+// if inlining is not possible.
+func TryInlineCall(callerfn *ir.Func, call *ir.CallExpr, bigCaller bool, profile *pgo.Profile) *ir.InlinedCallExpr {
+ if base.Flag.LowerL == 0 {
+ return nil
}
- return s
-}
-
-// inlcopy is like DeepCopy(), but does extra work to copy closures.
-func inlcopy(n ir.Node) ir.Node {
- var edit func(ir.Node) ir.Node
- edit = func(x ir.Node) ir.Node {
- switch x.Op() {
- case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.ONIL:
- return x
- }
- m := ir.Copy(x)
- ir.EditChildren(m, edit)
- if x.Op() == ir.OCLOSURE {
- x := x.(*ir.ClosureExpr)
- // Need to save/duplicate x.Func.Nname,
- // x.Func.Nname.Ntype, x.Func.Dcl, x.Func.ClosureVars, and
- // x.Func.Body for iexport and local inlining.
- oldfn := x.Func
- newfn := ir.NewFunc(oldfn.Pos())
- m.(*ir.ClosureExpr).Func = newfn
- newfn.Nname = ir.NewNameAt(oldfn.Nname.Pos(), oldfn.Nname.Sym())
- // XXX OK to share fn.Type() ??
- newfn.Nname.SetType(oldfn.Nname.Type())
- newfn.Body = inlcopylist(oldfn.Body)
- // Make shallow copy of the Dcl and ClosureVar slices
- newfn.Dcl = append([]*ir.Name(nil), oldfn.Dcl...)
- newfn.ClosureVars = append([]*ir.Name(nil), oldfn.ClosureVars...)
- }
- return m
+ if call.Op() != ir.OCALLFUNC {
+ return nil
}
- return edit(n)
-}
-
-// InlineCalls/inlnode walks fn's statements and expressions and substitutes any
-// calls made to inlineable functions. This is the external entry point.
-func InlineCalls(fn *ir.Func, profile *pgo.Profile) {
- savefn := ir.CurFunc
- ir.CurFunc = fn
- bigCaller := isBigFunc(fn)
- if bigCaller && base.Flag.LowerM > 1 {
- fmt.Printf("%v: function %v considered 'big'; reducing max cost of inlinees\n", ir.Line(fn), fn)
- }
- var inlCalls []*ir.InlinedCallExpr
- var edit func(ir.Node) ir.Node
- edit = func(n ir.Node) ir.Node {
- return inlnode(n, bigCaller, &inlCalls, edit, profile)
- }
- ir.EditChildren(fn, edit)
-
- // If we inlined any calls, we want to recursively visit their
- // bodies for further inlining. However, we need to wait until
- // *after* the original function body has been expanded, or else
- // inlCallee can have false positives (e.g., #54632).
- for len(inlCalls) > 0 {
- call := inlCalls[0]
- inlCalls = inlCalls[1:]
- ir.EditChildren(call, edit)
- }
-
- ir.CurFunc = savefn
-}
-
-// inlnode recurses over the tree to find inlineable calls, which will
-// be turned into OINLCALLs by mkinlcall. When the recursion comes
-// back up will examine left, right, list, rlist, ninit, ntest, nincr,
-// nbody and nelse and use one of the 4 inlconv/glue functions above
-// to turn the OINLCALL into an expression, a statement, or patch it
-// in to this nodes list or rlist as appropriate.
-// NOTE it makes no sense to pass the glue functions down the
-// recursion to the level where the OINLCALL gets created because they
-// have to edit /this/ n, so you'd have to push that one down as well,
-// but then you may as well do it here. so this is cleaner and
-// shorter and less complicated.
-// The result of inlnode MUST be assigned back to n, e.g.
-//
-// n.Left = inlnode(n.Left)
-func inlnode(n ir.Node, bigCaller bool, inlCalls *[]*ir.InlinedCallExpr, edit func(ir.Node) ir.Node, profile *pgo.Profile) ir.Node {
- if n == nil {
- return n
+ if call.GoDefer || call.NoInline {
+ return nil
}
- switch n.Op() {
- case ir.ODEFER, ir.OGO:
- n := n.(*ir.GoDeferStmt)
- switch call := n.Call; call.Op() {
- case ir.OCALLMETH:
- base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
- case ir.OCALLFUNC:
- call := call.(*ir.CallExpr)
- call.NoInline = true
- }
- case ir.OTAILCALL:
- n := n.(*ir.TailCallStmt)
- n.Call.NoInline = true // Not inline a tail call for now. Maybe we could inline it just like RETURN fn(arg)?
-
- // TODO do them here (or earlier),
- // so escape analysis can avoid more heapmoves.
- case ir.OCLOSURE:
- return n
- case ir.OCALLMETH:
- base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
- case ir.OCALLFUNC:
- n := n.(*ir.CallExpr)
- if n.X.Op() == ir.OMETHEXPR {
- // Prevent inlining some reflect.Value methods when using checkptr,
- // even when package reflect was compiled without it (#35073).
- if meth := ir.MethodExprName(n.X); meth != nil {
- s := meth.Sym()
- if base.Debug.Checkptr != 0 && types.IsReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
- return n
- }
+ // Prevent inlining some reflect.Value methods when using checkptr,
+ // even when package reflect was compiled without it (#35073).
+ if base.Debug.Checkptr != 0 && call.Fun.Op() == ir.OMETHEXPR {
+ if method := ir.MethodExprName(call.Fun); method != nil {
+ switch types.ReflectSymName(method.Sym()) {
+ case "Value.UnsafeAddr", "Value.Pointer":
+ return nil
}
}
}
- lno := ir.SetPos(n)
-
- ir.EditChildren(n, edit)
-
- // with all the branches out of the way, it is now time to
- // transmogrify this node itself unless inhibited by the
- // switch at the top of this function.
- switch n.Op() {
- case ir.OCALLMETH:
- base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
-
- case ir.OCALLFUNC:
- call := n.(*ir.CallExpr)
- if call.NoInline {
- break
- }
- if base.Flag.LowerM > 3 {
- fmt.Printf("%v:call to func %+v\n", ir.Line(n), call.X)
- }
- if ir.IsIntrinsicCall(call) {
- break
- }
- if fn := inlCallee(call.X, profile); fn != nil && typecheck.HaveInlineBody(fn) {
- n = mkinlcall(call, fn, bigCaller, inlCalls)
- }
+ if base.Flag.LowerM > 3 {
+ fmt.Printf("%v:call to func %+v\n", ir.Line(call), call.Fun)
}
-
- base.Pos = lno
-
- return n
+ if ir.IsIntrinsicCall(call) {
+ return nil
+ }
+ if fn := inlCallee(callerfn, call.Fun, profile); fn != nil && typecheck.HaveInlineBody(fn) {
+ return mkinlcall(callerfn, call, fn, bigCaller)
+ }
+ return nil
}
// inlCallee takes a function-typed expression and returns the underlying function ONAME
// that it refers to if statically known. Otherwise, it returns nil.
-func inlCallee(fn ir.Node, profile *pgo.Profile) *ir.Func {
+func inlCallee(caller *ir.Func, fn ir.Node, profile *pgo.Profile) (res *ir.Func) {
fn = ir.StaticValue(fn)
switch fn.Op() {
case ir.OMETHEXPR:
@@ -961,6 +835,9 @@ func inlCallee(fn ir.Node, profile *pgo.Profile) *ir.Func {
case ir.OCLOSURE:
fn := fn.(*ir.ClosureExpr)
c := fn.Func
+ if len(c.ClosureVars) != 0 && c.ClosureVars[0].Outer.Curfn != caller {
+ return nil // inliner doesn't support inlining across closure frames
+ }
CanInline(c, profile)
return c
}
@@ -975,7 +852,7 @@ var SSADumpInline = func(*ir.Func) {}
// InlineCall allows the inliner implementation to be overridden.
// If it returns nil, the function will not be inlined.
-var InlineCall = func(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
+var InlineCall = func(callerfn *ir.Func, call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
base.Fatalf("inline.InlineCall not overridden")
panic("unreachable")
}
@@ -983,9 +860,10 @@ var InlineCall = func(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedC
// inlineCostOK returns true if call n from caller to callee is cheap enough to
// inline. bigCaller indicates that caller is a big function.
//
-// If inlineCostOK returns false, it also returns the max cost that the callee
-// exceeded.
-func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller bool) (bool, int32) {
+// In addition to the "cost OK" boolean, it also returns the "max
+// cost" limit used to make the decision (which may differ depending
+// on func size), and the score assigned to this specific callsite.
+func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller bool) (bool, int32, int32) {
maxCost := int32(inlineMaxBudget)
if bigCaller {
// We use this to restrict inlining into very big functions.
@@ -993,9 +871,17 @@ func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller bool) (bool
maxCost = inlineBigFunctionMaxCost
}
- if callee.Inl.Cost <= maxCost {
+ metric := callee.Inl.Cost
+ if inlheur.Enabled() {
+ score, ok := inlheur.GetCallSiteScore(caller, n)
+ if ok {
+ metric = int32(score)
+ }
+ }
+
+ if metric <= maxCost {
// Simple case. Function is already cheap enough.
- return true, 0
+ return true, 0, metric
}
// We'll also allow inlining of hot functions below inlineHotMaxBudget,
@@ -1005,7 +891,7 @@ func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller bool) (bool
csi := pgo.CallSiteInfo{LineOffset: lineOffset, Caller: caller}
if _, ok := candHotEdgeMap[csi]; !ok {
// Cold
- return false, maxCost
+ return false, maxCost, metric
}
// Hot
@@ -1014,68 +900,80 @@ func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller bool) (bool
if base.Debug.PGODebug > 0 {
fmt.Printf("hot-big check disallows inlining for call %s (cost %d) at %v in big function %s\n", ir.PkgFuncName(callee), callee.Inl.Cost, ir.Line(n), ir.PkgFuncName(caller))
}
- return false, maxCost
+ return false, maxCost, metric
+ }
+
+ if metric > inlineHotMaxBudget {
+ return false, inlineHotMaxBudget, metric
}
- if callee.Inl.Cost > inlineHotMaxBudget {
- return false, inlineHotMaxBudget
+ if !base.PGOHash.MatchPosWithInfo(n.Pos(), "inline", nil) {
+ // De-selected by PGO Hash.
+ return false, maxCost, metric
}
if base.Debug.PGODebug > 0 {
fmt.Printf("hot-budget check allows inlining for call %s (cost %d) at %v in function %s\n", ir.PkgFuncName(callee), callee.Inl.Cost, ir.Line(n), ir.PkgFuncName(caller))
}
- return true, 0
+ return true, 0, metric
}
-// If n is a OCALLFUNC node, and fn is an ONAME node for a
-// function with an inlinable body, return an OINLCALL node that can replace n.
-// The returned node's Ninit has the parameter assignments, the Nbody is the
-// inlined function body, and (List, Rlist) contain the (input, output)
-// parameters.
-// The result of mkinlcall MUST be assigned back to n, e.g.
+// canInlineCallsite returns true if the call n from caller to callee
+// can be inlined, plus the score computed for the call expr in
+// question. bigCaller indicates that caller is a big function. log
+// indicates that the 'cannot inline' reason should be logged.
//
-// n.Left = mkinlcall(n.Left, fn, isddd)
-func mkinlcall(n *ir.CallExpr, fn *ir.Func, bigCaller bool, inlCalls *[]*ir.InlinedCallExpr) ir.Node {
- if fn.Inl == nil {
- if logopt.Enabled() {
- logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(ir.CurFunc),
- fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(fn)))
+// Preconditions: CanInline(callee) has already been called.
+func canInlineCallExpr(callerfn *ir.Func, n *ir.CallExpr, callee *ir.Func, bigCaller bool, log bool) (bool, int32) {
+ if callee.Inl == nil {
+ // callee is never inlinable.
+ if log && logopt.Enabled() {
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
+ fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(callee)))
}
- return n
+ return false, 0
}
- if ok, maxCost := inlineCostOK(n, ir.CurFunc, fn, bigCaller); !ok {
- if logopt.Enabled() {
- logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(ir.CurFunc),
- fmt.Sprintf("cost %d of %s exceeds max caller cost %d", fn.Inl.Cost, ir.PkgFuncName(fn), maxCost))
+ ok, maxCost, callSiteScore := inlineCostOK(n, callerfn, callee, bigCaller)
+ if !ok {
+ // callee cost too high for this call site.
+ if log && logopt.Enabled() {
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
+ fmt.Sprintf("cost %d of %s exceeds max caller cost %d", callee.Inl.Cost, ir.PkgFuncName(callee), maxCost))
}
- return n
+ return false, 0
}
- if fn == ir.CurFunc {
+ if callee == callerfn {
// Can't recursively inline a function into itself.
- if logopt.Enabled() {
- logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(ir.CurFunc)))
+ if log && logopt.Enabled() {
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(callerfn)))
}
- return n
+ return false, 0
}
- if base.Flag.Cfg.Instrumenting && types.IsNoInstrumentPkg(fn.Sym().Pkg) {
+ if base.Flag.Cfg.Instrumenting && types.IsNoInstrumentPkg(callee.Sym().Pkg) {
// Runtime package must not be instrumented.
// Instrument skips runtime package. However, some runtime code can be
// inlined into other packages and instrumented there. To avoid this,
// we disable inlining of runtime functions when instrumenting.
// The example that we observed is inlining of LockOSThread,
// which led to false race reports on m contents.
- return n
- }
- if base.Flag.Race && types.IsNoRacePkg(fn.Sym().Pkg) {
- return n
+ if log && logopt.Enabled() {
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
+ fmt.Sprintf("call to runtime function %s in instrumented build", ir.PkgFuncName(callee)))
+ }
+ return false, 0
}
- parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex()
- sym := fn.Linksym()
+ if base.Flag.Race && types.IsNoRacePkg(callee.Sym().Pkg) {
+ if log && logopt.Enabled() {
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
+ fmt.Sprintf(`call into "no-race" package function %s in race build`, ir.PkgFuncName(callee)))
+ }
+ return false, 0
+ }
// Check if we've already inlined this function at this particular
// call site, in order to stop inlining when we reach the beginning
@@ -1084,17 +982,42 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, bigCaller bool, inlCalls *[]*ir.Inli
// many functions. Most likely, the inlining will stop before we
// even hit the beginning of the cycle again, but this catches the
// unusual case.
+ parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex()
+ sym := callee.Linksym()
for inlIndex := parent; inlIndex >= 0; inlIndex = base.Ctxt.InlTree.Parent(inlIndex) {
if base.Ctxt.InlTree.InlinedFunction(inlIndex) == sym {
- if base.Flag.LowerM > 1 {
- fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", ir.Line(n), fn, ir.FuncName(ir.CurFunc))
+ if log {
+ if base.Flag.LowerM > 1 {
+ fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", ir.Line(n), callee, ir.FuncName(callerfn))
+ }
+ if logopt.Enabled() {
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
+ fmt.Sprintf("repeated recursive cycle to %s", ir.PkgFuncName(callee)))
+ }
}
- return n
+ return false, 0
}
}
+ return true, callSiteScore
+}
+
+// mkinlcall returns an OINLCALL node that can replace OCALLFUNC n, or
+// nil if it cannot be inlined. callerfn is the function that contains
+// n, and fn is the function being called.
+//
+// The result of mkinlcall MUST be assigned back to n, e.g.
+//
+// n.Left = mkinlcall(n.Left, fn, isddd)
+func mkinlcall(callerfn *ir.Func, n *ir.CallExpr, fn *ir.Func, bigCaller bool) *ir.InlinedCallExpr {
+ ok, score := canInlineCallExpr(callerfn, n, fn, bigCaller, true)
+ if !ok {
+ return nil
+ }
typecheck.AssertFixedCall(n)
+ parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex()
+ sym := fn.Linksym()
inlIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym, ir.FuncName(fn))
closureInitLSym := func(n *ir.CallExpr, fn *ir.Func) {
@@ -1123,12 +1046,12 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, bigCaller bool, inlCalls *[]*ir.Inli
// Not a standard call.
return
}
- if n.X.Op() != ir.OCLOSURE {
+ if n.Fun.Op() != ir.OCLOSURE {
// Not a direct closure call.
return
}
- clo := n.X.(*ir.ClosureExpr)
+ clo := n.Fun.(*ir.ClosureExpr)
if ir.IsTrivialClosure(clo) {
// enqueueFunc will handle trivial closures anyways.
return
@@ -1147,13 +1070,18 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, bigCaller bool, inlCalls *[]*ir.Inli
}
if base.Flag.LowerM != 0 {
- fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn)
+ if buildcfg.Experiment.NewInliner {
+ fmt.Printf("%v: inlining call to %v with score %d\n",
+ ir.Line(n), fn, score)
+ } else {
+ fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn)
+ }
}
if base.Flag.LowerM > 2 {
fmt.Printf("%v: Before inlining: %+v\n", ir.Line(n), n)
}
- res := InlineCall(n, fn, inlIndex)
+ res := InlineCall(callerfn, n, fn, inlIndex)
if res == nil {
base.FatalfAt(n.Pos(), "inlining call to %v failed", fn)
@@ -1163,7 +1091,9 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, bigCaller bool, inlCalls *[]*ir.Inli
fmt.Printf("%v: After inlining %+v\n\n", ir.Line(res), res)
}
- *inlCalls = append(*inlCalls, res)
+ if inlheur.Enabled() {
+ inlheur.UpdateCallsiteTable(callerfn, n, res)
+ }
return res
}
@@ -1197,6 +1127,9 @@ func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name {
for _, n := range ll {
if n.Class == ir.PAUTO {
if !vis.usedLocals.Has(n) {
+ // TODO(mdempsky): Simplify code after confident that this
+ // never happens anymore.
+ base.FatalfAt(n.Pos(), "unused auto: %v", n)
continue
}
}
@@ -1245,10 +1178,10 @@ func isIndexingCoverageCounter(n ir.Node) bool {
// determine whether it represents a call to sync/atomic.AddUint32 to
// increment a coverage counter.
func isAtomicCoverageCounterUpdate(cn *ir.CallExpr) bool {
- if cn.X.Op() != ir.ONAME {
+ if cn.Fun.Op() != ir.ONAME {
return false
}
- name := cn.X.(*ir.Name)
+ name := cn.Fun.(*ir.Name)
if name.Class != ir.PFUNC {
return false
}
@@ -1264,3 +1197,21 @@ func isAtomicCoverageCounterUpdate(cn *ir.CallExpr) bool {
v := isIndexingCoverageCounter(adn.X)
return v
}
+
+func PostProcessCallSites(profile *pgo.Profile) {
+ if base.Debug.DumpInlCallSiteScores != 0 {
+ budgetCallback := func(fn *ir.Func, prof *pgo.Profile) (int32, bool) {
+ v := inlineBudget(fn, prof, false, false)
+ return v, v == inlineHotMaxBudget
+ }
+ inlheur.DumpInlCallSiteScores(profile, budgetCallback)
+ }
+}
+
+func analyzeFuncProps(fn *ir.Func, p *pgo.Profile) {
+ canInline := func(fn *ir.Func) { CanInline(fn, p) }
+ budgetForFunc := func(fn *ir.Func) int32 {
+ return inlineBudget(fn, p, true, false)
+ }
+ inlheur.AnalyzeFunc(fn, canInline, budgetForFunc, inlineMaxBudget)
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/actualexprpropbits_string.go b/src/cmd/compile/internal/inline/inlheur/actualexprpropbits_string.go
new file mode 100644
index 0000000000..2faf76f487
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/actualexprpropbits_string.go
@@ -0,0 +1,58 @@
+// Code generated by "stringer -bitset -type ActualExprPropBits"; DO NOT EDIT.
+
+package inlheur
+
+import "strconv"
+import "bytes"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[ActualExprConstant-1]
+ _ = x[ActualExprIsConcreteConvIface-2]
+ _ = x[ActualExprIsFunc-4]
+ _ = x[ActualExprIsInlinableFunc-8]
+}
+
+var _ActualExprPropBits_value = [...]uint64{
+ 0x1, /* ActualExprConstant */
+ 0x2, /* ActualExprIsConcreteConvIface */
+ 0x4, /* ActualExprIsFunc */
+ 0x8, /* ActualExprIsInlinableFunc */
+}
+
+const _ActualExprPropBits_name = "ActualExprConstantActualExprIsConcreteConvIfaceActualExprIsFuncActualExprIsInlinableFunc"
+
+var _ActualExprPropBits_index = [...]uint8{0, 18, 47, 63, 88}
+
+func (i ActualExprPropBits) String() string {
+ var b bytes.Buffer
+
+ remain := uint64(i)
+ seen := false
+
+ for k, v := range _ActualExprPropBits_value {
+ x := _ActualExprPropBits_name[_ActualExprPropBits_index[k]:_ActualExprPropBits_index[k+1]]
+ if v == 0 {
+ if i == 0 {
+ b.WriteString(x)
+ return b.String()
+ }
+ continue
+ }
+ if (v & remain) == v {
+ remain &^= v
+ x := _ActualExprPropBits_name[_ActualExprPropBits_index[k]:_ActualExprPropBits_index[k+1]]
+ if seen {
+ b.WriteString("|")
+ }
+ seen = true
+ b.WriteString(x)
+ }
+ }
+ if remain == 0 {
+ return b.String()
+ }
+ return "ActualExprPropBits(0x" + strconv.FormatInt(int64(i), 16) + ")"
+}
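
For reference, a minimal sketch (not part of the change) of how the generated bitset String method above behaves; it assumes code in the same inlheur package, since that is where the constants live:

func exampleActualExprPropBitsString() {
	props := ActualExprConstant | ActualExprIsFunc // 0x1 | 0x4
	println(props.String()) // prints "ActualExprConstant|ActualExprIsFunc"

	// Bits with no corresponding name fall back to a hex rendering.
	println(ActualExprPropBits(0x40).String()) // prints "ActualExprPropBits(0x40)"
}
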
diff --git a/src/cmd/compile/internal/inline/inlheur/analyze.go b/src/cmd/compile/internal/inline/inlheur/analyze.go
new file mode 100644
index 0000000000..a1b6f358e1
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/analyze.go
@@ -0,0 +1,370 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "encoding/json"
+ "fmt"
+ "internal/buildcfg"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
+const (
+ debugTraceFuncs = 1 << iota
+ debugTraceFuncFlags
+ debugTraceResults
+ debugTraceParams
+ debugTraceExprClassify
+ debugTraceCalls
+ debugTraceScoring
+)
+
+// propAnalyzer interface is used for defining one or more analyzer
+// helper objects, each tasked with computing some specific subset of
+// the properties we're interested in. The assumption is that
+// properties are independent, so each new analyzer that implements
+// this interface can operate entirely on its own. For a given analyzer
+// there will be a sequence of calls to nodeVisitPre and nodeVisitPost
+// as the nodes within a function are visited, then a followup call to
+// setResults so that the analyzer can transfer its results into the
+// final properties object.
+type propAnalyzer interface {
+ nodeVisitPre(n ir.Node)
+ nodeVisitPost(n ir.Node)
+ setResults(funcProps *FuncProps)
+}
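
To make the contract concrete, here is a deliberately naive, hypothetical analyzer (not part of the change) that satisfies this interface: it flags a function as never-returning whenever an OPANIC node appears anywhere in the body, with none of the control-flow care taken by the real funcFlagsAnalyzer in analyze_func_flags.go:

// panicSpotter is a hypothetical propAnalyzer used only for illustration.
type panicSpotter struct {
	sawPanic bool
}

func (ps *panicSpotter) nodeVisitPre(n ir.Node) {}

func (ps *panicSpotter) nodeVisitPost(n ir.Node) {
	if n.Op() == ir.OPANIC {
		ps.sawPanic = true
	}
}

func (ps *panicSpotter) setResults(funcProps *FuncProps) {
	if ps.sawPanic {
		funcProps.Flags |= FuncPropNeverReturns
	}
}
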
+
+// fnInlHeur contains inline heuristics state information about a
+// specific Go function being analyzed/considered by the inliner. Note
+// that in addition to constructing a fnInlHeur object by analyzing a
+// specific *ir.Func, there is also code in the test harness
+// (funcprops_test.go) that builds up fnInlHeur's by reading in and
+// parsing a dump. This is the reason why we have file/fname/line
+// fields below instead of just an *ir.Func field.
+type fnInlHeur struct {
+ props *FuncProps
+ cstab CallSiteTab
+ fname string
+ file string
+ line uint
+}
+
+var fpmap = map[*ir.Func]fnInlHeur{}
+
+// AnalyzeFunc computes function properties for fn and its contained
+// closures, updating the global 'fpmap' table. It is assumed that
+// "CanInline" has been run on fn and on the closures that feed
+// directly into calls; other closures not directly called will also
+// be checked for inlinability here in case they are
+// returned as a result.
+func AnalyzeFunc(fn *ir.Func, canInline func(*ir.Func), budgetForFunc func(*ir.Func) int32, inlineMaxBudget int) {
+ if fpmap == nil {
+ // If fpmap is nil this indicates that the main inliner pass is
+ // complete and we're doing inlining of wrappers (no heuristics
+ // used here).
+ return
+ }
+ if fn.OClosure != nil {
+ // closures will be processed along with their outer enclosing func.
+ return
+ }
+ enableDebugTraceIfEnv()
+ if debugTrace&debugTraceFuncs != 0 {
+ fmt.Fprintf(os.Stderr, "=-= AnalyzeFunc(%v)\n", fn)
+ }
+ // Build up a list containing 'fn' and any closures it contains. Along
+ // the way, test to see whether each closure is inlinable in case
+ // we might be returning it.
+ funcs := []*ir.Func{fn}
+ ir.VisitFuncAndClosures(fn, func(n ir.Node) {
+ if clo, ok := n.(*ir.ClosureExpr); ok {
+ funcs = append(funcs, clo.Func)
+ }
+ })
+
+ // Analyze the list of functions. We want to visit a given func
+ // only after the closures it contains have been processed, so
+ // iterate through the list in reverse order. Once a function has
+ // been analyzed, revisit the question of whether it should be
+ // inlinable; if it is over the default hairyness limit and it
+ // doesn't have any interesting properties, then we don't want
+ // the overhead of writing out its inline body.
+ nameFinder := newNameFinder(fn)
+ for i := len(funcs) - 1; i >= 0; i-- {
+ f := funcs[i]
+ if f.OClosure != nil && !f.InlinabilityChecked() {
+ canInline(f)
+ }
+ funcProps := analyzeFunc(f, inlineMaxBudget, nameFinder)
+ revisitInlinability(f, funcProps, budgetForFunc)
+ if f.Inl != nil {
+ f.Inl.Properties = funcProps.SerializeToString()
+ }
+ }
+ disableDebugTrace()
+}
+
+// TearDown is invoked at the end of the main inlining pass; doing
+// function analysis and call site scoring is unlikely to help a lot
+// after this point, so nil out fpmap and other globals to reclaim
+// storage.
+func TearDown() {
+ fpmap = nil
+ scoreCallsCache.tab = nil
+ scoreCallsCache.csl = nil
+}
+
+func analyzeFunc(fn *ir.Func, inlineMaxBudget int, nf *nameFinder) *FuncProps {
+ if funcInlHeur, ok := fpmap[fn]; ok {
+ return funcInlHeur.props
+ }
+ funcProps, fcstab := computeFuncProps(fn, inlineMaxBudget, nf)
+ file, line := fnFileLine(fn)
+ entry := fnInlHeur{
+ fname: fn.Sym().Name,
+ file: file,
+ line: line,
+ props: funcProps,
+ cstab: fcstab,
+ }
+ fn.SetNeverReturns(entry.props.Flags&FuncPropNeverReturns != 0)
+ fpmap[fn] = entry
+ if fn.Inl != nil && fn.Inl.Properties == "" {
+ fn.Inl.Properties = entry.props.SerializeToString()
+ }
+ return funcProps
+}
+
+// revisitInlinability revisits the question of whether to continue to
+// treat function 'fn' as an inline candidate based on the set of
+// properties we've computed for it. If (for example) it has an
+// initial size score of 150 and no interesting properties to speak
+// of, then there isn't really any point in moving ahead with it as an
+// inline candidate.
+func revisitInlinability(fn *ir.Func, funcProps *FuncProps, budgetForFunc func(*ir.Func) int32) {
+ if fn.Inl == nil {
+ return
+ }
+ maxAdj := int32(LargestNegativeScoreAdjustment(fn, funcProps))
+ budget := budgetForFunc(fn)
+ if fn.Inl.Cost+maxAdj > budget {
+ fn.Inl = nil
+ }
+}
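
Worked example with hypothetical numbers: suppose budgetForFunc(fn) returns 80 and fn.Inl.Cost is 95. The function stays an inline candidate only if call-site scoring could plausibly close the gap, e.g. a LargestNegativeScoreAdjustment of -20 gives 95 + (-20) = 75 <= 80 and fn.Inl is kept; if the best available adjustment is only -10, then 95 - 10 = 85 > 80, fn.Inl is set to nil, and no inline body is written out for the function.
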
+
+// computeFuncProps examines the Go function 'fn' and computes for it
+// a function "properties" object, to be used to drive inlining
+// heuristics. See comments on the FuncProps type for more info.
+func computeFuncProps(fn *ir.Func, inlineMaxBudget int, nf *nameFinder) (*FuncProps, CallSiteTab) {
+ if debugTrace&debugTraceFuncs != 0 {
+ fmt.Fprintf(os.Stderr, "=-= starting analysis of func %v:\n%+v\n",
+ fn, fn)
+ }
+ funcProps := new(FuncProps)
+ ffa := makeFuncFlagsAnalyzer(fn)
+ analyzers := []propAnalyzer{ffa}
+ analyzers = addResultsAnalyzer(fn, analyzers, funcProps, inlineMaxBudget, nf)
+ analyzers = addParamsAnalyzer(fn, analyzers, funcProps, nf)
+ runAnalyzersOnFunction(fn, analyzers)
+ for _, a := range analyzers {
+ a.setResults(funcProps)
+ }
+ cstab := computeCallSiteTable(fn, fn.Body, nil, ffa.panicPathTable(), 0, nf)
+ return funcProps, cstab
+}
+
+func runAnalyzersOnFunction(fn *ir.Func, analyzers []propAnalyzer) {
+ var doNode func(ir.Node) bool
+ doNode = func(n ir.Node) bool {
+ for _, a := range analyzers {
+ a.nodeVisitPre(n)
+ }
+ ir.DoChildren(n, doNode)
+ for _, a := range analyzers {
+ a.nodeVisitPost(n)
+ }
+ return false
+ }
+ doNode(fn)
+}
+
+func propsForFunc(fn *ir.Func) *FuncProps {
+ if funcInlHeur, ok := fpmap[fn]; ok {
+ return funcInlHeur.props
+ } else if fn.Inl != nil && fn.Inl.Properties != "" {
+	// FIXME: consider adding some sort of cache or table
+ // for deserialized properties of imported functions.
+ return DeserializeFromString(fn.Inl.Properties)
+ }
+ return nil
+}
+
+func fnFileLine(fn *ir.Func) (string, uint) {
+ p := base.Ctxt.InnermostPos(fn.Pos())
+ return filepath.Base(p.Filename()), p.Line()
+}
+
+func Enabled() bool {
+ return buildcfg.Experiment.NewInliner || UnitTesting()
+}
+
+func UnitTesting() bool {
+ return base.Debug.DumpInlFuncProps != "" ||
+ base.Debug.DumpInlCallSiteScores != 0
+}
+
+// DumpFuncProps computes and caches function properties for the func
+// 'fn', writing out a description of the previously computed set of
+// properties to the file given in 'dumpfile'. Used for the
+// "-d=dumpinlfuncprops=..." command line flag, intended for use
+// primarily in unit testing.
+func DumpFuncProps(fn *ir.Func, dumpfile string) {
+ if fn != nil {
+ if fn.OClosure != nil {
+ // closures will be processed along with their outer enclosing func.
+ return
+ }
+ captureFuncDumpEntry(fn)
+ ir.VisitFuncAndClosures(fn, func(n ir.Node) {
+ if clo, ok := n.(*ir.ClosureExpr); ok {
+ captureFuncDumpEntry(clo.Func)
+ }
+ })
+ } else {
+ emitDumpToFile(dumpfile)
+ }
+}
+
+// emitDumpToFile writes out the buffered function property dump entries
+// to a file, for unit testing. Dump entries need to be sorted by
+// definition line, and due to generics we need to account for the
+// possibility that several ir.Func's will have the same def line.
+func emitDumpToFile(dumpfile string) {
+ mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC
+ if dumpfile[0] == '+' {
+ dumpfile = dumpfile[1:]
+ mode = os.O_WRONLY | os.O_APPEND | os.O_CREATE
+ }
+ if dumpfile[0] == '%' {
+ dumpfile = dumpfile[1:]
+ d, b := filepath.Dir(dumpfile), filepath.Base(dumpfile)
+ ptag := strings.ReplaceAll(types.LocalPkg.Path, "/", ":")
+ dumpfile = d + "/" + ptag + "." + b
+ }
+ outf, err := os.OpenFile(dumpfile, mode, 0644)
+ if err != nil {
+ base.Fatalf("opening function props dump file %q: %v\n", dumpfile, err)
+ }
+ defer outf.Close()
+ dumpFilePreamble(outf)
+
+ atline := map[uint]uint{}
+ sl := make([]fnInlHeur, 0, len(dumpBuffer))
+ for _, e := range dumpBuffer {
+ sl = append(sl, e)
+ atline[e.line] = atline[e.line] + 1
+ }
+ sl = sortFnInlHeurSlice(sl)
+
+ prevline := uint(0)
+ for _, entry := range sl {
+ idx := uint(0)
+ if prevline == entry.line {
+ idx++
+ }
+ prevline = entry.line
+ atl := atline[entry.line]
+ if err := dumpFnPreamble(outf, &entry, nil, idx, atl); err != nil {
+ base.Fatalf("function props dump: %v\n", err)
+ }
+ }
+ dumpBuffer = nil
+}
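
Usage note (values hypothetical): with -d=dumpinlfuncprops=%/tmp/props.txt and a package whose path is foo/bar, the '%' branch above rewrites the destination to /tmp/foo:bar.props.txt, so dumps from different packages in the same build don't clobber one another; a leading '+' instead opens the file in append mode rather than truncating it.
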
+
+// captureFuncDumpEntry grabs the function properties object for 'fn'
+// and enqueues it for later dumping. Used for the
+// "-d=dumpinlfuncprops=..." command line flag, intended for use
+// primarily in unit testing.
+func captureFuncDumpEntry(fn *ir.Func) {
+ // avoid capturing compiler-generated equality funcs.
+ if strings.HasPrefix(fn.Sym().Name, ".eq.") {
+ return
+ }
+ funcInlHeur, ok := fpmap[fn]
+ if !ok {
+ // Missing entry is expected for functions that are too large
+ // to inline. We still want to write out call site scores in
+ // this case however.
+ funcInlHeur = fnInlHeur{cstab: callSiteTab}
+ }
+ if dumpBuffer == nil {
+ dumpBuffer = make(map[*ir.Func]fnInlHeur)
+ }
+ if _, ok := dumpBuffer[fn]; ok {
+ return
+ }
+ if debugTrace&debugTraceFuncs != 0 {
+ fmt.Fprintf(os.Stderr, "=-= capturing dump for %v:\n", fn)
+ }
+ dumpBuffer[fn] = funcInlHeur
+}
+
+// dumpFilePreamble writes out a file-level preamble as part of a
+// function properties dump.
+func dumpFilePreamble(w io.Writer) {
+ fmt.Fprintf(w, "// DO NOT EDIT (use 'go test -v -update-expected' instead.)\n")
+ fmt.Fprintf(w, "// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt\n")
+ fmt.Fprintf(w, "// for more information on the format of this file.\n")
+ fmt.Fprintf(w, "// %s\n", preambleDelimiter)
+}
+
+// dumpFnPreamble writes out a function-level preamble for a given
+// Go function as part of a function properties dump. See the
+// README.txt file in testdata/props for more on the format of
+// this preamble.
+func dumpFnPreamble(w io.Writer, funcInlHeur *fnInlHeur, ecst encodedCallSiteTab, idx, atl uint) error {
+ fmt.Fprintf(w, "// %s %s %d %d %d\n",
+ funcInlHeur.file, funcInlHeur.fname, funcInlHeur.line, idx, atl)
+ // emit props as comments, followed by delimiter
+ fmt.Fprintf(w, "%s// %s\n", funcInlHeur.props.ToString("// "), comDelimiter)
+ data, err := json.Marshal(funcInlHeur.props)
+ if err != nil {
+		return fmt.Errorf("marshal error %v\n", err)
+ }
+ fmt.Fprintf(w, "// %s\n", string(data))
+ dumpCallSiteComments(w, funcInlHeur.cstab, ecst)
+ fmt.Fprintf(w, "// %s\n", fnDelimiter)
+ return nil
+}
+
+// sortFnInlHeurSlice sorts a slice of fnInlHeur based on
+// the starting line of the function definition, then by name.
+func sortFnInlHeurSlice(sl []fnInlHeur) []fnInlHeur {
+ sort.SliceStable(sl, func(i, j int) bool {
+ if sl[i].line != sl[j].line {
+ return sl[i].line < sl[j].line
+ }
+ return sl[i].fname < sl[j].fname
+ })
+ return sl
+}
+
+// delimiters written to various preambles to make parsing of
+// dumps easier.
+const preambleDelimiter = "<endfilepreamble>"
+const fnDelimiter = "<endfuncpreamble>"
+const comDelimiter = "<endpropsdump>"
+const csDelimiter = "<endcallsites>"
+
+// dumpBuffer stores up function properties dumps when
+// "-d=dumpinlfuncprops=..." is in effect.
+var dumpBuffer map[*ir.Func]fnInlHeur
diff --git a/src/cmd/compile/internal/inline/inlheur/analyze_func_callsites.go b/src/cmd/compile/internal/inline/inlheur/analyze_func_callsites.go
new file mode 100644
index 0000000000..36ebe18b82
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/analyze_func_callsites.go
@@ -0,0 +1,413 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/pgo"
+ "cmd/compile/internal/typecheck"
+ "fmt"
+ "os"
+ "strings"
+)
+
+type callSiteAnalyzer struct {
+ fn *ir.Func
+ *nameFinder
+}
+
+type callSiteTableBuilder struct {
+ fn *ir.Func
+ *nameFinder
+ cstab CallSiteTab
+ ptab map[ir.Node]pstate
+ nstack []ir.Node
+ loopNest int
+ isInit bool
+}
+
+func makeCallSiteAnalyzer(fn *ir.Func) *callSiteAnalyzer {
+ return &callSiteAnalyzer{
+ fn: fn,
+ nameFinder: newNameFinder(fn),
+ }
+}
+
+func makeCallSiteTableBuilder(fn *ir.Func, cstab CallSiteTab, ptab map[ir.Node]pstate, loopNestingLevel int, nf *nameFinder) *callSiteTableBuilder {
+ isInit := fn.IsPackageInit() || strings.HasPrefix(fn.Sym().Name, "init.")
+ return &callSiteTableBuilder{
+ fn: fn,
+ cstab: cstab,
+ ptab: ptab,
+ isInit: isInit,
+ loopNest: loopNestingLevel,
+ nstack: []ir.Node{fn},
+ nameFinder: nf,
+ }
+}
+
+// computeCallSiteTable builds and returns a table of call sites for
+// the specified region in function fn. A region here corresponds to a
+// specific subtree within the AST for a function. The main intended
+// use cases are for 'region' to be either A) an entire function body,
+// or B) an inlined call expression.
+func computeCallSiteTable(fn *ir.Func, region ir.Nodes, cstab CallSiteTab, ptab map[ir.Node]pstate, loopNestingLevel int, nf *nameFinder) CallSiteTab {
+ cstb := makeCallSiteTableBuilder(fn, cstab, ptab, loopNestingLevel, nf)
+ var doNode func(ir.Node) bool
+ doNode = func(n ir.Node) bool {
+ cstb.nodeVisitPre(n)
+ ir.DoChildren(n, doNode)
+ cstb.nodeVisitPost(n)
+ return false
+ }
+ for _, n := range region {
+ doNode(n)
+ }
+ return cstb.cstab
+}
+
+func (cstb *callSiteTableBuilder) flagsForNode(call *ir.CallExpr) CSPropBits {
+ var r CSPropBits
+
+ if debugTrace&debugTraceCalls != 0 {
+ fmt.Fprintf(os.Stderr, "=-= analyzing call at %s\n",
+ fmtFullPos(call.Pos()))
+ }
+
+ // Set a bit if this call is within a loop.
+ if cstb.loopNest > 0 {
+ r |= CallSiteInLoop
+ }
+
+ // Set a bit if the call is within an init function (either
+ // compiler-generated or user-written).
+ if cstb.isInit {
+ r |= CallSiteInInitFunc
+ }
+
+ // Decide whether to apply the panic path heuristic. Hack: don't
+ // apply this heuristic in the function "main.main" (mostly just
+ // to avoid annoying users).
+ if !isMainMain(cstb.fn) {
+ r = cstb.determinePanicPathBits(call, r)
+ }
+
+ return r
+}
+
+// determinePanicPathBits updates the CallSiteOnPanicPath bit within
+// "r" if we think this call is on an unconditional path to
+// panic/exit. Do this by walking back up the node stack to see if we
+// can find either A) an enclosing panic, or B) a statement node that
+// we've determined leads to a panic/exit.
+func (cstb *callSiteTableBuilder) determinePanicPathBits(call ir.Node, r CSPropBits) CSPropBits {
+ cstb.nstack = append(cstb.nstack, call)
+ defer func() {
+ cstb.nstack = cstb.nstack[:len(cstb.nstack)-1]
+ }()
+
+ for ri := range cstb.nstack[:len(cstb.nstack)-1] {
+ i := len(cstb.nstack) - ri - 1
+ n := cstb.nstack[i]
+ _, isCallExpr := n.(*ir.CallExpr)
+ _, isStmt := n.(ir.Stmt)
+ if isCallExpr {
+ isStmt = false
+ }
+
+ if debugTrace&debugTraceCalls != 0 {
+ ps, inps := cstb.ptab[n]
+ fmt.Fprintf(os.Stderr, "=-= callpar %d op=%s ps=%s inptab=%v stmt=%v\n", i, n.Op().String(), ps.String(), inps, isStmt)
+ }
+
+ if n.Op() == ir.OPANIC {
+ r |= CallSiteOnPanicPath
+ break
+ }
+ if v, ok := cstb.ptab[n]; ok {
+ if v == psCallsPanic {
+ r |= CallSiteOnPanicPath
+ break
+ }
+ if isStmt {
+ break
+ }
+ }
+ }
+ return r
+}
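
As a hypothetical illustration of what this walk looks for (names invented; not from the change), the logFailure call below sits on an unconditional path to a panic, so its call site would pick up CallSiteOnPanicPath and, later, a scoring penalty:

func mustParse(s string) int {
	v, err := strconv.Atoi(s)
	if err != nil {
		logFailure(s) // call on a path that unconditionally panics
		panic("bad input: " + s)
	}
	return v
}
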
+
+// propsForArg returns property bits for a given call argument expression arg.
+func (cstb *callSiteTableBuilder) propsForArg(arg ir.Node) ActualExprPropBits {
+ if cval := cstb.constValue(arg); cval != nil {
+ return ActualExprConstant
+ }
+ if cstb.isConcreteConvIface(arg) {
+ return ActualExprIsConcreteConvIface
+ }
+ fname := cstb.funcName(arg)
+ if fname != nil {
+ if fn := fname.Func; fn != nil && typecheck.HaveInlineBody(fn) {
+ return ActualExprIsInlinableFunc
+ }
+ return ActualExprIsFunc
+ }
+ return 0
+}
+
+// argPropsForCall returns a slice of argument properties for the
+// expressions being passed to the callee in the specific call
+// expression; these will be stored in the CallSite object for a given
+// call and then consulted when scoring. If no arg has any interesting
+// properties we try to save some space and return a nil slice.
+func (cstb *callSiteTableBuilder) argPropsForCall(ce *ir.CallExpr) []ActualExprPropBits {
+ rv := make([]ActualExprPropBits, len(ce.Args))
+ somethingInteresting := false
+ for idx := range ce.Args {
+ argProp := cstb.propsForArg(ce.Args[idx])
+ somethingInteresting = somethingInteresting || (argProp != 0)
+ rv[idx] = argProp
+ }
+ if !somethingInteresting {
+ return nil
+ }
+ return rv
+}
+
+func (cstb *callSiteTableBuilder) addCallSite(callee *ir.Func, call *ir.CallExpr) {
+ flags := cstb.flagsForNode(call)
+ argProps := cstb.argPropsForCall(call)
+ if debugTrace&debugTraceCalls != 0 {
+ fmt.Fprintf(os.Stderr, "=-= props %+v for call %v\n", argProps, call)
+ }
+ // FIXME: maybe bulk-allocate these?
+ cs := &CallSite{
+ Call: call,
+ Callee: callee,
+ Assign: cstb.containingAssignment(call),
+ ArgProps: argProps,
+ Flags: flags,
+ ID: uint(len(cstb.cstab)),
+ }
+ if _, ok := cstb.cstab[call]; ok {
+ fmt.Fprintf(os.Stderr, "*** cstab duplicate entry at: %s\n",
+ fmtFullPos(call.Pos()))
+ fmt.Fprintf(os.Stderr, "*** call: %+v\n", call)
+ panic("bad")
+ }
+ // Set initial score for callsite to the cost computed
+ // by CanInline; this score will be refined later based
+ // on heuristics.
+ cs.Score = int(callee.Inl.Cost)
+
+ if cstb.cstab == nil {
+ cstb.cstab = make(CallSiteTab)
+ }
+ cstb.cstab[call] = cs
+ if debugTrace&debugTraceCalls != 0 {
+ fmt.Fprintf(os.Stderr, "=-= added callsite: caller=%v callee=%v n=%s\n",
+ cstb.fn, callee, fmtFullPos(call.Pos()))
+ }
+}
+
+func (cstb *callSiteTableBuilder) nodeVisitPre(n ir.Node) {
+ switch n.Op() {
+ case ir.ORANGE, ir.OFOR:
+ if !hasTopLevelLoopBodyReturnOrBreak(loopBody(n)) {
+ cstb.loopNest++
+ }
+ case ir.OCALLFUNC:
+ ce := n.(*ir.CallExpr)
+ callee := pgo.DirectCallee(ce.Fun)
+ if callee != nil && callee.Inl != nil {
+ cstb.addCallSite(callee, ce)
+ }
+ }
+ cstb.nstack = append(cstb.nstack, n)
+}
+
+func (cstb *callSiteTableBuilder) nodeVisitPost(n ir.Node) {
+ cstb.nstack = cstb.nstack[:len(cstb.nstack)-1]
+ switch n.Op() {
+ case ir.ORANGE, ir.OFOR:
+ if !hasTopLevelLoopBodyReturnOrBreak(loopBody(n)) {
+ cstb.loopNest--
+ }
+ }
+}
+
+func loopBody(n ir.Node) ir.Nodes {
+ if forst, ok := n.(*ir.ForStmt); ok {
+ return forst.Body
+ }
+ if rst, ok := n.(*ir.RangeStmt); ok {
+ return rst.Body
+ }
+ return nil
+}
+
+// hasTopLevelLoopBodyReturnOrBreak examines the body of a "for" or
+// "range" loop to try to verify that it is a real loop, as opposed to
+// a construct that is syntactically loopy but doesn't actually iterate
+// multiple times, like:
+//
+// for {
+// blah()
+// return 1
+// }
+//
+// [Remark: the pattern above crops up quite a bit in the source code
+// for the compiler itself, e.g. the auto-generated rewrite code]
+//
+// Note that we don't look for GOTO statements here, so it's possible
+// we'll get the wrong result for a loop with complicated control
+// jumps via gotos.
+func hasTopLevelLoopBodyReturnOrBreak(loopBody ir.Nodes) bool {
+ for _, n := range loopBody {
+ if n.Op() == ir.ORETURN || n.Op() == ir.OBREAK {
+ return true
+ }
+ }
+ return false
+}
+
+// containingAssignment returns the top-level assignment statement
+// for a statement level function call "n". Examples:
+//
+// x := foo()
+// x, y := bar(z, baz())
+// if blah() { ...
+//
+// Here the top-level assignment statement for the foo() call is the
+// statement assigning to "x"; the top-level assignment for "bar()"
+// call is the assignment to x,y. For the baz() and blah() calls,
+// there is no top level assignment statement.
+//
+// The unstated goal here is that we want to use the containing
+// assignment to establish a connection between a given call and the
+// variables to which its results/returns are being assigned.
+//
+// Note that for the "bar" command above, the front end sometimes
+// decomposes this into two assignments, the first one assigning the
+// call to a pair of auto-temps, then the second one assigning the
+// auto-temps to the user-visible vars. This helper will return the
+// second (outer) of these two.
+func (cstb *callSiteTableBuilder) containingAssignment(n ir.Node) ir.Node {
+ parent := cstb.nstack[len(cstb.nstack)-1]
+
+	// assignsOnlyAutoTemps returns TRUE if the specified OAS2FUNC
+ // node assigns only auto-temps.
+ assignsOnlyAutoTemps := func(x ir.Node) bool {
+ alst := x.(*ir.AssignListStmt)
+ oa2init := alst.Init()
+ if len(oa2init) == 0 {
+ return false
+ }
+ for _, v := range oa2init {
+ d := v.(*ir.Decl)
+ if !ir.IsAutoTmp(d.X) {
+ return false
+ }
+ }
+ return true
+ }
+
+ // Simple case: x := foo()
+ if parent.Op() == ir.OAS {
+ return parent
+ }
+
+ // Multi-return case: x, y := bar()
+ if parent.Op() == ir.OAS2FUNC {
+ // Hack city: if the result vars are auto-temps, try looking
+ // for an outer assignment in the tree. The code shape we're
+ // looking for here is:
+ //
+ // OAS1({x,y},OCONVNOP(OAS2FUNC({auto1,auto2},OCALLFUNC(bar))))
+ //
+ if assignsOnlyAutoTemps(parent) {
+ par2 := cstb.nstack[len(cstb.nstack)-2]
+ if par2.Op() == ir.OAS2 {
+ return par2
+ }
+ if par2.Op() == ir.OCONVNOP {
+ par3 := cstb.nstack[len(cstb.nstack)-3]
+ if par3.Op() == ir.OAS2 {
+ return par3
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// UpdateCallsiteTable handles updating of callerfn's call site table
+// after an inline has been carried out, e.g. the call at 'n' has been
+// turned into the inlined call expression 'ic' within function
+// callerfn. The chief thing of interest here is to make sure that any
+// call nodes within 'ic' are added to the call site table for
+// 'callerfn' and scored appropriately.
+func UpdateCallsiteTable(callerfn *ir.Func, n *ir.CallExpr, ic *ir.InlinedCallExpr) {
+ enableDebugTraceIfEnv()
+ defer disableDebugTrace()
+
+ funcInlHeur, ok := fpmap[callerfn]
+ if !ok {
+ // This can happen for compiler-generated wrappers.
+ if debugTrace&debugTraceCalls != 0 {
+ fmt.Fprintf(os.Stderr, "=-= early exit, no entry for caller fn %v\n", callerfn)
+ }
+ return
+ }
+
+ if debugTrace&debugTraceCalls != 0 {
+ fmt.Fprintf(os.Stderr, "=-= UpdateCallsiteTable(caller=%v, cs=%s)\n",
+ callerfn, fmtFullPos(n.Pos()))
+ }
+
+ // Mark the call in question as inlined.
+ oldcs, ok := funcInlHeur.cstab[n]
+ if !ok {
+ // This can happen for compiler-generated wrappers.
+ return
+ }
+ oldcs.aux |= csAuxInlined
+
+ if debugTrace&debugTraceCalls != 0 {
+ fmt.Fprintf(os.Stderr, "=-= marked as inlined: callee=%v %s\n",
+ oldcs.Callee, EncodeCallSiteKey(oldcs))
+ }
+
+ // Walk the inlined call region to collect new callsites.
+ var icp pstate
+ if oldcs.Flags&CallSiteOnPanicPath != 0 {
+ icp = psCallsPanic
+ }
+ var loopNestLevel int
+ if oldcs.Flags&CallSiteInLoop != 0 {
+ loopNestLevel = 1
+ }
+ ptab := map[ir.Node]pstate{ic: icp}
+ nf := newNameFinder(nil)
+ icstab := computeCallSiteTable(callerfn, ic.Body, nil, ptab, loopNestLevel, nf)
+
+ // Record parent callsite. This is primarily for debug output.
+ for _, cs := range icstab {
+ cs.parent = oldcs
+ }
+
+ // Score the calls in the inlined body. Note the setting of
+ // "doCallResults" to false here: at the moment there isn't any
+ // easy way to localize or region-ize the work done by
+ // "rescoreBasedOnCallResultUses", which currently does a walk
+ // over the entire function to look for uses of a given set of
+ // results. Similarly we're passing nil to makeCallSiteAnalyzer,
+ // so as to run name finding without the use of static value &
+ // friends.
+ csa := makeCallSiteAnalyzer(nil)
+ const doCallResults = false
+ csa.scoreCallsRegion(callerfn, ic.Body, icstab, doCallResults, ic)
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go b/src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go
new file mode 100644
index 0000000000..b7403a4f8c
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go
@@ -0,0 +1,356 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "fmt"
+ "os"
+)
+
+// funcFlagsAnalyzer computes the "Flags" value for the FuncProps
+// object we're computing. The main item of interest here is "nstate",
+// which stores the disposition of a given ir Node with respect to the
+// flags/properties we're trying to compute.
+type funcFlagsAnalyzer struct {
+ fn *ir.Func
+ nstate map[ir.Node]pstate
+ noInfo bool // set if we see something inscrutable/un-analyzable
+}
+
+// pstate keeps track of the disposition of a given node and its
+// children with respect to panic/exit calls.
+type pstate int
+
+const (
+ psNoInfo pstate = iota // nothing interesting about this node
+ psCallsPanic // node causes call to panic or os.Exit
+ psMayReturn // executing node may trigger a "return" stmt
+ psTop // dataflow lattice "top" element
+)
+
+func makeFuncFlagsAnalyzer(fn *ir.Func) *funcFlagsAnalyzer {
+ return &funcFlagsAnalyzer{
+ fn: fn,
+ nstate: make(map[ir.Node]pstate),
+ }
+}
+
+// setResults transfers func flag results to 'funcProps'.
+func (ffa *funcFlagsAnalyzer) setResults(funcProps *FuncProps) {
+ var rv FuncPropBits
+ if !ffa.noInfo && ffa.stateForList(ffa.fn.Body) == psCallsPanic {
+ rv = FuncPropNeverReturns
+ }
+ // This is slightly hacky and not at all required, but include a
+ // special case for main.main, which often ends in a call to
+ // os.Exit. People who write code like this (very common I
+ // imagine)
+ //
+ // func main() {
+ // rc = perform()
+ // ...
+ // foo()
+ // os.Exit(rc)
+ // }
+ //
+ // will be constantly surprised when foo() is inlined in many
+ // other spots in the program but not in main().
+ if isMainMain(ffa.fn) {
+ rv &^= FuncPropNeverReturns
+ }
+ funcProps.Flags = rv
+}
+
+func (ffa *funcFlagsAnalyzer) getState(n ir.Node) pstate {
+ return ffa.nstate[n]
+}
+
+func (ffa *funcFlagsAnalyzer) setState(n ir.Node, st pstate) {
+ if st != psNoInfo {
+ ffa.nstate[n] = st
+ }
+}
+
+func (ffa *funcFlagsAnalyzer) updateState(n ir.Node, st pstate) {
+ if st == psNoInfo {
+ delete(ffa.nstate, n)
+ } else {
+ ffa.nstate[n] = st
+ }
+}
+
+func (ffa *funcFlagsAnalyzer) panicPathTable() map[ir.Node]pstate {
+ return ffa.nstate
+}
+
+// blockCombine merges together states as part of a linear sequence of
+// statements, where 'pred' and 'succ' are analysis results for a pair
+// of consecutive statements. Examples:
+//
+// case 1: case 2:
+// panic("foo") if q { return x } <-pred
+// return x panic("boo") <-succ
+//
+// In case 1, since the pred state is "always panic" it doesn't matter
+// what the succ state is, hence the state for the combination of the
+// two blocks is "always panics". In case 2, because there is a path
+// to return that avoids the panic in succ, the state for the
+// combination of the two statements is "may return".
+func blockCombine(pred, succ pstate) pstate {
+ switch succ {
+ case psTop:
+ return pred
+ case psMayReturn:
+ if pred == psCallsPanic {
+ return psCallsPanic
+ }
+ return psMayReturn
+ case psNoInfo:
+ return pred
+ case psCallsPanic:
+ if pred == psMayReturn {
+ return psMayReturn
+ }
+ return psCallsPanic
+ }
+ panic("should never execute")
+}
+
+// branchCombine combines two states at a control flow branch point where
+// either p1 or p2 executes (as in an "if" statement).
+func branchCombine(p1, p2 pstate) pstate {
+ if p1 == psCallsPanic && p2 == psCallsPanic {
+ return psCallsPanic
+ }
+ if p1 == psMayReturn || p2 == psMayReturn {
+ return psMayReturn
+ }
+ return psNoInfo
+}
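
A few hypothetical spot checks (not in the change) showing how the two combinators realize the cases described above:

func exampleCombine() {
	// blockCombine case 1: "panic; return" -- the panic dominates.
	_ = blockCombine(psCallsPanic, psMayReturn) == psCallsPanic // true
	// blockCombine case 2: "if q { return x }; panic" -- a return path escapes the panic.
	_ = blockCombine(psMayReturn, psCallsPanic) == psMayReturn // true
	// branchCombine: both arms must panic for the branch point to panic.
	_ = branchCombine(psCallsPanic, psMayReturn) == psMayReturn // true
}
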
+
+// stateForList walks through a list of statements and computes the
+// state/disposition for the entire list as a whole, as well
+// as updating disposition of intermediate nodes.
+func (ffa *funcFlagsAnalyzer) stateForList(list ir.Nodes) pstate {
+ st := psTop
+ // Walk the list backwards so that we can update the state for
+ // earlier list elements based on what we find out about their
+ // successors. Example:
+ //
+ // if ... {
+ // L10: foo()
+ // L11: <stmt>
+ // L12: panic(...)
+ // }
+ //
+ // After combining the dispositions for line 11 and 12, we want to
+ // update the state for the call at line 10 based on that combined
+ // disposition (if L11 has no path to "return", then the call at
+ // line 10 will be on a panic path).
+ for i := len(list) - 1; i >= 0; i-- {
+ n := list[i]
+ psi := ffa.getState(n)
+ if debugTrace&debugTraceFuncFlags != 0 {
+ fmt.Fprintf(os.Stderr, "=-= %v: stateForList n=%s ps=%s\n",
+ ir.Line(n), n.Op().String(), psi.String())
+ }
+ st = blockCombine(psi, st)
+ ffa.updateState(n, st)
+ }
+ if st == psTop {
+ st = psNoInfo
+ }
+ return st
+}
+
+func isMainMain(fn *ir.Func) bool {
+ s := fn.Sym()
+ return (s.Pkg.Name == "main" && s.Name == "main")
+}
+
+func isWellKnownFunc(s *types.Sym, pkg, name string) bool {
+ return s.Pkg.Path == pkg && s.Name == name
+}
+
+// isExitCall reports TRUE if the node itself is an unconditional
+// call to os.Exit(), a panic, or a function that does likewise.
+func isExitCall(n ir.Node) bool {
+ if n.Op() != ir.OCALLFUNC {
+ return false
+ }
+ cx := n.(*ir.CallExpr)
+ name := ir.StaticCalleeName(cx.Fun)
+ if name == nil {
+ return false
+ }
+ s := name.Sym()
+ if isWellKnownFunc(s, "os", "Exit") ||
+ isWellKnownFunc(s, "runtime", "throw") {
+ return true
+ }
+ if funcProps := propsForFunc(name.Func); funcProps != nil {
+ if funcProps.Flags&FuncPropNeverReturns != 0 {
+ return true
+ }
+ }
+ return name.Func.NeverReturns()
+}
+
+// pessimize is called to record the fact that we saw something in the
+// function that renders it entirely impossible to analyze.
+func (ffa *funcFlagsAnalyzer) pessimize() {
+ ffa.noInfo = true
+}
+
+// shouldVisit reports TRUE if this is an interesting node from the
+// perspective of computing function flags. NB: due to the fact that
+// ir.CallExpr implements the Stmt interface, we wind up visiting
+// a lot of nodes that we don't really need to, but these can
+// simply be screened out as part of the visit.
+func shouldVisit(n ir.Node) bool {
+ _, isStmt := n.(ir.Stmt)
+ return n.Op() != ir.ODCL &&
+ (isStmt || n.Op() == ir.OCALLFUNC || n.Op() == ir.OPANIC)
+}
+
+// nodeVisitPost helps implement the propAnalyzer interface; when
+// called on a given node, it decides the disposition of that node
+// based on the state(s) of the node's children.
+func (ffa *funcFlagsAnalyzer) nodeVisitPost(n ir.Node) {
+ if debugTrace&debugTraceFuncFlags != 0 {
+ fmt.Fprintf(os.Stderr, "=+= nodevis %v %s should=%v\n",
+ ir.Line(n), n.Op().String(), shouldVisit(n))
+ }
+ if !shouldVisit(n) {
+ return
+ }
+ var st pstate
+ switch n.Op() {
+ case ir.OCALLFUNC:
+ if isExitCall(n) {
+ st = psCallsPanic
+ }
+ case ir.OPANIC:
+ st = psCallsPanic
+ case ir.ORETURN:
+ st = psMayReturn
+ case ir.OBREAK, ir.OCONTINUE:
+ // FIXME: this handling of break/continue is sub-optimal; we
+ // have them as "mayReturn" in order to help with this case:
+ //
+ // for {
+ // if q() { break }
+ // panic(...)
+ // }
+ //
+ // where the effect of the 'break' is to cause the subsequent
+ // panic to be skipped. One possible improvement would be to
+ // track whether the currently enclosing loop is a "for {" or
+ // a for/range with condition, then use mayReturn only for the
+ // former. Note also that "break X" or "continue X" is treated
+ // the same as "goto", since we don't have a good way to track
+ // the target of the branch.
+ st = psMayReturn
+ n := n.(*ir.BranchStmt)
+ if n.Label != nil {
+ ffa.pessimize()
+ }
+ case ir.OBLOCK:
+ n := n.(*ir.BlockStmt)
+ st = ffa.stateForList(n.List)
+ case ir.OCASE:
+ if ccst, ok := n.(*ir.CaseClause); ok {
+ st = ffa.stateForList(ccst.Body)
+ } else if ccst, ok := n.(*ir.CommClause); ok {
+ st = ffa.stateForList(ccst.Body)
+ } else {
+ panic("unexpected")
+ }
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ st = branchCombine(ffa.stateForList(n.Body), ffa.stateForList(n.Else))
+ case ir.OFOR:
+ // Treat for { XXX } like a block.
+ // Treat for <cond> { XXX } like an if statement with no else.
+ n := n.(*ir.ForStmt)
+ bst := ffa.stateForList(n.Body)
+ if n.Cond == nil {
+ st = bst
+ } else {
+ if bst == psMayReturn {
+ st = psMayReturn
+ }
+ }
+ case ir.ORANGE:
+ // Treat for range { XXX } like an if statement with no else.
+ n := n.(*ir.RangeStmt)
+ if ffa.stateForList(n.Body) == psMayReturn {
+ st = psMayReturn
+ }
+ case ir.OGOTO:
+ // punt if we see even one goto. if we built a control
+ // flow graph we could do more, but this is just a tree walk.
+ ffa.pessimize()
+ case ir.OSELECT:
+ // process selects for "may return" but not "always panics",
+ // the latter case seems very improbable.
+ n := n.(*ir.SelectStmt)
+ if len(n.Cases) != 0 {
+ st = psTop
+ for _, c := range n.Cases {
+ st = branchCombine(ffa.stateForList(c.Body), st)
+ }
+ }
+ case ir.OSWITCH:
+ n := n.(*ir.SwitchStmt)
+ if len(n.Cases) != 0 {
+ st = psTop
+ for _, c := range n.Cases {
+ st = branchCombine(ffa.stateForList(c.Body), st)
+ }
+ }
+
+ st, fall := psTop, psNoInfo
+ for i := len(n.Cases) - 1; i >= 0; i-- {
+ cas := n.Cases[i]
+ cst := ffa.stateForList(cas.Body)
+ endsInFallthrough := false
+ if len(cas.Body) != 0 {
+ endsInFallthrough = cas.Body[0].Op() == ir.OFALL
+ }
+ if endsInFallthrough {
+ cst = blockCombine(cst, fall)
+ }
+ st = branchCombine(st, cst)
+ fall = cst
+ }
+ case ir.OFALL:
+ // Not important.
+ case ir.ODCLFUNC, ir.ORECOVER, ir.OAS, ir.OAS2, ir.OAS2FUNC, ir.OASOP,
+ ir.OPRINTLN, ir.OPRINT, ir.OLABEL, ir.OCALLINTER, ir.ODEFER,
+ ir.OSEND, ir.ORECV, ir.OSELRECV2, ir.OGO, ir.OAPPEND, ir.OAS2DOTTYPE,
+ ir.OAS2MAPR, ir.OGETG, ir.ODELETE, ir.OINLMARK, ir.OAS2RECV,
+ ir.OMIN, ir.OMAX, ir.OMAKE, ir.ORECOVERFP, ir.OGETCALLERSP:
+ // these should all be benign/uninteresting
+ case ir.OTAILCALL, ir.OJUMPTABLE, ir.OTYPESW:
+ // don't expect to see these at all.
+ base.Fatalf("unexpected op %s in func %s",
+ n.Op().String(), ir.FuncName(ffa.fn))
+ default:
+ base.Fatalf("%v: unhandled op %s in func %v",
+ ir.Line(n), n.Op().String(), ir.FuncName(ffa.fn))
+ }
+ if debugTrace&debugTraceFuncFlags != 0 {
+ fmt.Fprintf(os.Stderr, "=-= %v: visit n=%s returns %s\n",
+ ir.Line(n), n.Op().String(), st.String())
+ }
+ ffa.setState(n, st)
+}
+
+func (ffa *funcFlagsAnalyzer) nodeVisitPre(n ir.Node) {
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/analyze_func_params.go b/src/cmd/compile/internal/inline/inlheur/analyze_func_params.go
new file mode 100644
index 0000000000..d85d73b2ef
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/analyze_func_params.go
@@ -0,0 +1,355 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/ir"
+ "fmt"
+ "os"
+)
+
+// paramsAnalyzer holds state information for the phase that computes
+// flags for a Go function's parameters, for use in inline heuristics.
+// Note that the params slice below includes entries for blanks.
+type paramsAnalyzer struct {
+ fname string
+ values []ParamPropBits
+ params []*ir.Name
+ top []bool
+ *condLevelTracker
+ *nameFinder
+}
+
+// getParams returns an *ir.Name slice containing all params for the
+// function (plus rcvr as well if applicable).
+func getParams(fn *ir.Func) []*ir.Name {
+ sig := fn.Type()
+ numParams := sig.NumRecvs() + sig.NumParams()
+ return fn.Dcl[:numParams]
+}
+
+// addParamsAnalyzer creates a new paramsAnalyzer helper object for
+// the function fn, appends it to the analyzers list, and returns the
+// new list. If the function in question doesn't have any interesting
+// parameters then the analyzer list is returned unchanged, and the
+// params flags in "fp" are updated accordingly.
+func addParamsAnalyzer(fn *ir.Func, analyzers []propAnalyzer, fp *FuncProps, nf *nameFinder) []propAnalyzer {
+ pa, props := makeParamsAnalyzer(fn, nf)
+ if pa != nil {
+ analyzers = append(analyzers, pa)
+ } else {
+ fp.ParamFlags = props
+ }
+ return analyzers
+}
+
+// makeParamsAnalyzer creates a new helper object to analyze parameters
+// of function fn. If the function doesn't have any interesting
+// params, a nil helper is returned along with a set of default param
+// flags for the func.
+func makeParamsAnalyzer(fn *ir.Func, nf *nameFinder) (*paramsAnalyzer, []ParamPropBits) {
+ params := getParams(fn) // includes receiver if applicable
+ if len(params) == 0 {
+ return nil, nil
+ }
+ vals := make([]ParamPropBits, len(params))
+ if fn.Inl == nil {
+ return nil, vals
+ }
+ top := make([]bool, len(params))
+ interestingToAnalyze := false
+ for i, pn := range params {
+ if pn == nil {
+ continue
+ }
+ pt := pn.Type()
+ if !pt.IsScalar() && !pt.HasNil() {
+ // existing properties not applicable here (for things
+ // like structs, arrays, slices, etc).
+ continue
+ }
+ // If param is reassigned, skip it.
+ if ir.Reassigned(pn) {
+ continue
+ }
+ top[i] = true
+ interestingToAnalyze = true
+ }
+ if !interestingToAnalyze {
+ return nil, vals
+ }
+
+ if debugTrace&debugTraceParams != 0 {
+ fmt.Fprintf(os.Stderr, "=-= param analysis of func %v:\n",
+ fn.Sym().Name)
+ for i := range vals {
+ n := "_"
+ if params[i] != nil {
+ n = params[i].Sym().String()
+ }
+ fmt.Fprintf(os.Stderr, "=-= %d: %q %s top=%v\n",
+ i, n, vals[i].String(), top[i])
+ }
+ }
+ pa := &paramsAnalyzer{
+ fname: fn.Sym().Name,
+ values: vals,
+ params: params,
+ top: top,
+ condLevelTracker: new(condLevelTracker),
+ nameFinder: nf,
+ }
+ return pa, nil
+}
+
+func (pa *paramsAnalyzer) setResults(funcProps *FuncProps) {
+ funcProps.ParamFlags = pa.values
+}
+
+func (pa *paramsAnalyzer) findParamIdx(n *ir.Name) int {
+ if n == nil {
+ panic("bad")
+ }
+ for i := range pa.params {
+ if pa.params[i] == n {
+ return i
+ }
+ }
+ return -1
+}
+
+type testfType func(x ir.Node, param *ir.Name, idx int) (bool, bool)
+
+// checkParams invokes function 'testf' on the specified expression
+// 'x' for each parameter, and if the result is TRUE, or's 'flag' into
+// the flags for that param.
+func (pa *paramsAnalyzer) checkParams(x ir.Node, flag ParamPropBits, mayflag ParamPropBits, testf testfType) {
+ for idx, p := range pa.params {
+ if !pa.top[idx] && pa.values[idx] == ParamNoInfo {
+ continue
+ }
+ result, may := testf(x, p, idx)
+ if debugTrace&debugTraceParams != 0 {
+ fmt.Fprintf(os.Stderr, "=-= test expr %v param %s result=%v flag=%s\n", x, p.Sym().Name, result, flag.String())
+ }
+ if result {
+ v := flag
+ if pa.condLevel != 0 || may {
+ v = mayflag
+ }
+ pa.values[idx] |= v
+ pa.top[idx] = false
+ }
+ }
+}
+
+// foldCheckParams checks expression 'x' (an 'if' condition or
+// 'switch' stmt expr) to see if the expr would fold away if a
+// specific parameter had a constant value.
+func (pa *paramsAnalyzer) foldCheckParams(x ir.Node) {
+ pa.checkParams(x, ParamFeedsIfOrSwitch, ParamMayFeedIfOrSwitch,
+ func(x ir.Node, p *ir.Name, idx int) (bool, bool) {
+ return ShouldFoldIfNameConstant(x, []*ir.Name{p}), false
+ })
+}
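
A hypothetical input function (not from the change) showing the pattern this targets: the 'if' condition folds away whenever the caller passes a constant, so the debug parameter would be flagged ParamFeedsIfOrSwitch (or the 'May' variant if the test sits inside conditional code):

func trace(debug bool, msg string) {
	if debug { // folds to a constant branch when callers pass a literal
		println("trace: " + msg)
	}
}
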
+
+// callCheckParams examines the target of call expression 'ce' to see
+// if it is making a call to the value passed in for some parameter.
+func (pa *paramsAnalyzer) callCheckParams(ce *ir.CallExpr) {
+ switch ce.Op() {
+ case ir.OCALLINTER:
+ if ce.Op() != ir.OCALLINTER {
+ return
+ }
+ sel := ce.Fun.(*ir.SelectorExpr)
+ r := pa.staticValue(sel.X)
+ if r.Op() != ir.ONAME {
+ return
+ }
+ name := r.(*ir.Name)
+ if name.Class != ir.PPARAM {
+ return
+ }
+ pa.checkParams(r, ParamFeedsInterfaceMethodCall,
+ ParamMayFeedInterfaceMethodCall,
+ func(x ir.Node, p *ir.Name, idx int) (bool, bool) {
+ name := x.(*ir.Name)
+ return name == p, false
+ })
+ case ir.OCALLFUNC:
+ if ce.Fun.Op() != ir.ONAME {
+ return
+ }
+ called := ir.StaticValue(ce.Fun)
+ if called.Op() != ir.ONAME {
+ return
+ }
+ name := called.(*ir.Name)
+ if name.Class == ir.PPARAM {
+ pa.checkParams(called, ParamFeedsIndirectCall,
+ ParamMayFeedIndirectCall,
+ func(x ir.Node, p *ir.Name, idx int) (bool, bool) {
+ name := x.(*ir.Name)
+ return name == p, false
+ })
+ } else {
+ cname := pa.funcName(called)
+ if cname != nil {
+ pa.deriveFlagsFromCallee(ce, cname.Func)
+ }
+ }
+ }
+}
+
+// deriveFlagsFromCallee tries to derive flags for the current
+// function based on a call this function makes to some other
+// function. Example:
+//
+// /* Simple */ /* Derived from callee */
+// func foo(f func(int)) { func foo(f func(int)) {
+// f(2) bar(32, f)
+// } }
+// func bar(x int, f func()) {
+// f(x)
+// }
+//
+// Here we can set the "param feeds indirect call" flag for
+// foo's param 'f' since we know that bar has that flag set for
+// its second param, and we're passing that param a function.
+func (pa *paramsAnalyzer) deriveFlagsFromCallee(ce *ir.CallExpr, callee *ir.Func) {
+ calleeProps := propsForFunc(callee)
+ if calleeProps == nil {
+ return
+ }
+ if debugTrace&debugTraceParams != 0 {
+ fmt.Fprintf(os.Stderr, "=-= callee props for %v:\n%s",
+ callee.Sym().Name, calleeProps.String())
+ }
+
+ must := []ParamPropBits{ParamFeedsInterfaceMethodCall, ParamFeedsIndirectCall, ParamFeedsIfOrSwitch}
+ may := []ParamPropBits{ParamMayFeedInterfaceMethodCall, ParamMayFeedIndirectCall, ParamMayFeedIfOrSwitch}
+
+ for pidx, arg := range ce.Args {
+ // Does the callee param have any interesting properties?
+ // If not we can skip this one.
+ pflag := calleeProps.ParamFlags[pidx]
+ if pflag == 0 {
+ continue
+ }
+ // See if one of the caller's parameters is flowing unmodified
+ // into this actual expression.
+ r := pa.staticValue(arg)
+ if r.Op() != ir.ONAME {
+ return
+ }
+ name := r.(*ir.Name)
+ if name.Class != ir.PPARAM {
+ return
+ }
+ callerParamIdx := pa.findParamIdx(name)
+		// note that callerParamIdx may be -1 in the case where
+ // the param belongs not to the current closure func we're
+ // analyzing but to an outer enclosing func.
+ if callerParamIdx == -1 {
+ return
+ }
+ if pa.params[callerParamIdx] == nil {
+ panic("something went wrong")
+ }
+ if !pa.top[callerParamIdx] &&
+ pa.values[callerParamIdx] == ParamNoInfo {
+ continue
+ }
+ if debugTrace&debugTraceParams != 0 {
+ fmt.Fprintf(os.Stderr, "=-= pflag for arg %d is %s\n",
+ pidx, pflag.String())
+ }
+ for i := range must {
+ mayv := may[i]
+ mustv := must[i]
+ if pflag&mustv != 0 && pa.condLevel == 0 {
+ pa.values[callerParamIdx] |= mustv
+ } else if pflag&(mustv|mayv) != 0 {
+ pa.values[callerParamIdx] |= mayv
+ }
+ }
+ pa.top[callerParamIdx] = false
+ }
+}
+
+func (pa *paramsAnalyzer) nodeVisitPost(n ir.Node) {
+ if len(pa.values) == 0 {
+ return
+ }
+ pa.condLevelTracker.post(n)
+ switch n.Op() {
+ case ir.OCALLFUNC:
+ ce := n.(*ir.CallExpr)
+ pa.callCheckParams(ce)
+ case ir.OCALLINTER:
+ ce := n.(*ir.CallExpr)
+ pa.callCheckParams(ce)
+ case ir.OIF:
+ ifst := n.(*ir.IfStmt)
+ pa.foldCheckParams(ifst.Cond)
+ case ir.OSWITCH:
+ swst := n.(*ir.SwitchStmt)
+ if swst.Tag != nil {
+ pa.foldCheckParams(swst.Tag)
+ }
+ }
+}
+
+func (pa *paramsAnalyzer) nodeVisitPre(n ir.Node) {
+ if len(pa.values) == 0 {
+ return
+ }
+ pa.condLevelTracker.pre(n)
+}
+
+// condLevelTracker helps keep track very roughly of "level of conditional
+// nesting", e.g. how many "if" statements you have to go through to
+// get to the point where a given stmt executes. Example:
+//
+// cond nesting level
+// func foo() {
+// G = 1 0
+// if x < 10 { 0
+// if y < 10 { 1
+// G = 0 2
+// }
+// }
+// }
+//
+// The intent here is to provide some sort of very abstract relative
+// hotness metric, e.g. "G = 1" above is expected to be executed more
+// often than "G = 0" (in the aggregate, across large numbers of
+// functions).
+type condLevelTracker struct {
+ condLevel int
+}
+
+func (c *condLevelTracker) pre(n ir.Node) {
+ // Increment level of "conditional testing" if we see
+ // an "if" or switch statement, and decrement if in
+ // a loop.
+ switch n.Op() {
+ case ir.OIF, ir.OSWITCH:
+ c.condLevel++
+ case ir.OFOR, ir.ORANGE:
+ c.condLevel--
+ }
+}
+
+func (c *condLevelTracker) post(n ir.Node) {
+ switch n.Op() {
+ case ir.OFOR, ir.ORANGE:
+ c.condLevel++
+ case ir.OIF:
+ c.condLevel--
+ case ir.OSWITCH:
+ c.condLevel--
+ }
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/analyze_func_returns.go b/src/cmd/compile/internal/inline/inlheur/analyze_func_returns.go
new file mode 100644
index 0000000000..2aaa68d1b7
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/analyze_func_returns.go
@@ -0,0 +1,277 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/ir"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "os"
+)
+
+// resultsAnalyzer stores state information for the process of
+// computing flags/properties for the return values of a specific Go
+// function, as part of inline heuristics synthesis.
+type resultsAnalyzer struct {
+ fname string
+ props []ResultPropBits
+ values []resultVal
+ inlineMaxBudget int
+ *nameFinder
+}
+
+// resultVal captures information about a specific result returned from
+// the function we're analyzing; we are interested in cases where
+// the func always returns the same constant, or always returns
+// the same function, etc. This container stores info on the specific
+// scenarios we're looking for.
+type resultVal struct {
+ cval constant.Value
+ fn *ir.Name
+ fnClo bool
+ top bool
+ derived bool // see deriveReturnFlagsFromCallee below
+}
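
Hypothetical input functions (not from the change) illustrating the scenarios tracked here: the first always returns the same constant and would get ResultAlwaysSameConstant for its single result; the second always returns the same named function and would get ResultAlwaysSameFunc (promoted to ResultAlwaysSameInlinableFunc if defaultHandler turns out to be inlinable):

func featureEnabled() bool { return true }

func pickHandler() func() {
	return defaultHandler // defaultHandler: an invented package-level func
}
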
+
+// addResultsAnalyzer creates a new resultsAnalyzer helper object for
+// the function fn, appends it to the analyzers list, and returns the
+// new list. If the function in question doesn't have any returns (or
+// any interesting returns) then the analyzer list is left as is, and
+// the result flags in "fp" are updated accordingly.
+func addResultsAnalyzer(fn *ir.Func, analyzers []propAnalyzer, fp *FuncProps, inlineMaxBudget int, nf *nameFinder) []propAnalyzer {
+ ra, props := makeResultsAnalyzer(fn, inlineMaxBudget, nf)
+ if ra != nil {
+ analyzers = append(analyzers, ra)
+ } else {
+ fp.ResultFlags = props
+ }
+ return analyzers
+}
+
+// makeResultsAnalyzer creates a new helper object to analyze results
+// in function fn. If the function doesn't have any interesting
+// results, a nil helper is returned along with a set of default
+// result flags for the func.
+func makeResultsAnalyzer(fn *ir.Func, inlineMaxBudget int, nf *nameFinder) (*resultsAnalyzer, []ResultPropBits) {
+ results := fn.Type().Results()
+ if len(results) == 0 {
+ return nil, nil
+ }
+ props := make([]ResultPropBits, len(results))
+ if fn.Inl == nil {
+ return nil, props
+ }
+ vals := make([]resultVal, len(results))
+ interestingToAnalyze := false
+ for i := range results {
+ rt := results[i].Type
+ if !rt.IsScalar() && !rt.HasNil() {
+ // existing properties not applicable here (for things
+ // like structs, arrays, slices, etc).
+ continue
+ }
+ // set the "top" flag (as in "top element of data flow lattice")
+ // meaning "we have no info yet, but we might later on".
+ vals[i].top = true
+ interestingToAnalyze = true
+ }
+ if !interestingToAnalyze {
+ return nil, props
+ }
+ ra := &resultsAnalyzer{
+ props: props,
+ values: vals,
+ inlineMaxBudget: inlineMaxBudget,
+ nameFinder: nf,
+ }
+ return ra, nil
+}
+
+// setResults transfers the calculated result properties for this
+// function to 'funcProps'.
+func (ra *resultsAnalyzer) setResults(funcProps *FuncProps) {
+ // Promote ResultAlwaysSameFunc to ResultAlwaysSameInlinableFunc
+ for i := range ra.values {
+ if ra.props[i] == ResultAlwaysSameFunc && !ra.values[i].derived {
+ f := ra.values[i].fn.Func
+ // HACK: in order to allow for call site score
+ // adjustments, we used a relaxed inline budget in
+ // determining inlinability. For the check below, however,
+			// we want to know whether the func in question is
+ // likely to be inlined, as opposed to whether it might
+ // possibly be inlined if all the right score adjustments
+ // happened, so do a simple check based on the cost.
+ if f.Inl != nil && f.Inl.Cost <= int32(ra.inlineMaxBudget) {
+ ra.props[i] = ResultAlwaysSameInlinableFunc
+ }
+ }
+ }
+ funcProps.ResultFlags = ra.props
+}
+
+func (ra *resultsAnalyzer) pessimize() {
+ for i := range ra.props {
+ ra.props[i] = ResultNoInfo
+ }
+}
+
+func (ra *resultsAnalyzer) nodeVisitPre(n ir.Node) {
+}
+
+func (ra *resultsAnalyzer) nodeVisitPost(n ir.Node) {
+ if len(ra.values) == 0 {
+ return
+ }
+ if n.Op() != ir.ORETURN {
+ return
+ }
+ if debugTrace&debugTraceResults != 0 {
+ fmt.Fprintf(os.Stderr, "=+= returns nodevis %v %s\n",
+ ir.Line(n), n.Op().String())
+ }
+
+ // No support currently for named results, so if we see an empty
+ // "return" stmt, be conservative.
+ rs := n.(*ir.ReturnStmt)
+ if len(rs.Results) != len(ra.values) {
+ ra.pessimize()
+ return
+ }
+ for i, r := range rs.Results {
+ ra.analyzeResult(i, r)
+ }
+}
+
+// analyzeResult examines the expression 'n' being returned as the
+// 'ii'th argument in some return statement to see whether it has
+// interesting characteristics (for example, returns a constant), then
+// applies a dataflow "meet" operation to combine this result with any
+// previous result (for the given return slot) that we've already
+// processed.
+func (ra *resultsAnalyzer) analyzeResult(ii int, n ir.Node) {
+ isAllocMem := ra.isAllocatedMem(n)
+ isConcConvItf := ra.isConcreteConvIface(n)
+ constVal := ra.constValue(n)
+ isConst := (constVal != nil)
+ isNil := ra.isNil(n)
+ rfunc := ra.funcName(n)
+ isFunc := (rfunc != nil)
+ isClo := (rfunc != nil && rfunc.Func.OClosure != nil)
+ curp := ra.props[ii]
+ dprops, isDerivedFromCall := ra.deriveReturnFlagsFromCallee(n)
+ newp := ResultNoInfo
+ var newcval constant.Value
+ var newfunc *ir.Name
+
+ if debugTrace&debugTraceResults != 0 {
+ fmt.Fprintf(os.Stderr, "=-= %v: analyzeResult n=%s ismem=%v isconcconv=%v isconst=%v isnil=%v isfunc=%v isclo=%v\n", ir.Line(n), n.Op().String(), isAllocMem, isConcConvItf, isConst, isNil, isFunc, isClo)
+ }
+
+ if ra.values[ii].top {
+ ra.values[ii].top = false
+ // this is the first return we've seen; record
+ // whatever properties it has.
+ switch {
+ case isAllocMem:
+ newp = ResultIsAllocatedMem
+ case isConcConvItf:
+ newp = ResultIsConcreteTypeConvertedToInterface
+ case isFunc:
+ newp = ResultAlwaysSameFunc
+ newfunc = rfunc
+ case isConst:
+ newp = ResultAlwaysSameConstant
+ newcval = constVal
+ case isNil:
+ newp = ResultAlwaysSameConstant
+ newcval = nil
+ case isDerivedFromCall:
+ newp = dprops
+ ra.values[ii].derived = true
+ }
+ } else {
+ if !ra.values[ii].derived {
+ // this is not the first return we've seen; apply
+			// what amounts to a "meet" operator to combine
+ // the properties we see here with what we saw on
+ // the previous returns.
+ switch curp {
+ case ResultIsAllocatedMem:
+ if isAllocMem {
+ newp = ResultIsAllocatedMem
+ }
+ case ResultIsConcreteTypeConvertedToInterface:
+ if isConcConvItf {
+ newp = ResultIsConcreteTypeConvertedToInterface
+ }
+ case ResultAlwaysSameConstant:
+ if isNil && ra.values[ii].cval == nil {
+ newp = ResultAlwaysSameConstant
+ newcval = nil
+ } else if isConst && constant.Compare(constVal, token.EQL, ra.values[ii].cval) {
+ newp = ResultAlwaysSameConstant
+ newcval = constVal
+ }
+ case ResultAlwaysSameFunc:
+ if isFunc && isSameFuncName(rfunc, ra.values[ii].fn) {
+ newp = ResultAlwaysSameFunc
+ newfunc = rfunc
+ }
+ }
+ }
+ }
+ ra.values[ii].fn = newfunc
+ ra.values[ii].fnClo = isClo
+ ra.values[ii].cval = newcval
+ ra.props[ii] = newp
+
+ if debugTrace&debugTraceResults != 0 {
+ fmt.Fprintf(os.Stderr, "=-= %v: analyzeResult newp=%s\n",
+ ir.Line(n), newp)
+ }
+}
+
+// deriveReturnFlagsFromCallee tries to set properties for a given
+// return result where we're returning a call expression; the return value
+// is a return property value and a boolean indicating whether the
+// prop is valid. Examples:
+//
+//    func foo() int { return bar() }
+//    func bar() int { return 42 }
+//    func blix() int { return 43 }
+//    func two(y int) int {
+//        if y < 0 { return bar() } else { return blix() }
+//    }
+//
+// Since "foo" always returns the result of a call to "bar", we can
+// set foo's return property to that of bar. In the case of "two", however,
+// even though each return path returns a constant, we don't know
+// whether the constants are identical, hence we need to be conservative.
+func (ra *resultsAnalyzer) deriveReturnFlagsFromCallee(n ir.Node) (ResultPropBits, bool) {
+ if n.Op() != ir.OCALLFUNC {
+ return 0, false
+ }
+ ce := n.(*ir.CallExpr)
+ if ce.Fun.Op() != ir.ONAME {
+ return 0, false
+ }
+ called := ir.StaticValue(ce.Fun)
+ if called.Op() != ir.ONAME {
+ return 0, false
+ }
+ cname := ra.funcName(called)
+ if cname == nil {
+ return 0, false
+ }
+ calleeProps := propsForFunc(cname.Func)
+ if calleeProps == nil {
+ return 0, false
+ }
+ if len(calleeProps.ResultFlags) != 1 {
+ return 0, false
+ }
+ return calleeProps.ResultFlags[0], true
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/callsite.go b/src/cmd/compile/internal/inline/inlheur/callsite.go
new file mode 100644
index 0000000000..f457dd439b
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/callsite.go
@@ -0,0 +1,149 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/internal/src"
+ "fmt"
+ "io"
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
+// CallSite records useful information about a potentially inlinable
+// (direct) function call. "Callee" is the target of the call, "Call"
+// is the ir node corresponding to the call itself, "Assign" is
+// the top-level assignment statement containing the call (if the call
+// appears in the form of a top-level statement, e.g. "x := foo()"),
+// "Flags" contains properties of the call that might be useful for
+// making inlining decisions, "Score" is the final score assigned to
+// the site, and "ID" is a numeric ID for the site within its
+// containing function.
+type CallSite struct {
+ Callee *ir.Func
+ Call *ir.CallExpr
+ parent *CallSite
+ Assign ir.Node
+ Flags CSPropBits
+
+ ArgProps []ActualExprPropBits
+ Score int
+ ScoreMask scoreAdjustTyp
+ ID uint
+ aux uint8
+}
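+
+// As a rough illustration, for a (hypothetical) call appearing as a
+// top-level statement
+//
+//    x := foo(1, 2)
+//
+// "Callee" would be the ir.Func for foo, "Call" the *ir.CallExpr for
+// foo(1, 2), and "Assign" the enclosing assignment statement that
+// defines x.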
+
+// CallSiteTab is a table of call sites, keyed by call expr.
+// Ideally it would be nice to key the table by src.XPos, but
+// this results in collisions for calls on very long lines (the
+// front end saturates column numbers at 255). We also wind up
+// with many calls that share the same auto-generated pos.
+type CallSiteTab map[*ir.CallExpr]*CallSite
+
+// ActualExprPropBits describes a property of an actual expression (value
+// passed to some specific func argument at a call site).
+type ActualExprPropBits uint8
+
+const (
+ ActualExprConstant ActualExprPropBits = 1 << iota
+ ActualExprIsConcreteConvIface
+ ActualExprIsFunc
+ ActualExprIsInlinableFunc
+)
+
+type CSPropBits uint32
+
+const (
+ CallSiteInLoop CSPropBits = 1 << iota
+ CallSiteOnPanicPath
+ CallSiteInInitFunc
+)
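+
+// As an illustration, a callsite sitting inside a loop within an init
+// function would be expected to carry the value
+// CallSiteInLoop|CallSiteInInitFunc; the generated String method
+// renders such combinations as "CallSiteInLoop|CallSiteInInitFunc".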
+
+type csAuxBits uint8
+
+const (
+ csAuxInlined = 1 << iota
+)
+
+// encodedCallSiteTab is a table keyed by "encoded" callsite
+// (stringified src.XPos plus call site ID) mapping to a value of call
+// property bits and score.
+type encodedCallSiteTab map[string]propsAndScore
+
+type propsAndScore struct {
+ props CSPropBits
+ score int
+ mask scoreAdjustTyp
+}
+
+func (pas propsAndScore) String() string {
+ return fmt.Sprintf("P=%s|S=%d|M=%s", pas.props.String(),
+ pas.score, pas.mask.String())
+}
+
+func (cst CallSiteTab) merge(other CallSiteTab) error {
+ for k, v := range other {
+ if prev, ok := cst[k]; ok {
+ return fmt.Errorf("internal error: collision during call site table merge, fn=%s callsite=%s", prev.Callee.Sym().Name, fmtFullPos(prev.Call.Pos()))
+ }
+ cst[k] = v
+ }
+ return nil
+}
+
+func fmtFullPos(p src.XPos) string {
+ var sb strings.Builder
+ sep := ""
+ base.Ctxt.AllPos(p, func(pos src.Pos) {
+ sb.WriteString(sep)
+ sep = "|"
+ file := filepath.Base(pos.Filename())
+ fmt.Fprintf(&sb, "%s:%d:%d", file, pos.Line(), pos.Col())
+ })
+ return sb.String()
+}
+
+func EncodeCallSiteKey(cs *CallSite) string {
+ var sb strings.Builder
+ // FIXME: maybe rewrite line offsets relative to function start?
+ sb.WriteString(fmtFullPos(cs.Call.Pos()))
+ fmt.Fprintf(&sb, "|%d", cs.ID)
+ return sb.String()
+}
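+
+// For example, a (hypothetical) callsite at line 10, column 23 of
+// foo.go with ID 0 would be encoded as "foo.go:10:23|0"; positions
+// produced by inlining contribute additional "file:line:col"
+// segments, separated by "|".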
+
+func buildEncodedCallSiteTab(tab CallSiteTab) encodedCallSiteTab {
+ r := make(encodedCallSiteTab)
+ for _, cs := range tab {
+ k := EncodeCallSiteKey(cs)
+ r[k] = propsAndScore{
+ props: cs.Flags,
+ score: cs.Score,
+ mask: cs.ScoreMask,
+ }
+ }
+ return r
+}
+
+// dumpCallSiteComments emits comments into the dump file for the
+// callsites in the function of interest. If "ecst" is non-nil, we use
+// that, otherwise we generate a fresh encodedCallSiteTab from "tab".
+func dumpCallSiteComments(w io.Writer, tab CallSiteTab, ecst encodedCallSiteTab) {
+ if ecst == nil {
+ ecst = buildEncodedCallSiteTab(tab)
+ }
+ tags := make([]string, 0, len(ecst))
+ for k := range ecst {
+ tags = append(tags, k)
+ }
+ sort.Strings(tags)
+ for _, s := range tags {
+ v := ecst[s]
+ fmt.Fprintf(w, "// callsite: %s flagstr %q flagval %d score %d mask %d maskstr %q\n", s, v.props.String(), v.props, v.score, v.mask, v.mask.String())
+ }
+ fmt.Fprintf(w, "// %s\n", csDelimiter)
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/cspropbits_string.go b/src/cmd/compile/internal/inline/inlheur/cspropbits_string.go
new file mode 100644
index 0000000000..216f510c99
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/cspropbits_string.go
@@ -0,0 +1,56 @@
+// Code generated by "stringer -bitset -type CSPropBits"; DO NOT EDIT.
+
+package inlheur
+
+import "strconv"
+import "bytes"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[CallSiteInLoop-1]
+ _ = x[CallSiteOnPanicPath-2]
+ _ = x[CallSiteInInitFunc-4]
+}
+
+var _CSPropBits_value = [...]uint64{
+ 0x1, /* CallSiteInLoop */
+ 0x2, /* CallSiteOnPanicPath */
+ 0x4, /* CallSiteInInitFunc */
+}
+
+const _CSPropBits_name = "CallSiteInLoopCallSiteOnPanicPathCallSiteInInitFunc"
+
+var _CSPropBits_index = [...]uint8{0, 14, 33, 51}
+
+func (i CSPropBits) String() string {
+ var b bytes.Buffer
+
+ remain := uint64(i)
+ seen := false
+
+ for k, v := range _CSPropBits_value {
+ x := _CSPropBits_name[_CSPropBits_index[k]:_CSPropBits_index[k+1]]
+ if v == 0 {
+ if i == 0 {
+ b.WriteString(x)
+ return b.String()
+ }
+ continue
+ }
+ if (v & remain) == v {
+ remain &^= v
+ x := _CSPropBits_name[_CSPropBits_index[k]:_CSPropBits_index[k+1]]
+ if seen {
+ b.WriteString("|")
+ }
+ seen = true
+ b.WriteString(x)
+ }
+ }
+ if remain == 0 {
+ return b.String()
+ }
+ return "CSPropBits(0x" + strconv.FormatInt(int64(i), 16) + ")"
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/debugflags_test.go b/src/cmd/compile/internal/inline/inlheur/debugflags_test.go
new file mode 100644
index 0000000000..abf491070f
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/debugflags_test.go
@@ -0,0 +1,65 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "testing"
+)
+
+func TestInlScoreAdjFlagParse(t *testing.T) {
+ scenarios := []struct {
+ value string
+ expok bool
+ }{
+ {
+ value: "returnFeedsConcreteToInterfaceCallAdj:9",
+ expok: true,
+ },
+ {
+ value: "panicPathAdj:-1/initFuncAdj:9",
+ expok: true,
+ },
+ {
+ value: "",
+ expok: false,
+ },
+ {
+ value: "nonsenseAdj:10",
+ expok: false,
+ },
+ {
+ value: "inLoopAdj:",
+ expok: false,
+ },
+ {
+ value: "inLoopAdj:10:10",
+ expok: false,
+ },
+ {
+ value: "inLoopAdj:blah",
+ expok: false,
+ },
+ {
+ value: "/",
+ expok: false,
+ },
+ }
+
+ for _, scenario := range scenarios {
+ err := parseScoreAdj(scenario.value)
+ t.Logf("for value=%q err is %v\n", scenario.value, err)
+ if scenario.expok {
+ if err != nil {
+ t.Errorf("expected parseScoreAdj(%s) ok, got err %v",
+ scenario.value, err)
+ }
+ } else {
+ if err == nil {
+ t.Errorf("expected parseScoreAdj(%s) failure, got success",
+ scenario.value)
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/dumpscores_test.go b/src/cmd/compile/internal/inline/inlheur/dumpscores_test.go
new file mode 100644
index 0000000000..438b70096f
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/dumpscores_test.go
@@ -0,0 +1,109 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+func TestDumpCallSiteScoreDump(t *testing.T) {
+ td := t.TempDir()
+ testenv.MustHaveGoBuild(t)
+
+ scenarios := []struct {
+ name string
+ promoted int
+ indirectlyPromoted int
+ demoted int
+ unchanged int
+ }{
+ {
+ name: "dumpscores",
+ promoted: 1,
+ indirectlyPromoted: 1,
+ demoted: 1,
+ unchanged: 5,
+ },
+ }
+
+ for _, scen := range scenarios {
+ dumpfile, err := gatherInlCallSitesScoresForFile(t, scen.name, td)
+ if err != nil {
+ t.Fatalf("dumping callsite scores for %q: error %v", scen.name, err)
+ }
+ var lines []string
+ if content, err := os.ReadFile(dumpfile); err != nil {
+ t.Fatalf("reading dump %q: error %v", dumpfile, err)
+ } else {
+ lines = strings.Split(string(content), "\n")
+ }
+ prom, indprom, dem, unch := 0, 0, 0, 0
+ for _, line := range lines {
+ switch {
+ case strings.TrimSpace(line) == "":
+ case !strings.Contains(line, "|"):
+ case strings.HasPrefix(line, "#"):
+ case strings.Contains(line, "PROMOTED"):
+ prom++
+ case strings.Contains(line, "INDPROM"):
+ indprom++
+ case strings.Contains(line, "DEMOTED"):
+ dem++
+ default:
+ unch++
+ }
+ }
+ showout := false
+ if prom != scen.promoted {
+ t.Errorf("testcase %q, got %d promoted want %d promoted",
+ scen.name, prom, scen.promoted)
+ showout = true
+ }
+ if indprom != scen.indirectlyPromoted {
+ t.Errorf("testcase %q, got %d indirectly promoted want %d",
+ scen.name, indprom, scen.indirectlyPromoted)
+ showout = true
+ }
+ if dem != scen.demoted {
+ t.Errorf("testcase %q, got %d demoted want %d demoted",
+ scen.name, dem, scen.demoted)
+ showout = true
+ }
+ if unch != scen.unchanged {
+ t.Errorf("testcase %q, got %d unchanged want %d unchanged",
+ scen.name, unch, scen.unchanged)
+ showout = true
+ }
+ if showout {
+ t.Logf(">> dump output: %s", strings.Join(lines, "\n"))
+ }
+ }
+}
+
+// gatherInlCallSitesScoresForFile builds the specified testcase 'testcase'
+// from testdata, passing the "-d=dumpinlcallsitescores=1"
+// compiler option, to produce a dump, then returns the path of the
+// newly created file.
+func gatherInlCallSitesScoresForFile(t *testing.T, testcase string, td string) (string, error) {
+ t.Helper()
+ gopath := "testdata/" + testcase + ".go"
+ outpath := filepath.Join(td, testcase+".a")
+ dumpfile := filepath.Join(td, testcase+".callsites.txt")
+ run := []string{testenv.GoToolPath(t), "build",
+ "-gcflags=-d=dumpinlcallsitescores=1", "-o", outpath, gopath}
+ out, err := testenv.Command(t, run[0], run[1:]...).CombinedOutput()
+ t.Logf("run: %+v\n", run)
+ if err != nil {
+ return "", err
+ }
+ if err := os.WriteFile(dumpfile, out, 0666); err != nil {
+ return "", err
+ }
+ return dumpfile, err
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/eclassify.go b/src/cmd/compile/internal/inline/inlheur/eclassify.go
new file mode 100644
index 0000000000..1e6d1b9e37
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/eclassify.go
@@ -0,0 +1,247 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/ir"
+ "fmt"
+ "os"
+)
+
+// ShouldFoldIfNameConstant analyzes expression tree 'e' to see
+// whether it contains only combinations of simple references to all
+// of the names in 'names' with selected constants + operators. The
+// intent is to identify expressions that could be folded away to a
+// constant if the value of 'n' were available. Return value is TRUE
+// if 'e' does look foldable given the value of 'n', and given that
+// 'e' actually makes reference to 'n'. Some examples where the type
+// of "n" is int64, type of "s" is string, and type of "p" is *byte:
+//
+//    Simple?   Expr
+//    yes       n<10
+//    yes       n*n-100
+//    yes       (n < 10 || n > 100) && (n >= 12 || n <= 99 || n != 101)
+//    yes       s == "foo"
+//    yes       p == nil
+//    no        n<foo()
+//    no        n<1 || n>m
+//    no        float32(n)<1.0
+//    no        *p == 1
+//    no        1 + 100
+//    no        1 / n
+//    no        1 + unsafe.Sizeof(n)
+//
+// To avoid complexities (e.g. nan, inf) we stay away from folding
+// floating point or complex operations (integers, bools, and strings
+// only). We also try to be conservative about avoiding any operation
+// that might result in a panic at runtime, e.g. for "n" with type
+// int64:
+//
+// 1<<(n-9) < 100/(n<<9999)
+//
+// we would return FALSE due to the negative shift count and/or
+// potential divide by zero.
+func ShouldFoldIfNameConstant(n ir.Node, names []*ir.Name) bool {
+ cl := makeExprClassifier(names)
+ var doNode func(ir.Node) bool
+ doNode = func(n ir.Node) bool {
+ ir.DoChildren(n, doNode)
+ cl.Visit(n)
+ return false
+ }
+ doNode(n)
+ if cl.getdisp(n) != exprSimple {
+ return false
+ }
+ for _, v := range cl.names {
+ if !v {
+ return false
+ }
+ }
+ return true
+}
+
+// exprClassifier holds intermediate state about nodes within an
+// expression tree being analyzed by ShouldFoldIfNameConstant. Here
+// "name" is the name node passed in, and "disposition" stores the
+// result of classifying a given IR node.
+type exprClassifier struct {
+ names map[*ir.Name]bool
+ disposition map[ir.Node]disp
+}
+
+type disp int
+
+const (
+ // no info on this expr
+ exprNoInfo disp = iota
+
+ // expr contains only literals
+ exprLiterals
+
+ // expr is legal combination of literals and specified names
+ exprSimple
+)
+
+func (d disp) String() string {
+ switch d {
+ case exprNoInfo:
+ return "noinfo"
+ case exprSimple:
+ return "simple"
+ case exprLiterals:
+ return "literals"
+ default:
+ return fmt.Sprintf("unknown<%d>", d)
+ }
+}
+
+func makeExprClassifier(names []*ir.Name) *exprClassifier {
+ m := make(map[*ir.Name]bool, len(names))
+ for _, n := range names {
+ m[n] = false
+ }
+ return &exprClassifier{
+ names: m,
+ disposition: make(map[ir.Node]disp),
+ }
+}
+
+// Visit sets the classification for 'n' based on the previously
+// calculated classifications for n's children, as part of a bottom-up
+// walk over an expression tree.
+func (ec *exprClassifier) Visit(n ir.Node) {
+
+ ndisp := exprNoInfo
+
+ binparts := func(n ir.Node) (ir.Node, ir.Node) {
+ if lex, ok := n.(*ir.LogicalExpr); ok {
+ return lex.X, lex.Y
+ } else if bex, ok := n.(*ir.BinaryExpr); ok {
+ return bex.X, bex.Y
+ } else {
+ panic("bad")
+ }
+ }
+
+ t := n.Type()
+ if t == nil {
+ if debugTrace&debugTraceExprClassify != 0 {
+ fmt.Fprintf(os.Stderr, "=-= *** untyped op=%s\n",
+ n.Op().String())
+ }
+ } else if t.IsInteger() || t.IsString() || t.IsBoolean() || t.HasNil() {
+ switch n.Op() {
+ // FIXME: maybe add support for OADDSTR?
+ case ir.ONIL:
+ ndisp = exprLiterals
+
+ case ir.OLITERAL:
+ if _, ok := n.(*ir.BasicLit); ok {
+ } else {
+ panic("unexpected")
+ }
+ ndisp = exprLiterals
+
+ case ir.ONAME:
+ nn := n.(*ir.Name)
+ if _, ok := ec.names[nn]; ok {
+ ndisp = exprSimple
+ ec.names[nn] = true
+ } else {
+ sv := ir.StaticValue(n)
+ if sv.Op() == ir.ONAME {
+ nn = sv.(*ir.Name)
+ }
+ if _, ok := ec.names[nn]; ok {
+ ndisp = exprSimple
+ ec.names[nn] = true
+ }
+ }
+
+ case ir.ONOT,
+ ir.OPLUS,
+ ir.ONEG:
+ uex := n.(*ir.UnaryExpr)
+ ndisp = ec.getdisp(uex.X)
+
+ case ir.OEQ,
+ ir.ONE,
+ ir.OLT,
+ ir.OGT,
+ ir.OGE,
+ ir.OLE:
+ // compare ops
+ x, y := binparts(n)
+ ndisp = ec.dispmeet(x, y)
+ if debugTrace&debugTraceExprClassify != 0 {
+ fmt.Fprintf(os.Stderr, "=-= meet(%s,%s) = %s for op=%s\n",
+ ec.getdisp(x), ec.getdisp(y), ec.dispmeet(x, y),
+ n.Op().String())
+ }
+ case ir.OLSH,
+ ir.ORSH,
+ ir.ODIV,
+ ir.OMOD:
+ x, y := binparts(n)
+ if ec.getdisp(y) == exprLiterals {
+ ndisp = ec.dispmeet(x, y)
+ }
+
+ case ir.OADD,
+ ir.OSUB,
+ ir.OOR,
+ ir.OXOR,
+ ir.OMUL,
+ ir.OAND,
+ ir.OANDNOT,
+ ir.OANDAND,
+ ir.OOROR:
+ x, y := binparts(n)
+ if debugTrace&debugTraceExprClassify != 0 {
+ fmt.Fprintf(os.Stderr, "=-= meet(%s,%s) = %s for op=%s\n",
+ ec.getdisp(x), ec.getdisp(y), ec.dispmeet(x, y),
+ n.Op().String())
+ }
+ ndisp = ec.dispmeet(x, y)
+ }
+ }
+
+ if debugTrace&debugTraceExprClassify != 0 {
+ fmt.Fprintf(os.Stderr, "=-= op=%s disp=%v\n", n.Op().String(),
+ ndisp.String())
+ }
+
+ ec.disposition[n] = ndisp
+}
+
+func (ec *exprClassifier) getdisp(x ir.Node) disp {
+ if d, ok := ec.disposition[x]; ok {
+ return d
+ } else {
+ panic("missing node from disp table")
+ }
+}
+
+// dispmeet performs a "meet" operation on the data flow states of
+// node x and y (where the term "meet" is being drawn from traditional
+// lattice-theoretical data flow analysis terminology).
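+// In effect, the meet table over the three states (using the String
+// names above) is:
+//
+//    meet(noinfo, _)          = noinfo
+//    meet(simple, simple)     = simple
+//    meet(simple, literals)   = simple
+//    meet(literals, literals) = literals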
+func (ec *exprClassifier) dispmeet(x, y ir.Node) disp {
+ xd := ec.getdisp(x)
+ if xd == exprNoInfo {
+ return exprNoInfo
+ }
+ yd := ec.getdisp(y)
+ if yd == exprNoInfo {
+ return exprNoInfo
+ }
+ if xd == exprSimple || yd == exprSimple {
+ return exprSimple
+ }
+ if xd != exprLiterals || yd != exprLiterals {
+ panic("unexpected")
+ }
+ return exprLiterals
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/funcprop_string.go b/src/cmd/compile/internal/inline/inlheur/funcprop_string.go
new file mode 100644
index 0000000000..d16e4d3378
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/funcprop_string.go
@@ -0,0 +1,44 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "fmt"
+ "strings"
+)
+
+func (fp *FuncProps) String() string {
+ return fp.ToString("")
+}
+
+func (fp *FuncProps) ToString(prefix string) string {
+ var sb strings.Builder
+ if fp.Flags != 0 {
+ fmt.Fprintf(&sb, "%sFlags %s\n", prefix, fp.Flags)
+ }
+ flagSliceToSB[ParamPropBits](&sb, fp.ParamFlags,
+ prefix, "ParamFlags")
+ flagSliceToSB[ResultPropBits](&sb, fp.ResultFlags,
+ prefix, "ResultFlags")
+ return sb.String()
+}
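+
+// As a rough sketch of the output format, a (hypothetical) function
+// whose only interesting property is that its first parameter feeds
+// an indirect call would render via ToString("#") as something like:
+//
+//    #ParamFlags
+//    # 0 ParamFeedsIndirectCall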
+
+func flagSliceToSB[T interface {
+ ~uint32
+ String() string
+}](sb *strings.Builder, sl []T, prefix string, tag string) {
+ var sb2 strings.Builder
+ foundnz := false
+ fmt.Fprintf(&sb2, "%s%s\n", prefix, tag)
+ for i, e := range sl {
+ if e != 0 {
+ foundnz = true
+ }
+ fmt.Fprintf(&sb2, "%s %d %s\n", prefix, i, e.String())
+ }
+ if foundnz {
+ sb.WriteString(sb2.String())
+ }
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/funcpropbits_string.go b/src/cmd/compile/internal/inline/inlheur/funcpropbits_string.go
new file mode 100644
index 0000000000..28de4a9ced
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/funcpropbits_string.go
@@ -0,0 +1,58 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by "stringer -bitset -type FuncPropBits"; DO NOT EDIT.
+
+package inlheur
+
+import (
+ "bytes"
+ "strconv"
+)
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[FuncPropNeverReturns-1]
+}
+
+var _FuncPropBits_value = [...]uint64{
+ 0x1, /* FuncPropNeverReturns */
+}
+
+const _FuncPropBits_name = "FuncPropNeverReturns"
+
+var _FuncPropBits_index = [...]uint8{0, 20}
+
+func (i FuncPropBits) String() string {
+ var b bytes.Buffer
+
+ remain := uint64(i)
+ seen := false
+
+ for k, v := range _FuncPropBits_value {
+ x := _FuncPropBits_name[_FuncPropBits_index[k]:_FuncPropBits_index[k+1]]
+ if v == 0 {
+ if i == 0 {
+ b.WriteString(x)
+ return b.String()
+ }
+ continue
+ }
+ if (v & remain) == v {
+ remain &^= v
+ x := _FuncPropBits_name[_FuncPropBits_index[k]:_FuncPropBits_index[k+1]]
+ if seen {
+ b.WriteString("|")
+ }
+ seen = true
+ b.WriteString(x)
+ }
+ }
+ if remain == 0 {
+ return b.String()
+ }
+ return "FuncPropBits(0x" + strconv.FormatInt(int64(i), 16) + ")"
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/funcprops_test.go b/src/cmd/compile/internal/inline/inlheur/funcprops_test.go
new file mode 100644
index 0000000000..c04e604882
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/funcprops_test.go
@@ -0,0 +1,530 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "bufio"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+)
+
+var remasterflag = flag.Bool("update-expected", false, "if true, generate updated golden results in testcases for all props tests")
+
+func TestFuncProperties(t *testing.T) {
+ td := t.TempDir()
+ // td = "/tmp/qqq"
+ // os.RemoveAll(td)
+ // os.Mkdir(td, 0777)
+ testenv.MustHaveGoBuild(t)
+
+ // NOTE: this testpoint has the unfortunate characteristic that it
+ // relies on the installed compiler, meaning that if you make
+ // changes to the inline heuristics code in your working copy and
+ // then run the test, it will test the installed compiler and not
+ // your local modifications. TODO: decide whether to convert this
+ // to building a fresh compiler on the fly, or using some other
+ // scheme.
+
+ testcases := []string{"funcflags", "returns", "params",
+ "acrosscall", "calls", "returns2"}
+ for _, tc := range testcases {
+ dumpfile, err := gatherPropsDumpForFile(t, tc, td)
+ if err != nil {
+ t.Fatalf("dumping func props for %q: error %v", tc, err)
+ }
+ // Read in the newly generated dump.
+ dentries, dcsites, derr := readDump(t, dumpfile)
+ if derr != nil {
+ t.Fatalf("reading func prop dump: %v", derr)
+ }
+ if *remasterflag {
+ updateExpected(t, tc, dentries, dcsites)
+ continue
+ }
+ // Generate expected dump.
+ epath, egerr := genExpected(td, tc)
+ if egerr != nil {
+ t.Fatalf("generating expected func prop dump: %v", egerr)
+ }
+ // Read in the expected result entries.
+ eentries, ecsites, eerr := readDump(t, epath)
+ if eerr != nil {
+ t.Fatalf("reading expected func prop dump: %v", eerr)
+ }
+ // Compare new vs expected.
+ n := len(dentries)
+ eidx := 0
+ for i := 0; i < n; i++ {
+ dentry := dentries[i]
+ dcst := dcsites[i]
+ if !interestingToCompare(dentry.fname) {
+ continue
+ }
+ if eidx >= len(eentries) {
+ t.Errorf("testcase %s missing expected entry for %s, skipping", tc, dentry.fname)
+ continue
+ }
+ eentry := eentries[eidx]
+ ecst := ecsites[eidx]
+ eidx++
+ if dentry.fname != eentry.fname {
+ t.Errorf("got fn %q wanted %q, skipping checks",
+ dentry.fname, eentry.fname)
+ continue
+ }
+ compareEntries(t, tc, &dentry, dcst, &eentry, ecst)
+ }
+ }
+}
+
+func propBitsToString[T interface{ String() string }](sl []T) string {
+ var sb strings.Builder
+ for i, f := range sl {
+ fmt.Fprintf(&sb, "%d: %s\n", i, f.String())
+ }
+ return sb.String()
+}
+
+func compareEntries(t *testing.T, tc string, dentry *fnInlHeur, dcsites encodedCallSiteTab, eentry *fnInlHeur, ecsites encodedCallSiteTab) {
+ dfp := dentry.props
+ efp := eentry.props
+ dfn := dentry.fname
+
+ // Compare function flags.
+ if dfp.Flags != efp.Flags {
+ t.Errorf("testcase %q: Flags mismatch for %q: got %s, wanted %s",
+ tc, dfn, dfp.Flags.String(), efp.Flags.String())
+ }
+ // Compare returns
+ rgot := propBitsToString[ResultPropBits](dfp.ResultFlags)
+ rwant := propBitsToString[ResultPropBits](efp.ResultFlags)
+ if rgot != rwant {
+ t.Errorf("testcase %q: Results mismatch for %q: got:\n%swant:\n%s",
+ tc, dfn, rgot, rwant)
+ }
+ // Compare receiver + params.
+ pgot := propBitsToString[ParamPropBits](dfp.ParamFlags)
+ pwant := propBitsToString[ParamPropBits](efp.ParamFlags)
+ if pgot != pwant {
+ t.Errorf("testcase %q: Params mismatch for %q: got:\n%swant:\n%s",
+ tc, dfn, pgot, pwant)
+ }
+ // Compare call sites.
+ for k, ve := range ecsites {
+ if vd, ok := dcsites[k]; !ok {
+ t.Errorf("testcase %q missing expected callsite %q in func %q", tc, k, dfn)
+ continue
+ } else {
+ if vd != ve {
+ t.Errorf("testcase %q callsite %q in func %q: got %+v want %+v",
+ tc, k, dfn, vd.String(), ve.String())
+ }
+ }
+ }
+ for k := range dcsites {
+ if _, ok := ecsites[k]; !ok {
+ t.Errorf("testcase %q unexpected extra callsite %q in func %q", tc, k, dfn)
+ }
+ }
+}
+
+type dumpReader struct {
+ s *bufio.Scanner
+ t *testing.T
+ p string
+ ln int
+}
+
+// readDump reads in the contents of a dump file produced
+// by the "-d=dumpinlfuncprops=..." command line flag by the Go
+// compiler. It breaks the dump down into separate sections
+// by function, then deserializes each func section into a
+// fnInlHeur object and returns a slice of those objects.
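+// The per-function layout consumed by readEntry below is roughly: an
+// info line ("file funcname line"), a block of comments ending at
+// comDelimiter, one line of JSON-encoded FuncProps, zero or more
+// "callsite:" lines ending at csDelimiter, and finally fnDelimiter.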
+func readDump(t *testing.T, path string) ([]fnInlHeur, []encodedCallSiteTab, error) {
+ content, err := os.ReadFile(path)
+ if err != nil {
+ return nil, nil, err
+ }
+ dr := &dumpReader{
+ s: bufio.NewScanner(strings.NewReader(string(content))),
+ t: t,
+ p: path,
+ ln: 1,
+ }
+ // consume header comment until preamble delimiter.
+ found := false
+ for dr.scan() {
+ if dr.curLine() == preambleDelimiter {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return nil, nil, fmt.Errorf("malformed testcase file %s, missing preamble delimiter", path)
+ }
+ res := []fnInlHeur{}
+ csres := []encodedCallSiteTab{}
+ for {
+ dentry, dcst, err := dr.readEntry()
+ if err != nil {
+ t.Fatalf("reading func prop dump: %v", err)
+ }
+ if dentry.fname == "" {
+ break
+ }
+ res = append(res, dentry)
+ csres = append(csres, dcst)
+ }
+ return res, csres, nil
+}
+
+func (dr *dumpReader) scan() bool {
+ v := dr.s.Scan()
+ if v {
+ dr.ln++
+ }
+ return v
+}
+
+func (dr *dumpReader) curLine() string {
+ res := strings.TrimSpace(dr.s.Text())
+ if !strings.HasPrefix(res, "// ") {
+ dr.t.Fatalf("malformed line %s:%d, no comment: %s", dr.p, dr.ln, res)
+ }
+ return res[3:]
+}
+
+// readObjBlob reads in a series of commented lines until
+// it hits a delimiter, then returns the contents of the comments.
+func (dr *dumpReader) readObjBlob(delim string) (string, error) {
+ var sb strings.Builder
+ foundDelim := false
+ for dr.scan() {
+ line := dr.curLine()
+ if delim == line {
+ foundDelim = true
+ break
+ }
+ sb.WriteString(line + "\n")
+ }
+ if err := dr.s.Err(); err != nil {
+ return "", err
+ }
+ if !foundDelim {
+ return "", fmt.Errorf("malformed input %s, missing delimiter %q",
+ dr.p, delim)
+ }
+ return sb.String(), nil
+}
+
+// readEntry reads a single function's worth of material from
+// a file produced by the "-d=dumpinlfuncprops=..." command line
+// flag. It deserializes the json for the func properties and
+// returns the resulting properties and function name. EOF is
+// signaled by a nil FuncProps return (with no error).
+func (dr *dumpReader) readEntry() (fnInlHeur, encodedCallSiteTab, error) {
+ var funcInlHeur fnInlHeur
+ var callsites encodedCallSiteTab
+ if !dr.scan() {
+ return funcInlHeur, callsites, nil
+ }
+ // first line contains info about function: file/name/line
+ info := dr.curLine()
+ chunks := strings.Fields(info)
+ funcInlHeur.file = chunks[0]
+ funcInlHeur.fname = chunks[1]
+ if _, err := fmt.Sscanf(chunks[2], "%d", &funcInlHeur.line); err != nil {
+ return funcInlHeur, callsites, fmt.Errorf("scanning line %q: %v", info, err)
+ }
+ // consume comments until and including delimiter
+ for {
+ if !dr.scan() {
+ break
+ }
+ if dr.curLine() == comDelimiter {
+ break
+ }
+ }
+
+ // Consume JSON for encoded props.
+ dr.scan()
+ line := dr.curLine()
+ fp := &FuncProps{}
+ if err := json.Unmarshal([]byte(line), fp); err != nil {
+ return funcInlHeur, callsites, err
+ }
+ funcInlHeur.props = fp
+
+ // Consume callsites.
+ callsites = make(encodedCallSiteTab)
+ for dr.scan() {
+ line := dr.curLine()
+ if line == csDelimiter {
+ break
+ }
+ // expected format: "// callsite: <expanded pos> flagstr <desc> flagval <flags> score <score> mask <scoremask> maskstr <scoremaskstring>"
+ fields := strings.Fields(line)
+ if len(fields) != 12 {
+ return funcInlHeur, nil, fmt.Errorf("malformed callsite (nf=%d) %s line %d: %s", len(fields), dr.p, dr.ln, line)
+ }
+ if fields[2] != "flagstr" || fields[4] != "flagval" || fields[6] != "score" || fields[8] != "mask" || fields[10] != "maskstr" {
+ return funcInlHeur, nil, fmt.Errorf("malformed callsite %s line %d: %s",
+ dr.p, dr.ln, line)
+ }
+ tag := fields[1]
+ flagstr := fields[5]
+ flags, err := strconv.Atoi(flagstr)
+ if err != nil {
+ return funcInlHeur, nil, fmt.Errorf("bad flags val %s line %d: %q err=%v",
+ dr.p, dr.ln, line, err)
+ }
+ scorestr := fields[7]
+ score, err2 := strconv.Atoi(scorestr)
+ if err2 != nil {
+ return funcInlHeur, nil, fmt.Errorf("bad score val %s line %d: %q err=%v",
+ dr.p, dr.ln, line, err2)
+ }
+ maskstr := fields[9]
+ mask, err3 := strconv.Atoi(maskstr)
+ if err3 != nil {
+ return funcInlHeur, nil, fmt.Errorf("bad mask val %s line %d: %q err=%v",
+ dr.p, dr.ln, line, err3)
+ }
+ callsites[tag] = propsAndScore{
+ props: CSPropBits(flags),
+ score: score,
+ mask: scoreAdjustTyp(mask),
+ }
+ }
+
+ // Consume function delimiter.
+ dr.scan()
+ line = dr.curLine()
+ if line != fnDelimiter {
+ return funcInlHeur, nil, fmt.Errorf("malformed testcase file %q, missing delimiter %q", dr.p, fnDelimiter)
+ }
+
+ return funcInlHeur, callsites, nil
+}
+
+// gatherPropsDumpForFile builds the specified testcase 'testcase' from
+// testdata/props passing the "-d=dumpinlfuncprops=..." compiler option,
+// to produce a properties dump, then returns the path of the newly
+// created file. NB: we can't use "go tool compile" here, since
+// some of the test cases import stdlib packages (such as "os").
+// This means using "go build", which is problematic since the
+// Go command can potentially cache the results of the compile step,
+// causing the test to fail when being run interactively. E.g.
+//
+// $ rm -f dump.txt
+// $ go build -o foo.a -gcflags=-d=dumpinlfuncprops=dump.txt foo.go
+// $ rm -f dump.txt foo.a
+// $ go build -o foo.a -gcflags=-d=dumpinlfuncprops=dump.txt foo.go
+// $ ls foo.a dump.txt > /dev/null
+// ls : cannot access 'dump.txt': No such file or directory
+// $
+//
+// For this reason, pick a unique filename for the dump, so as to
+// defeat the caching.
+func gatherPropsDumpForFile(t *testing.T, testcase string, td string) (string, error) {
+ t.Helper()
+ gopath := "testdata/props/" + testcase + ".go"
+ outpath := filepath.Join(td, testcase+".a")
+ salt := fmt.Sprintf(".p%dt%d", os.Getpid(), time.Now().UnixNano())
+ dumpfile := filepath.Join(td, testcase+salt+".dump.txt")
+ run := []string{testenv.GoToolPath(t), "build",
+ "-gcflags=-d=dumpinlfuncprops=" + dumpfile, "-o", outpath, gopath}
+ out, err := testenv.Command(t, run[0], run[1:]...).CombinedOutput()
+ if err != nil {
+ t.Logf("compile command: %+v", run)
+ }
+ if strings.TrimSpace(string(out)) != "" {
+ t.Logf("%s", out)
+ }
+ return dumpfile, err
+}
+
+// genExpected reads in a given Go testcase file, strips out all the
+// unindented (column 0) comments, writes them out to a new file, and
+// returns the path of that new file. By picking out just the comments
+// from the Go file we wind up with something that resembles the
+// output from a "-d=dumpinlfuncprops=..." compilation.
+func genExpected(td string, testcase string) (string, error) {
+ epath := filepath.Join(td, testcase+".expected")
+ outf, err := os.OpenFile(epath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ return "", err
+ }
+ gopath := "testdata/props/" + testcase + ".go"
+ content, err := os.ReadFile(gopath)
+ if err != nil {
+ return "", err
+ }
+ lines := strings.Split(string(content), "\n")
+ for _, line := range lines[3:] {
+ if !strings.HasPrefix(line, "// ") {
+ continue
+ }
+ fmt.Fprintf(outf, "%s\n", line)
+ }
+ if err := outf.Close(); err != nil {
+ return "", err
+ }
+ return epath, nil
+}
+
+type upexState struct {
+ dentries []fnInlHeur
+ newgolines []string
+ atline map[uint]uint
+}
+
+func mkUpexState(dentries []fnInlHeur) *upexState {
+ atline := make(map[uint]uint)
+ for _, e := range dentries {
+ atline[e.line] = atline[e.line] + 1
+ }
+ return &upexState{
+ dentries: dentries,
+ atline: atline,
+ }
+}
+
+// updateExpected takes a given Go testcase file X.go and writes out a
+// new/updated version of the file to X.go.new, where the column-0
+// "expected" comments have been updated using fresh data from
+// "dentries".
+//
+// Writing of expected results is complicated by closures and by
+// generics, where you can have multiple functions that all share the
+// same starting line. Currently we combine all the dups and
+// closures into a single pre-func comment.
+func updateExpected(t *testing.T, testcase string, dentries []fnInlHeur, dcsites []encodedCallSiteTab) {
+ nd := len(dentries)
+
+ ues := mkUpexState(dentries)
+
+ gopath := "testdata/props/" + testcase + ".go"
+ newgopath := "testdata/props/" + testcase + ".go.new"
+
+ // Read the existing Go file.
+ content, err := os.ReadFile(gopath)
+ if err != nil {
+ t.Fatalf("opening %s: %v", gopath, err)
+ }
+ golines := strings.Split(string(content), "\n")
+
+ // Preserve copyright.
+ ues.newgolines = append(ues.newgolines, golines[:4]...)
+ if !strings.HasPrefix(golines[0], "// Copyright") {
+ t.Fatalf("missing copyright from existing testcase")
+ }
+ golines = golines[4:]
+
+ clore := regexp.MustCompile(`.+\.func\d+[\.\d]*$`)
+
+ emitFunc := func(e *fnInlHeur, dcsites encodedCallSiteTab,
+ instance, atl uint) {
+ var sb strings.Builder
+ dumpFnPreamble(&sb, e, dcsites, instance, atl)
+ ues.newgolines = append(ues.newgolines,
+ strings.Split(strings.TrimSpace(sb.String()), "\n")...)
+ }
+
+ // Write file preamble with "DO NOT EDIT" message and such.
+ var sb strings.Builder
+ dumpFilePreamble(&sb)
+ ues.newgolines = append(ues.newgolines,
+ strings.Split(strings.TrimSpace(sb.String()), "\n")...)
+
+ // Helper to add a clump of functions to the output file.
+ processClump := func(idx int, emit bool) int {
+ // Process func itself, plus anything else defined
+ // on the same line
+ atl := ues.atline[dentries[idx].line]
+ for k := uint(0); k < atl; k++ {
+ if emit {
+ emitFunc(&dentries[idx], dcsites[idx], k, atl)
+ }
+ idx++
+ }
+ // now process any closures it contains
+ ncl := 0
+ for idx < nd {
+ nfn := dentries[idx].fname
+ if !clore.MatchString(nfn) {
+ break
+ }
+ ncl++
+ if emit {
+ emitFunc(&dentries[idx], dcsites[idx], 0, 1)
+ }
+ idx++
+ }
+ return idx
+ }
+
+ didx := 0
+ for _, line := range golines {
+ if strings.HasPrefix(line, "func ") {
+
+ // We have a function definition.
+ // Pick out the corresponding entry or entries in the dump
+ // and emit if interesting (or skip if not).
+ dentry := dentries[didx]
+ emit := interestingToCompare(dentry.fname)
+ didx = processClump(didx, emit)
+ }
+
+ // Consume all existing comments.
+ if strings.HasPrefix(line, "//") {
+ continue
+ }
+ ues.newgolines = append(ues.newgolines, line)
+ }
+
+ if didx != nd {
+ t.Logf("didx=%d wanted %d", didx, nd)
+ }
+
+ // Open new Go file and write contents.
+ of, err := os.OpenFile(newgopath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ t.Fatalf("opening %s: %v", newgopath, err)
+ }
+ fmt.Fprintf(of, "%s", strings.Join(ues.newgolines, "\n"))
+ if err := of.Close(); err != nil {
+ t.Fatalf("closing %s: %v", newgopath, err)
+ }
+
+ t.Logf("update-expected: emitted updated file %s", newgopath)
+ t.Logf("please compare the two files, then overwrite %s with %s\n",
+ gopath, newgopath)
+}
+
+// interestingToCompare returns TRUE if we want to compare results
+// for function 'fname'.
+func interestingToCompare(fname string) bool {
+ if strings.HasPrefix(fname, "init.") {
+ return true
+ }
+ if strings.HasPrefix(fname, "T_") {
+ return true
+ }
+ f := strings.Split(fname, ".")
+ if len(f) == 2 && strings.HasPrefix(f[1], "T_") {
+ return true
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/function_properties.go b/src/cmd/compile/internal/inline/inlheur/function_properties.go
new file mode 100644
index 0000000000..b90abf976a
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/function_properties.go
@@ -0,0 +1,98 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+// This file defines a set of Go function "properties" intended to
+// guide inlining heuristics; these properties may apply to the
+// function as a whole, or to one or more function return values or
+// parameters.
+//
+// IMPORTANT: function properties are produced on a "best effort"
+// basis, meaning that the code that computes them doesn't verify that
+// the properties are guaranteed to be true in 100% of cases. For this
+// reason, properties should only be used to drive always-safe
+// optimization decisions (e.g. "should I inline this call", or
+// "should I unroll this loop") as opposed to potentially unsafe IR
+// alterations that could change program semantics (e.g. "can I delete
+// this variable" or "can I move this statement to a new location").
+//
+//----------------------------------------------------------------
+
+// FuncProps describes a set of function or method properties that may
+// be useful for inlining heuristics. Here 'Flags' are properties that
+// we think apply to the entire function; 'ParamFlags' are
+// properties of specific function params (or the receiver), and
+// 'ResultFlags' are properties we think will apply to values
+// of specific results. Note that 'ParamFlags' includes an entry for
+// the receiver if applicable, and does include entries for blank
+// params; for a function such as "func foo(_ int, b byte, _ float32)"
+// the length of ParamFlags will be 3.
+type FuncProps struct {
+ Flags FuncPropBits
+ ParamFlags []ParamPropBits // slot 0 receiver if applicable
+ ResultFlags []ResultPropBits
+}
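+
+// As hypothetical illustrations of the properties defined below, for
+//
+//    func mk(n int) []int  { return make([]int, n) }
+//    func fail(msg string) { panic(msg) }
+//
+// we would expect mk's ResultFlags[0] to include ResultIsAllocatedMem
+// and fail's Flags to include FuncPropNeverReturns.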
+
+type FuncPropBits uint32
+
+const (
+ // Function always panics or invokes os.Exit() or a func that does
+ // likewise.
+ FuncPropNeverReturns FuncPropBits = 1 << iota
+)
+
+type ParamPropBits uint32
+
+const (
+ // No info about this param
+ ParamNoInfo ParamPropBits = 0
+
+ // Parameter value feeds unmodified into a top-level interface
+ // call (this assumes the parameter is of interface type).
+ ParamFeedsInterfaceMethodCall ParamPropBits = 1 << iota
+
+ // Parameter value feeds unmodified into an interface call that
+ // may be conditional/nested and not always executed (this assumes
+ // the parameter is of interface type).
+ ParamMayFeedInterfaceMethodCall ParamPropBits = 1 << iota
+
+ // Parameter value feeds unmodified into a top level indirect
+ // function call (assumes parameter is of function type).
+ ParamFeedsIndirectCall
+
+ // Parameter value feeds unmodified into an indirect function call
+ // that is conditional/nested (not guaranteed to execute). Assumes
+ // parameter is of function type.
+ ParamMayFeedIndirectCall
+
+ // Parameter value feeds unmodified into a top level "switch"
+ // statement or "if" statement simple expressions (see more on
+ // "simple" expression classification below).
+ ParamFeedsIfOrSwitch
+
+ // Parameter value feeds unmodified into a "switch" or "if"
+ // statement simple expressions (see more on "simple" expression
+ // classification below), where the if/switch is
+ // conditional/nested.
+ ParamMayFeedIfOrSwitch
+)
+
+type ResultPropBits uint32
+
+const (
+ // No info about this result
+ ResultNoInfo ResultPropBits = 0
+ // This result always contains allocated memory.
+ ResultIsAllocatedMem ResultPropBits = 1 << iota
+ // This result is always a single concrete type that is
+ // implicitly converted to interface.
+ ResultIsConcreteTypeConvertedToInterface
+ // Result is always the same non-composite compile time constant.
+ ResultAlwaysSameConstant
+ // Result is always the same function or closure.
+ ResultAlwaysSameFunc
+ // Result is always the same (potentially) inlinable function or closure.
+ ResultAlwaysSameInlinableFunc
+)
diff --git a/src/cmd/compile/internal/inline/inlheur/names.go b/src/cmd/compile/internal/inline/inlheur/names.go
new file mode 100644
index 0000000000..022385087b
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/names.go
@@ -0,0 +1,129 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/ir"
+ "go/constant"
+)
+
+// nameFinder provides a set of "isXXX" query methods for clients to
+// ask whether a given AST node corresponds to a function, a constant
+// value, and so on. These methods use an underlying ir.ReassignOracle
+// to return more precise results in cases where an "interesting"
+// value is assigned to a singly-defined local temp. Example:
+//
+//    const q = 101
+//    fq := func() int { return q }
+//    copyOfConstant := q
+//    copyOfFunc := fq
+//    interestingCall(copyOfConstant, copyOfFunc)
+//
+// A name finder query method invoked on the arguments being passed to
+// "interestingCall" will be able detect that 'copyOfConstant' always
+// evaluates to a constant (even though it is in fact a PAUTO local
+// variable). A given nameFinder can also operate without using
+// ir.ReassignOracle (in cases where it is not practical to look
+// at the entire function); in such cases queries will still work
+// for explicit constant values and functions.
+type nameFinder struct {
+ ro *ir.ReassignOracle
+}
+
+// newNameFinder returns a new nameFinder object with a reassignment
+// oracle initialized based on the function fn, or if fn is nil,
+// without an underlying ReassignOracle.
+func newNameFinder(fn *ir.Func) *nameFinder {
+ var ro *ir.ReassignOracle
+ if fn != nil {
+ ro = &ir.ReassignOracle{}
+ ro.Init(fn)
+ }
+ return &nameFinder{ro: ro}
+}
+
+// funcName returns the *ir.Name for the func or method
+// corresponding to node 'n', or nil if n can't be proven
+// to contain a function value.
+func (nf *nameFinder) funcName(n ir.Node) *ir.Name {
+ sv := n
+ if nf.ro != nil {
+ sv = nf.ro.StaticValue(n)
+ }
+ if name := ir.StaticCalleeName(sv); name != nil {
+ return name
+ }
+ return nil
+}
+
+// isAllocatedMem returns true if node n corresponds to a memory
+// allocation expression (make, new, or equivalent).
+func (nf *nameFinder) isAllocatedMem(n ir.Node) bool {
+ sv := n
+ if nf.ro != nil {
+ sv = nf.ro.StaticValue(n)
+ }
+ switch sv.Op() {
+ case ir.OMAKESLICE, ir.ONEW, ir.OPTRLIT, ir.OSLICELIT:
+ return true
+ }
+ return false
+}
+
+// constValue returns the underlying constant.Value for an AST node n
+// if n is itself a constant value/expr, or if n is a singly assigned
+// local containing a constant expr/value (or nil if not constant).
+func (nf *nameFinder) constValue(n ir.Node) constant.Value {
+ sv := n
+ if nf.ro != nil {
+ sv = nf.ro.StaticValue(n)
+ }
+ if sv.Op() == ir.OLITERAL {
+ return sv.Val()
+ }
+ return nil
+}
+
+// isNil returns whether n is nil (or a singly
+// assigned local containing nil).
+func (nf *nameFinder) isNil(n ir.Node) bool {
+ sv := n
+ if nf.ro != nil {
+ sv = nf.ro.StaticValue(n)
+ }
+ return sv.Op() == ir.ONIL
+}
+
+func (nf *nameFinder) staticValue(n ir.Node) ir.Node {
+ if nf.ro == nil {
+ return n
+ }
+ return nf.ro.StaticValue(n)
+}
+
+func (nf *nameFinder) reassigned(n *ir.Name) bool {
+ if nf.ro == nil {
+ return true
+ }
+ return nf.ro.Reassigned(n)
+}
+
+func (nf *nameFinder) isConcreteConvIface(n ir.Node) bool {
+ sv := n
+ if nf.ro != nil {
+ sv = nf.ro.StaticValue(n)
+ }
+ if sv.Op() != ir.OCONVIFACE {
+ return false
+ }
+ return !sv.(*ir.ConvExpr).X.Type().IsInterface()
+}
+
+func isSameFuncName(v1, v2 *ir.Name) bool {
+ // NB: there are a few corner cases where pointer equality
+ // doesn't work here, but this should be good enough for
+// our purposes.
+ return v1 == v2
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/parampropbits_string.go b/src/cmd/compile/internal/inline/inlheur/parampropbits_string.go
new file mode 100644
index 0000000000..bf4d3ca4ad
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/parampropbits_string.go
@@ -0,0 +1,70 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by "stringer -bitset -type ParamPropBits"; DO NOT EDIT.
+
+package inlheur
+
+import (
+ "bytes"
+ "strconv"
+)
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[ParamNoInfo-0]
+ _ = x[ParamFeedsInterfaceMethodCall-2]
+ _ = x[ParamMayFeedInterfaceMethodCall-4]
+ _ = x[ParamFeedsIndirectCall-8]
+ _ = x[ParamMayFeedIndirectCall-16]
+ _ = x[ParamFeedsIfOrSwitch-32]
+ _ = x[ParamMayFeedIfOrSwitch-64]
+}
+
+var _ParamPropBits_value = [...]uint64{
+ 0x0, /* ParamNoInfo */
+ 0x2, /* ParamFeedsInterfaceMethodCall */
+ 0x4, /* ParamMayFeedInterfaceMethodCall */
+ 0x8, /* ParamFeedsIndirectCall */
+ 0x10, /* ParamMayFeedIndirectCall */
+ 0x20, /* ParamFeedsIfOrSwitch */
+ 0x40, /* ParamMayFeedIfOrSwitch */
+}
+
+const _ParamPropBits_name = "ParamNoInfoParamFeedsInterfaceMethodCallParamMayFeedInterfaceMethodCallParamFeedsIndirectCallParamMayFeedIndirectCallParamFeedsIfOrSwitchParamMayFeedIfOrSwitch"
+
+var _ParamPropBits_index = [...]uint8{0, 11, 40, 71, 93, 117, 137, 159}
+
+func (i ParamPropBits) String() string {
+ var b bytes.Buffer
+
+ remain := uint64(i)
+ seen := false
+
+ for k, v := range _ParamPropBits_value {
+ x := _ParamPropBits_name[_ParamPropBits_index[k]:_ParamPropBits_index[k+1]]
+ if v == 0 {
+ if i == 0 {
+ b.WriteString(x)
+ return b.String()
+ }
+ continue
+ }
+ if (v & remain) == v {
+ remain &^= v
+ x := _ParamPropBits_name[_ParamPropBits_index[k]:_ParamPropBits_index[k+1]]
+ if seen {
+ b.WriteString("|")
+ }
+ seen = true
+ b.WriteString(x)
+ }
+ }
+ if remain == 0 {
+ return b.String()
+ }
+ return "ParamPropBits(0x" + strconv.FormatInt(int64(i), 16) + ")"
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/pstate_string.go b/src/cmd/compile/internal/inline/inlheur/pstate_string.go
new file mode 100644
index 0000000000..e6108d1318
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/pstate_string.go
@@ -0,0 +1,30 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by "stringer -type pstate"; DO NOT EDIT.
+
+package inlheur
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[psNoInfo-0]
+ _ = x[psCallsPanic-1]
+ _ = x[psMayReturn-2]
+ _ = x[psTop-3]
+}
+
+const _pstate_name = "psNoInfopsCallsPanicpsMayReturnpsTop"
+
+var _pstate_index = [...]uint8{0, 8, 20, 31, 36}
+
+func (i pstate) String() string {
+ if i < 0 || i >= pstate(len(_pstate_index)-1) {
+ return "pstate(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _pstate_name[_pstate_index[i]:_pstate_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/resultpropbits_string.go b/src/cmd/compile/internal/inline/inlheur/resultpropbits_string.go
new file mode 100644
index 0000000000..888af98fc3
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/resultpropbits_string.go
@@ -0,0 +1,68 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by "stringer -bitset -type ResultPropBits"; DO NOT EDIT.
+
+package inlheur
+
+import (
+ "bytes"
+ "strconv"
+)
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[ResultNoInfo-0]
+ _ = x[ResultIsAllocatedMem-2]
+ _ = x[ResultIsConcreteTypeConvertedToInterface-4]
+ _ = x[ResultAlwaysSameConstant-8]
+ _ = x[ResultAlwaysSameFunc-16]
+ _ = x[ResultAlwaysSameInlinableFunc-32]
+}
+
+var _ResultPropBits_value = [...]uint64{
+ 0x0, /* ResultNoInfo */
+ 0x2, /* ResultIsAllocatedMem */
+ 0x4, /* ResultIsConcreteTypeConvertedToInterface */
+ 0x8, /* ResultAlwaysSameConstant */
+ 0x10, /* ResultAlwaysSameFunc */
+ 0x20, /* ResultAlwaysSameInlinableFunc */
+}
+
+const _ResultPropBits_name = "ResultNoInfoResultIsAllocatedMemResultIsConcreteTypeConvertedToInterfaceResultAlwaysSameConstantResultAlwaysSameFuncResultAlwaysSameInlinableFunc"
+
+var _ResultPropBits_index = [...]uint8{0, 12, 32, 72, 96, 116, 145}
+
+func (i ResultPropBits) String() string {
+ var b bytes.Buffer
+
+ remain := uint64(i)
+ seen := false
+
+ for k, v := range _ResultPropBits_value {
+ x := _ResultPropBits_name[_ResultPropBits_index[k]:_ResultPropBits_index[k+1]]
+ if v == 0 {
+ if i == 0 {
+ b.WriteString(x)
+ return b.String()
+ }
+ continue
+ }
+ if (v & remain) == v {
+ remain &^= v
+ x := _ResultPropBits_name[_ResultPropBits_index[k]:_ResultPropBits_index[k+1]]
+ if seen {
+ b.WriteString("|")
+ }
+ seen = true
+ b.WriteString(x)
+ }
+ }
+ if remain == 0 {
+ return b.String()
+ }
+ return "ResultPropBits(0x" + strconv.FormatInt(int64(i), 16) + ")"
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/score_callresult_uses.go b/src/cmd/compile/internal/inline/inlheur/score_callresult_uses.go
new file mode 100644
index 0000000000..b95ea37d59
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/score_callresult_uses.go
@@ -0,0 +1,413 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/ir"
+ "fmt"
+ "os"
+)
+
+// This file contains code to re-score callsites based on how the
+// results of the call were used. Example:
+//
+//    func foo() {
+//        x, fptr := bar()
+//        switch x {
+//        case 10: fptr = baz()
+//        default: blix()
+//        }
+//        fptr(100)
+//    }
+//
+// The initial scoring pass will assign a score to "bar()" based on
+// various criteria, however once the first pass of scoring is done,
+// we look at the flags on the result from bar, and check to see
+// how those results are used. If bar() always returns the same constant
+// for its first result, and if the variable receiving that result
+// isn't redefined, and if that variable feeds into an if/switch
+// condition, then we will try to adjust the score for "bar" (on the
+// theory that if we inline it, we can constant fold and/or eliminate dead code).
+
+type resultPropAndCS struct {
+ defcs *CallSite
+ props ResultPropBits
+}
+
+type resultUseAnalyzer struct {
+ resultNameTab map[*ir.Name]resultPropAndCS
+ fn *ir.Func
+ cstab CallSiteTab
+ *condLevelTracker
+}
+
+// rescoreBasedOnCallResultUses walks the body of function fn,
+// examining how the results of calls made within it are used, and
+// updates the scores of the corresponding call sites accordingly.
+func (csa *callSiteAnalyzer) rescoreBasedOnCallResultUses(fn *ir.Func, resultNameTab map[*ir.Name]resultPropAndCS, cstab CallSiteTab) {
+ enableDebugTraceIfEnv()
+ rua := &resultUseAnalyzer{
+ resultNameTab: resultNameTab,
+ fn: fn,
+ cstab: cstab,
+ condLevelTracker: new(condLevelTracker),
+ }
+ var doNode func(ir.Node) bool
+ doNode = func(n ir.Node) bool {
+ rua.nodeVisitPre(n)
+ ir.DoChildren(n, doNode)
+ rua.nodeVisitPost(n)
+ return false
+ }
+ doNode(fn)
+ disableDebugTrace()
+}
+
+func (csa *callSiteAnalyzer) examineCallResults(cs *CallSite, resultNameTab map[*ir.Name]resultPropAndCS) map[*ir.Name]resultPropAndCS {
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= examining call results for %q\n",
+ EncodeCallSiteKey(cs))
+ }
+
+ // Invoke a helper to pick out the specific ir.Name's the results
+ // from this call are assigned into, e.g. "x, y := fooBar()". If
+ // the call is not part of an assignment statement, or if the
+ // variables in question are not newly defined, then we'll receive
+ // an empty list here.
+ //
+ names, autoTemps, props := namesDefined(cs)
+ if len(names) == 0 {
+ return resultNameTab
+ }
+
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= %d names defined\n", len(names))
+ }
+
+ // For each returned value, if the value has interesting
+ // properties (ex: always returns the same constant), and the name
+ // in question is never redefined, then make an entry in the
+ // result table for it.
+ const interesting = (ResultIsConcreteTypeConvertedToInterface |
+ ResultAlwaysSameConstant | ResultAlwaysSameInlinableFunc | ResultAlwaysSameFunc)
+ for idx, n := range names {
+ rprop := props.ResultFlags[idx]
+
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= props for ret %d %q: %s\n",
+ idx, n.Sym().Name, rprop.String())
+ }
+
+ if rprop&interesting == 0 {
+ continue
+ }
+ if csa.nameFinder.reassigned(n) {
+ continue
+ }
+ if resultNameTab == nil {
+ resultNameTab = make(map[*ir.Name]resultPropAndCS)
+ } else if _, ok := resultNameTab[n]; ok {
+ panic("should never happen")
+ }
+ entry := resultPropAndCS{
+ defcs: cs,
+ props: rprop,
+ }
+ resultNameTab[n] = entry
+ if autoTemps[idx] != nil {
+ resultNameTab[autoTemps[idx]] = entry
+ }
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= add resultNameTab table entry n=%v autotemp=%v props=%s\n", n, autoTemps[idx], rprop.String())
+ }
+ }
+ return resultNameTab
+}
+
+// namesDefined returns a list of ir.Name's corresponding to locals
+// that receive the results from the call at site 'cs', plus the
+// properties object for the called function. If a given result
+// isn't cleanly assigned to a newly defined local, the
+// slot for that result in the returned list will be nil. Example:
+//
+//    call               returned name list
+//
+//    x := foo()         [ x ]
+//    z, y := bar()      [ nil, nil ]
+//    _, q := baz()      [ nil, q ]
+//
+// In the case of a multi-return call, such as "x, y := foo()",
+// the pattern we see from the front end will be a call op
+// assigning to auto-temps, and then an assignment of the auto-temps
+// to the user-level variables. In such cases we return
+// the user-level variables in the first returned slice and then
+// the corresponding auto-temp names in the second.
+func namesDefined(cs *CallSite) ([]*ir.Name, []*ir.Name, *FuncProps) {
+ // If this call doesn't feed into an assignment (and of course not
+ // all calls do), then we don't have anything to work with here.
+ if cs.Assign == nil {
+ return nil, nil, nil
+ }
+ funcInlHeur, ok := fpmap[cs.Callee]
+ if !ok {
+ // TODO: add an assert/panic here.
+ return nil, nil, nil
+ }
+ if len(funcInlHeur.props.ResultFlags) == 0 {
+ return nil, nil, nil
+ }
+
+ // Single return case.
+ if len(funcInlHeur.props.ResultFlags) == 1 {
+ asgn, ok := cs.Assign.(*ir.AssignStmt)
+ if !ok {
+ return nil, nil, nil
+ }
+ // locate name being assigned
+ aname, ok := asgn.X.(*ir.Name)
+ if !ok {
+ return nil, nil, nil
+ }
+ return []*ir.Name{aname}, []*ir.Name{nil}, funcInlHeur.props
+ }
+
+ // Multi-return case
+ asgn, ok := cs.Assign.(*ir.AssignListStmt)
+ if !ok || !asgn.Def {
+ return nil, nil, nil
+ }
+ userVars := make([]*ir.Name, len(funcInlHeur.props.ResultFlags))
+ autoTemps := make([]*ir.Name, len(funcInlHeur.props.ResultFlags))
+ for idx, x := range asgn.Lhs {
+ if n, ok := x.(*ir.Name); ok {
+ userVars[idx] = n
+ r := asgn.Rhs[idx]
+ if r.Op() == ir.OCONVNOP {
+ r = r.(*ir.ConvExpr).X
+ }
+ if ir.IsAutoTmp(r) {
+ autoTemps[idx] = r.(*ir.Name)
+ }
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= multi-ret namedef uv=%v at=%v\n",
+ x, autoTemps[idx])
+ }
+ } else {
+ return nil, nil, nil
+ }
+ }
+ return userVars, autoTemps, funcInlHeur.props
+}
+
+func (rua *resultUseAnalyzer) nodeVisitPost(n ir.Node) {
+ rua.condLevelTracker.post(n)
+}
+
+func (rua *resultUseAnalyzer) nodeVisitPre(n ir.Node) {
+ rua.condLevelTracker.pre(n)
+ switch n.Op() {
+ case ir.OCALLINTER:
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= rescore examine iface call %v:\n", n)
+ }
+ rua.callTargetCheckResults(n)
+ case ir.OCALLFUNC:
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= rescore examine call %v:\n", n)
+ }
+ rua.callTargetCheckResults(n)
+ case ir.OIF:
+ ifst := n.(*ir.IfStmt)
+ rua.foldCheckResults(ifst.Cond)
+ case ir.OSWITCH:
+ swst := n.(*ir.SwitchStmt)
+ if swst.Tag != nil {
+ rua.foldCheckResults(swst.Tag)
+ }
+
+ }
+}
+
+// callTargetCheckResults examines a given call to see whether the
+// callee expression is potentially an inlinable function returned
+// from a potentially inlinable call. Examples:
+//
+// Scenario 1: named intermediate
+//
+// fn1 := foo() conc := bar()
+// fn1("blah") conc.MyMethod()
+//
+// Scenario 2: returned func or concrete object feeds directly to call
+//
+// foo()("blah") bar().MyMethod()
+//
+// In the second case, although at the source level the result of the
+// direct call feeds right into the method call or indirect call,
+// we're relying on the front end having inserted an auto-temp to
+// capture the value.
+func (rua *resultUseAnalyzer) callTargetCheckResults(call ir.Node) {
+ ce := call.(*ir.CallExpr)
+ rname := rua.getCallResultName(ce)
+ if rname == nil {
+ return
+ }
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= staticvalue returns %v:\n",
+ rname)
+ }
+ if rname.Class != ir.PAUTO {
+ return
+ }
+ switch call.Op() {
+ case ir.OCALLINTER:
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= in %s checking %v for cci prop:\n",
+ rua.fn.Sym().Name, rname)
+ }
+ if cs := rua.returnHasProp(rname, ResultIsConcreteTypeConvertedToInterface); cs != nil {
+
+ adj := returnFeedsConcreteToInterfaceCallAdj
+ cs.Score, cs.ScoreMask = adjustScore(adj, cs.Score, cs.ScoreMask)
+ }
+ case ir.OCALLFUNC:
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= in %s checking %v for samefunc props:\n",
+ rua.fn.Sym().Name, rname)
+ v, ok := rua.resultNameTab[rname]
+ if !ok {
+ fmt.Fprintf(os.Stderr, "=-= no entry for %v in rt\n", rname)
+ } else {
+ fmt.Fprintf(os.Stderr, "=-= props for %v: %q\n", rname, v.props.String())
+ }
+ }
+ if cs := rua.returnHasProp(rname, ResultAlwaysSameInlinableFunc); cs != nil {
+ adj := returnFeedsInlinableFuncToIndCallAdj
+ cs.Score, cs.ScoreMask = adjustScore(adj, cs.Score, cs.ScoreMask)
+ } else if cs := rua.returnHasProp(rname, ResultAlwaysSameFunc); cs != nil {
+ adj := returnFeedsFuncToIndCallAdj
+ cs.Score, cs.ScoreMask = adjustScore(adj, cs.Score, cs.ScoreMask)
+
+ }
+ }
+}
+
+// foldCheckResults examines the specified if/switch condition 'cond'
+// to see if it refers to locals defined by a (potentially inlinable)
+// function call at call site C, and if so, whether 'cond' contains
+// only combinations of simple references to all of the names in
+// 'names' with selected constants + operators. If these criteria are
+// met, then we adjust the score for call site C to reflect the
+// fact that inlining will enable deadcode and/or constant propagation.
+// Note: for this heuristic to kick in, the names in question have to
+// be all from the same callsite. Examples:
+//
+// q, r := baz() x, y := foo()
+// switch q+r { a, b, c := bar()
+// ... if x && y && a && b && c {
+// } ...
+// }
+//
+// For the call to "baz" above we apply a score adjustment, but not
+// for the calls to "foo" or "bar".
+func (rua *resultUseAnalyzer) foldCheckResults(cond ir.Node) {
+ namesUsed := collectNamesUsed(cond)
+ if len(namesUsed) == 0 {
+ return
+ }
+ var cs *CallSite
+ for _, n := range namesUsed {
+ rpcs, found := rua.resultNameTab[n]
+ if !found {
+ return
+ }
+ if cs != nil && rpcs.defcs != cs {
+ return
+ }
+ cs = rpcs.defcs
+ if rpcs.props&ResultAlwaysSameConstant == 0 {
+ return
+ }
+ }
+ if debugTrace&debugTraceScoring != 0 {
+ nls := func(nl []*ir.Name) string {
+ r := ""
+ for _, n := range nl {
+ r += " " + n.Sym().Name
+ }
+ return r
+ }
+ fmt.Fprintf(os.Stderr, "=-= calling ShouldFoldIfNameConstant on names={%s} cond=%v\n", nls(namesUsed), cond)
+ }
+
+ if !ShouldFoldIfNameConstant(cond, namesUsed) {
+ return
+ }
+ adj := returnFeedsConstToIfAdj
+ cs.Score, cs.ScoreMask = adjustScore(adj, cs.Score, cs.ScoreMask)
+}
+
+func collectNamesUsed(expr ir.Node) []*ir.Name {
+ res := []*ir.Name{}
+ ir.Visit(expr, func(n ir.Node) {
+ if n.Op() != ir.ONAME {
+ return
+ }
+ nn := n.(*ir.Name)
+ if nn.Class != ir.PAUTO {
+ return
+ }
+ res = append(res, nn)
+ })
+ return res
+}
+
+func (rua *resultUseAnalyzer) returnHasProp(name *ir.Name, prop ResultPropBits) *CallSite {
+ v, ok := rua.resultNameTab[name]
+ if !ok {
+ return nil
+ }
+ if v.props&prop == 0 {
+ return nil
+ }
+ return v.defcs
+}
+
+func (rua *resultUseAnalyzer) getCallResultName(ce *ir.CallExpr) *ir.Name {
+ var callTarg ir.Node
+ if sel, ok := ce.Fun.(*ir.SelectorExpr); ok {
+ // method call
+ callTarg = sel.X
+ } else if ctarg, ok := ce.Fun.(*ir.Name); ok {
+ // regular call
+ callTarg = ctarg
+ } else {
+ return nil
+ }
+ r := ir.StaticValue(callTarg)
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= staticname on %v returns %v:\n",
+ callTarg, r)
+ }
+ if r.Op() == ir.OCALLFUNC {
+ // This corresponds to the "x := foo()" case; here
+ // ir.StaticValue has brought us all the way back to
+ // the call expression itself. We need to back off to
+ // the name defined by the call; do this by looking up
+ // the callsite.
+ ce := r.(*ir.CallExpr)
+ cs, ok := rua.cstab[ce]
+ if !ok {
+ return nil
+ }
+ names, _, _ := namesDefined(cs)
+ if len(names) == 0 {
+ return nil
+ }
+ return names[0]
+ } else if r.Op() == ir.ONAME {
+ return r.(*ir.Name)
+ }
+ return nil
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/scoreadjusttyp_string.go b/src/cmd/compile/internal/inline/inlheur/scoreadjusttyp_string.go
new file mode 100644
index 0000000000..f5b8bf6903
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/scoreadjusttyp_string.go
@@ -0,0 +1,80 @@
+// Code generated by "stringer -bitset -type scoreAdjustTyp"; DO NOT EDIT.
+
+package inlheur
+
+import "strconv"
+import "bytes"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[panicPathAdj-1]
+ _ = x[initFuncAdj-2]
+ _ = x[inLoopAdj-4]
+ _ = x[passConstToIfAdj-8]
+ _ = x[passConstToNestedIfAdj-16]
+ _ = x[passConcreteToItfCallAdj-32]
+ _ = x[passConcreteToNestedItfCallAdj-64]
+ _ = x[passFuncToIndCallAdj-128]
+ _ = x[passFuncToNestedIndCallAdj-256]
+ _ = x[passInlinableFuncToIndCallAdj-512]
+ _ = x[passInlinableFuncToNestedIndCallAdj-1024]
+ _ = x[returnFeedsConstToIfAdj-2048]
+ _ = x[returnFeedsFuncToIndCallAdj-4096]
+ _ = x[returnFeedsInlinableFuncToIndCallAdj-8192]
+ _ = x[returnFeedsConcreteToInterfaceCallAdj-16384]
+}
+
+var _scoreAdjustTyp_value = [...]uint64{
+ 0x1, /* panicPathAdj */
+ 0x2, /* initFuncAdj */
+ 0x4, /* inLoopAdj */
+ 0x8, /* passConstToIfAdj */
+ 0x10, /* passConstToNestedIfAdj */
+ 0x20, /* passConcreteToItfCallAdj */
+ 0x40, /* passConcreteToNestedItfCallAdj */
+ 0x80, /* passFuncToIndCallAdj */
+ 0x100, /* passFuncToNestedIndCallAdj */
+ 0x200, /* passInlinableFuncToIndCallAdj */
+ 0x400, /* passInlinableFuncToNestedIndCallAdj */
+ 0x800, /* returnFeedsConstToIfAdj */
+ 0x1000, /* returnFeedsFuncToIndCallAdj */
+ 0x2000, /* returnFeedsInlinableFuncToIndCallAdj */
+ 0x4000, /* returnFeedsConcreteToInterfaceCallAdj */
+}
+
+const _scoreAdjustTyp_name = "panicPathAdjinitFuncAdjinLoopAdjpassConstToIfAdjpassConstToNestedIfAdjpassConcreteToItfCallAdjpassConcreteToNestedItfCallAdjpassFuncToIndCallAdjpassFuncToNestedIndCallAdjpassInlinableFuncToIndCallAdjpassInlinableFuncToNestedIndCallAdjreturnFeedsConstToIfAdjreturnFeedsFuncToIndCallAdjreturnFeedsInlinableFuncToIndCallAdjreturnFeedsConcreteToInterfaceCallAdj"
+
+var _scoreAdjustTyp_index = [...]uint16{0, 12, 23, 32, 48, 70, 94, 124, 144, 170, 199, 234, 257, 284, 320, 357}
+
+func (i scoreAdjustTyp) String() string {
+ var b bytes.Buffer
+
+ remain := uint64(i)
+ seen := false
+
+ for k, v := range _scoreAdjustTyp_value {
+ x := _scoreAdjustTyp_name[_scoreAdjustTyp_index[k]:_scoreAdjustTyp_index[k+1]]
+ if v == 0 {
+ if i == 0 {
+ b.WriteString(x)
+ return b.String()
+ }
+ continue
+ }
+ if (v & remain) == v {
+ remain &^= v
+ x := _scoreAdjustTyp_name[_scoreAdjustTyp_index[k]:_scoreAdjustTyp_index[k+1]]
+ if seen {
+ b.WriteString("|")
+ }
+ seen = true
+ b.WriteString(x)
+ }
+ }
+ if remain == 0 {
+ return b.String()
+ }
+ return "scoreAdjustTyp(0x" + strconv.FormatInt(int64(i), 16) + ")"
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/scoring.go b/src/cmd/compile/internal/inline/inlheur/scoring.go
new file mode 100644
index 0000000000..623ba8adf0
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/scoring.go
@@ -0,0 +1,751 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/pgo"
+ "cmd/compile/internal/types"
+ "fmt"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// These constants enumerate the set of possible ways/scenarios
+// in which we'll adjust the score of a given callsite.
+type scoreAdjustTyp uint
+
+// These constants capture the various ways in which the inliner's
+// scoring phase can adjust a callsite score based on heuristics. They
+// fall broadly into three categories:
+//
+// 1) adjustments based solely on the callsite context (ex: call
+// appears on panic path)
+//
+// 2) adjustments that take into account specific interesting values
+// passed at a call site (ex: passing a constant that could result in
+// cprop/deadcode in the caller)
+//
+// 3) adjustments that take into account values returned from the call
+// at a callsite (ex: call always returns the same inlinable function,
+// and return value flows unmodified into an indirect call)
+//
+// For categories 2 and 3 above, an adjustment can come in both a
+// "must" version and a "may" version; a given call site will receive
+// at most one of the two. Here the idea is
+// that in the "must" version the value flow is unconditional: if the
+// callsite executes, then the condition we're interested in (ex:
+// param feeding call) is guaranteed to happen. For the "may" version,
+// there may be control flow that could cause the benefit to be
+// bypassed.
+const (
+ // Category 1 adjustments (see above)
+ panicPathAdj scoreAdjustTyp = (1 << iota)
+ initFuncAdj
+ inLoopAdj
+
+ // Category 2 adjustments (see above).
+ passConstToIfAdj
+ passConstToNestedIfAdj
+ passConcreteToItfCallAdj
+ passConcreteToNestedItfCallAdj
+ passFuncToIndCallAdj
+ passFuncToNestedIndCallAdj
+ passInlinableFuncToIndCallAdj
+ passInlinableFuncToNestedIndCallAdj
+
+ // Category 3 adjustments.
+ returnFeedsConstToIfAdj
+ returnFeedsFuncToIndCallAdj
+ returnFeedsInlinableFuncToIndCallAdj
+ returnFeedsConcreteToInterfaceCallAdj
+
+ sentinelScoreAdj // sentinel; not a real adjustment
+)
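+
+// To make the "must" vs. "may" distinction concrete, consider two
+// hypothetical callees (not from this package):
+//
+//	func direct(f func()) { f() }
+//	func guarded(b bool, f func()) { if b { f() } }
+//
+// A func-valued argument passed to 'direct' reaches the indirect call
+// on every execution of the callee, so such a call site gets the
+// "must" adjustment (passFuncToIndCallAdj); with 'guarded' the value
+// reaches the call only on some paths, so the weaker "may" adjustment
+// (passFuncToNestedIndCallAdj) applies instead.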
+
+// This table records the specific values we use to adjust call
+// site scores in a given scenario.
+// NOTE: these numbers are chosen very arbitrarily; ideally
+// we will go through some sort of tuning process to decide
+// what value for each one produces the best performance.
+
+var adjValues = map[scoreAdjustTyp]int{
+ panicPathAdj: 40,
+ initFuncAdj: 20,
+ inLoopAdj: -5,
+ passConstToIfAdj: -20,
+ passConstToNestedIfAdj: -15,
+ passConcreteToItfCallAdj: -30,
+ passConcreteToNestedItfCallAdj: -25,
+ passFuncToIndCallAdj: -25,
+ passFuncToNestedIndCallAdj: -20,
+ passInlinableFuncToIndCallAdj: -45,
+ passInlinableFuncToNestedIndCallAdj: -40,
+ returnFeedsConstToIfAdj: -15,
+ returnFeedsFuncToIndCallAdj: -25,
+ returnFeedsInlinableFuncToIndCallAdj: -40,
+ returnFeedsConcreteToInterfaceCallAdj: -25,
+}
+
+// SetupScoreAdjustments interprets the value of the -d=inlscoreadj
+// debugging option, if set. The value of this flag is expected to be
+// a series of "/"-separated clauses of the form adj1:value1. Example:
+// -d=inlscoreadj=inLoopAdj:0/passConstToIfAdj:-99
+func SetupScoreAdjustments() {
+ if base.Debug.InlScoreAdj == "" {
+ return
+ }
+ if err := parseScoreAdj(base.Debug.InlScoreAdj); err != nil {
+ base.Fatalf("malformed -d=inlscoreadj argument %q: %v",
+ base.Debug.InlScoreAdj, err)
+ }
+}
+
+func adjStringToVal(s string) (scoreAdjustTyp, bool) {
+ for adj := scoreAdjustTyp(1); adj < sentinelScoreAdj; adj <<= 1 {
+ if adj.String() == s {
+ return adj, true
+ }
+ }
+ return 0, false
+}
+
+func parseScoreAdj(val string) error {
+ clauses := strings.Split(val, "/")
+ if len(clauses) == 0 {
+ return fmt.Errorf("no clauses")
+ }
+ for _, clause := range clauses {
+ elems := strings.Split(clause, ":")
+ if len(elems) < 2 {
+ return fmt.Errorf("clause %q: expected colon", clause)
+ }
+ if len(elems) != 2 {
+ return fmt.Errorf("clause %q has %d elements, wanted 2", clause,
+ len(elems))
+ }
+ adj, ok := adjStringToVal(elems[0])
+ if !ok {
+ return fmt.Errorf("clause %q: unknown adjustment", clause)
+ }
+ val, err := strconv.Atoi(elems[1])
+ if err != nil {
+ return fmt.Errorf("clause %q: malformed value: %v", clause, err)
+ }
+ adjValues[adj] = val
+ }
+ return nil
+}
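+
+// As a worked example of the clause syntax above, the (hypothetical)
+// setting
+//
+//	-d=inlscoreadj=panicPathAdj:10/inLoopAdj:0
+//
+// would overwrite adjValues[panicPathAdj] with 10 and zero out the
+// in-loop bonus, leaving all other entries at their defaults.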
+
+func adjValue(x scoreAdjustTyp) int {
+ if val, ok := adjValues[x]; ok {
+ return val
+ } else {
+ panic("internal error unregistered adjustment type")
+ }
+}
+
+var mayMustAdj = [...]struct{ may, must scoreAdjustTyp }{
+ {may: passConstToNestedIfAdj, must: passConstToIfAdj},
+ {may: passConcreteToNestedItfCallAdj, must: passConcreteToItfCallAdj},
+ {may: passFuncToNestedIndCallAdj, must: passFuncToIndCallAdj},
+ {may: passInlinableFuncToNestedIndCallAdj, must: passInlinableFuncToIndCallAdj},
+}
+
+func isMay(x scoreAdjustTyp) bool {
+ return mayToMust(x) != 0
+}
+
+func isMust(x scoreAdjustTyp) bool {
+ return mustToMay(x) != 0
+}
+
+func mayToMust(x scoreAdjustTyp) scoreAdjustTyp {
+ for _, v := range mayMustAdj {
+ if x == v.may {
+ return v.must
+ }
+ }
+ return 0
+}
+
+func mustToMay(x scoreAdjustTyp) scoreAdjustTyp {
+ for _, v := range mayMustAdj {
+ if x == v.must {
+ return v.may
+ }
+ }
+ return 0
+}
+
+// computeCallSiteScore takes a given call site whose ir node is
+// 'call', whose callee function is 'callee', and whose previously
+// computed call site properties are 'csflags'. It computes a score
+// for the callsite that combines the size cost of the callee with
+// heuristics based on previously computed argument and function
+// properties, then stores the score and the adjustment mask in the
+// appropriate fields in 'cs'.
+func (cs *CallSite) computeCallSiteScore(csa *callSiteAnalyzer, calleeProps *FuncProps) {
+ callee := cs.Callee
+ csflags := cs.Flags
+ call := cs.Call
+
+ // Start with the size-based score for the callee.
+ score := int(callee.Inl.Cost)
+ var tmask scoreAdjustTyp
+
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= scoring call to %s at %s , initial=%d\n",
+ callee.Sym().Name, fmtFullPos(call.Pos()), score)
+ }
+
+ // First some score adjustments to discourage inlining in selected cases.
+ if csflags&CallSiteOnPanicPath != 0 {
+ score, tmask = adjustScore(panicPathAdj, score, tmask)
+ }
+ if csflags&CallSiteInInitFunc != 0 {
+ score, tmask = adjustScore(initFuncAdj, score, tmask)
+ }
+
+ // Then adjustments to encourage inlining in selected cases.
+ if csflags&CallSiteInLoop != 0 {
+ score, tmask = adjustScore(inLoopAdj, score, tmask)
+ }
+
+ // Stop here if no callee props.
+ if calleeProps == nil {
+ cs.Score, cs.ScoreMask = score, tmask
+ return
+ }
+
+ // Walk through the actual expressions being passed at the call.
+ calleeRecvrParms := callee.Type().RecvParams()
+ for idx := range call.Args {
+ // ignore blanks
+ if calleeRecvrParms[idx].Sym == nil ||
+ calleeRecvrParms[idx].Sym.IsBlank() {
+ continue
+ }
+ arg := call.Args[idx]
+ pflag := calleeProps.ParamFlags[idx]
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= arg %d of %d: val %v flags=%s\n",
+ idx, len(call.Args), arg, pflag.String())
+ }
+
+ if len(cs.ArgProps) == 0 {
+ continue
+ }
+ argProps := cs.ArgProps[idx]
+
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= arg %d props %s value %v\n",
+ idx, argProps.String(), arg)
+ }
+
+ if argProps&ActualExprConstant != 0 {
+ if pflag&ParamMayFeedIfOrSwitch != 0 {
+ score, tmask = adjustScore(passConstToNestedIfAdj, score, tmask)
+ }
+ if pflag&ParamFeedsIfOrSwitch != 0 {
+ score, tmask = adjustScore(passConstToIfAdj, score, tmask)
+ }
+ }
+
+ if argProps&ActualExprIsConcreteConvIface != 0 {
+ // FIXME: ideally here it would be nice to make a
+ // distinction between the inlinable case and the
+ // non-inlinable case, but this is hard to do. Example:
+ //
+ // type I interface { Tiny() int; Giant() }
+ // type Conc struct { x int }
+ // func (c *Conc) Tiny() int { return 42 }
+ // func (c *Conc) Giant() { <huge amounts of code> }
+ //
+ // func passConcToItf(c *Conc) {
+ // makesItfMethodCall(c)
+ // }
+ //
+ // In the code above, function properties will only tell
+ // us that 'makesItfMethodCall' invokes a method on its
+ // interface parameter, but we don't know whether it calls
+ // "Tiny" or "Giant". If we knew if called "Tiny", then in
+ // theory in addition to converting the interface call to
+ // a direct call, we could also inline (in which case
+ // we'd want to decrease the score even more).
+ //
+ // One thing we could do (not yet implemented) is iterate
+ // through all of the methods of "*Conc" that allow it to
+ // satisfy I, and if all are inlinable, then exploit that.
+ if pflag&ParamMayFeedInterfaceMethodCall != 0 {
+ score, tmask = adjustScore(passConcreteToNestedItfCallAdj, score, tmask)
+ }
+ if pflag&ParamFeedsInterfaceMethodCall != 0 {
+ score, tmask = adjustScore(passConcreteToItfCallAdj, score, tmask)
+ }
+ }
+
+ if argProps&(ActualExprIsFunc|ActualExprIsInlinableFunc) != 0 {
+ mayadj := passFuncToNestedIndCallAdj
+ mustadj := passFuncToIndCallAdj
+ if argProps&ActualExprIsInlinableFunc != 0 {
+ mayadj = passInlinableFuncToNestedIndCallAdj
+ mustadj = passInlinableFuncToIndCallAdj
+ }
+ if pflag&ParamMayFeedIndirectCall != 0 {
+ score, tmask = adjustScore(mayadj, score, tmask)
+ }
+ if pflag&ParamFeedsIndirectCall != 0 {
+ score, tmask = adjustScore(mustadj, score, tmask)
+ }
+ }
+ }
+
+ cs.Score, cs.ScoreMask = score, tmask
+}
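+
+// As a small worked example of the scoring above (the callee cost is
+// chosen arbitrarily): a callee with an inline cost of 80 whose call
+// site sits in a loop (inLoopAdj, -5) and passes a constant to a
+// parameter flagged ParamFeedsIfOrSwitch (passConstToIfAdj, -20)
+// would end up with a score of 80 - 5 - 20 = 55, with both bits
+// recorded in cs.ScoreMask.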
+
+func adjustScore(typ scoreAdjustTyp, score int, mask scoreAdjustTyp) (int, scoreAdjustTyp) {
+
+ if isMust(typ) {
+ if mask&typ != 0 {
+ return score, mask
+ }
+ may := mustToMay(typ)
+ if mask&may != 0 {
+ // promote may to must, so undo may
+ score -= adjValue(may)
+ mask &^= may
+ }
+ } else if isMay(typ) {
+ must := mayToMust(typ)
+ if mask&(must|typ) != 0 {
+ return score, mask
+ }
+ }
+ if mask&typ == 0 {
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= applying adj %d for %s\n",
+ adjValue(typ), typ.String())
+ }
+ score += adjValue(typ)
+ mask |= typ
+ }
+ return score, mask
+}
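+
+// One consequence of the must/may handling above: if a call site
+// first picks up passConstToNestedIfAdj (-15 by default) and later
+// the unconditional passConstToIfAdj (-20), the "may" adjustment is
+// undone before the "must" one is applied, for a net of -20 rather
+// than -35; conversely a "may" adjustment is skipped entirely when
+// its "must" counterpart is already present in the mask.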
+
+var resultFlagToPositiveAdj map[ResultPropBits]scoreAdjustTyp
+var paramFlagToPositiveAdj map[ParamPropBits]scoreAdjustTyp
+
+func setupFlagToAdjMaps() {
+ resultFlagToPositiveAdj = map[ResultPropBits]scoreAdjustTyp{
+ ResultIsAllocatedMem: returnFeedsConcreteToInterfaceCallAdj,
+ ResultAlwaysSameFunc: returnFeedsFuncToIndCallAdj,
+ ResultAlwaysSameConstant: returnFeedsConstToIfAdj,
+ }
+ paramFlagToPositiveAdj = map[ParamPropBits]scoreAdjustTyp{
+ ParamMayFeedInterfaceMethodCall: passConcreteToNestedItfCallAdj,
+ ParamFeedsInterfaceMethodCall: passConcreteToItfCallAdj,
+ ParamMayFeedIndirectCall: passInlinableFuncToNestedIndCallAdj,
+ ParamFeedsIndirectCall: passInlinableFuncToIndCallAdj,
+ }
+}
+
+// LargestNegativeScoreAdjustment tries to estimate the largest possible
+// negative score adjustment that could be applied to a call of the
+// function with the specified props. Example:
+//
+// func foo() { func bar(x int, p *int) int {
+// ... if x < 0 { *p = x }
+// } return 99
+// }
+//
+// Function 'foo' above on the left has no interesting properties,
+// thus as a result the most we'll adjust any call to is the value for
+// "call in loop". If the calculated cost of the function is 150, and
+// the in-loop adjustment is 5 (for example), then there is not much
+// point treating it as inlinable. On the other hand "bar" has a param
+// property (parameter "x" feeds unmodified to an "if" statement) and
+// a return property (always returns same constant) meaning that a
+// given call _could_ be rescored down as much as -35 points-- thus if
+// the size of "bar" is 100 (for example) then there is at least a
+// chance that scoring will enable inlining.
+func LargestNegativeScoreAdjustment(fn *ir.Func, props *FuncProps) int {
+ if resultFlagToPositiveAdj == nil {
+ setupFlagToAdjMaps()
+ }
+ var tmask scoreAdjustTyp
+ score := adjValues[inLoopAdj] // any call can be in a loop
+ for _, pf := range props.ParamFlags {
+ if adj, ok := paramFlagToPositiveAdj[pf]; ok {
+ score, tmask = adjustScore(adj, score, tmask)
+ }
+ }
+ for _, rf := range props.ResultFlags {
+ if adj, ok := resultFlagToPositiveAdj[rf]; ok {
+ score, tmask = adjustScore(adj, score, tmask)
+ }
+ }
+
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= largestScore(%v) is %d\n",
+ fn, score)
+ }
+
+ return score
+}
+
+// LargestPositiveScoreAdjustment tries to estimate the largest possible
+// positive score adjustment that could be applied to a given callsite.
+// At the moment we don't have very many positive score adjustments, so
+// this is just hard-coded, not table-driven.
+func LargestPositiveScoreAdjustment(fn *ir.Func) int {
+ return adjValues[panicPathAdj] + adjValues[initFuncAdj]
+}
+
+// callSiteTab contains entries for each call in the function
+// currently being processed by InlineCalls; this variable will either
+// be set to 'scoreCallsCache.tab' below (for non-inlinable routines) or to the
+// local 'cstab' entry in the fnInlHeur object for inlinable routines.
+//
+// NOTE: this assumes that inlining operations are happening in a serial,
+// single-threaded fashion, which is true today but probably won't hold
+// in the future (for example, we might want to score the callsites
+// in multiple functions in parallel); if the inliner evolves in this
+// direction we'll need to come up with a different approach here.
+var callSiteTab CallSiteTab
+
+// scoreCallsCache caches a call site table and call site list between
+// invocations of ScoreCalls so that we can reuse previously allocated
+// storage.
+var scoreCallsCache scoreCallsCacheType
+
+type scoreCallsCacheType struct {
+ tab CallSiteTab
+ csl []*CallSite
+}
+
+// ScoreCalls assigns numeric scores to each of the callsites in
+// function 'fn'; the lower the score, the more helpful we think it
+// will be to inline.
+//
+// Unlike a lot of the other inline heuristics machinery, callsite
+// scoring can't be done as part of the CanInline call for a function,
+// due to fact that we may be working on a non-trivial SCC. So for
+// example with this SCC:
+//
+// func foo(x int) { func bar(x int, f func()) {
+// if x != 0 { f()
+// bar(x, func(){}) foo(x-1)
+// } }
+// }
+//
+// We don't want to perform scoring for the 'foo' call in "bar" until
+// after foo has been analyzed, but it's conceivable that CanInline
+// might visit bar before foo for this SCC.
+func ScoreCalls(fn *ir.Func) {
+ if len(fn.Body) == 0 {
+ return
+ }
+ enableDebugTraceIfEnv()
+
+ nameFinder := newNameFinder(fn)
+
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= ScoreCalls(%v)\n", ir.FuncName(fn))
+ }
+
+ // If this is an inlinable function, use the precomputed
+ // call site table for it. If the function wasn't an inline
+ // candidate, collect a callsite table for it now.
+ var cstab CallSiteTab
+ if funcInlHeur, ok := fpmap[fn]; ok {
+ cstab = funcInlHeur.cstab
+ } else {
+ if len(scoreCallsCache.tab) != 0 {
+ panic("missing call to ScoreCallsCleanup")
+ }
+ if scoreCallsCache.tab == nil {
+ scoreCallsCache.tab = make(CallSiteTab)
+ }
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= building cstab for non-inl func %s\n",
+ ir.FuncName(fn))
+ }
+ cstab = computeCallSiteTable(fn, fn.Body, scoreCallsCache.tab, nil, 0,
+ nameFinder)
+ }
+
+ csa := makeCallSiteAnalyzer(fn)
+ const doCallResults = true
+ csa.scoreCallsRegion(fn, fn.Body, cstab, doCallResults, nil)
+
+ disableDebugTrace()
+}
+
+// scoreCallsRegion assigns numeric scores to each of the callsites in
+// region 'region' within function 'fn'. This can be called on
+// an entire function, or with 'region' set to a chunk of
+// code corresponding to an inlined call.
+func (csa *callSiteAnalyzer) scoreCallsRegion(fn *ir.Func, region ir.Nodes, cstab CallSiteTab, doCallResults bool, ic *ir.InlinedCallExpr) {
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= scoreCallsRegion(%v, %s) len(cstab)=%d\n",
+ ir.FuncName(fn), region[0].Op().String(), len(cstab))
+ }
+
+ // Sort callsites to avoid any surprises with non-deterministic
+ // map iteration order (this is probably not needed, but here just
+ // in case).
+ csl := scoreCallsCache.csl[:0]
+ for _, cs := range cstab {
+ csl = append(csl, cs)
+ }
+ scoreCallsCache.csl = csl[:0]
+ sort.Slice(csl, func(i, j int) bool {
+ return csl[i].ID < csl[j].ID
+ })
+
+ // Score each call site.
+ var resultNameTab map[*ir.Name]resultPropAndCS
+ for _, cs := range csl {
+ var cprops *FuncProps
+ fihcprops := false
+ desercprops := false
+ if funcInlHeur, ok := fpmap[cs.Callee]; ok {
+ cprops = funcInlHeur.props
+ fihcprops = true
+ } else if cs.Callee.Inl != nil {
+ cprops = DeserializeFromString(cs.Callee.Inl.Properties)
+ desercprops = true
+ } else {
+ if base.Debug.DumpInlFuncProps != "" {
+ fmt.Fprintf(os.Stderr, "=-= *** unable to score call to %s from %s\n", cs.Callee.Sym().Name, fmtFullPos(cs.Call.Pos()))
+ panic("should never happen")
+ } else {
+ continue
+ }
+ }
+ cs.computeCallSiteScore(csa, cprops)
+
+ if doCallResults {
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= examineCallResults at %s: flags=%d score=%d funcInlHeur=%v deser=%v\n", fmtFullPos(cs.Call.Pos()), cs.Flags, cs.Score, fihcprops, desercprops)
+ }
+ resultNameTab = csa.examineCallResults(cs, resultNameTab)
+ }
+
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= scoring call at %s: flags=%d score=%d funcInlHeur=%v deser=%v\n", fmtFullPos(cs.Call.Pos()), cs.Flags, cs.Score, fihcprops, desercprops)
+ }
+ }
+
+ if resultNameTab != nil {
+ csa.rescoreBasedOnCallResultUses(fn, resultNameTab, cstab)
+ }
+
+ disableDebugTrace()
+
+ if ic != nil && callSiteTab != nil {
+ // Integrate the calls from this cstab into the table for the caller.
+ if err := callSiteTab.merge(cstab); err != nil {
+ base.FatalfAt(ic.Pos(), "%v", err)
+ }
+ } else {
+ callSiteTab = cstab
+ }
+}
+
+// ScoreCallsCleanup resets the state of the callsite cache
+// once ScoreCalls is done with a function.
+func ScoreCallsCleanup() {
+ if base.Debug.DumpInlCallSiteScores != 0 {
+ if allCallSites == nil {
+ allCallSites = make(CallSiteTab)
+ }
+ for call, cs := range callSiteTab {
+ allCallSites[call] = cs
+ }
+ }
+ for k := range scoreCallsCache.tab {
+ delete(scoreCallsCache.tab, k)
+ }
+}
+
+// GetCallSiteScore returns the previously calculated score for call
+// within fn.
+func GetCallSiteScore(fn *ir.Func, call *ir.CallExpr) (int, bool) {
+ if funcInlHeur, ok := fpmap[fn]; ok {
+ if cs, ok := funcInlHeur.cstab[call]; ok {
+ return cs.Score, true
+ }
+ }
+ if cs, ok := callSiteTab[call]; ok {
+ return cs.Score, true
+ }
+ return 0, false
+}
+
+// BudgetExpansion returns the amount to relax/expand the base
+// inlining budget when the new inliner is turned on; the inliner
+// will add the returned value to the hairyness budget.
+//
+// Background: with the new inliner, the score for a given callsite
+// can be adjusted down by some amount due to heuristics, however we
+// won't know whether this is going to happen until much later after
+// the CanInline call. This function returns the amount to relax the
+// budget initially (to allow for a large score adjustment); later on
+// in RevisitInlinability we'll look at each individual function to
+// demote it if needed.
+func BudgetExpansion(maxBudget int32) int32 {
+ if base.Debug.InlBudgetSlack != 0 {
+ return int32(base.Debug.InlBudgetSlack)
+ }
+ // In the default case, return maxBudget, which will effectively
+ // double the budget from 80 to 160; this should be good enough
+ // for most cases.
+ return maxBudget
+}
+
+var allCallSites CallSiteTab
+
+// DumpInlCallSiteScores is invoked by the inliner if the debug flag
+// "-d=dumpinlcallsitescores" is set; it dumps out a human-readable
+// summary of all (potentially) inlinable callsites in the package,
+// along with info on call site scoring and the adjustments made to a
+// given score. Here profile is the PGO profile in use (may be
+// nil), budgetCallback is a callback that can be invoked to find out
+// the original pre-adjustment hairyness limit for the function, and
+// inlineHotMaxBudget is the constant of the same name used in the
+// inliner. Sample output lines:
+//
+// Score Adjustment Status Callee CallerPos ScoreFlags
+// 115 40 DEMOTED cmd/compile/internal/abi.(*ABIParamAssignment).Offset expand_calls.go:1679:14|6 panicPathAdj
+// 76 -5n PROMOTED runtime.persistentalloc mcheckmark.go:48:45|3 inLoopAdj
+// 201 0 --- PGO unicode.DecodeRuneInString utf8.go:312:30|1
+// 7 -5 --- PGO internal/abi.Name.DataChecked type.go:625:22|0 inLoopAdj
+//
+// In the dump above, "Score" is the final score calculated for the
+// callsite, "Adjustment" is the amount added to or subtracted from
+// the original hairyness estimate to form the score. "Status" shows
+// whether anything changed with the site -- did the adjustment bump
+// it down just below the threshold ("PROMOTED") or instead bump it
+// above the threshold ("DEMOTED"); this will be blank ("---") if no
+// threshold was crossed as a result of the heuristics. Note that
+// "Status" also shows whether PGO was involved. "Callee" is the name
+// of the function called, "CallerPos" is the position of the
+// callsite, and "ScoreFlags" is a digest of the specific properties
+// we used to make adjustments to callsite score via heuristics.
+func DumpInlCallSiteScores(profile *pgo.Profile, budgetCallback func(fn *ir.Func, profile *pgo.Profile) (int32, bool)) {
+
+ var indirectlyDueToPromotion func(cs *CallSite) bool
+ indirectlyDueToPromotion = func(cs *CallSite) bool {
+ bud, _ := budgetCallback(cs.Callee, profile)
+ hairyval := cs.Callee.Inl.Cost
+ score := int32(cs.Score)
+ if hairyval > bud && score <= bud {
+ return true
+ }
+ if cs.parent != nil {
+ return indirectlyDueToPromotion(cs.parent)
+ }
+ return false
+ }
+
+ genstatus := func(cs *CallSite) string {
+ hairyval := cs.Callee.Inl.Cost
+ bud, isPGO := budgetCallback(cs.Callee, profile)
+ score := int32(cs.Score)
+ st := "---"
+ expinl := false
+ switch {
+ case hairyval <= bud && score <= bud:
+ // "Normal" inlined case: hairy val sufficiently low that
+ // it would have been inlined anyway without heuristics.
+ expinl = true
+ case hairyval > bud && score > bud:
+ // "Normal" not inlined case: hairy val sufficiently high
+ // and scoring didn't lower it.
+ case hairyval > bud && score <= bud:
+ // Promoted: we would not have inlined it before, but
+ // after score adjustment we decided to inline.
+ st = "PROMOTED"
+ expinl = true
+ case hairyval <= bud && score > bud:
+ // Demoted: we would have inlined it before, but after
+ // score adjustment we decided not to inline.
+ st = "DEMOTED"
+ }
+ inlined := cs.aux&csAuxInlined != 0
+ indprom := false
+ if cs.parent != nil {
+ indprom = indirectlyDueToPromotion(cs.parent)
+ }
+ if inlined && indprom {
+ st += "|INDPROM"
+ }
+ if inlined && !expinl {
+ st += "|[NI?]"
+ } else if !inlined && expinl {
+ st += "|[IN?]"
+ }
+ if isPGO {
+ st += "|PGO"
+ }
+ return st
+ }
+
+ if base.Debug.DumpInlCallSiteScores != 0 {
+ var sl []*CallSite
+ for _, cs := range allCallSites {
+ sl = append(sl, cs)
+ }
+ sort.Slice(sl, func(i, j int) bool {
+ if sl[i].Score != sl[j].Score {
+ return sl[i].Score < sl[j].Score
+ }
+ fni := ir.PkgFuncName(sl[i].Callee)
+ fnj := ir.PkgFuncName(sl[j].Callee)
+ if fni != fnj {
+ return fni < fnj
+ }
+ ecsi := EncodeCallSiteKey(sl[i])
+ ecsj := EncodeCallSiteKey(sl[j])
+ return ecsi < ecsj
+ })
+
+ mkname := func(fn *ir.Func) string {
+ var n string
+ if fn == nil || fn.Nname == nil {
+ return "<nil>"
+ }
+ if fn.Sym().Pkg == types.LocalPkg {
+ n = "·" + fn.Sym().Name
+ } else {
+ n = ir.PkgFuncName(fn)
+ }
+ // don't try to print super-long names
+ if len(n) <= 64 {
+ return n
+ }
+ return n[:32] + "..." + n[len(n)-32:len(n)]
+ }
+
+ if len(sl) != 0 {
+ fmt.Fprintf(os.Stdout, "# scores for package %s\n", types.LocalPkg.Path)
+ fmt.Fprintf(os.Stdout, "# Score Adjustment Status Callee CallerPos Flags ScoreFlags\n")
+ }
+ for _, cs := range sl {
+ hairyval := cs.Callee.Inl.Cost
+ adj := int32(cs.Score) - hairyval
+ nm := mkname(cs.Callee)
+ ecc := EncodeCallSiteKey(cs)
+ fmt.Fprintf(os.Stdout, "%d %d\t%s\t%s\t%s\t%s\n",
+ cs.Score, adj, genstatus(cs),
+ nm, ecc,
+ cs.ScoreMask.String())
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/serialize.go b/src/cmd/compile/internal/inline/inlheur/serialize.go
new file mode 100644
index 0000000000..d650626679
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/serialize.go
@@ -0,0 +1,80 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import "strings"
+
+func (funcProps *FuncProps) SerializeToString() string {
+ if funcProps == nil {
+ return ""
+ }
+ var sb strings.Builder
+ writeUleb128(&sb, uint64(funcProps.Flags))
+ writeUleb128(&sb, uint64(len(funcProps.ParamFlags)))
+ for _, pf := range funcProps.ParamFlags {
+ writeUleb128(&sb, uint64(pf))
+ }
+ writeUleb128(&sb, uint64(len(funcProps.ResultFlags)))
+ for _, rf := range funcProps.ResultFlags {
+ writeUleb128(&sb, uint64(rf))
+ }
+ return sb.String()
+}
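+
+// For illustration, a FuncProps value with Flags == FuncPropNeverReturns,
+// a single parameter flagged ParamFeedsIfOrSwitch (32), and no results
+// serializes (via the ULEB128 helpers below) to the four bytes
+// 0x01 0x01 0x20 0x00: flags, param count, param flag, result count.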
+
+func DeserializeFromString(s string) *FuncProps {
+ if len(s) == 0 {
+ return nil
+ }
+ var funcProps FuncProps
+ var v uint64
+ sl := []byte(s)
+ v, sl = readULEB128(sl)
+ funcProps.Flags = FuncPropBits(v)
+ v, sl = readULEB128(sl)
+ funcProps.ParamFlags = make([]ParamPropBits, v)
+ for i := range funcProps.ParamFlags {
+ v, sl = readULEB128(sl)
+ funcProps.ParamFlags[i] = ParamPropBits(v)
+ }
+ v, sl = readULEB128(sl)
+ funcProps.ResultFlags = make([]ResultPropBits, v)
+ for i := range funcProps.ResultFlags {
+ v, sl = readULEB128(sl)
+ funcProps.ResultFlags[i] = ResultPropBits(v)
+ }
+ return &funcProps
+}
+
+func readULEB128(sl []byte) (value uint64, rsl []byte) {
+ var shift uint
+
+ for {
+ b := sl[0]
+ sl = sl[1:]
+ value |= (uint64(b&0x7F) << shift)
+ if b&0x80 == 0 {
+ break
+ }
+ shift += 7
+ }
+ return value, sl
+}
+
+func writeUleb128(sb *strings.Builder, v uint64) {
+ if v < 128 {
+ sb.WriteByte(uint8(v))
+ return
+ }
+ more := true
+ for more {
+ c := uint8(v & 0x7f)
+ v >>= 7
+ more = v != 0
+ if more {
+ c |= 0x80
+ }
+ sb.WriteByte(c)
+ }
+}
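+
+// A worked example of the ULEB128 helpers above (value chosen for
+// illustration): writeUleb128(&sb, 300) emits the two bytes 0xAC 0x02,
+// since 300 = 0x2C | (0x02 << 7) and only the first byte carries the
+// continuation bit; readULEB128 on those bytes returns 300 plus an
+// empty remainder slice. Values below 128 occupy a single byte.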
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/dumpscores.go b/src/cmd/compile/internal/inline/inlheur/testdata/dumpscores.go
new file mode 100644
index 0000000000..6f2f76002e
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/dumpscores.go
@@ -0,0 +1,45 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dumpscores
+
+var G int
+
+func inlinable(x int, f func(int) int) int {
+ if x != 0 {
+ return 1
+ }
+ G += noninl(x)
+ return f(x)
+}
+
+func inlinable2(x int) int {
+ return noninl(-x)
+}
+
+//go:noinline
+func noninl(x int) int {
+ return x + 1
+}
+
+func tooLargeToInline(x int) int {
+ if x > 101 {
+ // Drive up the cost of inlining this func over the
+ // regular threshold.
+ return big(big(big(big(big(G + x)))))
+ }
+ if x < 100 {
+ // make sure this callsite is scored properly
+ G += inlinable(101, inlinable2)
+ if G == 101 {
+ return 0
+ }
+ panic(inlinable2(3))
+ }
+ return G
+}
+
+func big(q int) int {
+ return noninl(q) + noninl(-q)
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/README.txt b/src/cmd/compile/internal/inline/inlheur/testdata/props/README.txt
new file mode 100644
index 0000000000..af5ebec850
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/README.txt
@@ -0,0 +1,77 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+Notes on the format of the testcase files in
+cmd/compile/internal/inline/inlheur/testdata/props:
+
+- each (compilable) file contains input Go code and expected results
+ in the form of column-0 comments.
+
+- functions or methods that begin with "T_" are targeted for testing,
+ as well as "init" functions; all other functions are ignored.
+
+- function header comments begin with a line containing
+ the file name, function name, definition line, then index
+ and a count of the number of funcs that share that same
+ definition line (needed to support generics). Example:
+
+ // foo.go T_mumble 35 1 4
+
+ Here "T_mumble" is defined at line 35, and it is func 1
+ out of the 4 funcs that share that same line.
+
+- function property expected results appear as comments immediately
+ prior to the function. For example, here we have first the function
+ name ("T_feeds_if_simple"), then human-readable dump of the function
+ properties, as well as the JSON for the properties object, each
+ section separated by a "<>" delimiter.
+
+ // params.go T_feeds_if_simple 35 0 1
+ // RecvrParamFlags:
+ // 0: ParamFeedsIfOrSwitch
+ // <endpropsdump>
+ // {"Flags":0,"RecvrParamFlags":[8],"ReturnFlags":[]}
+ // callsite: params.go:34:10|0 "CallSiteOnPanicPath" 2
+ // <endcallsites>
+ // <endfuncpreamble>
+ func T_feeds_if_simple(x int) {
+ if x < 100 {
+ os.Exit(1)
+ }
+ println(x)
+ }
+
+- when the test runs, it will compile the Go source file with an
+ option to dump out function properties, then compare the new dump
+ for each function with the JSON appearing in the header comment for
+ the function (in the example above, the JSON appears between
+ "<endpropsdump>" and "<endfuncpreamble>". The material prior to the
+ "<endpropsdump>" and "<endfuncpreamble>"). The material prior to the
+ easily see that "RecvrParamFlags":[8] means that the first parameter
+ has flag ParamFeedsIfOrSwitch.
+
+- when making changes to the compiler (which can alter the expected
+ results) or edits/additions to the go code in the testcase files,
+ you can remaster the results by running
+
+ go test -v -count=1 .
+
+ In the trace output of this run, you'll see messages of the form
+
+ === RUN TestFuncProperties
+ funcprops_test.go:NNN: update-expected: emitted updated file
+ testdata/props/XYZ.go.new
+ funcprops_test.go:MMM: please compare the two files, then overwrite
+ testdata/props/XYZ.go with testdata/props/XYZ.go.new
+
+ at which point you can compare the old and new files by hand, then
+ overwrite the *.go file with the *.go.new file if you are happy with
+ the diffs.
+
+- note that the remastering process will strip out any existing
+ column-0 (unindented) comments; if you write comments that you
+ want to see preserved, use "/* */" or indent them.
+
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/acrosscall.go b/src/cmd/compile/internal/inline/inlheur/testdata/props/acrosscall.go
new file mode 100644
index 0000000000..a8166fddb6
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/acrosscall.go
@@ -0,0 +1,214 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// DO NOT EDIT (use 'go test -v -update-expected' instead.)
+// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt
+// for more information on the format of this file.
+// <endfilepreamble>
+package params
+
+// acrosscall.go T_feeds_indirect_call_via_call_toplevel 19 0 1
+// ParamFlags
+// 0 ParamFeedsIndirectCall
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[8],"ResultFlags":null}
+// callsite: acrosscall.go:20:12|0 flagstr "" flagval 0 score 60 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_indirect_call_via_call_toplevel(f func(int)) {
+ callsparam(f)
+}
+
+// acrosscall.go T_feeds_indirect_call_via_call_conditional 31 0 1
+// ParamFlags
+// 0 ParamMayFeedIndirectCall
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[16],"ResultFlags":null}
+// callsite: acrosscall.go:33:13|0 flagstr "" flagval 0 score 60 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_indirect_call_via_call_conditional(f func(int)) {
+ if G != 101 {
+ callsparam(f)
+ }
+}
+
+// acrosscall.go T_feeds_conditional_indirect_call_via_call_toplevel 45 0 1
+// ParamFlags
+// 0 ParamMayFeedIndirectCall
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[16],"ResultFlags":null}
+// callsite: acrosscall.go:46:23|0 flagstr "" flagval 0 score 64 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_conditional_indirect_call_via_call_toplevel(f func(int)) {
+ callsparamconditional(f)
+}
+
+// acrosscall.go T_feeds_if_via_call 57 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// callsite: acrosscall.go:58:9|0 flagstr "" flagval 0 score 8 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_via_call(x int) {
+ feedsif(x)
+}
+
+// acrosscall.go T_feeds_if_via_call_conditional 69 0 1
+// ParamFlags
+// 0 ParamMayFeedIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[64],"ResultFlags":null}
+// callsite: acrosscall.go:71:10|0 flagstr "" flagval 0 score 8 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_via_call_conditional(x int) {
+ if G != 101 {
+ feedsif(x)
+ }
+}
+
+// acrosscall.go T_feeds_conditional_if_via_call 83 0 1
+// ParamFlags
+// 0 ParamMayFeedIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[64],"ResultFlags":null}
+// callsite: acrosscall.go:84:20|0 flagstr "" flagval 0 score 12 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_conditional_if_via_call(x int) {
+ feedsifconditional(x)
+}
+
+// acrosscall.go T_multifeeds1 97 0 1
+// ParamFlags
+// 0 ParamFeedsIndirectCall|ParamMayFeedIndirectCall
+// 1 ParamNoInfo
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[24,0],"ResultFlags":null}
+// callsite: acrosscall.go:98:12|0 flagstr "" flagval 0 score 60 mask 0 maskstr ""
+// callsite: acrosscall.go:99:23|1 flagstr "" flagval 0 score 64 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_multifeeds1(f1, f2 func(int)) {
+ callsparam(f1)
+ callsparamconditional(f1)
+}
+
+// acrosscall.go T_acrosscall_returnsconstant 110 0 1
+// ResultFlags
+// 0 ResultAlwaysSameConstant
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[8]}
+// callsite: acrosscall.go:111:24|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_acrosscall_returnsconstant() int {
+ return returnsconstant()
+}
+
+// acrosscall.go T_acrosscall_returnsmem 122 0 1
+// ResultFlags
+// 0 ResultIsAllocatedMem
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[2]}
+// callsite: acrosscall.go:123:19|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_acrosscall_returnsmem() *int {
+ return returnsmem()
+}
+
+// acrosscall.go T_acrosscall_returnscci 134 0 1
+// ResultFlags
+// 0 ResultIsConcreteTypeConvertedToInterface
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[4]}
+// callsite: acrosscall.go:135:19|0 flagstr "" flagval 0 score 7 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_acrosscall_returnscci() I {
+ return returnscci()
+}
+
+// acrosscall.go T_acrosscall_multiret 144 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: acrosscall.go:146:25|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_acrosscall_multiret(q int) int {
+ if q != G {
+ return returnsconstant()
+ }
+ return 0
+}
+
+// acrosscall.go T_acrosscall_multiret2 158 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: acrosscall.go:160:25|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// callsite: acrosscall.go:162:25|1 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_acrosscall_multiret2(q int) int {
+ if q == G {
+ return returnsconstant()
+ } else {
+ return returnsconstant()
+ }
+}
+
+func callsparam(f func(int)) {
+ f(2)
+}
+
+func callsparamconditional(f func(int)) {
+ if G != 101 {
+ f(2)
+ }
+}
+
+func feedsif(x int) int {
+ if x != 101 {
+ return 42
+ }
+ return 43
+}
+
+func feedsifconditional(x int) int {
+ if G != 101 {
+ if x != 101 {
+ return 42
+ }
+ }
+ return 43
+}
+
+func returnsconstant() int {
+ return 42
+}
+
+func returnsmem() *int {
+ return new(int)
+}
+
+func returnscci() I {
+ var q Q
+ return q
+}
+
+type I interface {
+ Foo()
+}
+
+type Q int
+
+func (q Q) Foo() {
+}
+
+var G int
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/calls.go b/src/cmd/compile/internal/inline/inlheur/testdata/props/calls.go
new file mode 100644
index 0000000000..5cc217b4ba
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/calls.go
@@ -0,0 +1,240 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// DO NOT EDIT (use 'go test -v -update-expected' instead.)
+// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt
+// for more information on the format of this file.
+// <endfilepreamble>
+package calls
+
+import "os"
+
+// calls.go T_call_in_panic_arg 19 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// callsite: calls.go:21:15|0 flagstr "CallSiteOnPanicPath" flagval 2 score 42 mask 1 maskstr "panicPathAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_call_in_panic_arg(x int) {
+ if x < G {
+ panic(callee(x))
+ }
+}
+
+// calls.go T_calls_in_loops 32 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// callsite: calls.go:34:9|0 flagstr "CallSiteInLoop" flagval 1 score -3 mask 4 maskstr "inLoopAdj"
+// callsite: calls.go:37:9|1 flagstr "CallSiteInLoop" flagval 1 score -3 mask 4 maskstr "inLoopAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_calls_in_loops(x int, q []string) {
+ for i := 0; i < x; i++ {
+ callee(i)
+ }
+ for _, s := range q {
+ callee(len(s))
+ }
+}
+
+// calls.go T_calls_in_pseudo_loop 48 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// callsite: calls.go:50:9|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// callsite: calls.go:54:9|1 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_calls_in_pseudo_loop(x int, q []string) {
+ for i := 0; i < x; i++ {
+ callee(i)
+ return
+ }
+ for _, s := range q {
+ callee(len(s))
+ break
+ }
+}
+
+// calls.go T_calls_on_panic_paths 67 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// callsite: calls.go:69:9|0 flagstr "CallSiteOnPanicPath" flagval 2 score 42 mask 1 maskstr "panicPathAdj"
+// callsite: calls.go:73:9|1 flagstr "CallSiteOnPanicPath" flagval 2 score 42 mask 1 maskstr "panicPathAdj"
+// callsite: calls.go:77:12|2 flagstr "CallSiteOnPanicPath" flagval 2 score 102 mask 1 maskstr "panicPathAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_calls_on_panic_paths(x int, q []string) {
+ if x+G == 101 {
+ callee(x)
+ panic("ouch")
+ }
+ if x < G-101 {
+ callee(x)
+ if len(q) == 0 {
+ G++
+ }
+ callsexit(x)
+ }
+}
+
+// calls.go T_calls_not_on_panic_paths 93 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch|ParamMayFeedIfOrSwitch
+// 1 ParamNoInfo
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[96,0],"ResultFlags":null}
+// callsite: calls.go:103:9|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// callsite: calls.go:112:9|1 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// callsite: calls.go:115:9|2 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// callsite: calls.go:119:12|3 flagstr "CallSiteOnPanicPath" flagval 2 score 102 mask 1 maskstr "panicPathAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_calls_not_on_panic_paths(x int, q []string) {
+ if x != G {
+ panic("ouch")
+ /* Notes: */
+ /* - we only look for post-dominating panic/exit, so */
+ /* this site will in fact not have a panicpath flag */
+ /* - vet will complain about this site as unreachable */
+ callee(x)
+ }
+ if x != G {
+ callee(x)
+ if x < 100 {
+ panic("ouch")
+ }
+ }
+ if x+G == 101 {
+ if x < 100 {
+ panic("ouch")
+ }
+ callee(x)
+ }
+ if x < -101 {
+ callee(x)
+ if len(q) == 0 {
+ return
+ }
+ callsexit(x)
+ }
+}
+
+// calls.go init.0 129 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":null}
+// callsite: calls.go:130:16|0 flagstr "CallSiteInInitFunc" flagval 4 score 22 mask 2 maskstr "initFuncAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func init() {
+ println(callee(5))
+}
+
+// calls.go T_pass_inlinable_func_to_param_feeding_indirect_call 140 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: calls.go:141:19|0 flagstr "" flagval 0 score 16 mask 512 maskstr "passInlinableFuncToIndCallAdj"
+// callsite: calls.go:141:19|calls.go:232:10|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_pass_inlinable_func_to_param_feeding_indirect_call(x int) int {
+ return callsParam(x, callee)
+}
+
+// calls.go T_pass_noninlinable_func_to_param_feeding_indirect_call 150 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: calls.go:153:19|0 flagstr "" flagval 0 score 36 mask 128 maskstr "passFuncToIndCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_pass_noninlinable_func_to_param_feeding_indirect_call(x int) int {
+ // if we inline callsParam we can convert the indirect call
+ // to a direct call, but we can't inline it.
+ return callsParam(x, calleeNoInline)
+}
+
+// calls.go T_pass_inlinable_func_to_param_feeding_nested_indirect_call 165 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[0]}
+// callsite: calls.go:166:25|0 flagstr "" flagval 0 score 27 mask 1024 maskstr "passInlinableFuncToNestedIndCallAdj"
+// callsite: calls.go:166:25|calls.go:237:11|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_pass_inlinable_func_to_param_feeding_nested_indirect_call(x int) int {
+ return callsParamNested(x, callee)
+}
+
+// calls.go T_pass_noninlinable_func_to_param_feeding_nested_indirect_call 177 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[0]}
+// callsite: calls.go:178:25|0 flagstr "" flagval 0 score 47 mask 256 maskstr "passFuncToNestedIndCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_pass_noninlinable_func_to_param_feeding_nested_indirect_call(x int) int {
+ return callsParamNested(x, calleeNoInline)
+}
+
+// calls.go T_call_scoring_in_noninlinable_func 195 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[0]}
+// callsite: calls.go:209:14|0 flagstr "CallSiteOnPanicPath" flagval 2 score 42 mask 1 maskstr "panicPathAdj"
+// callsite: calls.go:210:15|1 flagstr "CallSiteOnPanicPath" flagval 2 score 42 mask 1 maskstr "panicPathAdj"
+// callsite: calls.go:212:19|2 flagstr "" flagval 0 score 16 mask 512 maskstr "passInlinableFuncToIndCallAdj"
+// callsite: calls.go:212:19|calls.go:232:10|0 flagstr "" flagval 0 score 4 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+// calls.go T_call_scoring_in_noninlinable_func.func1 212 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_call_scoring_in_noninlinable_func(x int, sl []int) int {
+ if x == 101 {
+ // Drive up the cost of inlining this func over the
+ // regular threshold.
+ for i := 0; i < 10; i++ {
+ for j := 0; j < i; j++ {
+ sl = append(sl, append(sl, append(sl, append(sl, x)...)...)...)
+ sl = append(sl, sl[0], sl[1], sl[2])
+ x += calleeNoInline(x)
+ }
+ }
+ }
+ if x < 100 {
+ // make sure this callsite is scored properly
+ G += callee(101)
+ panic(callee(x))
+ }
+ return callsParam(x, func(y int) int { return y + x })
+}
+
+var G int
+
+func callee(x int) int {
+ return x
+}
+
+func calleeNoInline(x int) int {
+ defer func() { G++ }()
+ return x
+}
+
+func callsexit(x int) {
+ println(x)
+ os.Exit(x)
+}
+
+func callsParam(x int, f func(int) int) int {
+ return f(x)
+}
+
+func callsParamNested(x int, f func(int) int) int {
+ if x < 0 {
+ return f(x)
+ }
+ return 0
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/funcflags.go b/src/cmd/compile/internal/inline/inlheur/testdata/props/funcflags.go
new file mode 100644
index 0000000000..f3d74241b4
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/funcflags.go
@@ -0,0 +1,341 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// DO NOT EDIT (use 'go test -v -update-expected' instead.)
+// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt
+// for more information on the format of this file.
+// <endfilepreamble>
+
+package funcflags
+
+import "os"
+
+// funcflags.go T_simple 20 0 1
+// Flags FuncPropNeverReturns
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":null,"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_simple() {
+ panic("bad")
+}
+
+// funcflags.go T_nested 32 0 1
+// Flags FuncPropNeverReturns
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_nested(x int) {
+ if x < 10 {
+ panic("bad")
+ } else {
+ panic("good")
+ }
+}
+
+// funcflags.go T_block1 46 0 1
+// Flags FuncPropNeverReturns
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_block1(x int) {
+ panic("bad")
+ if x < 10 {
+ return
+ }
+}
+
+// funcflags.go T_block2 60 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_block2(x int) {
+ if x < 10 {
+ return
+ }
+ panic("bad")
+}
+
+// funcflags.go T_switches1 75 0 1
+// Flags FuncPropNeverReturns
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_switches1(x int) {
+ switch x {
+ case 1:
+ panic("one")
+ case 2:
+ panic("two")
+ }
+ panic("whatev")
+}
+
+// funcflags.go T_switches1a 92 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_switches1a(x int) {
+ switch x {
+ case 2:
+ panic("two")
+ }
+}
+
+// funcflags.go T_switches2 106 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_switches2(x int) {
+ switch x {
+ case 1:
+ panic("one")
+ case 2:
+ panic("two")
+ default:
+ return
+ }
+ panic("whatev")
+}
+
+// funcflags.go T_switches3 123 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_switches3(x interface{}) {
+ switch x.(type) {
+ case bool:
+ panic("one")
+ case float32:
+ panic("two")
+ }
+}
+
+// funcflags.go T_switches4 138 0 1
+// Flags FuncPropNeverReturns
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_switches4(x int) {
+ switch x {
+ case 1:
+ x++
+ fallthrough
+ case 2:
+ panic("two")
+ fallthrough
+ default:
+ panic("bad")
+ }
+ panic("whatev")
+}
+
+// funcflags.go T_recov 157 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_recov(x int) {
+ if x := recover(); x != nil {
+ panic(x)
+ }
+}
+
+// funcflags.go T_forloops1 169 0 1
+// Flags FuncPropNeverReturns
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_forloops1(x int) {
+ for {
+ panic("wokketa")
+ }
+}
+
+// funcflags.go T_forloops2 180 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_forloops2(x int) {
+ for {
+ println("blah")
+ if true {
+ break
+ }
+ panic("warg")
+ }
+}
+
+// funcflags.go T_forloops3 195 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_forloops3(x int) {
+ for i := 0; i < 101; i++ {
+ println("blah")
+ if true {
+ continue
+ }
+ panic("plark")
+ }
+ for i := range [10]int{} {
+ println(i)
+ panic("plark")
+ }
+ panic("whatev")
+}
+
+// funcflags.go T_hasgotos 215 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_hasgotos(x int, y int) {
+ {
+ xx := x
+ panic("bad")
+ lab1:
+ goto lab2
+ lab2:
+ if false {
+ goto lab1
+ } else {
+ goto lab4
+ }
+ lab4:
+ if xx < y {
+ lab3:
+ if false {
+ goto lab3
+ }
+ }
+ println(9)
+ }
+}
+
+// funcflags.go T_break_with_label 246 0 1
+// ParamFlags
+// 0 ParamMayFeedIfOrSwitch
+// 1 ParamNoInfo
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[64,0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_break_with_label(x int, y int) {
+ // presence of break with label should pessimize this func
+ // (similar to goto).
+ panic("bad")
+lab1:
+ for {
+ println("blah")
+ if x < 0 {
+ break lab1
+ }
+ panic("hubba")
+ }
+}
+
+// funcflags.go T_callsexit 268 0 1
+// Flags FuncPropNeverReturns
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_callsexit(x int) {
+ if x < 0 {
+ os.Exit(1)
+ }
+ os.Exit(2)
+}
+
+// funcflags.go T_exitinexpr 281 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// callsite: funcflags.go:286:18|0 flagstr "CallSiteOnPanicPath" flagval 2 score 102 mask 1 maskstr "panicPathAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_exitinexpr(x int) {
+ // This function does indeed unconditionally call exit, since the
+ // first thing it does is invoke exprcallsexit. However, from the
+ // perspective of this function the call is not at the statement
+ // level, so we'll wind up missing it.
+ if exprcallsexit(x) < 0 {
+ println("foo")
+ }
+}
+
+// funcflags.go T_select_noreturn 297 0 1
+// Flags FuncPropNeverReturns
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[0,0,0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_select_noreturn(chi chan int, chf chan float32, p *int) {
+ rv := 0
+ select {
+ case i := <-chi:
+ rv = i
+ case f := <-chf:
+ rv = int(f)
+ }
+ *p = rv
+ panic("bad")
+}
+
+// funcflags.go T_select_mayreturn 314 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0,0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_select_mayreturn(chi chan int, chf chan float32, p *int) int {
+ rv := 0
+ select {
+ case i := <-chi:
+ rv = i
+ return i
+ case f := <-chf:
+ rv = int(f)
+ }
+ *p = rv
+ panic("bad")
+}
+
+// funcflags.go T_calls_callsexit 334 0 1
+// Flags FuncPropNeverReturns
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[0],"ResultFlags":null}
+// callsite: funcflags.go:335:15|0 flagstr "CallSiteOnPanicPath" flagval 2 score 102 mask 1 maskstr "panicPathAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_calls_callsexit(x int) {
+ exprcallsexit(x)
+}
+
+func exprcallsexit(x int) int {
+ os.Exit(x)
+ return x
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/params.go b/src/cmd/compile/internal/inline/inlheur/testdata/props/params.go
new file mode 100644
index 0000000000..1a3073c25c
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/params.go
@@ -0,0 +1,367 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// DO NOT EDIT (use 'go test -v -update-expected' instead.)
+// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt
+// for more information on the format of this file.
+// <endfilepreamble>
+package params
+
+import "os"
+
+// params.go T_feeds_if_simple 20 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_simple(x int) {
+ if x < 100 {
+ os.Exit(1)
+ }
+ println(x)
+}
+
+// params.go T_feeds_if_nested 35 0 1
+// ParamFlags
+// 0 ParamMayFeedIfOrSwitch
+// 1 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[64,32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_nested(x, y int) {
+ if y != 0 {
+ if x < 100 {
+ os.Exit(1)
+ }
+ }
+ println(x)
+}
+
+// params.go T_feeds_if_pointer 51 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_pointer(xp *int) {
+ if xp != nil {
+ os.Exit(1)
+ }
+ println(xp)
+}
+
+// params.go T.T_feeds_if_simple_method 66 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// 1 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32,32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func (r T) T_feeds_if_simple_method(x int) {
+ if x < 100 {
+ os.Exit(1)
+ }
+ if r != 99 {
+ os.Exit(2)
+ }
+ println(x)
+}
+
+// params.go T_feeds_if_blanks 86 0 1
+// ParamFlags
+// 0 ParamNoInfo
+// 1 ParamFeedsIfOrSwitch
+// 2 ParamNoInfo
+// 3 ParamNoInfo
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,32,0,0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_blanks(_ string, x int, _ bool, _ bool) {
+ // from a props perspective blanks still count as params, so "x" is param 1
+ if x < 100 {
+ os.Exit(1)
+ }
+ println(x)
+}
+
+// params.go T_feeds_if_with_copy 101 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_with_copy(x int) {
+ // simple copy here -- we get this case
+ xx := x
+ if xx < 100 {
+ os.Exit(1)
+ }
+ println(x)
+}
+
+// params.go T_feeds_if_with_copy_expr 115 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_with_copy_expr(x int) {
+ // this case (copy of expression) currently not handled.
+ xx := x < 100
+ if xx {
+ os.Exit(1)
+ }
+ println(x)
+}
+
+// params.go T_feeds_switch 131 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_switch(x int) {
+ switch x {
+ case 101:
+ println(101)
+ case 202:
+ panic("bad")
+ }
+ println(x)
+}
+
+// params.go T_feeds_if_toocomplex 146 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_toocomplex(x int, y int) {
+ // not handled at the moment; we only look for cases where
+ // an "if" or "switch" can be simplified based on a single
+ // constant param, not a combination of constant params.
+ if x < y {
+ panic("bad")
+ }
+ println(x + y)
+}
+
+// params.go T_feeds_if_redefined 161 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_redefined(x int) {
+ if x < G {
+ x++
+ }
+ if x == 101 {
+ panic("bad")
+ }
+}
+
+// params.go T_feeds_if_redefined2 175 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_redefined2(x int) {
+ // this currently classifies "x" as "no info", since the analysis we
+ // use to check for reassignments/redefinitions is not flow-sensitive,
+ // but we could probably catch this case with better analysis or
+ // high-level SSA.
+ if x == 101 {
+ panic("bad")
+ }
+ if x < G {
+ x++
+ }
+}
+
+// params.go T_feeds_multi_if 196 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// 1 ParamNoInfo
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32,0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_multi_if(x int, y int) {
+ // Here we have one "if" that is too complex (x < y) but one that is
+ // simple enough. Currently we enable the heuristic for this. It's
+ // possible to imagine this being a bad thing if the function in
+ // question is sufficiently large, but if it's too large we probably
+ // can't inline it anyhow.
+ if x < y {
+ panic("bad")
+ }
+ if x < 10 {
+ panic("whatev")
+ }
+ println(x + y)
+}
+
+// params.go T_feeds_if_redefined_indirectwrite 216 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_redefined_indirectwrite(x int) {
+ ax := &x
+ if G != 2 {
+ *ax = G
+ }
+ if x == 101 {
+ panic("bad")
+ }
+}
+
+// params.go T_feeds_if_redefined_indirectwrite_copy 231 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_redefined_indirectwrite_copy(x int) {
+ // We don't catch this case: "x" is marked as "no info",
+ // since we're conservative about redefinitions.
+ ax := &x
+ cx := x
+ if G != 2 {
+ *ax = G
+ }
+ if cx == 101 {
+ panic("bad")
+ }
+}
+
+// params.go T_feeds_if_expr1 251 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_expr1(x int) {
+ if x == 101 || x == 102 || x&0xf == 0 {
+ panic("bad")
+ }
+}
+
+// params.go T_feeds_if_expr2 262 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_expr2(x int) {
+ if (x*x)-(x+x)%x == 101 || x&0xf == 0 {
+ panic("bad")
+ }
+}
+
+// params.go T_feeds_if_expr3 273 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_expr3(x int) {
+ if x-(x&0x1)^378 > (1 - G) {
+ panic("bad")
+ }
+}
+
+// params.go T_feeds_if_shift_may_panic 284 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_shift_may_panic(x int) *int {
+ // here if "x" is a constant like 2, we could simplify the "if",
+ // but if we were to pass in a negative value for "x" we can't
+ // fold the condition due to the need to panic on negative shift.
+ if 1<<x > 1024 {
+ return nil
+ }
+ return &G
+}
+
+// params.go T_feeds_if_maybe_divide_by_zero 299 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_maybe_divide_by_zero(x int) {
+ if 99/x == 3 {
+ return
+ }
+ println("blarg")
+}
+
+// params.go T_feeds_indcall 313 0 1
+// ParamFlags
+// 0 ParamMayFeedIndirectCall
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[16],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_indcall(x func()) {
+ if G != 20 {
+ x()
+ }
+}
+
+// params.go T_feeds_indcall_and_if 326 0 1
+// ParamFlags
+// 0 ParamMayFeedIndirectCall|ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[48],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_indcall_and_if(x func()) {
+ if x != nil {
+ x()
+ }
+}
+
+// params.go T_feeds_indcall_with_copy 339 0 1
+// ParamFlags
+// 0 ParamFeedsIndirectCall
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[8],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_indcall_with_copy(x func()) {
+ xx := x
+ if G < 10 {
+ G--
+ }
+ xx()
+}
+
+// params.go T_feeds_interface_method_call 354 0 1
+// ParamFlags
+// 0 ParamFeedsInterfaceMethodCall
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[2],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_interface_method_call(i I) {
+ i.Blarg()
+}
+
+var G int
+
+type T int
+
+type I interface {
+ Blarg()
+}
+
+func (r T) Blarg() {
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/returns.go b/src/cmd/compile/internal/inline/inlheur/testdata/props/returns.go
new file mode 100644
index 0000000000..51f2bc7cb2
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/returns.go
@@ -0,0 +1,370 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// DO NOT EDIT (use 'go test -v -update-expected' instead.)
+// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt
+// for more information on the format of this file.
+// <endfilepreamble>
+
+package returns1
+
+import "unsafe"
+
+// returns.go T_simple_allocmem 21 0 1
+// ResultFlags
+// 0 ResultIsAllocatedMem
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[2]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_simple_allocmem() *Bar {
+ return &Bar{}
+}
+
+// returns.go T_allocmem_two_returns 34 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// ResultFlags
+// 0 ResultIsAllocatedMem
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[2]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_allocmem_two_returns(x int) *Bar {
+ // multiple returns
+ if x < 0 {
+ return new(Bar)
+ } else {
+ return &Bar{x: 2}
+ }
+}
+
+// returns.go T_allocmem_three_returns 52 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// ResultFlags
+// 0 ResultIsAllocatedMem
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[2]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_allocmem_three_returns(x int) []*Bar {
+ // more multiple returns
+ switch x {
+ case 10, 11, 12:
+ return make([]*Bar, 10)
+ case 13:
+ fallthrough
+ case 15:
+ return []*Bar{&Bar{x: 15}}
+ }
+ return make([]*Bar, 0, 10)
+}
+
+// returns.go T_return_nil 72 0 1
+// ResultFlags
+// 0 ResultAlwaysSameConstant
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[8]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_nil() *Bar {
+ // simple case: no alloc
+ return nil
+}
+
+// returns.go T_multi_return_nil 84 0 1
+// ResultFlags
+// 0 ResultAlwaysSameConstant
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[8]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_multi_return_nil(x, y bool) *Bar {
+ if x && y {
+ return nil
+ }
+ return nil
+}
+
+// returns.go T_multi_return_nil_anomoly 98 0 1
+// ResultFlags
+// 0 ResultIsConcreteTypeConvertedToInterface
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[4]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_multi_return_nil_anomoly(x, y bool) Itf {
+ if x && y {
+ var qnil *Q
+ return qnil
+ }
+ var barnil *Bar
+ return barnil
+}
+
+// returns.go T_multi_return_some_nil 112 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_multi_return_some_nil(x, y bool) *Bar {
+ if x && y {
+ return nil
+ } else {
+ return &GB
+ }
+}
+
+// returns.go T_mixed_returns 127 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_mixed_returns(x int) *Bar {
+ // mix of alloc and non-alloc
+ if x < 0 {
+ return new(Bar)
+ } else {
+ return &GB
+ }
+}
+
+// returns.go T_mixed_returns_slice 143 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_mixed_returns_slice(x int) []*Bar {
+ // mix of alloc and non-alloc
+ switch x {
+ case 10, 11, 12:
+ return make([]*Bar, 10)
+ case 13:
+ fallthrough
+ case 15:
+ return []*Bar{&Bar{x: 15}}
+ }
+ ba := [...]*Bar{&GB, &GB}
+ return ba[:]
+}
+
+// returns.go T_maps_and_channels 167 0 1
+// ResultFlags
+// 0 ResultNoInfo
+// 1 ResultNoInfo
+// 2 ResultNoInfo
+// 3 ResultAlwaysSameConstant
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[0,0,0,8]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_maps_and_channels(x int, b bool) (bool, map[int]int, chan bool, unsafe.Pointer) {
+ // maps and channels
+ return b, make(map[int]int), make(chan bool), nil
+}
+
+// returns.go T_assignment_to_named_returns 179 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[0,0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_assignment_to_named_returns(x int) (r1 *uint64, r2 *uint64) {
+ // assignments to named returns and then "return" not supported
+ r1 = new(uint64)
+ if x < 1 {
+ *r1 = 2
+ }
+ r2 = new(uint64)
+ return
+}
+
+// returns.go T_named_returns_but_return_explicit_values 199 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// ResultFlags
+// 0 ResultIsAllocatedMem
+// 1 ResultIsAllocatedMem
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[2,2]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_named_returns_but_return_explicit_values(x int) (r1 *uint64, r2 *uint64) {
+ // named returns ok if all returns are non-empty
+ rx1 := new(uint64)
+ if x < 1 {
+ *rx1 = 2
+ }
+ rx2 := new(uint64)
+ return rx1, rx2
+}
+
+// returns.go T_return_concrete_type_to_itf 216 0 1
+// ResultFlags
+// 0 ResultIsConcreteTypeConvertedToInterface
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[4]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_concrete_type_to_itf(x, y int) Itf {
+ return &Bar{}
+}
+
+// returns.go T_return_concrete_type_to_itfwith_copy 227 0 1
+// ResultFlags
+// 0 ResultIsConcreteTypeConvertedToInterface
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[4]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_concrete_type_to_itfwith_copy(x, y int) Itf {
+ b := &Bar{}
+ println("whee")
+ return b
+}
+
+// returns.go T_return_concrete_type_to_itf_mixed 238 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_concrete_type_to_itf_mixed(x, y int) Itf {
+ if x < y {
+ b := &Bar{}
+ return b
+ }
+ return nil
+}
+
+// returns.go T_return_same_func 253 0 1
+// ResultFlags
+// 0 ResultAlwaysSameInlinableFunc
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[32]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_same_func() func(int) int {
+ if G < 10 {
+ return foo
+ } else {
+ return foo
+ }
+}
+
+// returns.go T_return_different_funcs 266 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_different_funcs() func(int) int {
+ if G != 10 {
+ return foo
+ } else {
+ return bar
+ }
+}
+
+// returns.go T_return_same_closure 286 0 1
+// ResultFlags
+// 0 ResultAlwaysSameInlinableFunc
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[32]}
+// <endcallsites>
+// <endfuncpreamble>
+// returns.go T_return_same_closure.func1 287 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_same_closure() func(int) int {
+ p := func(q int) int { return q }
+ if G < 10 {
+ return p
+ } else {
+ return p
+ }
+}
+
+// returns.go T_return_different_closures 312 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+// returns.go T_return_different_closures.func1 313 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+// returns.go T_return_different_closures.func2 317 0 1
+// ResultFlags
+// 0 ResultAlwaysSameConstant
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[8]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_different_closures() func(int) int {
+ p := func(q int) int { return q }
+ if G < 10 {
+ return p
+ } else {
+ return func(q int) int { return 101 }
+ }
+}
+
+// returns.go T_return_noninlinable 339 0 1
+// ResultFlags
+// 0 ResultAlwaysSameFunc
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[16]}
+// <endcallsites>
+// <endfuncpreamble>
+// returns.go T_return_noninlinable.func1 340 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: returns.go:343:4|0 flagstr "" flagval 0 score 4 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+// returns.go T_return_noninlinable.func1.1 341 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_noninlinable(x int) func(int) int {
+ noti := func(q int) int {
+ defer func() {
+ println(q + x)
+ }()
+ return q
+ }
+ return noti
+}
+
+type Bar struct {
+ x int
+ y string
+}
+
+func (b *Bar) Plark() {
+}
+
+type Q int
+
+func (q *Q) Plark() {
+}
+
+func foo(x int) int { return x }
+func bar(x int) int { return -x }
+
+var G int
+var GB Bar
+
+type Itf interface {
+ Plark()
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/returns2.go b/src/cmd/compile/internal/inline/inlheur/testdata/props/returns2.go
new file mode 100644
index 0000000000..7200926fb8
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/returns2.go
@@ -0,0 +1,231 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// DO NOT EDIT (use 'go test -v -update-expected' instead.)
+// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt
+// for more information on the format of this file.
+// <endfilepreamble>
+
+package returns2
+
+// returns2.go T_return_feeds_iface_call 18 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":null}
+// callsite: returns2.go:19:13|0 flagstr "" flagval 0 score 1 mask 16384 maskstr "returnFeedsConcreteToInterfaceCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_feeds_iface_call() {
+ b := newBar(10)
+ b.Plark()
+}
+
+// returns2.go T_multi_return_feeds_iface_call 29 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":null}
+// callsite: returns2.go:30:20|0 flagstr "" flagval 0 score 3 mask 16384 maskstr "returnFeedsConcreteToInterfaceCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_multi_return_feeds_iface_call() {
+ _, b, _ := newBar2(10)
+ b.Plark()
+}
+
+// returns2.go T_returned_inlinable_func_feeds_indirect_call 41 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// callsite: returns2.go:42:18|0 flagstr "" flagval 0 score -51 mask 8200 maskstr "passConstToIfAdj|returnFeedsInlinableFuncToIndCallAdj"
+// callsite: returns2.go:44:20|1 flagstr "" flagval 0 score -23 mask 8192 maskstr "returnFeedsInlinableFuncToIndCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_returned_inlinable_func_feeds_indirect_call(q int) {
+ f := returnsFunc(10)
+ f(q)
+ f2 := returnsFunc2()
+ f2(q)
+}
+
+// returns2.go T_returned_noninlineable_func_feeds_indirect_call 54 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// callsite: returns2.go:55:30|0 flagstr "" flagval 0 score -23 mask 4096 maskstr "returnFeedsFuncToIndCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_returned_noninlineable_func_feeds_indirect_call(q int) {
+ f := returnsNonInlinableFunc()
+ f(q)
+}
+
+// returns2.go T_multi_return_feeds_indirect_call 65 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// callsite: returns2.go:66:29|0 flagstr "" flagval 0 score -21 mask 8192 maskstr "returnFeedsInlinableFuncToIndCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_multi_return_feeds_indirect_call(q int) {
+ _, f, _ := multiReturnsFunc()
+ f(q)
+}
+
+// returns2.go T_return_feeds_ifswitch 76 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: returns2.go:77:14|0 flagstr "" flagval 0 score 10 mask 2048 maskstr "returnFeedsConstToIfAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_feeds_ifswitch(q int) int {
+ x := meaning(q)
+ if x < 42 {
+ switch x {
+ case 42:
+ return 1
+ }
+ }
+ return 0
+}
+
+// returns2.go T_multi_return_feeds_ifswitch 93 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: returns2.go:94:21|0 flagstr "" flagval 0 score 9 mask 2048 maskstr "returnFeedsConstToIfAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_multi_return_feeds_ifswitch(q int) int {
+ x, y, z := meanings(q)
+ if x < y {
+ switch x {
+ case 42:
+ return z
+ }
+ }
+ return 0
+}
+
+// returns2.go T_two_calls_feed_ifswitch 111 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: returns2.go:115:14|0 flagstr "" flagval 0 score 25 mask 0 maskstr ""
+// callsite: returns2.go:116:14|1 flagstr "" flagval 0 score 25 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_two_calls_feed_ifswitch(q int) int {
+ // We don't handle this case; for the heuristic to kick in,
+ // all names in a given if/switch cond have to come from the
+ // same callsite.
+ x := meaning(q)
+ y := meaning(-q)
+ if x < y {
+ switch x + y {
+ case 42:
+ return 1
+ }
+ }
+ return 0
+}
+
+// returns2.go T_chained_indirect_call 132 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// callsite: returns2.go:135:18|0 flagstr "" flagval 0 score -31 mask 8192 maskstr "returnFeedsInlinableFuncToIndCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_chained_indirect_call(x, y int) {
+ // Here 'returnsFunc' returns an inlinable func that feeds
+ // directly into a call (no named intermediate).
+ G += returnsFunc(x - y)(x + y)
+}
+
+// returns2.go T_chained_conc_iface_call 144 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// callsite: returns2.go:148:8|0 flagstr "" flagval 0 score 1 mask 16384 maskstr "returnFeedsConcreteToInterfaceCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_chained_conc_iface_call(x, y int) {
+ // Similar to the case above, return from call returning concrete type
+ // feeds directly into interface call. Note that only the first
+ // iface call is interesting here.
+ newBar(10).Plark().Plark()
+}
+
+func returnsFunc(x int) func(int) int {
+ if x < 0 {
+ G++
+ }
+ return adder
+}
+
+func returnsFunc2() func(int) int {
+ return func(x int) int {
+ return adder(x)
+ }
+}
+
+func returnsNonInlinableFunc() func(int) int {
+ return adderNoInline
+}
+
+func multiReturnsFunc() (int, func(int) int, int) {
+ return 42, func(x int) int { G++; return 1 }, -42
+}
+
+func adder(x int) int {
+ G += 1
+ return G
+}
+
+func adderNoInline(x int) int {
+ defer func() { G += x }()
+ G += 1
+ return G
+}
+
+func meaning(q int) int {
+ r := 0
+ for i := 0; i < 42; i++ {
+ r += q
+ }
+ G += r
+ return 42
+}
+
+func meanings(q int) (int, int, int) {
+ r := 0
+ for i := 0; i < 42; i++ {
+ r += q
+ }
+ return 42, 43, r
+}
+
+type Bar struct {
+ x int
+ y string
+}
+
+func (b *Bar) Plark() Itf {
+ return b
+}
+
+type Itf interface {
+ Plark() Itf
+}
+
+func newBar(x int) Itf {
+ s := 0
+ for i := 0; i < x; i++ {
+ s += i
+ }
+ return &Bar{
+ x: s,
+ }
+}
+
+func newBar2(x int) (int, Itf, bool) {
+ s := 0
+ for i := 0; i < x; i++ {
+ s += i
+ }
+ return 0, &Bar{x: s}, false
+}
+
+var G int
diff --git a/src/cmd/compile/internal/inline/inlheur/texpr_classify_test.go b/src/cmd/compile/internal/inline/inlheur/texpr_classify_test.go
new file mode 100644
index 0000000000..587eab03fc
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/texpr_classify_test.go
@@ -0,0 +1,217 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "go/constant"
+ "testing"
+)
+
+var pos src.XPos
+var local *types.Pkg
+var f *ir.Func
+
+func init() {
+ types.PtrSize = 8
+ types.RegSize = 8
+ types.MaxWidth = 1 << 50
+ typecheck.InitUniverse()
+ local = types.NewPkg("", "")
+ fsym := &types.Sym{
+ Pkg: types.NewPkg("my/import/path", "path"),
+ Name: "function",
+ }
+ f = ir.NewFunc(src.NoXPos, src.NoXPos, fsym, nil)
+}
+
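+// state caches the *ir.Name nodes synthesized for a test, keyed by
+// identifier, so that repeated lookups return the same node.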
+type state struct {
+ ntab map[string]*ir.Name
+}
+
+func mkstate() *state {
+ return &state{
+ ntab: make(map[string]*ir.Name),
+ }
+}
+
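+// The helpers below build small untypechecked IR fragments (binary ops,
+// conversions, logical ops, unary ops, and literals) for the classifier tests.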
+func bin(x ir.Node, op ir.Op, y ir.Node) ir.Node {
+ return ir.NewBinaryExpr(pos, op, x, y)
+}
+
+func conv(x ir.Node, t *types.Type) ir.Node {
+ return ir.NewConvExpr(pos, ir.OCONV, t, x)
+}
+
+func logical(x ir.Node, op ir.Op, y ir.Node) ir.Node {
+ return ir.NewLogicalExpr(pos, op, x, y)
+}
+
+func un(op ir.Op, x ir.Node) ir.Node {
+ return ir.NewUnaryExpr(pos, op, x)
+}
+
+func liti(i int64) ir.Node {
+ return ir.NewBasicLit(pos, types.Types[types.TINT64], constant.MakeInt64(i))
+}
+
+func lits(s string) ir.Node {
+ return ir.NewBasicLit(pos, types.Types[types.TSTRING], constant.MakeString(s))
+}
+
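+// nm returns the cached Name for the given identifier, creating it with
+// type t on first use; it panics if a later request asks for a different type.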
+func (s *state) nm(name string, t *types.Type) *ir.Name {
+ if n, ok := s.ntab[name]; ok {
+ if n.Type() != t {
+ panic("bad")
+ }
+ return n
+ }
+ sym := local.Lookup(name)
+ nn := ir.NewNameAt(pos, sym, t)
+ s.ntab[name] = nn
+ return nn
+}
+
+func (s *state) nmi64(name string) *ir.Name {
+ return s.nm(name, types.Types[types.TINT64])
+}
+
+func (s *state) nms(name string) *ir.Name {
+ return s.nm(name, types.Types[types.TSTRING])
+}
+
+func TestClassifyIntegerCompare(t *testing.T) {
+
+ // (n < 10 || n > 100) && (n >= 12 || n <= 99 || n != 101)
+ s := mkstate()
+ nn := s.nmi64("n")
+ nlt10 := bin(nn, ir.OLT, liti(10)) // n < 10
+ ngt100 := bin(nn, ir.OGT, liti(100)) // n > 100
+ nge12 := bin(nn, ir.OGE, liti(12)) // n >= 12
+ nle99 := bin(nn, ir.OLE, liti(99)) // n <= 99
+ nne101 := bin(nn, ir.ONE, liti(101)) // n != 101
+ noror1 := logical(nlt10, ir.OOROR, ngt100) // n < 10 || n > 100
+ noror2 := logical(nge12, ir.OOROR, nle99) // n >= 12 || n <= 99
+ noror3 := logical(noror2, ir.OOROR, nne101)
+ nandand := typecheck.Expr(logical(noror1, ir.OANDAND, noror3))
+
+ wantv := true
+ v := ShouldFoldIfNameConstant(nandand, []*ir.Name{nn})
+ if v != wantv {
+ t.Errorf("wanted shouldfold(%v) %v, got %v", nandand, wantv, v)
+ }
+}
+
+func TestClassifyStringCompare(t *testing.T) {
+
+ // s != "foo" && s < "ooblek" && s > "plarkish"
+ s := mkstate()
+ nn := s.nms("s")
+ snefoo := bin(nn, ir.ONE, lits("foo")) // s != "foo"
+ sltoob := bin(nn, ir.OLT, lits("ooblek")) // s < "ooblek"
+ sgtpk := bin(nn, ir.OGT, lits("plarkish")) // s > "plarkish"
+ nandand := logical(snefoo, ir.OANDAND, sltoob)
+ top := typecheck.Expr(logical(nandand, ir.OANDAND, sgtpk))
+
+ wantv := true
+ v := ShouldFoldIfNameConstant(top, []*ir.Name{nn})
+ if v != wantv {
+ t.Errorf("wanted shouldfold(%v) %v, got %v", top, wantv, v)
+ }
+}
+
+func TestClassifyIntegerArith(t *testing.T) {
+ // n+1 ^ n-3 * n/2 + n<<9 + n>>2 - n&^7
+
+ s := mkstate()
+ nn := s.nmi64("n")
+ np1 := bin(nn, ir.OADD, liti(1)) // n+1
+ nm3 := bin(nn, ir.OSUB, liti(3)) // n-3
+ nd2 := bin(nn, ir.ODIV, liti(2)) // n/2
+ nls9 := bin(nn, ir.OLSH, liti(9)) // n<<9
+ nrs2 := bin(nn, ir.ORSH, liti(2)) // n>>2
+ nan7 := bin(nn, ir.OANDNOT, liti(7)) // n&^7
+ c1xor := bin(np1, ir.OXOR, nm3)
+ c2mul := bin(c1xor, ir.OMUL, nd2)
+ c3add := bin(c2mul, ir.OADD, nls9)
+ c4add := bin(c3add, ir.OADD, nrs2)
+ c5sub := bin(c4add, ir.OSUB, nan7)
+ top := typecheck.Expr(c5sub)
+
+ wantv := true
+ v := ShouldFoldIfNameConstant(top, []*ir.Name{nn})
+ if v != wantv {
+ t.Errorf("wanted shouldfold(%v) %v, got %v", top, wantv, v)
+ }
+}
+
+func TestClassifyAssortedShifts(t *testing.T) {
+
+ s := mkstate()
+ nn := s.nmi64("n")
+ badcases := []ir.Node{
+ bin(liti(3), ir.OLSH, nn), // 3<<n
+ bin(liti(7), ir.ORSH, nn), // 7>>n
+ }
+ for _, bc := range badcases {
+ wantv := false
+ v := ShouldFoldIfNameConstant(typecheck.Expr(bc), []*ir.Name{nn})
+ if v != wantv {
+ t.Errorf("wanted shouldfold(%v) %v, got %v", bc, wantv, v)
+ }
+ }
+}
+
+func TestClassifyFloat(t *testing.T) {
+ // float32(n) + float32(10)
+ s := mkstate()
+ nn := s.nm("n", types.Types[types.TUINT32])
+ f1 := conv(nn, types.Types[types.TFLOAT32])
+ f2 := conv(liti(10), types.Types[types.TFLOAT32])
+ add := bin(f1, ir.OADD, f2)
+
+ wantv := false
+ v := ShouldFoldIfNameConstant(typecheck.Expr(add), []*ir.Name{nn})
+ if v != wantv {
+ t.Errorf("wanted shouldfold(%v) %v, got %v", add, wantv, v)
+ }
+}
+
+func TestMultipleNamesAllUsed(t *testing.T) {
+ // n != 101 && m < 2
+ s := mkstate()
+ nn := s.nmi64("n")
+ nm := s.nmi64("m")
+ nne101 := bin(nn, ir.ONE, liti(101)) // n != 101
+ mlt2 := bin(nm, ir.OLT, liti(2)) // m < 2
+ nandand := typecheck.Expr(logical(nne101, ir.OANDAND, mlt2))
+
+ // all names used
+ wantv := true
+ v := ShouldFoldIfNameConstant(nandand, []*ir.Name{nn, nm})
+ if v != wantv {
+ t.Errorf("wanted shouldfold(%v) %v, got %v", nandand, wantv, v)
+ }
+
+ // not all names used
+ wantv = false
+ v = ShouldFoldIfNameConstant(nne101, []*ir.Name{nn, nm})
+ if v != wantv {
+ t.Errorf("wanted shouldfold(%v) %v, got %v", nne101, wantv, v)
+ }
+
+ // a name not in the provided list is used.
+ np := s.nmi64("p")
+ pne0 := bin(np, ir.ONE, liti(101)) // p != 101
+ noror := logical(nandand, ir.OOROR, pne0)
+ wantv = false
+ v = ShouldFoldIfNameConstant(noror, []*ir.Name{nn, nm})
+ if v != wantv {
+ t.Errorf("wanted shouldfold(%v) %v, got %v", noror, wantv, v)
+ }
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/trace_off.go b/src/cmd/compile/internal/inline/inlheur/trace_off.go
new file mode 100644
index 0000000000..9eea7fa369
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/trace_off.go
@@ -0,0 +1,18 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !debugtrace
+
+package inlheur
+
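+// Stubs used when the compiler is built without the "debugtrace" build
+// tag: tracing stays disabled and these calls are no-ops.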
+const debugTrace = 0
+
+func enableDebugTrace(x int) {
+}
+
+func enableDebugTraceIfEnv() {
+}
+
+func disableDebugTrace() {
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/trace_on.go b/src/cmd/compile/internal/inline/inlheur/trace_on.go
new file mode 100644
index 0000000000..160842905f
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/trace_on.go
@@ -0,0 +1,40 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build debugtrace
+
+package inlheur
+
+import (
+ "os"
+ "strconv"
+)
+
+var debugTrace = 0
+
+func enableDebugTrace(x int) {
+ debugTrace = x
+}
+
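+// enableDebugTraceIfEnv sets the trace level from the DEBUG_TRACE_INLHEUR
+// environment variable; a leading '*' means the setting applies only when
+// running unit tests.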
+func enableDebugTraceIfEnv() {
+ v := os.Getenv("DEBUG_TRACE_INLHEUR")
+ if v == "" {
+ return
+ }
+ if v[0] == '*' {
+ if !UnitTesting() {
+ return
+ }
+ v = v[1:]
+ }
+ i, err := strconv.Atoi(v)
+ if err != nil {
+ return
+ }
+ debugTrace = i
+}
+
+func disableDebugTrace() {
+ debugTrace = 0
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/tserial_test.go b/src/cmd/compile/internal/inline/inlheur/tserial_test.go
new file mode 100644
index 0000000000..def12f5aaf
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/tserial_test.go
@@ -0,0 +1,65 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import "testing"
+
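+// fpeq reports whether two FuncProps values have identical flags,
+// param flags, and result flags.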
+func fpeq(fp1, fp2 FuncProps) bool {
+ if fp1.Flags != fp2.Flags {
+ return false
+ }
+ if len(fp1.ParamFlags) != len(fp2.ParamFlags) {
+ return false
+ }
+ for i := range fp1.ParamFlags {
+ if fp1.ParamFlags[i] != fp2.ParamFlags[i] {
+ return false
+ }
+ }
+ if len(fp1.ResultFlags) != len(fp2.ResultFlags) {
+ return false
+ }
+ for i := range fp1.ResultFlags {
+ if fp1.ResultFlags[i] != fp2.ResultFlags[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func TestSerDeser(t *testing.T) {
+ testcases := []FuncProps{
+ FuncProps{},
+ FuncProps{
+ Flags: 0xfffff,
+ },
+ FuncProps{
+ Flags: 1,
+ ResultFlags: []ResultPropBits{ResultAlwaysSameConstant},
+ },
+ FuncProps{
+ Flags: 1,
+ ParamFlags: []ParamPropBits{0x99, 0xaa, 0xfffff},
+ ResultFlags: []ResultPropBits{0xfeedface},
+ },
+ }
+
+ for k, tc := range testcases {
+ s := tc.SerializeToString()
+ fp := DeserializeFromString(s)
+ got := fp.String()
+ want := tc.String()
+ if !fpeq(*fp, tc) {
+ t.Errorf("eq check failed for test %d: got:\n%s\nwant:\n%s\n", k, got, want)
+ }
+ }
+
+ var nilt *FuncProps
+ ns := nilt.SerializeToString()
+ nfp := DeserializeFromString(ns)
+ if len(ns) != 0 || nfp != nil {
+ t.Errorf("nil serialize/deserialize failed")
+ }
+}
diff --git a/src/cmd/compile/internal/inline/interleaved/interleaved.go b/src/cmd/compile/internal/inline/interleaved/interleaved.go
new file mode 100644
index 0000000000..a6f19d470d
--- /dev/null
+++ b/src/cmd/compile/internal/inline/interleaved/interleaved.go
@@ -0,0 +1,132 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package interleaved implements the interleaved devirtualization and
+// inlining pass.
+package interleaved
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/devirtualize"
+ "cmd/compile/internal/inline"
+ "cmd/compile/internal/inline/inlheur"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/pgo"
+ "cmd/compile/internal/typecheck"
+ "fmt"
+)
+
+// DevirtualizeAndInlinePackage interleaves devirtualization and inlining on
+// all functions within pkg.
+func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgo.Profile) {
+ if profile != nil && base.Debug.PGODevirtualize > 0 {
+ // TODO(mdempsky): Integrate into DevirtualizeAndInlineFunc below.
+ ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) {
+ for _, fn := range list {
+ devirtualize.ProfileGuided(fn, profile)
+ }
+ })
+ ir.CurFunc = nil
+ }
+
+ if base.Flag.LowerL != 0 {
+ inlheur.SetupScoreAdjustments()
+ }
+
+ var inlProfile *pgo.Profile // copy of profile for inlining
+ if base.Debug.PGOInline != 0 {
+ inlProfile = profile
+ }
+ if inlProfile != nil {
+ inline.PGOInlinePrologue(inlProfile, pkg.Funcs)
+ }
+
+ ir.VisitFuncsBottomUp(pkg.Funcs, func(funcs []*ir.Func, recursive bool) {
+ // We visit functions within an SCC in fairly arbitrary order,
+ // so by computing inlinability for all functions in the SCC
+ // before performing any inlining, the results are less
+ // sensitive to the order within the SCC (see #58905 for an
+ // example).
+
+ // First compute inlinability for all functions in the SCC ...
+ inline.CanInlineSCC(funcs, recursive, inlProfile)
+
+ // ... then make a second pass to do devirtualization and inlining
+ // of calls.
+ for _, fn := range funcs {
+ DevirtualizeAndInlineFunc(fn, inlProfile)
+ }
+ })
+
+ if base.Flag.LowerL != 0 {
+ // Perform a garbage collection of hidden closures functions that
+ // are no longer reachable from top-level functions following
+ // inlining. See #59404 and #59638 for more context.
+ inline.GarbageCollectUnreferencedHiddenClosures()
+
+ if base.Debug.DumpInlFuncProps != "" {
+ inlheur.DumpFuncProps(nil, base.Debug.DumpInlFuncProps)
+ }
+ if inlheur.Enabled() {
+ inline.PostProcessCallSites(inlProfile)
+ inlheur.TearDown()
+ }
+ }
+}
+
+// DevirtualizeAndInlineFunc interleaves devirtualization and inlining
+// on a single function.
+func DevirtualizeAndInlineFunc(fn *ir.Func, profile *pgo.Profile) {
+ ir.WithFunc(fn, func() {
+ if base.Flag.LowerL != 0 {
+ if inlheur.Enabled() && !fn.Wrapper() {
+ inlheur.ScoreCalls(fn)
+ defer inlheur.ScoreCallsCleanup()
+ }
+ if base.Debug.DumpInlFuncProps != "" && !fn.Wrapper() {
+ inlheur.DumpFuncProps(fn, base.Debug.DumpInlFuncProps)
+ }
+ }
+
+ bigCaller := base.Flag.LowerL != 0 && inline.IsBigFunc(fn)
+ if bigCaller && base.Flag.LowerM > 1 {
+ fmt.Printf("%v: function %v considered 'big'; reducing max cost of inlinees\n", ir.Line(fn), fn)
+ }
+
+ // Walk fn's body and apply devirtualization and inlining.
+ var inlCalls []*ir.InlinedCallExpr
+ var edit func(ir.Node) ir.Node
+ edit = func(n ir.Node) ir.Node {
+ switch n := n.(type) {
+ case *ir.TailCallStmt:
+ n.Call.NoInline = true // can't inline yet
+ }
+
+ ir.EditChildren(n, edit)
+
+ if call, ok := n.(*ir.CallExpr); ok {
+ devirtualize.StaticCall(call)
+
+ if inlCall := inline.TryInlineCall(fn, call, bigCaller, profile); inlCall != nil {
+ inlCalls = append(inlCalls, inlCall)
+ n = inlCall
+ }
+ }
+
+ return n
+ }
+ ir.EditChildren(fn, edit)
+
+ // If we inlined any calls, we want to recursively visit their
+ // bodies for further devirtualization and inlining. However, we
+ // need to wait until *after* the original function body has been
+ // expanded, or else inlCallee can have false positives (e.g.,
+ // #54632).
+ for len(inlCalls) > 0 {
+ call := inlCalls[0]
+ inlCalls = inlCalls[1:]
+ ir.EditChildren(call, edit)
+ }
+ })
+}
diff --git a/src/cmd/compile/internal/ir/abi.go b/src/cmd/compile/internal/ir/abi.go
index 041448fb29..ebe0fbfb2a 100644
--- a/src/cmd/compile/internal/ir/abi.go
+++ b/src/cmd/compile/internal/ir/abi.go
@@ -50,9 +50,6 @@ func setupTextLSym(f *Func, flag int) {
if f.Pragma&Nosplit != 0 {
flag |= obj.NOSPLIT
}
- if f.ReflectMethod() {
- flag |= obj.REFLECTMETHOD
- }
if f.IsPackageInit() {
flag |= obj.PKGINIT
}
diff --git a/src/cmd/compile/internal/ir/check_reassign_no.go b/src/cmd/compile/internal/ir/check_reassign_no.go
new file mode 100644
index 0000000000..8290a7da7e
--- /dev/null
+++ b/src/cmd/compile/internal/ir/check_reassign_no.go
@@ -0,0 +1,9 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !checknewoldreassignment
+
+package ir
+
+const consistencyCheckEnabled = false
diff --git a/src/cmd/compile/internal/ir/check_reassign_yes.go b/src/cmd/compile/internal/ir/check_reassign_yes.go
new file mode 100644
index 0000000000..30876cca20
--- /dev/null
+++ b/src/cmd/compile/internal/ir/check_reassign_yes.go
@@ -0,0 +1,9 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build checknewoldreassignment
+
+package ir
+
+const consistencyCheckEnabled = true
diff --git a/src/cmd/compile/internal/ir/const.go b/src/cmd/compile/internal/ir/const.go
index 751620f26a..0efd1137fe 100644
--- a/src/cmd/compile/internal/ir/const.go
+++ b/src/cmd/compile/internal/ir/const.go
@@ -14,18 +14,79 @@ import (
"cmd/internal/src"
)
+// NewBool returns an OLITERAL representing b as an untyped boolean.
func NewBool(pos src.XPos, b bool) Node {
- return NewBasicLit(pos, constant.MakeBool(b))
+ return NewBasicLit(pos, types.UntypedBool, constant.MakeBool(b))
}
+// NewInt returns an OLITERAL representing v as an untyped integer.
func NewInt(pos src.XPos, v int64) Node {
- return NewBasicLit(pos, constant.MakeInt64(v))
+ return NewBasicLit(pos, types.UntypedInt, constant.MakeInt64(v))
}
+// NewString returns an OLITERAL representing s as an untyped string.
func NewString(pos src.XPos, s string) Node {
- return NewBasicLit(pos, constant.MakeString(s))
+ return NewBasicLit(pos, types.UntypedString, constant.MakeString(s))
}
+// NewUintptr returns an OLITERAL representing v as a uintptr.
+func NewUintptr(pos src.XPos, v int64) Node {
+ return NewBasicLit(pos, types.Types[types.TUINTPTR], constant.MakeInt64(v))
+}
+
+// NewZero returns a zero value of the given type.
+func NewZero(pos src.XPos, typ *types.Type) Node {
+ switch {
+ case typ.HasNil():
+ return NewNilExpr(pos, typ)
+ case typ.IsInteger():
+ return NewBasicLit(pos, typ, intZero)
+ case typ.IsFloat():
+ return NewBasicLit(pos, typ, floatZero)
+ case typ.IsComplex():
+ return NewBasicLit(pos, typ, complexZero)
+ case typ.IsBoolean():
+ return NewBasicLit(pos, typ, constant.MakeBool(false))
+ case typ.IsString():
+ return NewBasicLit(pos, typ, constant.MakeString(""))
+ case typ.IsArray() || typ.IsStruct():
+ // TODO(mdempsky): Return a typechecked expression instead.
+ return NewCompLitExpr(pos, OCOMPLIT, typ, nil)
+ }
+
+ base.FatalfAt(pos, "unexpected type: %v", typ)
+ panic("unreachable")
+}
+
+var (
+ intZero = constant.MakeInt64(0)
+ floatZero = constant.ToFloat(intZero)
+ complexZero = constant.ToComplex(intZero)
+)
+
+// NewOne returns an OLITERAL representing 1 with the given type.
+func NewOne(pos src.XPos, typ *types.Type) Node {
+ var val constant.Value
+ switch {
+ case typ.IsInteger():
+ val = intOne
+ case typ.IsFloat():
+ val = floatOne
+ case typ.IsComplex():
+ val = complexOne
+ default:
+ base.FatalfAt(pos, "%v cannot represent 1", typ)
+ }
+
+ return NewBasicLit(pos, typ, val)
+}
+
+var (
+ intOne = constant.MakeInt64(1)
+ floatOne = constant.ToFloat(intOne)
+ complexOne = constant.ToComplex(intOne)
+)
+
const (
// Maximum size in bits for big.Ints before signaling
// overflow and also mantissa precision for big.Floats.
diff --git a/src/cmd/compile/internal/ir/copy.go b/src/cmd/compile/internal/ir/copy.go
index be57a8fbc6..d30f7bc688 100644
--- a/src/cmd/compile/internal/ir/copy.go
+++ b/src/cmd/compile/internal/ir/copy.go
@@ -5,71 +5,12 @@
package ir
import (
- "cmd/compile/internal/base"
"cmd/internal/src"
)
-// A Node may implement the Orig and SetOrig method to
-// maintain a pointer to the "unrewritten" form of a Node.
-// If a Node does not implement OrigNode, it is its own Orig.
-//
-// Note that both SepCopy and Copy have definitions compatible
-// with a Node that does not implement OrigNode: such a Node
-// is its own Orig, and in that case, that's what both want to return
-// anyway (SepCopy unconditionally, and Copy only when the input
-// is its own Orig as well, but if the output does not implement
-// OrigNode, then neither does the input, making the condition true).
-type OrigNode interface {
- Node
- Orig() Node
- SetOrig(Node)
-}
-
-// origNode may be embedded into a Node to make it implement OrigNode.
-type origNode struct {
- orig Node `mknode:"-"`
-}
-
-func (n *origNode) Orig() Node { return n.orig }
-func (n *origNode) SetOrig(o Node) { n.orig = o }
-
-// Orig returns the “original” node for n.
-// If n implements OrigNode, Orig returns n.Orig().
-// Otherwise Orig returns n itself.
-func Orig(n Node) Node {
- if n, ok := n.(OrigNode); ok {
- o := n.Orig()
- if o == nil {
- Dump("Orig nil", n)
- base.Fatalf("Orig returned nil")
- }
- return o
- }
- return n
-}
-
-// SepCopy returns a separate shallow copy of n,
-// breaking any Orig link to any other nodes.
-func SepCopy(n Node) Node {
- n = n.copy()
- if n, ok := n.(OrigNode); ok {
- n.SetOrig(n)
- }
- return n
-}
-
// Copy returns a shallow copy of n.
-// If Orig(n) == n, then Orig(Copy(n)) == the copy.
-// Otherwise the Orig link is preserved as well.
-//
-// The specific semantics surrounding Orig are subtle but right for most uses.
-// See issues #26855 and #27765 for pitfalls.
func Copy(n Node) Node {
- c := n.copy()
- if n, ok := n.(OrigNode); ok && n.Orig() == n {
- c.(OrigNode).SetOrig(c)
- }
- return c
+ return n.copy()
}
// DeepCopy returns a “deep” copy of n, with its entire structure copied
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index 5355edc176..da5b437f99 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -54,7 +54,7 @@ func (n *miniExpr) Init() Nodes { return n.init }
func (n *miniExpr) PtrInit() *Nodes { return &n.init }
func (n *miniExpr) SetInit(x Nodes) { n.init = x }
-// An AddStringExpr is a string concatenation Expr[0] + Exprs[1] + ... + Expr[len(Expr)-1].
+// An AddStringExpr is a string concatenation List[0] + List[1] + ... + List[len(List)-1].
type AddStringExpr struct {
miniExpr
List Nodes
@@ -78,9 +78,39 @@ type AddrExpr struct {
}
func NewAddrExpr(pos src.XPos, x Node) *AddrExpr {
+ if x == nil || x.Typecheck() != 1 {
+ base.FatalfAt(pos, "missed typecheck: %L", x)
+ }
n := &AddrExpr{X: x}
- n.op = OADDR
n.pos = pos
+
+ switch x.Op() {
+ case OARRAYLIT, OMAPLIT, OSLICELIT, OSTRUCTLIT:
+ n.op = OPTRLIT
+
+ default:
+ n.op = OADDR
+ if r, ok := OuterValue(x).(*Name); ok && r.Op() == ONAME {
+ r.SetAddrtaken(true)
+
+ // If r is a closure variable, we need to mark its canonical
+ // variable as addrtaken too, so that closure conversion
+ // captures it by reference.
+ //
+ // Exception: if we've already marked the variable as
+ // capture-by-value, then that means this variable isn't
+ // logically modified, and we must be taking its address to pass
+ // to a runtime function that won't mutate it. In that case, we
+ // only need to make sure our own copy is addressable.
+ if r.IsClosureVar() && !r.Byval() {
+ r.Canonical().SetAddrtaken(true)
+ }
+ }
+ }
+
+ n.SetType(types.NewPtr(x.Type()))
+ n.SetTypecheck(1)
+
return n
}
@@ -102,19 +132,27 @@ type BasicLit struct {
val constant.Value
}
-func NewBasicLit(pos src.XPos, val constant.Value) Node {
+// NewBasicLit returns an OLITERAL representing val with the given type.
+func NewBasicLit(pos src.XPos, typ *types.Type, val constant.Value) Node {
+ AssertValidTypeForConst(typ, val)
+
n := &BasicLit{val: val}
n.op = OLITERAL
n.pos = pos
- if k := val.Kind(); k != constant.Unknown {
- n.SetType(idealType(k))
- }
+ n.SetType(typ)
+ n.SetTypecheck(1)
return n
}
func (n *BasicLit) Val() constant.Value { return n.val }
func (n *BasicLit) SetVal(val constant.Value) { n.val = val }
+// NewConstExpr returns an OLITERAL representing val, copying the
+// position and type from orig.
+func NewConstExpr(val constant.Value, orig Node) Node {
+ return NewBasicLit(orig.Pos(), orig.Type(), val)
+}
+
// A BinaryExpr is a binary expression X Op Y,
// or Op(X, Y) for builtin functions that do not become calls.
type BinaryExpr struct {
@@ -138,27 +176,27 @@ func (n *BinaryExpr) SetOp(op Op) {
case OADD, OADDSTR, OAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE,
OLSH, OLT, OMOD, OMUL, ONE, OOR, ORSH, OSUB, OXOR,
OCOPY, OCOMPLEX, OUNSAFEADD, OUNSAFESLICE, OUNSAFESTRING,
- OEFACE:
+ OMAKEFACE:
n.op = op
}
}
-// A CallExpr is a function call X(Args).
+// A CallExpr is a function call Fun(Args).
type CallExpr struct {
miniExpr
- origNode
- X Node
+ Fun Node
Args Nodes
+ DeferAt Node
RType Node `mknode:"-"` // see reflectdata/helpers.go
KeepAlive []*Name // vars to be kept alive until call returns
IsDDD bool
- NoInline bool
+ GoDefer bool // whether this call is part of a go or defer statement
+ NoInline bool // whether this call must not be inlined
}
func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr {
- n := &CallExpr{X: fun}
+ n := &CallExpr{Fun: fun}
n.pos = pos
- n.orig = n
n.SetOp(op)
n.Args = args
return n
@@ -174,7 +212,7 @@ func (n *CallExpr) SetOp(op Op) {
OCALL, OCALLFUNC, OCALLINTER, OCALLMETH,
ODELETE,
OGETG, OGETCALLERPC, OGETCALLERSP,
- OMAKE, OMAX, OMIN, OPRINT, OPRINTN,
+ OMAKE, OMAX, OMIN, OPRINT, OPRINTLN,
ORECOVER, ORECOVERFP:
n.op = op
}
@@ -192,7 +230,6 @@ type ClosureExpr struct {
// Before type-checking, the type is Ntype.
type CompLitExpr struct {
miniExpr
- origNode
List Nodes // initialized values
RType Node `mknode:"-"` // *runtime._type for OMAPLIT map types
Prealloc *Name
@@ -209,7 +246,6 @@ func NewCompLitExpr(pos src.XPos, op Op, typ *types.Type, list []Node) *CompLitE
if typ != nil {
n.SetType(typ)
}
- n.orig = n
return n
}
@@ -225,25 +261,6 @@ func (n *CompLitExpr) SetOp(op Op) {
}
}
-type ConstExpr struct {
- miniExpr
- origNode
- val constant.Value
-}
-
-func NewConstExpr(val constant.Value, orig Node) Node {
- n := &ConstExpr{val: val}
- n.op = OLITERAL
- n.pos = orig.Pos()
- n.orig = orig
- n.SetType(orig.Type())
- n.SetTypecheck(orig.Typecheck())
- return n
-}
-
-func (n *ConstExpr) Sym() *types.Sym { return n.orig.Sym() }
-func (n *ConstExpr) Val() constant.Value { return n.val }
-
// A ConvExpr is a conversion Type(X).
// It may end up being a value or a type.
type ConvExpr struct {
@@ -289,7 +306,7 @@ func (n *ConvExpr) SetOp(op Op) {
switch op {
default:
panic(n.no("SetOp " + op.String()))
- case OCONV, OCONVIFACE, OCONVIDATA, OCONVNOP, OBYTES2STR, OBYTES2STRTMP, ORUNES2STR, OSTR2BYTES, OSTR2BYTESTMP, OSTR2RUNES, ORUNESTR, OSLICE2ARR, OSLICE2ARRPTR:
+ case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, OBYTES2STRTMP, ORUNES2STR, OSTR2BYTES, OSTR2BYTESTMP, OSTR2RUNES, ORUNESTR, OSLICE2ARR, OSLICE2ARRPTR:
n.op = op
}
}
@@ -333,7 +350,7 @@ func NewKeyExpr(pos src.XPos, key, value Node) *KeyExpr {
return n
}
-// A StructKeyExpr is an Field: Value composite literal key.
+// A StructKeyExpr is a Field: Value composite literal key.
type StructKeyExpr struct {
miniExpr
Field *types.Field
@@ -432,15 +449,19 @@ func (n *MakeExpr) SetOp(op Op) {
}
// A NilExpr represents the predefined untyped constant nil.
-// (It may be copied and assigned a type, though.)
type NilExpr struct {
miniExpr
}
-func NewNilExpr(pos src.XPos) *NilExpr {
+func NewNilExpr(pos src.XPos, typ *types.Type) *NilExpr {
+ if typ == nil {
+ base.FatalfAt(pos, "missing type")
+ }
n := &NilExpr{}
n.pos = pos
n.op = ONIL
+ n.SetType(typ)
+ n.SetTypecheck(1)
return n
}
@@ -461,20 +482,6 @@ func NewParenExpr(pos src.XPos, x Node) *ParenExpr {
func (n *ParenExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *ParenExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
-// A RawOrigExpr represents an arbitrary Go expression as a string value.
-// When printed in diagnostics, the string value is written out exactly as-is.
-type RawOrigExpr struct {
- miniExpr
- Raw string
-}
-
-func NewRawOrigExpr(pos src.XPos, op Op, raw string) *RawOrigExpr {
- n := &RawOrigExpr{Raw: raw}
- n.pos = pos
- n.op = op
- return n
-}
-
// A ResultExpr represents a direct access to a result.
type ResultExpr struct {
miniExpr
@@ -498,9 +505,13 @@ type LinksymOffsetExpr struct {
}
func NewLinksymOffsetExpr(pos src.XPos, lsym *obj.LSym, offset int64, typ *types.Type) *LinksymOffsetExpr {
+ if typ == nil {
+ base.FatalfAt(pos, "nil type")
+ }
n := &LinksymOffsetExpr{Linksym: lsym, Offset_: offset}
n.typ = typ
n.op = OLINKSYMOFFSET
+ n.SetTypecheck(1)
return n
}
@@ -557,9 +568,8 @@ func (n *SelectorExpr) FuncName() *Name {
if n.Op() != OMETHEXPR {
panic(n.no("FuncName"))
}
- fn := NewNameAt(n.Selection.Pos, MethodSym(n.X.Type(), n.Sel))
+ fn := NewNameAt(n.Selection.Pos, MethodSym(n.X.Type(), n.Sel), n.Type())
fn.Class = PFUNC
- fn.SetType(n.Type())
if n.Selection.Nname != nil {
// TODO(austin): Nname is nil for interface method
// expressions (I.M), so we can't attach a Func to
@@ -664,6 +674,9 @@ type TypeAssertExpr struct {
// Runtime type information provided by walkDotType for
// assertions from non-empty interface to concrete type.
ITab Node `mknode:"-"` // *runtime.itab for Type implementing X's type
+
+ // An internal/abi.TypeAssert descriptor to pass to the runtime.
+ Descriptor *obj.LSym
}
func NewTypeAssertExpr(pos src.XPos, x Node, typ *types.Type) *TypeAssertExpr {
@@ -746,33 +759,13 @@ func (n *UnaryExpr) SetOp(op Op) {
default:
panic(n.no("SetOp " + op.String()))
case OBITNOT, ONEG, ONOT, OPLUS, ORECV,
- OALIGNOF, OCAP, OCLEAR, OCLOSE, OIMAG, OLEN, ONEW,
- OOFFSETOF, OPANIC, OREAL, OSIZEOF,
+ OCAP, OCLEAR, OCLOSE, OIMAG, OLEN, ONEW, OPANIC, OREAL,
OCHECKNIL, OCFUNC, OIDATA, OITAB, OSPTR,
OUNSAFESTRINGDATA, OUNSAFESLICEDATA:
n.op = op
}
}
-// Probably temporary: using Implicit() flag to mark generic function nodes that
-// are called to make getGfInfo analysis easier in one pre-order pass.
-func (n *InstExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
-func (n *InstExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
-
-// An InstExpr is a generic function or type instantiation.
-type InstExpr struct {
- miniExpr
- X Node
- Targs []Ntype
-}
-
-func NewInstExpr(pos src.XPos, op Op, x Node, targs []Ntype) *InstExpr {
- n := &InstExpr{X: x, Targs: targs}
- n.pos = pos
- n.op = op
- return n
-}
-
func IsZero(n Node) bool {
switch n.Op() {
case ONIL:
@@ -847,6 +840,20 @@ func IsAddressable(n Node) bool {
return false
}
+// StaticValue analyzes n to find the earliest expression that always
+// evaluates to the same value as n, which might be from an enclosing
+// function.
+//
+// For example, given:
+//
+// var x int = g()
+// func() {
+// y := x
+// *p = int(y)
+// }
+//
+// calling StaticValue on the "int(y)" expression returns the outer
+// "g()" expression.
func StaticValue(n Node) Node {
for {
if n.Op() == OCONVNOP {
@@ -867,14 +874,11 @@ func StaticValue(n Node) Node {
}
}
-// staticValue1 implements a simple SSA-like optimization. If n is a local variable
-// that is initialized and never reassigned, staticValue1 returns the initializer
-// expression. Otherwise, it returns nil.
func staticValue1(nn Node) Node {
if nn.Op() != ONAME {
return nil
}
- n := nn.(*Name)
+ n := nn.(*Name).Canonical()
if n.Class != PAUTO {
return nil
}
@@ -906,20 +910,22 @@ FindRHS:
base.Fatalf("RHS is nil: %v", defn)
}
- if reassigned(n) {
+ if Reassigned(n) {
return nil
}
return rhs
}
-// reassigned takes an ONAME node, walks the function in which it is defined, and returns a boolean
-// indicating whether the name has any assignments other than its declaration.
-// The second return value is the first such assignment encountered in the walk, if any. It is mostly
-// useful for -m output documenting the reason for inhibited optimizations.
+// Reassigned takes an ONAME node, walks the function in which it is
+// defined, and returns a boolean indicating whether the name has any
+// assignments other than its declaration.
// NB: global variables are always considered to be re-assigned.
-// TODO: handle initial declaration not including an assignment and followed by a single assignment?
-func reassigned(name *Name) bool {
+// TODO: handle initial declaration not including an assignment and
+// followed by a single assignment?
+// NOTE: any changes made here should also be made in the corresponding
+// code in the ReassignOracle.Init method.
+func Reassigned(name *Name) bool {
if name.Op() != ONAME {
base.Fatalf("reassigned %v", name)
}
@@ -928,13 +934,20 @@ func reassigned(name *Name) bool {
return true
}
+ if name.Addrtaken() {
+ return true // conservatively assume it's reassigned indirectly
+ }
+
// TODO(mdempsky): This is inefficient and becoming increasingly
// unwieldy. Figure out a way to generalize escape analysis's
// reassignment detection for use by inlining and devirtualization.
// isName reports whether n is a reference to name.
isName := func(x Node) bool {
- n, ok := x.(*Name)
+ if x == nil {
+ return false
+ }
+ n, ok := OuterValue(x).(*Name)
return ok && n.Canonical() == name
}
@@ -953,10 +966,15 @@ func reassigned(name *Name) bool {
return true
}
}
+ case OASOP:
+ n := n.(*AssignOpStmt)
+ if isName(n.X) {
+ return true
+ }
case OADDR:
n := n.(*AddrExpr)
- if isName(OuterValue(n.X)) {
- return true
+ if isName(n.X) {
+ base.FatalfAt(n.Pos(), "%v not marked addrtaken", name)
}
case ORANGE:
n := n.(*RangeStmt)
@@ -974,6 +992,23 @@ func reassigned(name *Name) bool {
return Any(name.Curfn, do)
}
+// StaticCalleeName returns the ONAME/PFUNC for n, if known.
+func StaticCalleeName(n Node) *Name {
+ switch n.Op() {
+ case OMETHEXPR:
+ n := n.(*SelectorExpr)
+ return MethodExprName(n)
+ case ONAME:
+ n := n.(*Name)
+ if n.Class == PFUNC {
+ return n
+ }
+ case OCLOSURE:
+ return n.(*ClosureExpr).Func.Nname
+ }
+ return nil
+}
+
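Editorial aside: taken together, StaticValue, Reassigned, and StaticCalleeName let the inliner and devirtualizer see through single-assignment locals to the function actually being called. Below is a minimal source-level sketch of the pattern they are designed to recognize; it is illustrative only (the compiler operates on the IR form of this code, not on the text), and the names are hypothetical.

package main

import (
	"fmt"
	"strings"
)

func main() {
	f := strings.ToUpper // assigned exactly once, never address-taken
	// StaticValue on the IR node for "f" in the call below would walk back
	// to the ONAME for strings.ToUpper (Reassigned reports false for f),
	// and StaticCalleeName would then return its PFUNC Name, letting the
	// indirect call be treated as a direct one.
	fmt.Println(f("go"))
}

If f were reassigned anywhere in the function, or had its address taken, Reassigned would report true and the call would stay indirect.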
// IsIntrinsicCall reports whether the compiler back end will treat the call as an intrinsic operation.
var IsIntrinsicCall = func(*CallExpr) bool { return false }
@@ -1089,8 +1124,8 @@ func IsReflectHeaderDataField(l Node) bool {
func ParamNames(ft *types.Type) []Node {
args := make([]Node, ft.NumParams())
- for i, f := range ft.Params().FieldSlice() {
- args[i] = AsNode(f.Nname)
+ for i, f := range ft.Params() {
+ args[i] = f.Nname.(*Name)
}
return args
}
@@ -1109,7 +1144,7 @@ func MethodSym(recv *types.Type, msym *types.Sym) *types.Sym {
return sym
}
-// MethodSymSuffix is like methodsym, but allows attaching a
+// MethodSymSuffix is like MethodSym, but allows attaching a
// distinguisher suffix. To avoid collisions, the suffix must not
// start with a letter, number, or period.
func MethodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sym {
@@ -1157,6 +1192,51 @@ func MethodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sy
return rpkg.LookupBytes(b.Bytes())
}
+// LookupMethodSelector returns the types.Sym of the selector for a method
+// named in local symbol name, as well as the types.Sym of the receiver.
+//
+// TODO(prattmic): this does not attempt to handle method suffixes (wrappers).
+func LookupMethodSelector(pkg *types.Pkg, name string) (typ, meth *types.Sym, err error) {
+ typeName, methName := splitType(name)
+ if typeName == "" {
+ return nil, nil, fmt.Errorf("%s doesn't contain type split", name)
+ }
+
+ if len(typeName) > 3 && typeName[:2] == "(*" && typeName[len(typeName)-1] == ')' {
+ // Symbol name is for a pointer receiver method. We just want
+ // the base type name.
+ typeName = typeName[2 : len(typeName)-1]
+ }
+
+ typ = pkg.Lookup(typeName)
+ meth = pkg.Selector(methName)
+ return typ, meth, nil
+}
+
+// splitType splits a local symbol name into type and method (fn). If this is a
+// free function, typ == "".
+//
+// N.B. closures and methods can be ambiguous (e.g., bar.func1). These cases
+// are returned as methods.
+func splitType(name string) (typ, fn string) {
+ // Types are split on the first dot, ignoring everything inside
+ // brackets (instantiation of type parameter, usually including
+ // "go.shape").
+ bracket := 0
+ for i, r := range name {
+ if r == '.' && bracket == 0 {
+ return name[:i], name[i+1:]
+ }
+ if r == '[' {
+ bracket++
+ }
+ if r == ']' {
+ bracket--
+ }
+ }
+ return "", name
+}
+
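Editorial aside: a standalone sketch of the splitting rule above, with hypothetical inputs, may help. It mirrors the bracket-aware logic of splitType (split on the first '.' outside square brackets) and is not the compiler API itself; LookupMethodSelector would additionally trim "(*T)" down to "T" for pointer-receiver methods.

package main

import "fmt"

// splitTypeSketch mirrors the rule described above: split on the first
// '.' that is not inside square brackets; no such dot means a free function.
func splitTypeSketch(name string) (typ, fn string) {
	bracket := 0
	for i, r := range name {
		switch {
		case r == '.' && bracket == 0:
			return name[:i], name[i+1:]
		case r == '[':
			bracket++
		case r == ']':
			bracket--
		}
	}
	return "", name
}

func main() {
	for _, s := range []string{
		"(*Buffer).Write",       // pointer-receiver method (hypothetical)
		"Bar[go.shape.int].Baz", // instantiated generic type (hypothetical)
		"bar.func1",             // ambiguous: method vs. closure
		"memeqbody",             // free function, no type part
	} {
		typ, fn := splitTypeSketch(s)
		fmt.Printf("%-24s -> typ=%q fn=%q\n", s, typ, fn)
	}
}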
// MethodExprName returns the ONAME representing the method
// referenced by expression n, which must be a method selector,
// method expression, or method value.
diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go
index 0c553a9963..31c610348b 100644
--- a/src/cmd/compile/internal/ir/fmt.go
+++ b/src/cmd/compile/internal/ir/fmt.go
@@ -27,7 +27,6 @@ var OpNames = []string{
OADDR: "&",
OADD: "+",
OADDSTR: "+",
- OALIGNOF: "unsafe.Alignof",
OANDAND: "&&",
OANDNOT: "&^",
OAND: "&",
@@ -70,12 +69,11 @@ var OpNames = []string{
ONEW: "new",
ONE: "!=",
ONOT: "!",
- OOFFSETOF: "unsafe.Offsetof",
OOROR: "||",
OOR: "|",
OPANIC: "panic",
OPLUS: "+",
- OPRINTN: "println",
+ OPRINTLN: "println",
OPRINT: "print",
ORANGE: "range",
OREAL: "real",
@@ -85,7 +83,6 @@ var OpNames = []string{
ORSH: ">>",
OSELECT: "select",
OSEND: "<-",
- OSIZEOF: "unsafe.Sizeof",
OSUB: "-",
OSWITCH: "switch",
OUNSAFEADD: "unsafe.Add",
@@ -173,7 +170,6 @@ func fmtNode(n Node, s fmt.State, verb rune) {
}
var OpPrec = []int{
- OALIGNOF: 8,
OAPPEND: 8,
OBYTES2STR: 8,
OARRAYLIT: 8,
@@ -188,7 +184,6 @@ var OpPrec = []int{
OCLOSE: 8,
OCOMPLIT: 8,
OCONVIFACE: 8,
- OCONVIDATA: 8,
OCONVNOP: 8,
OCONV: 8,
OCOPY: 8,
@@ -206,13 +201,11 @@ var OpPrec = []int{
ONEW: 8,
ONIL: 8,
ONONAME: 8,
- OOFFSETOF: 8,
OPANIC: 8,
OPAREN: 8,
- OPRINTN: 8,
+ OPRINTLN: 8,
OPRINT: 8,
ORUNESTR: 8,
- OSIZEOF: 8,
OSLICE2ARR: 8,
OSLICE2ARRPTR: 8,
OSTR2BYTES: 8,
@@ -526,12 +519,6 @@ func exprFmt(n Node, s fmt.State, prec int) {
return
}
- // We always want the original, if any.
- if o := Orig(n); o != n {
- n = o
- continue
- }
-
// Skip implicit operations introduced during typechecking.
switch nn := n; nn.Op() {
case OADDR:
@@ -546,7 +533,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
n = nn.X
continue
}
- case OCONV, OCONVNOP, OCONVIFACE, OCONVIDATA:
+ case OCONV, OCONVNOP, OCONVIFACE:
nn := nn.(*ConvExpr)
if nn.Implicit() {
n = nn.X
@@ -567,11 +554,6 @@ func exprFmt(n Node, s fmt.State, prec int) {
return
}
- if n, ok := n.(*RawOrigExpr); ok {
- fmt.Fprint(s, n.Raw)
- return
- }
-
switch n.Op() {
case OPAREN:
n := n.(*ParenExpr)
@@ -580,46 +562,29 @@ func exprFmt(n Node, s fmt.State, prec int) {
case ONIL:
fmt.Fprint(s, "nil")
- case OLITERAL: // this is a bit of a mess
- if !exportFormat && n.Sym() != nil {
+ case OLITERAL:
+ if n.Sym() != nil {
fmt.Fprint(s, n.Sym())
return
}
- needUnparen := false
- if n.Type() != nil && !n.Type().IsUntyped() {
- // Need parens when type begins with what might
- // be misinterpreted as a unary operator: * or <-.
- if n.Type().IsPtr() || (n.Type().IsChan() && n.Type().ChanDir() == types.Crecv) {
- fmt.Fprintf(s, "(%v)(", n.Type())
- } else {
- fmt.Fprintf(s, "%v(", n.Type())
- }
- needUnparen = true
- }
-
- if n.Type() == types.UntypedRune {
- switch x, ok := constant.Uint64Val(n.Val()); {
- case !ok:
- fallthrough
- default:
- fmt.Fprintf(s, "('\\x00' + %v)", n.Val())
+ typ := n.Type()
+ val := n.Val()
- case x < utf8.RuneSelf:
+ // Special case for rune constants.
+ if typ == types.RuneType || typ == types.UntypedRune {
+ if x, ok := constant.Uint64Val(val); ok && x <= utf8.MaxRune {
fmt.Fprintf(s, "%q", x)
-
- case x < 1<<16:
- fmt.Fprintf(s, "'\\u%04x'", x)
-
- case x <= utf8.MaxRune:
- fmt.Fprintf(s, "'\\U%08x'", x)
+ return
}
- } else {
- fmt.Fprint(s, types.FmtConst(n.Val(), s.Flag('#')))
}
- if needUnparen {
- fmt.Fprintf(s, ")")
+ // Only include typ if it's neither the default nor untyped type
+ // for the constant value.
+ if k := val.Kind(); typ == types.Types[types.DefaultKinds[k]] || typ == types.UntypedTypes[k] {
+ fmt.Fprint(s, val)
+ } else {
+ fmt.Fprintf(s, "%v(%v)", typ, val)
}
case ODCLFUNC:
@@ -661,33 +626,17 @@ func exprFmt(n Node, s fmt.State, prec int) {
}
fmt.Fprintf(s, "%v { %v }", n.Type(), n.Func.Body)
- case OCOMPLIT:
- n := n.(*CompLitExpr)
- if !exportFormat {
- if n.Implicit() {
- fmt.Fprintf(s, "... argument")
- return
- }
- if typ := n.Type(); typ != nil {
- fmt.Fprintf(s, "%v{%s}", typ, ellipsisIf(len(n.List) != 0))
- return
- }
- fmt.Fprint(s, "composite literal")
- return
- }
- fmt.Fprintf(s, "(%v{ %.v })", n.Type(), n.List)
-
case OPTRLIT:
n := n.(*AddrExpr)
fmt.Fprintf(s, "&%v", n.X)
- case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
+ case OCOMPLIT, OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
n := n.(*CompLitExpr)
- if !exportFormat {
- fmt.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(len(n.List) != 0))
+ if n.Implicit() {
+ fmt.Fprintf(s, "... argument")
return
}
- fmt.Fprintf(s, "(%v{ %.v })", n.Type(), n.List)
+ fmt.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(len(n.List) != 0))
case OKEY:
n := n.(*KeyExpr)
@@ -758,7 +707,6 @@ func exprFmt(n Node, s fmt.State, prec int) {
case OCONV,
OCONVIFACE,
- OCONVIDATA,
OCONVNOP,
OBYTES2STR,
ORUNES2STR,
@@ -782,10 +730,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
OCLOSE,
OLEN,
ONEW,
- OPANIC,
- OALIGNOF,
- OOFFSETOF,
- OSIZEOF:
+ OPANIC:
n := n.(*UnaryExpr)
fmt.Fprintf(s, "%v(%v)", n.Op(), n.X)
@@ -796,7 +741,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
OMIN,
ORECOVER,
OPRINT,
- OPRINTN:
+ OPRINTLN:
n := n.(*CallExpr)
if n.IsDDD {
fmt.Fprintf(s, "%v(%.v...)", n.Op(), n.Args)
@@ -806,7 +751,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG:
n := n.(*CallExpr)
- exprFmt(n.X, s, nprec)
+ exprFmt(n.Fun, s, nprec)
if n.IsDDD {
fmt.Fprintf(s, "(%.v...)", n.Args)
return
@@ -1184,11 +1129,6 @@ func dumpNode(w io.Writer, n Node, depth int) {
dumpNode(w, cv, depth+1)
}
}
- if len(fn.Enter) > 0 {
- indent(w, depth)
- fmt.Fprintf(w, "%+v-Enter", n.Op())
- dumpNodes(w, fn.Enter, depth+1)
- }
if len(fn.Body) > 0 {
indent(w, depth)
fmt.Fprintf(w, "%+v-body", n.Op())
diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go
index 5c41893fc6..303c5e4fd0 100644
--- a/src/cmd/compile/internal/ir/func.go
+++ b/src/cmd/compile/internal/ir/func.go
@@ -12,6 +12,7 @@ import (
"cmd/internal/src"
"fmt"
"strings"
+ "unicode/utf8"
)
// A Func corresponds to a single function in a Go program
@@ -56,18 +57,11 @@ type Func struct {
Nname *Name // ONAME node
OClosure *ClosureExpr // OCLOSURE node
- // Extra entry code for the function. For example, allocate and initialize
- // memory for escaping parameters.
- Enter Nodes
- Exit Nodes
-
// ONAME nodes for all params/locals for this func/closure, does NOT
// include closurevars until transforming closures during walk.
// Names must be listed PPARAMs, PPARAMOUTs, then PAUTOs,
// with PPARAMs and PPARAMOUTs in order corresponding to the function signature.
- // However, as anonymous or blank PPARAMs are not actually declared,
- // they are omitted from Dcl.
- // Anonymous and blank PPARAMOUTs are declared as ~rNN and ~bNN Names, respectively.
+ // Anonymous and blank params are declared as ~pNN (for PPARAMs) and ~rNN (for PPARAMOUTs).
Dcl []*Name
// ClosureVars lists the free variables that are used within a
@@ -96,10 +90,15 @@ type Func struct {
Inl *Inline
- // Closgen tracks how many closures have been generated within
- // this function. Used by closurename for creating unique
+ // funcLitGen and goDeferGen track how many closures have been
+ // created in this function for function literals and go/defer
+ // wrappers, respectively. Used by closureName for creating unique
// function names.
- Closgen int32
+ //
+ // Tracking goDeferGen separately avoids wrappers throwing off
+ // function literal numbering (e.g., runtime/trace_test.TestTraceSymbolize.func11).
+ funcLitGen int32
+ goDeferGen int32
Label int32 // largest auto-generated label in this function
@@ -127,7 +126,7 @@ type Func struct {
NumDefers int32 // number of defer calls in the function
NumReturns int32 // number of explicit returns in the function
- // nwbrCalls records the LSyms of functions called by this
+ // NWBRCalls records the LSyms of functions called by this
// function for go:nowritebarrierrec analysis. Only filled in
// if nowritebarrierrecCheck != nil.
NWBRCalls *[]SymAndPos
@@ -147,14 +146,29 @@ type WasmImport struct {
Name string
}
-func NewFunc(pos src.XPos) *Func {
- f := new(Func)
- f.pos = pos
- f.op = ODCLFUNC
+// NewFunc returns a new Func with the given name and type.
+//
+// fpos is the position of the "func" token, and npos is the position
+// of the name identifier.
+//
+// TODO(mdempsky): I suspect there's no need for separate fpos and
+// npos.
+func NewFunc(fpos, npos src.XPos, sym *types.Sym, typ *types.Type) *Func {
+ name := NewNameAt(npos, sym, typ)
+ name.Class = PFUNC
+ sym.SetFunc(true)
+
+ fn := &Func{Nname: name}
+ fn.pos = fpos
+ fn.op = ODCLFUNC
// Most functions are ABIInternal. The importer or symabis
// pass may override this.
- f.ABI = obj.ABIInternal
- return f
+ fn.ABI = obj.ABIInternal
+ fn.SetTypecheck(1)
+
+ name.Func = fn
+
+ return fn
}
func (f *Func) isStmt() {}
@@ -173,12 +187,16 @@ func (f *Func) LinksymABI(abi obj.ABI) *obj.LSym { return f.Nname.LinksymABI(abi
type Inline struct {
Cost int32 // heuristic cost of inlining this function
- // Copies of Func.Dcl and Func.Body for use during inlining. Copies are
- // needed because the function's dcl/body may be changed by later compiler
- // transformations. These fields are also populated when a function from
- // another package is imported.
- Dcl []*Name
- Body []Node
+ // Copy of Func.Dcl for use during inlining. This copy is needed
+	// because the function's Dcl may change due to later compiler
+ // transformations. This field is also populated when a function
+ // from another package is imported and inlined.
+ Dcl []*Name
+ HaveDcl bool // whether we've loaded Dcl
+
+ // Function properties, encoded as a string (these are used for
+ // making inlining decisions). See cmd/compile/internal/inline/inlheur.
+ Properties string
// CanDelayResults reports whether it's safe for the inliner to delay
// initializing the result parameters until immediately before the
@@ -200,11 +218,10 @@ type Mark struct {
type ScopeID int32
const (
- funcDupok = 1 << iota // duplicate definitions ok
- funcWrapper // hide frame from users (elide in tracebacks, don't count as a frame for recover())
- funcABIWrapper // is an ABI wrapper (also set flagWrapper)
- funcNeedctxt // function uses context register (has closure variables)
- funcReflectMethod // function calls reflect.Type.Method or MethodByName
+ funcDupok = 1 << iota // duplicate definitions ok
+ funcWrapper // hide frame from users (elide in tracebacks, don't count as a frame for recover())
+ funcABIWrapper // is an ABI wrapper (also set flagWrapper)
+ funcNeedctxt // function uses context register (has closure variables)
// true if closure inside a function; false if a simple function or a
// closure in a global variable initialization
funcIsHiddenClosure
@@ -212,10 +229,9 @@ const (
funcHasDefer // contains a defer statement
funcNilCheckDisabled // disable nil checks when compiling this function
funcInlinabilityChecked // inliner has already determined whether the function is inlinable
- funcExportInline // include inline body in export data
- funcInstrumentBody // add race/msan/asan instrumentation during SSA construction
+ funcNeverReturns // function never returns (in most cases calls panic(), os.Exit(), or equivalent)
funcOpenCodedDeferDisallowed // can't do open-coded defers
- funcClosureCalled // closure is only immediately called; used by escape analysis
+ funcClosureResultsLost // closure is called indirectly and we lost track of its results; used by escape analysis
funcPackageInit // compiler emitted .init func for package
)
@@ -228,32 +244,28 @@ func (f *Func) Dupok() bool { return f.flags&funcDupok != 0 }
func (f *Func) Wrapper() bool { return f.flags&funcWrapper != 0 }
func (f *Func) ABIWrapper() bool { return f.flags&funcABIWrapper != 0 }
func (f *Func) Needctxt() bool { return f.flags&funcNeedctxt != 0 }
-func (f *Func) ReflectMethod() bool { return f.flags&funcReflectMethod != 0 }
func (f *Func) IsHiddenClosure() bool { return f.flags&funcIsHiddenClosure != 0 }
func (f *Func) IsDeadcodeClosure() bool { return f.flags&funcIsDeadcodeClosure != 0 }
func (f *Func) HasDefer() bool { return f.flags&funcHasDefer != 0 }
func (f *Func) NilCheckDisabled() bool { return f.flags&funcNilCheckDisabled != 0 }
func (f *Func) InlinabilityChecked() bool { return f.flags&funcInlinabilityChecked != 0 }
-func (f *Func) ExportInline() bool { return f.flags&funcExportInline != 0 }
-func (f *Func) InstrumentBody() bool { return f.flags&funcInstrumentBody != 0 }
+func (f *Func) NeverReturns() bool { return f.flags&funcNeverReturns != 0 }
func (f *Func) OpenCodedDeferDisallowed() bool { return f.flags&funcOpenCodedDeferDisallowed != 0 }
-func (f *Func) ClosureCalled() bool { return f.flags&funcClosureCalled != 0 }
+func (f *Func) ClosureResultsLost() bool { return f.flags&funcClosureResultsLost != 0 }
func (f *Func) IsPackageInit() bool { return f.flags&funcPackageInit != 0 }
func (f *Func) SetDupok(b bool) { f.flags.set(funcDupok, b) }
func (f *Func) SetWrapper(b bool) { f.flags.set(funcWrapper, b) }
func (f *Func) SetABIWrapper(b bool) { f.flags.set(funcABIWrapper, b) }
func (f *Func) SetNeedctxt(b bool) { f.flags.set(funcNeedctxt, b) }
-func (f *Func) SetReflectMethod(b bool) { f.flags.set(funcReflectMethod, b) }
func (f *Func) SetIsHiddenClosure(b bool) { f.flags.set(funcIsHiddenClosure, b) }
func (f *Func) SetIsDeadcodeClosure(b bool) { f.flags.set(funcIsDeadcodeClosure, b) }
func (f *Func) SetHasDefer(b bool) { f.flags.set(funcHasDefer, b) }
func (f *Func) SetNilCheckDisabled(b bool) { f.flags.set(funcNilCheckDisabled, b) }
func (f *Func) SetInlinabilityChecked(b bool) { f.flags.set(funcInlinabilityChecked, b) }
-func (f *Func) SetExportInline(b bool) { f.flags.set(funcExportInline, b) }
-func (f *Func) SetInstrumentBody(b bool) { f.flags.set(funcInstrumentBody, b) }
+func (f *Func) SetNeverReturns(b bool) { f.flags.set(funcNeverReturns, b) }
func (f *Func) SetOpenCodedDeferDisallowed(b bool) { f.flags.set(funcOpenCodedDeferDisallowed, b) }
-func (f *Func) SetClosureCalled(b bool) { f.flags.set(funcClosureCalled, b) }
+func (f *Func) SetClosureResultsLost(b bool) { f.flags.set(funcClosureResultsLost, b) }
func (f *Func) SetIsPackageInit(b bool) { f.flags.set(funcPackageInit, b) }
func (f *Func) SetWBPos(pos src.XPos) {
@@ -301,12 +313,65 @@ func LinkFuncName(f *Func) string {
return objabi.PathToPrefix(pkg.Path) + "." + s.Name
}
-// IsEqOrHashFunc reports whether f is type eq/hash function.
-func IsEqOrHashFunc(f *Func) bool {
- if f == nil || f.Nname == nil {
- return false
+// ParseLinkFuncName parses a symbol name (as returned from LinkFuncName) back
+// to the package path and local symbol name.
+func ParseLinkFuncName(name string) (pkg, sym string, err error) {
+ pkg, sym = splitPkg(name)
+ if pkg == "" {
+ return "", "", fmt.Errorf("no package path in name")
}
- return types.IsTypePkg(f.Sym().Pkg)
+
+ pkg, err = objabi.PrefixToPath(pkg) // unescape
+ if err != nil {
+ return "", "", fmt.Errorf("malformed package path: %v", err)
+ }
+
+ return pkg, sym, nil
+}
+
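Editorial aside: as a hedged worked example (names chosen for illustration, not taken from real binaries), ParseLinkFuncName decomposes linker names roughly as follows; splitPkg finds the package/symbol boundary on the still-escaped name, and objabi.PrefixToPath then unescapes %xx sequences in the package path.

    "net/http.(*Server).Serve"     -> pkg "net/http",         sym "(*Server).Serve"
    "gopkg.in/yaml%2ev3.Unmarshal" -> pkg "gopkg.in/yaml.v3",  sym "Unmarshal"
    "main.main"                    -> pkg "main",              sym "main"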
+// Borrowed from x/mod.
+func modPathOK(r rune) bool {
+ if r < utf8.RuneSelf {
+ return r == '-' || r == '.' || r == '_' || r == '~' ||
+ '0' <= r && r <= '9' ||
+ 'A' <= r && r <= 'Z' ||
+ 'a' <= r && r <= 'z'
+ }
+ return false
+}
+
+func escapedImportPathOK(r rune) bool {
+ return modPathOK(r) || r == '+' || r == '/' || r == '%'
+}
+
+// splitPkg splits the full linker symbol name into package and local symbol
+// name.
+func splitPkg(name string) (pkgpath, sym string) {
+	// The package-symbol split is at the first dot after the last / that
+	// comes before any characters illegal in a package path.
+
+ lastSlashIdx := 0
+ for i, r := range name {
+ // Catches cases like:
+ // * example.foo[sync/atomic.Uint64].
+ // * example%2ecom.foo[sync/atomic.Uint64].
+ //
+ // Note that name is still escaped; unescape occurs after splitPkg.
+ if !escapedImportPathOK(r) {
+ break
+ }
+ if r == '/' {
+ lastSlashIdx = i
+ }
+ }
+ for i := lastSlashIdx; i < len(name); i++ {
+ r := name[i]
+ if r == '.' {
+ return name[:i], name[i+1:]
+ }
+ }
+
+ return "", name
}
var CurFunc *Func
@@ -326,16 +391,6 @@ func FuncSymName(s *types.Sym) string {
return s.Name + "·f"
}
-// MarkFunc marks a node as a function.
-func MarkFunc(n *Name) {
- if n.Op() != ONAME || n.Class != Pxxx {
- base.FatalfAt(n.Pos(), "expected ONAME/Pxxx node, got %v (%v/%v)", n, n.Op(), n.Class)
- }
-
- n.Class = PFUNC
- n.Sym().SetFunc(true)
-}
-
// ClosureDebugRuntimeCheck applies boilerplate checks for debug flags
// and compiling runtime.
func ClosureDebugRuntimeCheck(clo *ClosureExpr) {
@@ -361,25 +416,35 @@ func IsTrivialClosure(clo *ClosureExpr) bool {
var globClosgen int32
// closureName generates a new unique name for a closure within outerfn at pos.
-func closureName(outerfn *Func, pos src.XPos) *types.Sym {
+func closureName(outerfn *Func, pos src.XPos, why Op) *types.Sym {
pkg := types.LocalPkg
outer := "glob."
- prefix := "func"
- gen := &globClosgen
-
- if outerfn != nil {
- if outerfn.OClosure != nil {
- prefix = ""
+ var prefix string
+ switch why {
+ default:
+ base.FatalfAt(pos, "closureName: bad Op: %v", why)
+ case OCLOSURE:
+ if outerfn == nil || outerfn.OClosure == nil {
+ prefix = "func"
}
+ case OGO:
+ prefix = "gowrap"
+ case ODEFER:
+ prefix = "deferwrap"
+ }
+ gen := &globClosgen
+ // There may be multiple functions named "_". In those
+ // cases, we can't use their individual Closgens as it
+ // would lead to name clashes.
+ if outerfn != nil && !IsBlank(outerfn.Nname) {
pkg = outerfn.Sym().Pkg
outer = FuncName(outerfn)
- // There may be multiple functions named "_". In those
- // cases, we can't use their individual Closgens as it
- // would lead to name clashes.
- if !IsBlank(outerfn.Nname) {
- gen = &outerfn.Closgen
+ if why == OCLOSURE {
+ gen = &outerfn.funcLitGen
+ } else {
+ gen = &outerfn.goDeferGen
}
}
@@ -398,80 +463,136 @@ func closureName(outerfn *Func, pos src.XPos) *types.Sym {
return pkg.Lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
}
-// NewClosureFunc creates a new Func to represent a function literal.
-// If hidden is true, then the closure is marked hidden (i.e., as a
-// function literal contained within another function, rather than a
-// package-scope variable initialization expression).
-func NewClosureFunc(pos src.XPos, hidden bool) *Func {
- fn := NewFunc(pos)
- fn.SetIsHiddenClosure(hidden)
+// NewClosureFunc creates a new Func to represent a function literal
+// with the given type.
+//
+// fpos is the position used for the underlying ODCLFUNC and ONAME,
+// whereas cpos is the position used for the OCLOSURE. They're
+// separate because in the presence of inlining, the OCLOSURE node
+// should have an inline-adjusted position, whereas the ODCLFUNC and
+// ONAME must not.
+//
+// outerfn is the enclosing function, if any. The returned function is
+// appended to pkg.Funcs.
+//
+// why is the reason we're generating this Func. It can be OCLOSURE
+// (for a normal function literal) or OGO or ODEFER (for wrapping a
+// call expression that has parameters or results).
+func NewClosureFunc(fpos, cpos src.XPos, why Op, typ *types.Type, outerfn *Func, pkg *Package) *Func {
+ fn := NewFunc(fpos, fpos, closureName(outerfn, cpos, why), typ)
+ fn.SetIsHiddenClosure(outerfn != nil)
+
+ clo := &ClosureExpr{Func: fn}
+ clo.op = OCLOSURE
+ clo.pos = cpos
+ clo.SetType(typ)
+ clo.SetTypecheck(1)
+ fn.OClosure = clo
- fn.Nname = NewNameAt(pos, BlankNode.Sym())
- fn.Nname.Func = fn
fn.Nname.Defn = fn
-
- fn.OClosure = &ClosureExpr{Func: fn}
- fn.OClosure.op = OCLOSURE
- fn.OClosure.pos = pos
+ pkg.Funcs = append(pkg.Funcs, fn)
return fn
}
-// NameClosure generates a unique for the given function literal,
-// which must have appeared within outerfn.
-func NameClosure(clo *ClosureExpr, outerfn *Func) {
- fn := clo.Func
- if fn.IsHiddenClosure() != (outerfn != nil) {
- base.FatalfAt(clo.Pos(), "closure naming inconsistency: hidden %v, but outer %v", fn.IsHiddenClosure(), outerfn)
+// IsFuncPCIntrinsic returns whether n is a direct call of internal/abi.FuncPCABIxxx functions.
+func IsFuncPCIntrinsic(n *CallExpr) bool {
+ if n.Op() != OCALLFUNC || n.Fun.Op() != ONAME {
+ return false
}
+ fn := n.Fun.(*Name).Sym()
+ return (fn.Name == "FuncPCABI0" || fn.Name == "FuncPCABIInternal") &&
+ fn.Pkg.Path == "internal/abi"
+}
- name := fn.Nname
- if !IsBlank(name) {
- base.FatalfAt(clo.Pos(), "closure already named: %v", name)
+// IsIfaceOfFunc inspects whether n is an interface conversion from a direct
+// reference of a func. If so, it returns the referenced Func; otherwise nil.
+//
+// This is only usable before walk.walkConvertInterface, which converts it to an
+// OMAKEFACE.
+func IsIfaceOfFunc(n Node) *Func {
+ if n, ok := n.(*ConvExpr); ok && n.Op() == OCONVIFACE {
+ if name, ok := n.X.(*Name); ok && name.Op() == ONAME && name.Class == PFUNC {
+ return name.Func
+ }
}
-
- name.SetSym(closureName(outerfn, clo.Pos()))
- MarkFunc(name)
+ return nil
}
-// UseClosure checks that the given function literal has been setup
-// correctly, and then returns it as an expression.
-// It must be called after clo.Func.ClosureVars has been set.
-func UseClosure(clo *ClosureExpr, pkg *Package) Node {
- fn := clo.Func
- name := fn.Nname
-
- if IsBlank(name) {
- base.FatalfAt(fn.Pos(), "unnamed closure func: %v", fn)
- }
- // Caution: clo.Typecheck() is still 0 when UseClosure is called by
- // tcClosure.
- if fn.Typecheck() != 1 || name.Typecheck() != 1 {
- base.FatalfAt(fn.Pos(), "missed typecheck: %v", fn)
+// FuncPC returns a uintptr-typed expression that evaluates to the PC of a
+// function as uintptr, as returned by internal/abi.FuncPC{ABI0,ABIInternal}.
+//
+// n should be a Node of an interface type, as is passed to
+// internal/abi.FuncPC{ABI0,ABIInternal}.
+//
+// TODO(prattmic): Since n is simply an interface{} there is no assertion that
+// it is actually a function at all. Perhaps we should emit a runtime type
+// assertion?
+func FuncPC(pos src.XPos, n Node, wantABI obj.ABI) Node {
+ if !n.Type().IsInterface() {
+ base.ErrorfAt(pos, 0, "internal/abi.FuncPC%s expects an interface value, got %v", wantABI, n.Type())
}
- if clo.Type() == nil || name.Type() == nil {
- base.FatalfAt(fn.Pos(), "missing types: %v", fn)
+
+ if fn := IsIfaceOfFunc(n); fn != nil {
+ name := fn.Nname
+ abi := fn.ABI
+ if abi != wantABI {
+ base.ErrorfAt(pos, 0, "internal/abi.FuncPC%s expects an %v function, %s is defined as %v", wantABI, wantABI, name.Sym().Name, abi)
+ }
+ var e Node = NewLinksymExpr(pos, name.Sym().LinksymABI(abi), types.Types[types.TUINTPTR])
+ e = NewAddrExpr(pos, e)
+ e.SetType(types.Types[types.TUINTPTR].PtrTo())
+ e = NewConvExpr(pos, OCONVNOP, types.Types[types.TUINTPTR], e)
+ e.SetTypecheck(1)
+ return e
}
- if !types.Identical(clo.Type(), name.Type()) {
- base.FatalfAt(fn.Pos(), "mismatched types: %v", fn)
+ // fn is not a defined function. It must be ABIInternal.
+ // Read the address from func value, i.e. *(*uintptr)(idata(fn)).
+ if wantABI != obj.ABIInternal {
+ base.ErrorfAt(pos, 0, "internal/abi.FuncPC%s does not accept func expression, which is ABIInternal", wantABI)
}
+ var e Node = NewUnaryExpr(pos, OIDATA, n)
+ e.SetType(types.Types[types.TUINTPTR].PtrTo())
+ e.SetTypecheck(1)
+ e = NewStarExpr(pos, e)
+ e.SetType(types.Types[types.TUINTPTR])
+ e.SetTypecheck(1)
+ return e
+}
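Editorial aside: the second branch above reads the entry PC out of the func value itself, i.e. *(*uintptr) of the value's data word. A hedged standalone illustration of that layout follows; it relies on the gc implementation detail that a func value is a pointer to a funcval whose first word is the code pointer, so it is not portable and is for illustration only.

package main

import (
	"fmt"
	"runtime"
	"unsafe"
)

func target() {}

func main() {
	f := target
	// A func value is one word: a pointer to a funcval whose first word
	// is the entry PC. Two dereferences mirror *(*uintptr)(idata(fn)).
	pc := **(**uintptr)(unsafe.Pointer(&f))
	fmt.Printf("%#x %s\n", pc, runtime.FuncForPC(pc).Name()) // e.g. main.target
}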
- if base.Flag.W > 1 {
- s := fmt.Sprintf("new closure func: %v", fn)
- Dump(s, fn)
+// DeclareParams creates Names for all of the parameters in fn's
+// signature and adds them to fn.Dcl.
+//
+// If setNname is true, then it also sets types.Field.Nname for each
+// parameter.
+func (fn *Func) DeclareParams(setNname bool) {
+ if fn.Dcl != nil {
+ base.FatalfAt(fn.Pos(), "%v already has Dcl", fn)
}
- if pkg != nil {
- pkg.Decls = append(pkg.Decls, fn)
+ declareParams := func(params []*types.Field, ctxt Class, prefix string, offset int) {
+ for i, param := range params {
+ sym := param.Sym
+ if sym == nil || sym.IsBlank() {
+ sym = fn.Sym().Pkg.LookupNum(prefix, i)
+ }
+
+ name := NewNameAt(param.Pos, sym, param.Type)
+ name.Class = ctxt
+ name.Curfn = fn
+ fn.Dcl[offset+i] = name
+
+ if setNname {
+ param.Nname = name
+ }
+ }
}
- if false && IsTrivialClosure(clo) {
- // TODO(mdempsky): Investigate if we can/should optimize this
- // case. walkClosure already handles it later, but it could be
- // useful to recognize earlier (e.g., it might allow multiple
- // inlined calls to a function to share a common trivial closure
- // func, rather than cloning it for each inlined call).
- }
+ sig := fn.Type()
+ params := sig.RecvParams()
+ results := sig.Results()
- return clo
+ fn.Dcl = make([]*Name, len(params)+len(results))
+ declareParams(params, PPARAM, "~p", 0)
+ declareParams(results, PPARAMOUT, "~r", len(params))
}
diff --git a/src/cmd/compile/internal/ir/func_test.go b/src/cmd/compile/internal/ir/func_test.go
new file mode 100644
index 0000000000..5b40c02dc4
--- /dev/null
+++ b/src/cmd/compile/internal/ir/func_test.go
@@ -0,0 +1,82 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "testing"
+)
+
+func TestSplitPkg(t *testing.T) {
+ tests := []struct {
+ in string
+ pkg string
+ sym string
+ }{
+ {
+ in: "foo.Bar",
+ pkg: "foo",
+ sym: "Bar",
+ },
+ {
+ in: "foo/bar.Baz",
+ pkg: "foo/bar",
+ sym: "Baz",
+ },
+ {
+ in: "memeqbody",
+ pkg: "",
+ sym: "memeqbody",
+ },
+ {
+ in: `example%2ecom.Bar`,
+ pkg: `example%2ecom`,
+ sym: "Bar",
+ },
+ {
+ // Not a real generated symbol name, but easier to catch the general parameter form.
+ in: `foo.Bar[sync/atomic.Uint64]`,
+ pkg: `foo`,
+ sym: "Bar[sync/atomic.Uint64]",
+ },
+ {
+ in: `example%2ecom.Bar[sync/atomic.Uint64]`,
+ pkg: `example%2ecom`,
+ sym: "Bar[sync/atomic.Uint64]",
+ },
+ {
+ in: `gopkg.in/yaml%2ev3.Bar[sync/atomic.Uint64]`,
+ pkg: `gopkg.in/yaml%2ev3`,
+ sym: "Bar[sync/atomic.Uint64]",
+ },
+ {
+ // This one is a real symbol name.
+ in: `foo.Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]`,
+ pkg: `foo`,
+ sym: "Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]",
+ },
+ {
+ in: `example%2ecom.Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]`,
+ pkg: `example%2ecom`,
+ sym: "Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]",
+ },
+ {
+ in: `gopkg.in/yaml%2ev3.Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]`,
+ pkg: `gopkg.in/yaml%2ev3`,
+ sym: "Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]",
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.in, func(t *testing.T) {
+ pkg, sym := splitPkg(tc.in)
+ if pkg != tc.pkg {
+ t.Errorf("splitPkg(%q) got pkg %q want %q", tc.in, pkg, tc.pkg)
+ }
+ if sym != tc.sym {
+ t.Errorf("splitPkg(%q) got sym %q want %q", tc.in, sym, tc.sym)
+ }
+ })
+ }
+}
diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go
index 716e84389f..ca78a03d04 100644
--- a/src/cmd/compile/internal/ir/mknode.go
+++ b/src/cmd/compile/internal/ir/mknode.go
@@ -335,9 +335,9 @@ func processType(t *ast.TypeSpec) {
}
func generateHelpers() {
- for _, typ := range []string{"CaseClause", "CommClause", "Name", "Node", "Ntype"} {
+ for _, typ := range []string{"CaseClause", "CommClause", "Name", "Node"} {
ptr := "*"
- if typ == "Node" || typ == "Ntype" {
+ if typ == "Node" {
ptr = "" // interfaces don't need *
}
fmt.Fprintf(&buf, "\n")
diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go
index c6eff27272..2844c0b869 100644
--- a/src/cmd/compile/internal/ir/name.go
+++ b/src/cmd/compile/internal/ir/name.go
@@ -47,7 +47,7 @@ type Name struct {
Embed *[]Embed // list of embedded files, for ONAME var
// For a local variable (not param) or extern, the initializing assignment (OAS or OAS2).
- // For a closure var, the ONAME node of the outer captured variable.
+ // For a closure var, the ONAME node of the original (outermost) captured variable.
// For the case-local variables of a type switch, the type switch guard (OTYPESW).
// For a range variable, the range statement (ORANGE)
// For a recv variable in a case of a select statement, the receive assignment (OSELRECV2)
@@ -59,77 +59,9 @@ type Name struct {
Heapaddr *Name // temp holding heap address of param
- // ONAME closure linkage
- // Consider:
- //
- // func f() {
- // x := 1 // x1
- // func() {
- // use(x) // x2
- // func() {
- // use(x) // x3
- // --- parser is here ---
- // }()
- // }()
- // }
- //
- // There is an original declaration of x and then a chain of mentions of x
- // leading into the current function. Each time x is mentioned in a new closure,
- // we create a variable representing x for use in that specific closure,
- // since the way you get to x is different in each closure.
- //
- // Let's number the specific variables as shown in the code:
- // x1 is the original x, x2 is when mentioned in the closure,
- // and x3 is when mentioned in the closure in the closure.
- //
- // We keep these linked (assume N > 1):
- //
- // - x1.Defn = original declaration statement for x (like most variables)
- // - x1.Innermost = current innermost closure x (in this case x3), or nil for none
- // - x1.IsClosureVar() = false
- //
- // - xN.Defn = x1, N > 1
- // - xN.IsClosureVar() = true, N > 1
- // - x2.Outer = nil
- // - xN.Outer = x(N-1), N > 2
- //
- //
- // When we look up x in the symbol table, we always get x1.
- // Then we can use x1.Innermost (if not nil) to get the x
- // for the innermost known closure function,
- // but the first reference in a closure will find either no x1.Innermost
- // or an x1.Innermost with .Funcdepth < Funcdepth.
- // In that case, a new xN must be created, linked in with:
- //
- // xN.Defn = x1
- // xN.Outer = x1.Innermost
- // x1.Innermost = xN
- //
- // When we finish the function, we'll process its closure variables
- // and find xN and pop it off the list using:
- //
- // x1 := xN.Defn
- // x1.Innermost = xN.Outer
- //
- // We leave x1.Innermost set so that we can still get to the original
- // variable quickly. Not shown here, but once we're
- // done parsing a function and no longer need xN.Outer for the
- // lexical x reference links as described above, funcLit
- // recomputes xN.Outer as the semantic x reference link tree,
- // even filling in x in intermediate closures that might not
- // have mentioned it along the way to inner closures that did.
- // See funcLit for details.
- //
- // During the eventual compilation, then, for closure variables we have:
- //
- // xN.Defn = original variable
- // xN.Outer = variable captured in next outward scope
- // to make closure where xN appears
- //
- // Because of the sharding of pieces of the node, x.Defn means x.Name.Defn
- // and x.Innermost/Outer means x.Name.Param.Innermost/Outer.
- Innermost *Name
- Outer *Name
+ // Outer points to the immediately enclosing function's copy of this
+ // closure variable. If not a closure variable, then Outer is nil.
+ Outer *Name
}
func (n *Name) isExpr() {}
@@ -147,11 +79,39 @@ func (n *Name) RecordFrameOffset(offset int64) {
// NewNameAt returns a new ONAME Node associated with symbol s at position pos.
// The caller is responsible for setting Curfn.
-func NewNameAt(pos src.XPos, sym *types.Sym) *Name {
+func NewNameAt(pos src.XPos, sym *types.Sym, typ *types.Type) *Name {
if sym == nil {
base.Fatalf("NewNameAt nil")
}
- return newNameAt(pos, ONAME, sym)
+ n := newNameAt(pos, ONAME, sym)
+ if typ != nil {
+ n.SetType(typ)
+ n.SetTypecheck(1)
+ }
+ return n
+}
+
+// NewBuiltin returns a new Name representing a builtin function,
+// either predeclared or from package unsafe.
+func NewBuiltin(sym *types.Sym, op Op) *Name {
+ n := newNameAt(src.NoXPos, ONAME, sym)
+ n.BuiltinOp = op
+ n.SetTypecheck(1)
+ sym.Def = n
+ return n
+}
+
+// NewLocal returns a new function-local variable with the given name and type.
+func (fn *Func) NewLocal(pos src.XPos, sym *types.Sym, typ *types.Type) *Name {
+ if fn.Dcl == nil {
+	base.FatalfAt(pos, "must call DeclareParams on %v first", fn)
+ }
+
+ n := NewNameAt(pos, sym, typ)
+ n.Class = PAUTO
+ n.Curfn = fn
+ fn.Dcl = append(fn.Dcl, n)
+ return n
}
// NewDeclNameAt returns a new Name associated with symbol s at position pos.
@@ -176,6 +136,7 @@ func NewConstAt(pos src.XPos, sym *types.Sym, typ *types.Type, val constant.Valu
}
n := newNameAt(pos, OLITERAL, sym)
n.SetType(typ)
+ n.SetTypecheck(1)
n.SetVal(val)
return n
}
@@ -189,18 +150,12 @@ func newNameAt(pos src.XPos, op Op, sym *types.Sym) *Name {
return n
}
-func (n *Name) Name() *Name { return n }
-func (n *Name) Sym() *types.Sym { return n.sym }
-func (n *Name) SetSym(x *types.Sym) { n.sym = x }
-func (n *Name) SubOp() Op { return n.BuiltinOp }
-func (n *Name) SetSubOp(x Op) { n.BuiltinOp = x }
-func (n *Name) SetFunc(x *Func) { n.Func = x }
-func (n *Name) Offset() int64 { panic("Name.Offset") }
-func (n *Name) SetOffset(x int64) {
- if x != 0 {
- panic("Name.SetOffset")
- }
-}
+func (n *Name) Name() *Name { return n }
+func (n *Name) Sym() *types.Sym { return n.sym }
+func (n *Name) SetSym(x *types.Sym) { n.sym = x }
+func (n *Name) SubOp() Op { return n.BuiltinOp }
+func (n *Name) SetSubOp(x Op) { n.BuiltinOp = x }
+func (n *Name) SetFunc(x *Func) { n.Func = x }
func (n *Name) FrameOffset() int64 { return n.Offset_ }
func (n *Name) SetFrameOffset(x int64) { n.Offset_ = x }
@@ -351,16 +306,13 @@ func NewClosureVar(pos src.XPos, fn *Func, n *Name) *Name {
base.Fatalf("NewClosureVar: %+v", n)
}
- c := NewNameAt(pos, n.Sym())
+ c := NewNameAt(pos, n.Sym(), n.Type())
c.Curfn = fn
c.Class = PAUTOHEAP
c.SetIsClosureVar(true)
c.Defn = n.Canonical()
c.Outer = n
- c.SetType(n.Type())
- c.SetTypecheck(n.Typecheck())
-
fn.ClosureVars = append(fn.ClosureVars, c)
return c
@@ -377,88 +329,13 @@ func NewHiddenParam(pos src.XPos, fn *Func, sym *types.Sym, typ *types.Type) *Na
// Create a fake parameter, disassociated from any real function, to
// pretend to capture.
- fake := NewNameAt(pos, sym)
+ fake := NewNameAt(pos, sym, typ)
fake.Class = PPARAM
- fake.SetType(typ)
fake.SetByval(true)
return NewClosureVar(pos, fn, fake)
}
-// CaptureName returns a Name suitable for referring to n from within function
-// fn or from the package block if fn is nil. If n is a free variable declared
-// within a function that encloses fn, then CaptureName returns the closure
-// variable that refers to n within fn, creating it if necessary.
-// Otherwise, it simply returns n.
-func CaptureName(pos src.XPos, fn *Func, n *Name) *Name {
- if n.Op() != ONAME || n.Curfn == nil {
- return n // okay to use directly
- }
- if n.IsClosureVar() {
- base.FatalfAt(pos, "misuse of CaptureName on closure variable: %v", n)
- }
-
- c := n.Innermost
- if c == nil {
- c = n
- }
- if c.Curfn == fn {
- return c
- }
-
- if fn == nil {
- base.FatalfAt(pos, "package-block reference to %v, declared in %v", n, n.Curfn)
- }
-
- // Do not have a closure var for the active closure yet; make one.
- c = NewClosureVar(pos, fn, c)
-
- // Link into list of active closure variables.
- // Popped from list in FinishCaptureNames.
- n.Innermost = c
-
- return c
-}
-
-// FinishCaptureNames handles any work leftover from calling CaptureName
-// earlier. outerfn should be the function that immediately encloses fn.
-func FinishCaptureNames(pos src.XPos, outerfn, fn *Func) {
- // closure-specific variables are hanging off the
- // ordinary ones; see CaptureName above.
- // unhook them.
- // make the list of pointers for the closure call.
- for _, cv := range fn.ClosureVars {
- // Unlink from n; see comment above on type Name for these fields.
- n := cv.Defn.(*Name)
- n.Innermost = cv.Outer
-
- // If the closure usage of n is not dense, we need to make it
- // dense by recapturing n within the enclosing function.
- //
- // That is, suppose we just finished parsing the innermost
- // closure f4 in this code:
- //
- // func f() {
- // n := 1
- // func() { // f2
- // use(n)
- // func() { // f3
- // func() { // f4
- // use(n)
- // }()
- // }()
- // }()
- // }
- //
- // At this point cv.Outer is f2's n; there is no n for f3. To
- // construct the closure f4 from within f3, we need to use f3's
- // n and in this case we need to create f3's n with CaptureName.
- //
- // We'll decide later in walk whether to use v directly or &v.
- cv.Outer = CaptureName(pos, outerfn, n)
- }
-}
-
// SameSource reports whether two nodes refer to the same source
// element.
//
diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go
index 769340e9cd..6513386f03 100644
--- a/src/cmd/compile/internal/ir/node.go
+++ b/src/cmd/compile/internal/ir/node.go
@@ -169,15 +169,12 @@ const (
OPTRLIT // &X (X is composite literal)
OCONV // Type(X) (type conversion)
OCONVIFACE // Type(X) (type conversion, to interface)
- OCONVIDATA // Builds a data word to store X in an interface. Equivalent to IDATA(CONVIFACE(X)). Is an ir.ConvExpr.
OCONVNOP // Type(X) (type conversion, no effect)
OCOPY // copy(X, Y)
ODCL // var X (declares X of type X.Type)
// Used during parsing but don't last.
- ODCLFUNC // func f() or func (r) f()
- ODCLCONST // const pi = 3.14
- ODCLTYPE // type Int int or type Int = int
+ ODCLFUNC // func f() or func (r) f()
ODELETE // delete(Args)
ODOT // X.Sel (X is of struct type)
@@ -226,7 +223,7 @@ const (
OOROR // X || Y
OPANIC // panic(X)
OPRINT // print(List)
- OPRINTN // println(List)
+ OPRINTLN // println(List)
OPAREN // (X)
OSEND // Chan <- Value
OSLICE // X[Low : High] (X is untypechecked or slice)
@@ -246,9 +243,6 @@ const (
OREAL // real(X)
OIMAG // imag(X)
OCOMPLEX // complex(X, Y)
- OALIGNOF // unsafe.Alignof(X)
- OOFFSETOF // unsafe.Offsetof(X)
- OSIZEOF // unsafe.Sizeof(X)
OUNSAFEADD // unsafe.Add(X, Y)
OUNSAFESLICE // unsafe.Slice(X, Y)
OUNSAFESLICEDATA // unsafe.SliceData(X)
@@ -282,24 +276,24 @@ const (
// OTYPESW: X := Y.(type) (appears as .Tag of OSWITCH)
// X is nil if there is no type-switch variable
OTYPESW
- OFUNCINST // instantiation of a generic function
// misc
// intermediate representation of an inlined call. Uses Init (assignments
// for the captured variables, parameters, retvars, & INLMARK op),
// Body (body of the inlined function), and ReturnVars (list of
// return values)
- OINLCALL // intermediary representation of an inlined call.
- OEFACE // itable and data words of an empty-interface value.
- OITAB // itable word of an interface value.
- OIDATA // data word of an interface value in X
- OSPTR // base pointer of a slice or string. Bounded==1 means known non-nil.
- OCFUNC // reference to c function pointer (not go func value)
- OCHECKNIL // emit code to ensure pointer/interface not nil
- ORESULT // result of a function call; Xoffset is stack offset
- OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
- OLINKSYMOFFSET // offset within a name
- OJUMPTABLE // A jump table structure for implementing dense expression switches
+ OINLCALL // intermediary representation of an inlined call.
+ OMAKEFACE // construct an interface value from rtype/itab and data pointers
+ OITAB // rtype/itab pointer of an interface value
+ OIDATA // data pointer of an interface value
+ OSPTR // base pointer of a slice or string. Bounded==1 means known non-nil.
+ OCFUNC // reference to c function pointer (not go func value)
+ OCHECKNIL // emit code to ensure pointer/interface not nil
+ ORESULT // result of a function call; Xoffset is stack offset
+ OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
+ OLINKSYMOFFSET // offset within a name
+ OJUMPTABLE // A jump table structure for implementing dense expression switches
+ OINTERFACESWITCH // A type switch with interface cases
// opcodes for generics
ODYNAMICDOTTYPE // x = i.(T) where T is a type parameter (or derived from a type parameter)
@@ -325,11 +319,18 @@ func (op Op) IsCmp() bool {
return false
}
-// Nodes is a pointer to a slice of *Node.
-// For fields that are not used in most nodes, this is used instead of
-// a slice to save space.
+// Nodes is a slice of Node.
type Nodes []Node
+// ToNodes returns s as a slice of Nodes.
+func ToNodes[T Node](s []T) Nodes {
+ res := make(Nodes, len(s))
+ for i, n := range s {
+ res[i] = n
+ }
+ return res
+}
+
// Append appends entries to Nodes.
func (n *Nodes) Append(a ...Node) {
if len(a) == 0 {
@@ -465,14 +466,7 @@ const (
)
-func AsNode(n types.Object) Node {
- if n == nil {
- return nil
- }
- return n.(Node)
-}
-
-var BlankNode Node
+var BlankNode *Name
func IsConst(n Node, ct constant.Kind) bool {
return ConstType(n) == ct
@@ -480,9 +474,7 @@ func IsConst(n Node, ct constant.Kind) bool {
// IsNil reports whether n represents the universal untyped zero value "nil".
func IsNil(n Node) bool {
- // Check n.Orig because constant propagation may produce typed nil constants,
- // which don't exist in the Go spec.
- return n != nil && Orig(n).Op() == ONIL
+ return n != nil && n.Op() == ONIL
}
func IsBlank(n Node) bool {
@@ -498,11 +490,6 @@ func IsMethod(n Node) bool {
return n.Type().Recv() != nil
}
-func HasNamedResults(fn *Func) bool {
- typ := fn.Type()
- return typ.NumResults() > 0 && types.OrigSym(typ.Results().Field(0).Sym) != nil
-}
-
// HasUniquePos reports whether n has a unique position that can be
// used for reporting error messages.
//
diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go
index 2dda76b1e3..fc28067629 100644
--- a/src/cmd/compile/internal/ir/node_gen.go
+++ b/src/cmd/compile/internal/ir/node_gen.go
@@ -295,7 +295,7 @@ func (n *CallExpr) doChildren(do func(Node) bool) bool {
if doNodes(n.init, do) {
return true
}
- if n.X != nil && do(n.X) {
+ if n.Fun != nil && do(n.Fun) {
return true
}
if doNodes(n.Args, do) {
@@ -308,16 +308,16 @@ func (n *CallExpr) doChildren(do func(Node) bool) bool {
}
func (n *CallExpr) editChildren(edit func(Node) Node) {
editNodes(n.init, edit)
- if n.X != nil {
- n.X = edit(n.X).(Node)
+ if n.Fun != nil {
+ n.Fun = edit(n.Fun).(Node)
}
editNodes(n.Args, edit)
editNames(n.KeepAlive, edit)
}
func (n *CallExpr) editChildrenWithHidden(edit func(Node) Node) {
editNodes(n.init, edit)
- if n.X != nil {
- n.X = edit(n.X).(Node)
+ if n.Fun != nil {
+ n.Fun = edit(n.Fun).(Node)
}
editNodes(n.Args, edit)
if n.RType != nil {
@@ -471,25 +471,6 @@ func (n *CompLitExpr) editChildrenWithHidden(edit func(Node) Node) {
}
}
-func (n *ConstExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
-func (n *ConstExpr) copy() Node {
- c := *n
- c.init = copyNodes(c.init)
- return &c
-}
-func (n *ConstExpr) doChildren(do func(Node) bool) bool {
- if doNodes(n.init, do) {
- return true
- }
- return false
-}
-func (n *ConstExpr) editChildren(edit func(Node) Node) {
- editNodes(n.init, edit)
-}
-func (n *ConstExpr) editChildrenWithHidden(edit func(Node) Node) {
- editNodes(n.init, edit)
-}
-
func (n *ConvExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *ConvExpr) copy() Node {
c := *n
@@ -866,38 +847,50 @@ func (n *InlinedCallExpr) editChildrenWithHidden(edit func(Node) Node) {
editNodes(n.ReturnVars, edit)
}
-func (n *InstExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
-func (n *InstExpr) copy() Node {
+func (n *InterfaceSwitchStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *InterfaceSwitchStmt) copy() Node {
c := *n
c.init = copyNodes(c.init)
- c.Targs = copyNtypes(c.Targs)
return &c
}
-func (n *InstExpr) doChildren(do func(Node) bool) bool {
+func (n *InterfaceSwitchStmt) doChildren(do func(Node) bool) bool {
if doNodes(n.init, do) {
return true
}
- if n.X != nil && do(n.X) {
+ if n.Case != nil && do(n.Case) {
return true
}
- if doNtypes(n.Targs, do) {
+ if n.Itab != nil && do(n.Itab) {
+ return true
+ }
+ if n.RuntimeType != nil && do(n.RuntimeType) {
return true
}
return false
}
-func (n *InstExpr) editChildren(edit func(Node) Node) {
+func (n *InterfaceSwitchStmt) editChildren(edit func(Node) Node) {
editNodes(n.init, edit)
- if n.X != nil {
- n.X = edit(n.X).(Node)
+ if n.Case != nil {
+ n.Case = edit(n.Case).(Node)
+ }
+ if n.Itab != nil {
+ n.Itab = edit(n.Itab).(Node)
+ }
+ if n.RuntimeType != nil {
+ n.RuntimeType = edit(n.RuntimeType).(Node)
}
- editNtypes(n.Targs, edit)
}
-func (n *InstExpr) editChildrenWithHidden(edit func(Node) Node) {
+func (n *InterfaceSwitchStmt) editChildrenWithHidden(edit func(Node) Node) {
editNodes(n.init, edit)
- if n.X != nil {
- n.X = edit(n.X).(Node)
+ if n.Case != nil {
+ n.Case = edit(n.Case).(Node)
+ }
+ if n.Itab != nil {
+ n.Itab = edit(n.Itab).(Node)
+ }
+ if n.RuntimeType != nil {
+ n.RuntimeType = edit(n.RuntimeType).(Node)
}
- editNtypes(n.Targs, edit)
}
func (n *JumpTableStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
@@ -1205,25 +1198,6 @@ func (n *RangeStmt) editChildrenWithHidden(edit func(Node) Node) {
}
}
-func (n *RawOrigExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
-func (n *RawOrigExpr) copy() Node {
- c := *n
- c.init = copyNodes(c.init)
- return &c
-}
-func (n *RawOrigExpr) doChildren(do func(Node) bool) bool {
- if doNodes(n.init, do) {
- return true
- }
- return false
-}
-func (n *RawOrigExpr) editChildren(edit func(Node) Node) {
- editNodes(n.init, edit)
-}
-func (n *RawOrigExpr) editChildrenWithHidden(edit func(Node) Node) {
- editNodes(n.init, edit)
-}
-
func (n *ResultExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *ResultExpr) copy() Node {
c := *n
@@ -1833,27 +1807,3 @@ func editNodes(list []Node, edit func(Node) Node) {
}
}
}
-
-func copyNtypes(list []Ntype) []Ntype {
- if list == nil {
- return nil
- }
- c := make([]Ntype, len(list))
- copy(c, list)
- return c
-}
-func doNtypes(list []Ntype, do func(Node) bool) bool {
- for _, x := range list {
- if x != nil && do(x) {
- return true
- }
- }
- return false
-}
-func editNtypes(list []Ntype, edit func(Node) Node) {
- for i, x := range list {
- if x != nil {
- list[i] = edit(x).(Ntype)
- }
- }
-}
diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go
index 571ac6cb45..fb97ac68f4 100644
--- a/src/cmd/compile/internal/ir/op_string.go
+++ b/src/cmd/compile/internal/ir/op_string.go
@@ -53,124 +53,118 @@ func _() {
_ = x[OPTRLIT-42]
_ = x[OCONV-43]
_ = x[OCONVIFACE-44]
- _ = x[OCONVIDATA-45]
- _ = x[OCONVNOP-46]
- _ = x[OCOPY-47]
- _ = x[ODCL-48]
- _ = x[ODCLFUNC-49]
- _ = x[ODCLCONST-50]
- _ = x[ODCLTYPE-51]
- _ = x[ODELETE-52]
- _ = x[ODOT-53]
- _ = x[ODOTPTR-54]
- _ = x[ODOTMETH-55]
- _ = x[ODOTINTER-56]
- _ = x[OXDOT-57]
- _ = x[ODOTTYPE-58]
- _ = x[ODOTTYPE2-59]
- _ = x[OEQ-60]
- _ = x[ONE-61]
- _ = x[OLT-62]
- _ = x[OLE-63]
- _ = x[OGE-64]
- _ = x[OGT-65]
- _ = x[ODEREF-66]
- _ = x[OINDEX-67]
- _ = x[OINDEXMAP-68]
- _ = x[OKEY-69]
- _ = x[OSTRUCTKEY-70]
- _ = x[OLEN-71]
- _ = x[OMAKE-72]
- _ = x[OMAKECHAN-73]
- _ = x[OMAKEMAP-74]
- _ = x[OMAKESLICE-75]
- _ = x[OMAKESLICECOPY-76]
- _ = x[OMUL-77]
- _ = x[ODIV-78]
- _ = x[OMOD-79]
- _ = x[OLSH-80]
- _ = x[ORSH-81]
- _ = x[OAND-82]
- _ = x[OANDNOT-83]
- _ = x[ONEW-84]
- _ = x[ONOT-85]
- _ = x[OBITNOT-86]
- _ = x[OPLUS-87]
- _ = x[ONEG-88]
- _ = x[OOROR-89]
- _ = x[OPANIC-90]
- _ = x[OPRINT-91]
- _ = x[OPRINTN-92]
- _ = x[OPAREN-93]
- _ = x[OSEND-94]
- _ = x[OSLICE-95]
- _ = x[OSLICEARR-96]
- _ = x[OSLICESTR-97]
- _ = x[OSLICE3-98]
- _ = x[OSLICE3ARR-99]
- _ = x[OSLICEHEADER-100]
- _ = x[OSTRINGHEADER-101]
- _ = x[ORECOVER-102]
- _ = x[ORECOVERFP-103]
- _ = x[ORECV-104]
- _ = x[ORUNESTR-105]
- _ = x[OSELRECV2-106]
- _ = x[OMIN-107]
- _ = x[OMAX-108]
- _ = x[OREAL-109]
- _ = x[OIMAG-110]
- _ = x[OCOMPLEX-111]
- _ = x[OALIGNOF-112]
- _ = x[OOFFSETOF-113]
- _ = x[OSIZEOF-114]
- _ = x[OUNSAFEADD-115]
- _ = x[OUNSAFESLICE-116]
- _ = x[OUNSAFESLICEDATA-117]
- _ = x[OUNSAFESTRING-118]
- _ = x[OUNSAFESTRINGDATA-119]
- _ = x[OMETHEXPR-120]
- _ = x[OMETHVALUE-121]
- _ = x[OBLOCK-122]
- _ = x[OBREAK-123]
- _ = x[OCASE-124]
- _ = x[OCONTINUE-125]
- _ = x[ODEFER-126]
- _ = x[OFALL-127]
- _ = x[OFOR-128]
- _ = x[OGOTO-129]
- _ = x[OIF-130]
- _ = x[OLABEL-131]
- _ = x[OGO-132]
- _ = x[ORANGE-133]
- _ = x[ORETURN-134]
- _ = x[OSELECT-135]
- _ = x[OSWITCH-136]
- _ = x[OTYPESW-137]
- _ = x[OFUNCINST-138]
- _ = x[OINLCALL-139]
- _ = x[OEFACE-140]
- _ = x[OITAB-141]
- _ = x[OIDATA-142]
- _ = x[OSPTR-143]
- _ = x[OCFUNC-144]
- _ = x[OCHECKNIL-145]
- _ = x[ORESULT-146]
- _ = x[OINLMARK-147]
- _ = x[OLINKSYMOFFSET-148]
- _ = x[OJUMPTABLE-149]
- _ = x[ODYNAMICDOTTYPE-150]
- _ = x[ODYNAMICDOTTYPE2-151]
- _ = x[ODYNAMICTYPE-152]
- _ = x[OTAILCALL-153]
- _ = x[OGETG-154]
- _ = x[OGETCALLERPC-155]
- _ = x[OGETCALLERSP-156]
- _ = x[OEND-157]
+ _ = x[OCONVNOP-45]
+ _ = x[OCOPY-46]
+ _ = x[ODCL-47]
+ _ = x[ODCLFUNC-48]
+ _ = x[ODELETE-49]
+ _ = x[ODOT-50]
+ _ = x[ODOTPTR-51]
+ _ = x[ODOTMETH-52]
+ _ = x[ODOTINTER-53]
+ _ = x[OXDOT-54]
+ _ = x[ODOTTYPE-55]
+ _ = x[ODOTTYPE2-56]
+ _ = x[OEQ-57]
+ _ = x[ONE-58]
+ _ = x[OLT-59]
+ _ = x[OLE-60]
+ _ = x[OGE-61]
+ _ = x[OGT-62]
+ _ = x[ODEREF-63]
+ _ = x[OINDEX-64]
+ _ = x[OINDEXMAP-65]
+ _ = x[OKEY-66]
+ _ = x[OSTRUCTKEY-67]
+ _ = x[OLEN-68]
+ _ = x[OMAKE-69]
+ _ = x[OMAKECHAN-70]
+ _ = x[OMAKEMAP-71]
+ _ = x[OMAKESLICE-72]
+ _ = x[OMAKESLICECOPY-73]
+ _ = x[OMUL-74]
+ _ = x[ODIV-75]
+ _ = x[OMOD-76]
+ _ = x[OLSH-77]
+ _ = x[ORSH-78]
+ _ = x[OAND-79]
+ _ = x[OANDNOT-80]
+ _ = x[ONEW-81]
+ _ = x[ONOT-82]
+ _ = x[OBITNOT-83]
+ _ = x[OPLUS-84]
+ _ = x[ONEG-85]
+ _ = x[OOROR-86]
+ _ = x[OPANIC-87]
+ _ = x[OPRINT-88]
+ _ = x[OPRINTLN-89]
+ _ = x[OPAREN-90]
+ _ = x[OSEND-91]
+ _ = x[OSLICE-92]
+ _ = x[OSLICEARR-93]
+ _ = x[OSLICESTR-94]
+ _ = x[OSLICE3-95]
+ _ = x[OSLICE3ARR-96]
+ _ = x[OSLICEHEADER-97]
+ _ = x[OSTRINGHEADER-98]
+ _ = x[ORECOVER-99]
+ _ = x[ORECOVERFP-100]
+ _ = x[ORECV-101]
+ _ = x[ORUNESTR-102]
+ _ = x[OSELRECV2-103]
+ _ = x[OMIN-104]
+ _ = x[OMAX-105]
+ _ = x[OREAL-106]
+ _ = x[OIMAG-107]
+ _ = x[OCOMPLEX-108]
+ _ = x[OUNSAFEADD-109]
+ _ = x[OUNSAFESLICE-110]
+ _ = x[OUNSAFESLICEDATA-111]
+ _ = x[OUNSAFESTRING-112]
+ _ = x[OUNSAFESTRINGDATA-113]
+ _ = x[OMETHEXPR-114]
+ _ = x[OMETHVALUE-115]
+ _ = x[OBLOCK-116]
+ _ = x[OBREAK-117]
+ _ = x[OCASE-118]
+ _ = x[OCONTINUE-119]
+ _ = x[ODEFER-120]
+ _ = x[OFALL-121]
+ _ = x[OFOR-122]
+ _ = x[OGOTO-123]
+ _ = x[OIF-124]
+ _ = x[OLABEL-125]
+ _ = x[OGO-126]
+ _ = x[ORANGE-127]
+ _ = x[ORETURN-128]
+ _ = x[OSELECT-129]
+ _ = x[OSWITCH-130]
+ _ = x[OTYPESW-131]
+ _ = x[OINLCALL-132]
+ _ = x[OMAKEFACE-133]
+ _ = x[OITAB-134]
+ _ = x[OIDATA-135]
+ _ = x[OSPTR-136]
+ _ = x[OCFUNC-137]
+ _ = x[OCHECKNIL-138]
+ _ = x[ORESULT-139]
+ _ = x[OINLMARK-140]
+ _ = x[OLINKSYMOFFSET-141]
+ _ = x[OJUMPTABLE-142]
+ _ = x[OINTERFACESWITCH-143]
+ _ = x[ODYNAMICDOTTYPE-144]
+ _ = x[ODYNAMICDOTTYPE2-145]
+ _ = x[ODYNAMICTYPE-146]
+ _ = x[OTAILCALL-147]
+ _ = x[OGETG-148]
+ _ = x[OGETCALLERPC-149]
+ _ = x[OGETCALLERSP-150]
+ _ = x[OEND-151]
}
-const _Op_name = "XXXNAMENONAMETYPELITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCAPCLEARCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVIDATACONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERSTRINGHEADERRECOVERRECOVERFPRECVRUNESTRSELRECV2MINMAXREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFUNSAFEADDUNSAFESLICEUNSAFESLICEDATAUNSAFESTRINGUNSAFESTRINGDATAMETHEXPRMETHVALUEBLOCKBREAKCASECONTINUEDEFERFALLFORGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWFUNCINSTINLCALLEFACEITABIDATASPTRCFUNCCHECKNILRESULTINLMARKLINKSYMOFFSETJUMPTABLEDYNAMICDOTTYPEDYNAMICDOTTYPE2DYNAMICTYPETAILCALLGETGGETCALLERPCGETCALLERSPEND"
+const _Op_name = "XXXNAMENONAMETYPELITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCAPCLEARCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERSTRINGHEADERRECOVERRECOVERFPRECVRUNESTRSELRECV2MINMAXREALIMAGCOMPLEXUNSAFEADDUNSAFESLICEUNSAFESLICEDATAUNSAFESTRINGUNSAFESTRINGDATAMETHEXPRMETHVALUEBLOCKBREAKCASECONTINUEDEFERFALLFORGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWINLCALLMAKEFACEITABIDATASPTRCFUNCCHECKNILRESULTINLMARKLINKSYMOFFSETJUMPTABLEINTERFACESWITCHDYNAMICDOTTYPEDYNAMICDOTTYPE2DYNAMICTYPETAILCALLGETGGETCALLERPCGETCALLERSPEND"
-var _Op_index = [...]uint16{0, 3, 7, 13, 17, 24, 27, 30, 33, 35, 38, 44, 48, 54, 60, 69, 81, 90, 99, 111, 120, 129, 141, 143, 146, 156, 163, 170, 177, 181, 185, 193, 201, 210, 213, 218, 223, 230, 237, 243, 252, 260, 268, 274, 278, 287, 296, 303, 307, 310, 317, 325, 332, 338, 341, 347, 354, 362, 366, 373, 381, 383, 385, 387, 389, 391, 393, 398, 403, 411, 414, 423, 426, 430, 438, 445, 454, 467, 470, 473, 476, 479, 482, 485, 491, 494, 497, 503, 507, 510, 514, 519, 524, 530, 535, 539, 544, 552, 560, 566, 575, 586, 598, 605, 614, 618, 625, 633, 636, 639, 643, 647, 654, 661, 669, 675, 684, 695, 710, 722, 738, 746, 755, 760, 765, 769, 777, 782, 786, 789, 793, 795, 800, 802, 807, 813, 819, 825, 831, 839, 846, 851, 855, 860, 864, 869, 877, 883, 890, 903, 912, 926, 941, 952, 960, 964, 975, 986, 989}
+var _Op_index = [...]uint16{0, 3, 7, 13, 17, 24, 27, 30, 33, 35, 38, 44, 48, 54, 60, 69, 81, 90, 99, 111, 120, 129, 141, 143, 146, 156, 163, 170, 177, 181, 185, 193, 201, 210, 213, 218, 223, 230, 237, 243, 252, 260, 268, 274, 278, 287, 294, 298, 301, 308, 314, 317, 323, 330, 338, 342, 349, 357, 359, 361, 363, 365, 367, 369, 374, 379, 387, 390, 399, 402, 406, 414, 421, 430, 443, 446, 449, 452, 455, 458, 461, 467, 470, 473, 479, 483, 486, 490, 495, 500, 506, 511, 515, 520, 528, 536, 542, 551, 562, 574, 581, 590, 594, 601, 609, 612, 615, 619, 623, 630, 639, 650, 665, 677, 693, 701, 710, 715, 720, 724, 732, 737, 741, 744, 748, 750, 755, 757, 762, 768, 774, 780, 786, 793, 801, 805, 810, 814, 819, 827, 833, 840, 853, 862, 877, 891, 906, 917, 925, 929, 940, 951, 954}
func (i Op) String() string {
if i >= Op(len(_Op_index)-1) {
diff --git a/src/cmd/compile/internal/ir/package.go b/src/cmd/compile/internal/ir/package.go
index 3896e2b91b..3b70a9281a 100644
--- a/src/cmd/compile/internal/ir/package.go
+++ b/src/cmd/compile/internal/ir/package.go
@@ -15,14 +15,18 @@ type Package struct {
// Init functions, listed in source order.
Inits []*Func
- // Top-level declarations.
- Decls []Node
+ // Funcs contains all (instantiated) functions, methods, and
+ // function literals to be compiled.
+ Funcs []*Func
- // Extern (package global) declarations.
- Externs []Node
+ // Externs holds constants, (non-generic) types, and variables
+ // declared at package scope.
+ Externs []*Name
- // Assembly function declarations.
- Asms []*Name
+ // AsmHdrDecls holds declared constants and struct types that should
+ // be included in -asmhdr output. It's only populated when -asmhdr
+ // is set.
+ AsmHdrDecls []*Name
// Cgo directives.
CgoPragmas [][]string
@@ -30,6 +34,9 @@ type Package struct {
// Variables with //go:embed lines.
Embeds []*Name
- // Exported (or re-exported) symbols.
- Exports []*Name
+ // PluginExports holds exported functions and variables that are
+ // accessible through the package plugin API. It's only populated
+ // for -buildmode=plugin (i.e., compiling package main and -dynlink
+ // is set).
+ PluginExports []*Name
}
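
For orientation, a minimal sketch (not part of this change) of what each renamed Package field now carries; it assumes the usual import of cmd/compile/internal/ir from within the compiler, and the helper name is hypothetical:

// walkPackage is illustrative only: it reports how many declarations land in
// each of the restructured Package fields.
func walkPackage(pkg *ir.Package) {
	println("funcs to compile:", len(pkg.Funcs))       // functions, methods, and func literals
	println("package-scope names:", len(pkg.Externs))  // consts, non-generic types, and vars
	println("-asmhdr decls:", len(pkg.AsmHdrDecls))    // only populated when -asmhdr is set
	println("plugin exports:", len(pkg.PluginExports)) // only for -buildmode=plugin
}
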
diff --git a/src/cmd/compile/internal/ir/reassign_consistency_check.go b/src/cmd/compile/internal/ir/reassign_consistency_check.go
new file mode 100644
index 0000000000..e4d928d132
--- /dev/null
+++ b/src/cmd/compile/internal/ir/reassign_consistency_check.go
@@ -0,0 +1,46 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/internal/src"
+ "fmt"
+ "path/filepath"
+ "strings"
+)
+
+// checkStaticValueResult compares the result from ReassignOracle.StaticValue
+// with the corresponding result from ir.StaticValue to make sure they agree.
+// It is called only when enabled via a build tag.
+func checkStaticValueResult(n Node, newres Node) {
+ oldres := StaticValue(n)
+ if oldres != newres {
+ base.Fatalf("%s: new/old static value disagreement on %v:\nnew=%v\nold=%v", fmtFullPos(n.Pos()), n, newres, oldres)
+ }
+}
+
+// checkReassignedResult compares the result from ReassignOracle.Reassigned
+// with the corresponding result from ir.Reassigned to make sure they agree.
+// It is called only when enabled via a build tag.
+func checkReassignedResult(n *Name, newres bool) {
+ origres := Reassigned(n)
+ if newres != origres {
+ base.Fatalf("%s: new/old reassigned disagreement on %v (class %s) newres=%v oldres=%v", fmtFullPos(n.Pos()), n, n.Class.String(), newres, origres)
+ }
+}
+
+// fmtFullPos returns a verbose dump for pos p, including inlines.
+func fmtFullPos(p src.XPos) string {
+ var sb strings.Builder
+ sep := ""
+ base.Ctxt.AllPos(p, func(pos src.Pos) {
+ fmt.Fprintf(&sb, sep)
+ sep = "|"
+ file := filepath.Base(pos.Filename())
+ fmt.Fprintf(&sb, "%s:%d:%d", file, pos.Line(), pos.Col())
+ })
+ return sb.String()
+}
diff --git a/src/cmd/compile/internal/ir/reassignment.go b/src/cmd/compile/internal/ir/reassignment.go
new file mode 100644
index 0000000000..9974292471
--- /dev/null
+++ b/src/cmd/compile/internal/ir/reassignment.go
@@ -0,0 +1,205 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/base"
+)
+
+// A ReassignOracle efficiently answers queries about whether local
+// variables are reassigned. This helper works by looking for function
+// params and short variable declarations (e.g.
+// https://go.dev/ref/spec#Short_variable_declarations) that are
+// neither address taken nor subsequently re-assigned. It is intended
+// to operate much like "ir.StaticValue" and "ir.Reassigned", but in a
+// way that does just a single walk of the containing function (as
+// opposed to a new walk on every call).
+type ReassignOracle struct {
+ fn *Func
+ // maps candidate name to its defining assignment (or, for
+ // params, the defining func).
+ singleDef map[*Name]Node
+}
+
+// Init initializes the oracle based on the IR in function fn, laying
+// the groundwork for future calls to the StaticValue and Reassigned
+// methods. If the fn's IR is subsequently modified, Init must be
+// called again.
+func (ro *ReassignOracle) Init(fn *Func) {
+ ro.fn = fn
+
+ // Collect candidate map. Start by adding function parameters
+ // explicitly.
+ ro.singleDef = make(map[*Name]Node)
+ sig := fn.Type()
+ numParams := sig.NumRecvs() + sig.NumParams()
+ for _, param := range fn.Dcl[:numParams] {
+ if IsBlank(param) {
+ continue
+ }
+ // For params, use func itself as defining node.
+ ro.singleDef[param] = fn
+ }
+
+ // Walk the function body to discover any locals assigned
+ // via ":=" syntax (e.g. "a := <expr>").
+ var findLocals func(n Node) bool
+ findLocals = func(n Node) bool {
+ if nn, ok := n.(*Name); ok {
+ if nn.Defn != nil && !nn.Addrtaken() && nn.Class == PAUTO {
+ ro.singleDef[nn] = nn.Defn
+ }
+ } else if nn, ok := n.(*ClosureExpr); ok {
+ Any(nn.Func, findLocals)
+ }
+ return false
+ }
+ Any(fn, findLocals)
+
+ outerName := func(x Node) *Name {
+ if x == nil {
+ return nil
+ }
+ n, ok := OuterValue(x).(*Name)
+ if ok {
+ return n.Canonical()
+ }
+ return nil
+ }
+
+ // pruneIfNeeded examines node nn appearing on the left hand side
+ // of assignment statement asn to see if it contains a reassignment
+ // to any nodes in our candidate map ro.singleDef; if a reassignment
+ // is found, the corresponding name is deleted from singleDef.
+ pruneIfNeeded := func(nn Node, asn Node) {
+ oname := outerName(nn)
+ if oname == nil {
+ return
+ }
+ defn, ok := ro.singleDef[oname]
+ if !ok {
+ return
+ }
+ // any assignment to a param invalidates the entry.
+ paramAssigned := oname.Class == PPARAM
+ // assignment to local ok iff assignment is its orig def.
+ localAssigned := (oname.Class == PAUTO && asn != defn)
+ if paramAssigned || localAssigned {
+ // We found an assignment to name N that doesn't
+ // correspond to its original definition; remove
+ // from candidates.
+ delete(ro.singleDef, oname)
+ }
+ }
+
+ // Prune away anything that looks assigned. This code modeled after
+ // similar code in ir.Reassigned; any changes there should be made
+ // here as well.
+ var do func(n Node) bool
+ do = func(n Node) bool {
+ switch n.Op() {
+ case OAS:
+ asn := n.(*AssignStmt)
+ pruneIfNeeded(asn.X, n)
+ case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE, OAS2RECV, OSELRECV2:
+ asn := n.(*AssignListStmt)
+ for _, p := range asn.Lhs {
+ pruneIfNeeded(p, n)
+ }
+ case OASOP:
+ asn := n.(*AssignOpStmt)
+ pruneIfNeeded(asn.X, n)
+ case ORANGE:
+ rs := n.(*RangeStmt)
+ pruneIfNeeded(rs.Key, n)
+ pruneIfNeeded(rs.Value, n)
+ case OCLOSURE:
+ n := n.(*ClosureExpr)
+ Any(n.Func, do)
+ }
+ return false
+ }
+ Any(fn, do)
+}
+
+// StaticValue has the same semantics as the ir package function of the
+// same name; see comments on [StaticValue].
+func (ro *ReassignOracle) StaticValue(n Node) Node {
+ arg := n
+ for {
+ if n.Op() == OCONVNOP {
+ n = n.(*ConvExpr).X
+ continue
+ }
+
+ if n.Op() == OINLCALL {
+ n = n.(*InlinedCallExpr).SingleResult()
+ continue
+ }
+
+ n1 := ro.staticValue1(n)
+ if n1 == nil {
+ if consistencyCheckEnabled {
+ checkStaticValueResult(arg, n)
+ }
+ return n
+ }
+ n = n1
+ }
+}
+
+func (ro *ReassignOracle) staticValue1(nn Node) Node {
+ if nn.Op() != ONAME {
+ return nil
+ }
+ n := nn.(*Name).Canonical()
+ if n.Class != PAUTO {
+ return nil
+ }
+
+ defn := n.Defn
+ if defn == nil {
+ return nil
+ }
+
+ var rhs Node
+FindRHS:
+ switch defn.Op() {
+ case OAS:
+ defn := defn.(*AssignStmt)
+ rhs = defn.Y
+ case OAS2:
+ defn := defn.(*AssignListStmt)
+ for i, lhs := range defn.Lhs {
+ if lhs == n {
+ rhs = defn.Rhs[i]
+ break FindRHS
+ }
+ }
+ base.Fatalf("%v missing from LHS of %v", n, defn)
+ default:
+ return nil
+ }
+ if rhs == nil {
+ base.Fatalf("RHS is nil: %v", defn)
+ }
+
+ if _, ok := ro.singleDef[n]; !ok {
+ return nil
+ }
+
+ return rhs
+}
+
+// Reassigned has the same semantics as the ir package function of the
+// same name; see comments on [Reassigned] for more info.
+func (ro *ReassignOracle) Reassigned(n *Name) bool {
+ _, ok := ro.singleDef[n]
+ result := !ok
+ if consistencyCheckEnabled {
+ checkReassignedResult(n, result)
+ }
+ return result
+}
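
A hedged usage sketch (assumed caller, not part of this change) showing the intended pattern: initialize the oracle once per function, then query it repeatedly without re-walking the IR:

// useOracle assumes an *ir.Func whose body is final; the function name and
// surrounding code are hypothetical.
func useOracle(fn *ir.Func) {
	var ro ir.ReassignOracle
	ro.Init(fn) // single walk of fn

	for _, n := range fn.Dcl {
		if ro.Reassigned(n) {
			// n is reassigned or address-taken, or it was never a
			// candidate (only params and ":=" locals are tracked).
			continue
		}
		if v := ro.StaticValue(n); v != n {
			_ = v // statically known defining expression of n
		}
	}
}
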
diff --git a/src/cmd/compile/internal/ir/scc.go b/src/cmd/compile/internal/ir/scc.go
index b222939a1e..a640f4fc16 100644
--- a/src/cmd/compile/internal/ir/scc.go
+++ b/src/cmd/compile/internal/ir/scc.go
@@ -49,16 +49,13 @@ type bottomUpVisitor struct {
// If recursive is false, the list consists of only a single function and its closures.
// If recursive is true, the list may still contain only a single function,
// if that function is itself recursive.
-func VisitFuncsBottomUp(list []Node, analyze func(list []*Func, recursive bool)) {
+func VisitFuncsBottomUp(list []*Func, analyze func(list []*Func, recursive bool)) {
var v bottomUpVisitor
v.analyze = analyze
v.nodeID = make(map[*Func]uint32)
for _, n := range list {
- if n.Op() == ODCLFUNC {
- n := n.(*Func)
- if !n.IsHiddenClosure() {
- v.visit(n)
- }
+ if !n.IsHiddenClosure() {
+ v.visit(n)
}
}
}
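
With the new signature, callers hand the walker the function list directly; a minimal call-site sketch (assuming the package-level list, typecheck.Target.Funcs in the compiler, and a placeholder callback body):

// Sketch: VisitFuncsBottomUp now accepts []*ir.Func instead of []ir.Node.
ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) {
	for _, fn := range list {
		_ = fn // analyze each function in this strongly connected component
	}
})
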
diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go
index 307f40d484..3b6823895c 100644
--- a/src/cmd/compile/internal/ir/sizeof_test.go
+++ b/src/cmd/compile/internal/ir/sizeof_test.go
@@ -20,8 +20,8 @@ func TestSizeof(t *testing.T) {
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
- {Func{}, 188, 328},
- {Name{}, 100, 176},
+ {Func{}, 168, 288},
+ {Name{}, 96, 168},
}
for _, tt := range tests {
diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go
index b6653ab528..0801ecdd9e 100644
--- a/src/cmd/compile/internal/ir/stmt.go
+++ b/src/cmd/compile/internal/ir/stmt.go
@@ -7,6 +7,7 @@ package ir
import (
"cmd/compile/internal/base"
"cmd/compile/internal/types"
+ "cmd/internal/obj"
"cmd/internal/src"
"go/constant"
)
@@ -23,7 +24,7 @@ func NewDecl(pos src.XPos, op Op, x *Name) *Decl {
switch op {
default:
panic("invalid Decl op " + op.String())
- case ODCL, ODCLCONST, ODCLTYPE:
+ case ODCL:
n.op = op
}
return n
@@ -242,7 +243,8 @@ func NewForStmt(pos src.XPos, init Node, cond, post Node, body []Node, distinctV
// in a different context (a separate goroutine or a later time).
type GoDeferStmt struct {
miniStmt
- Call Node
+ Call Node
+ DeferAt Expr
}
func NewGoDeferStmt(pos src.XPos, op Op, call Node) *GoDeferStmt {
@@ -285,7 +287,7 @@ func NewIfStmt(pos src.XPos, cond Node, body, els []Node) *IfStmt {
//
// Note that a JumpTableStmt is more like a multiway-goto than
// a multiway-if. In particular, the case bodies are just
-// labels to jump to, not not full Nodes lists.
+// labels to jump to, not full Nodes lists.
type JumpTableStmt struct {
miniStmt
@@ -308,6 +310,46 @@ func NewJumpTableStmt(pos src.XPos, idx Node) *JumpTableStmt {
return n
}
+// An InterfaceSwitchStmt is used to implement type switches.
+// Its semantics are:
+//
+// if RuntimeType implements Descriptor.Cases[0] {
+// Case, Itab = 0, itab<RuntimeType, Descriptor.Cases[0]>
+// } else if RuntimeType implements Descriptor.Cases[1] {
+// Case, Itab = 1, itab<RuntimeType, Descriptor.Cases[1]>
+// ...
+// } else if RuntimeType implements Descriptor.Cases[N-1] {
+// Case, Itab = N-1, itab<RuntimeType, Descriptor.Cases[N-1]>
+// } else {
+// Case, Itab = len(cases), nil
+// }
+//
+// RuntimeType must be a non-nil *runtime._type.
+// Hash must be the hash field of RuntimeType (or its copy loaded from an itab).
+// Descriptor must represent an abi.InterfaceSwitch global variable.
+type InterfaceSwitchStmt struct {
+ miniStmt
+
+ Case Node
+ Itab Node
+ RuntimeType Node
+ Hash Node
+ Descriptor *obj.LSym
+}
+
+func NewInterfaceSwitchStmt(pos src.XPos, case_, itab, runtimeType, hash Node, descriptor *obj.LSym) *InterfaceSwitchStmt {
+ n := &InterfaceSwitchStmt{
+ Case: case_,
+ Itab: itab,
+ RuntimeType: runtimeType,
+ Hash: hash,
+ Descriptor: descriptor,
+ }
+ n.pos = pos
+ n.op = OINTERFACESWITCH
+ return n
+}
+
// An InlineMarkStmt is a marker placed just before an inlined body.
type InlineMarkStmt struct {
miniStmt
@@ -373,15 +415,13 @@ func NewRangeStmt(pos src.XPos, key, value, x Node, body []Node, distinctVars bo
// A ReturnStmt is a return statement.
type ReturnStmt struct {
miniStmt
- origNode // for typecheckargs rewrite
- Results Nodes // return list
+ Results Nodes // return list
}
func NewReturnStmt(pos src.XPos, results []Node) *ReturnStmt {
n := &ReturnStmt{}
n.pos = pos
n.op = ORETURN
- n.orig = n
n.Results = results
return n
}
diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go
index 6ee832e18d..202c4942de 100644
--- a/src/cmd/compile/internal/ir/symtab.go
+++ b/src/cmd/compile/internal/ir/symtab.go
@@ -10,7 +10,9 @@ import (
)
// Syms holds known symbols.
-var Syms struct {
+var Syms symsStruct
+
+type symsStruct struct {
AssertE2I *obj.LSym
AssertE2I2 *obj.LSym
AssertI2I *obj.LSym
@@ -21,6 +23,7 @@ var Syms struct {
CgoCheckPtrWrite *obj.LSym
CheckPtrAlignment *obj.LSym
Deferproc *obj.LSym
+ Deferprocat *obj.LSym
DeferprocStack *obj.LSym
Deferreturn *obj.LSym
Duffcopy *obj.LSym
@@ -28,6 +31,7 @@ var Syms struct {
GCWriteBarrier [8]*obj.LSym
Goschedguarded *obj.LSym
Growslice *obj.LSym
+ InterfaceSwitch *obj.LSym
Memmove *obj.LSym
Msanread *obj.LSym
Msanwrite *obj.LSym
@@ -40,10 +44,13 @@ var Syms struct {
PanicdottypeI *obj.LSym
Panicnildottype *obj.LSym
Panicoverflow *obj.LSym
+ Racefuncenter *obj.LSym
+ Racefuncexit *obj.LSym
Raceread *obj.LSym
Racereadrange *obj.LSym
Racewrite *obj.LSym
Racewriterange *obj.LSym
+ TypeAssert *obj.LSym
WBZero *obj.LSym
WBMove *obj.LSym
// Wasm
diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go
index 033d1eed4a..7db76c1427 100644
--- a/src/cmd/compile/internal/ir/type.go
+++ b/src/cmd/compile/internal/ir/type.go
@@ -8,44 +8,10 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/types"
"cmd/internal/src"
- "fmt"
)
-// Nodes that represent the syntax of a type before type-checking.
-// After type-checking, they serve only as shells around a *types.Type.
// Calling TypeNode converts a *types.Type to a Node shell.
-// An Ntype is a Node that syntactically looks like a type.
-// It can be the raw syntax for a type before typechecking,
-// or it can be an OTYPE with Type() set to a *types.Type.
-// Note that syntax doesn't guarantee it's a type: an expression
-// like *fmt is an Ntype (we don't know whether names are types yet),
-// but at least 1+1 is not an Ntype.
-type Ntype interface {
- Node
- CanBeNtype()
-}
-
-// A Field is a declared function parameter.
-// It is not a Node.
-type Field struct {
- Pos src.XPos
- Sym *types.Sym
- Type *types.Type
- IsDDD bool
-}
-
-func NewField(pos src.XPos, sym *types.Sym, typ *types.Type) *Field {
- return &Field{Pos: pos, Sym: sym, Type: typ}
-}
-
-func (f *Field) String() string {
- if f.Sym != nil {
- return fmt.Sprintf("%v %v", f.Sym, f.Type)
- }
- return fmt.Sprint(f.Type)
-}
-
// A typeNode is a Node wrapper for type t.
type typeNode struct {
miniNode
@@ -56,20 +22,20 @@ func newTypeNode(typ *types.Type) *typeNode {
n := &typeNode{typ: typ}
n.pos = src.NoXPos
n.op = OTYPE
+ n.SetTypecheck(1)
return n
}
func (n *typeNode) Type() *types.Type { return n.typ }
func (n *typeNode) Sym() *types.Sym { return n.typ.Sym() }
-func (n *typeNode) CanBeNtype() {}
// TypeNode returns the Node representing the type t.
-func TypeNode(t *types.Type) Ntype {
+func TypeNode(t *types.Type) Node {
if n := t.Obj(); n != nil {
if n.Type() != t {
base.Fatalf("type skew: %v has type %v, but expected %v", n, n.Type(), t)
}
- return n.(Ntype)
+ return n.(*Name)
}
return newTypeNode(t)
}
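
Since TypeNode now returns a plain Node that is already marked typechecked, callers no longer need the Ntype interface; a small illustrative sketch (assumed caller):

// Wrap a *types.Type in its Node shell.
n := ir.TypeNode(types.Types[types.TINT])
// n.Op() == ir.OTYPE, n.Type() == types.Types[types.TINT], n.Typecheck() == 1
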
diff --git a/src/cmd/compile/internal/ir/val.go b/src/cmd/compile/internal/ir/val.go
index 75da5a1462..16c8a08ca0 100644
--- a/src/cmd/compile/internal/ir/val.go
+++ b/src/cmd/compile/internal/ir/val.go
@@ -60,23 +60,6 @@ func ValidTypeForConst(t *types.Type, v constant.Value) bool {
panic("unreachable")
}
-func idealType(ct constant.Kind) *types.Type {
- switch ct {
- case constant.String:
- return types.UntypedString
- case constant.Bool:
- return types.UntypedBool
- case constant.Int:
- return types.UntypedInt
- case constant.Float:
- return types.UntypedFloat
- case constant.Complex:
- return types.UntypedComplex
- }
- base.Fatalf("unexpected Ctype: %v", ct)
- return nil
-}
-
var OKForConst [types.NTYPE]bool
// Int64Val returns n as an int64.
diff --git a/src/cmd/compile/internal/liveness/arg.go b/src/cmd/compile/internal/liveness/arg.go
index 6375e43ff3..e1269a10b7 100644
--- a/src/cmd/compile/internal/liveness/arg.go
+++ b/src/cmd/compile/internal/liveness/arg.go
@@ -97,8 +97,8 @@ func ArgLiveness(fn *ir.Func, f *ssa.Func, pp *objw.Progs) (blockIdx, valueIdx m
}
// Gather all register arg spill slots.
for _, a := range f.OwnAux.ABIInfo().InParams() {
- n, ok := a.Name.(*ir.Name)
- if !ok || len(a.Registers) == 0 {
+ n := a.Name
+ if n == nil || len(a.Registers) == 0 {
continue
}
_, offs := a.RegisterTypesAndOffsets()
@@ -116,7 +116,7 @@ func ArgLiveness(fn *ir.Func, f *ssa.Func, pp *objw.Progs) (blockIdx, valueIdx m
}
// We spill address-taken or non-SSA-able value upfront, so they are always live.
- alwaysLive := func(n *ir.Name) bool { return n.Addrtaken() || !f.Frontend().CanSSA(n.Type()) }
+ alwaysLive := func(n *ir.Name) bool { return n.Addrtaken() || !ssa.CanSSA(n.Type()) }
// We'll emit the smallest offset for the slots that need liveness info.
// No need to include a slot with a lower offset if it is always live.
diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go
index 169467e6f5..e4dbfa9fa3 100644
--- a/src/cmd/compile/internal/liveness/plive.go
+++ b/src/cmd/compile/internal/liveness/plive.go
@@ -116,6 +116,10 @@ type liveness struct {
// unsafePoints bit i is set if Value ID i is an unsafe-point
// (preemption is not allowed). Only valid if !allUnsafe.
unsafePoints bitvec.BitVec
+ // unsafeBlocks bit i is set if Block ID i is an unsafe-point
+ // (preemption is not allowed on any end-of-block
+ // instructions). Only valid if !allUnsafe.
+ unsafeBlocks bitvec.BitVec
// An array with a bit vector for each safe point in the
// current Block during liveness.epilogue. Indexed in Value
@@ -141,36 +145,61 @@ type liveness struct {
noClobberArgs bool // Do not clobber function arguments
}
-// Map maps from *ssa.Value to LivenessIndex.
+// Map maps from *ssa.Value to StackMapIndex.
+// Also keeps track of unsafe ssa.Values and ssa.Blocks.
+// (unsafe = can't be interrupted during GC.)
type Map struct {
- Vals map[ssa.ID]objw.LivenessIndex
+ Vals map[ssa.ID]objw.StackMapIndex
+ UnsafeVals map[ssa.ID]bool
+ UnsafeBlocks map[ssa.ID]bool
// The set of live, pointer-containing variables at the DeferReturn
// call (only set when open-coded defers are used).
- DeferReturn objw.LivenessIndex
+ DeferReturn objw.StackMapIndex
}
func (m *Map) reset() {
if m.Vals == nil {
- m.Vals = make(map[ssa.ID]objw.LivenessIndex)
+ m.Vals = make(map[ssa.ID]objw.StackMapIndex)
+ m.UnsafeVals = make(map[ssa.ID]bool)
+ m.UnsafeBlocks = make(map[ssa.ID]bool)
} else {
for k := range m.Vals {
delete(m.Vals, k)
}
+ for k := range m.UnsafeVals {
+ delete(m.UnsafeVals, k)
+ }
+ for k := range m.UnsafeBlocks {
+ delete(m.UnsafeBlocks, k)
+ }
}
- m.DeferReturn = objw.LivenessDontCare
+ m.DeferReturn = objw.StackMapDontCare
}
-func (m *Map) set(v *ssa.Value, i objw.LivenessIndex) {
+func (m *Map) set(v *ssa.Value, i objw.StackMapIndex) {
m.Vals[v.ID] = i
}
+func (m *Map) setUnsafeVal(v *ssa.Value) {
+ m.UnsafeVals[v.ID] = true
+}
+func (m *Map) setUnsafeBlock(b *ssa.Block) {
+ m.UnsafeBlocks[b.ID] = true
+}
-func (m Map) Get(v *ssa.Value) objw.LivenessIndex {
- // If v isn't in the map, then it's a "don't care" and not an
- // unsafe-point.
+func (m Map) Get(v *ssa.Value) objw.StackMapIndex {
+ // If v isn't in the map, then it's a "don't care".
if idx, ok := m.Vals[v.ID]; ok {
return idx
}
- return objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: false}
+ return objw.StackMapDontCare
+}
+func (m Map) GetUnsafe(v *ssa.Value) bool {
+ // default is safe
+ return m.UnsafeVals[v.ID]
+}
+func (m Map) GetUnsafeBlock(b *ssa.Block) bool {
+ // default is safe
+ return m.UnsafeBlocks[b.ID]
}
type progeffectscache struct {
@@ -377,8 +406,15 @@ func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int
if cap(lc.be) >= f.NumBlocks() {
lv.be = lc.be[:f.NumBlocks()]
}
- lv.livenessMap = Map{Vals: lc.livenessMap.Vals, DeferReturn: objw.LivenessDontCare}
+ lv.livenessMap = Map{
+ Vals: lc.livenessMap.Vals,
+ UnsafeVals: lc.livenessMap.UnsafeVals,
+ UnsafeBlocks: lc.livenessMap.UnsafeBlocks,
+ DeferReturn: objw.StackMapDontCare,
+ }
lc.livenessMap.Vals = nil
+ lc.livenessMap.UnsafeVals = nil
+ lc.livenessMap.UnsafeBlocks = nil
}
if lv.be == nil {
lv.be = make([]blockEffects, f.NumBlocks())
@@ -460,6 +496,7 @@ func (lv *liveness) markUnsafePoints() {
}
lv.unsafePoints = bitvec.New(int32(lv.f.NumValues()))
+ lv.unsafeBlocks = bitvec.New(int32(lv.f.NumBlocks()))
// Mark architecture-specific unsafe points.
for _, b := range lv.f.Blocks {
@@ -489,8 +526,6 @@ func (lv *liveness) markUnsafePoints() {
// m2 = store operation ... m1
// m3 = store operation ... m2
// m4 = WBend m3
- //
- // (For now m2 and m3 won't be present.)
// Find first memory op in the block, which should be a Phi.
m := v
@@ -535,40 +570,38 @@ func (lv *liveness) markUnsafePoints() {
var load *ssa.Value
v := decisionBlock.Controls[0]
for {
- if sym, ok := v.Aux.(*obj.LSym); ok && sym == ir.Syms.WriteBarrier {
- load = v
- break
- }
- switch v.Op {
- case ssa.Op386TESTL:
- // 386 lowers Neq32 to (TESTL cond cond),
- if v.Args[0] == v.Args[1] {
- v = v.Args[0]
- continue
+ if v.MemoryArg() != nil {
+ // Single instruction to load (and maybe compare) the write barrier flag.
+ if sym, ok := v.Aux.(*obj.LSym); ok && sym == ir.Syms.WriteBarrier {
+ load = v
+ break
}
- case ssa.Op386MOVLload, ssa.OpARM64MOVWUload, ssa.OpMIPS64MOVWUload, ssa.OpPPC64MOVWZload, ssa.OpWasmI64Load32U:
- // Args[0] is the address of the write
- // barrier control. Ignore Args[1],
- // which is the mem operand.
- // TODO: Just ignore mem operands?
- v = v.Args[0]
- continue
+ // Some architectures have to materialize the address separate from
+ // the load.
+ if sym, ok := v.Args[0].Aux.(*obj.LSym); ok && sym == ir.Syms.WriteBarrier {
+ load = v
+ break
+ }
+ v.Fatalf("load of write barrier flag not from correct global: %s", v.LongString())
}
// Common case: just flow backwards.
- if len(v.Args) != 1 {
- v.Fatalf("write barrier control value has more than one argument: %s", v.LongString())
+ if len(v.Args) == 1 || len(v.Args) == 2 && v.Args[0] == v.Args[1] {
+ // Note: 386 lowers Neq32 to (TESTL cond cond), so both args may be the same value.
+ v = v.Args[0]
+ continue
}
- v = v.Args[0]
+ v.Fatalf("write barrier control value has more than one argument: %s", v.LongString())
}
// Mark everything after the load unsafe.
found := false
for _, v := range decisionBlock.Values {
- found = found || v == load
if found {
lv.unsafePoints.Set(int32(v.ID))
}
+ found = found || v == load
}
+ lv.unsafeBlocks.Set(int32(decisionBlock.ID))
// Mark the write barrier on/off blocks as unsafe.
for _, e := range decisionBlock.Succs {
@@ -579,14 +612,15 @@ func (lv *liveness) markUnsafePoints() {
for _, v := range x.Values {
lv.unsafePoints.Set(int32(v.ID))
}
+ lv.unsafeBlocks.Set(int32(x.ID))
}
// Mark from the join point up to the WBend as unsafe.
for _, v := range b.Values {
- lv.unsafePoints.Set(int32(v.ID))
if v.Op == ssa.OpWBend {
break
}
+ lv.unsafePoints.Set(int32(v.ID))
}
}
}
@@ -828,13 +862,10 @@ func (lv *liveness) epilogue() {
// If we have an open-coded deferreturn call, make a liveness map for it.
if lv.fn.OpenCodedDeferDisallowed() {
- lv.livenessMap.DeferReturn = objw.LivenessDontCare
+ lv.livenessMap.DeferReturn = objw.StackMapDontCare
} else {
idx, _ := lv.stackMapSet.add(livedefer)
- lv.livenessMap.DeferReturn = objw.LivenessIndex{
- StackMapIndex: idx,
- IsUnsafePoint: false,
- }
+ lv.livenessMap.DeferReturn = objw.StackMapIndex(idx)
}
// Done compacting. Throw out the stack map set.
@@ -875,17 +906,18 @@ func (lv *liveness) compact(b *ssa.Block) {
pos++
}
for _, v := range b.Values {
- hasStackMap := lv.hasStackMap(v)
- isUnsafePoint := lv.allUnsafe || v.Op != ssa.OpClobber && lv.unsafePoints.Get(int32(v.ID))
- idx := objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: isUnsafePoint}
- if hasStackMap {
- idx.StackMapIndex, _ = lv.stackMapSet.add(lv.livevars[pos])
+ if lv.hasStackMap(v) {
+ idx, _ := lv.stackMapSet.add(lv.livevars[pos])
pos++
+ lv.livenessMap.set(v, objw.StackMapIndex(idx))
}
- if hasStackMap || isUnsafePoint {
- lv.livenessMap.set(v, idx)
+ if lv.allUnsafe || v.Op != ssa.OpClobber && lv.unsafePoints.Get(int32(v.ID)) {
+ lv.livenessMap.setUnsafeVal(v)
}
}
+ if lv.allUnsafe || lv.unsafeBlocks.Get(int32(b.ID)) {
+ lv.livenessMap.setUnsafeBlock(b)
+ }
// Reset livevars.
lv.livevars = lv.livevars[:0]
@@ -1039,7 +1071,7 @@ func clobberWalk(b *ssa.Block, v *ir.Name, offset int64, t *types.Type) {
}
case types.TSTRUCT:
- for _, t1 := range t.Fields().Slice() {
+ for _, t1 := range t.Fields() {
clobberWalk(b, v, offset+t1.Offset, t1.Type)
}
@@ -1221,7 +1253,7 @@ func (lv *liveness) printDebug() {
fmt.Printf("\tlive=")
printed = false
if pcdata.StackMapValid() {
- live := lv.stackMaps[pcdata.StackMapIndex]
+ live := lv.stackMaps[pcdata]
for j, n := range lv.vars {
if !live.Get(int32(j)) {
continue
@@ -1236,10 +1268,13 @@ func (lv *liveness) printDebug() {
fmt.Printf("\n")
}
- if pcdata.IsUnsafePoint {
+ if lv.livenessMap.GetUnsafe(v) {
fmt.Printf("\tunsafe-point\n")
}
}
+ if lv.livenessMap.GetUnsafeBlock(b) {
+ fmt.Printf("\tunsafe-block\n")
+ }
// bb bitsets
fmt.Printf("end\n")
@@ -1333,7 +1368,7 @@ func Compute(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *objw.Progs) (Map
for _, b := range f.Blocks {
for _, val := range b.Values {
if idx := lv.livenessMap.Get(val); idx.StackMapValid() {
- lv.showlive(val, lv.stackMaps[idx.StackMapIndex])
+ lv.showlive(val, lv.stackMaps[idx])
}
}
}
@@ -1485,7 +1520,7 @@ func WriteFuncMap(fn *ir.Func, abiInfo *abi.ABIParamResultInfo) {
return
}
nptr := int(abiInfo.ArgWidth() / int64(types.PtrSize))
- bv := bitvec.New(int32(nptr) * 2)
+ bv := bitvec.New(int32(nptr))
for _, p := range abiInfo.InParams() {
typebits.SetNoCheck(p.Type, p.FrameOffset(abiInfo), bv)
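
A hedged sketch of how a consumer reads the reworked liveness Map (variable names assumed): stack-map indices and unsafe-point information are now queried separately.

// m is the liveness.Map returned by Compute; v is an *ssa.Value.
idx := m.Get(v) // objw.StackMapIndex, or objw.StackMapDontCare
if idx.StackMapValid() {
	// emit the stack map PCDATA for idx here
}
if m.GetUnsafe(v) || m.GetUnsafeBlock(v.Block) {
	// preemption must not occur at this instruction or at this block's end
}
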
diff --git a/src/cmd/compile/internal/logopt/logopt_test.go b/src/cmd/compile/internal/logopt/logopt_test.go
index 1c48351ab2..c7debd9897 100644
--- a/src/cmd/compile/internal/logopt/logopt_test.go
+++ b/src/cmd/compile/internal/logopt/logopt_test.go
@@ -205,15 +205,15 @@ func s15a8(x *[15]int64) [15]int64 {
`"relatedInformation":[`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: y = z:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y := z (assign-pair)"},`+
- `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: ~R0 = y:"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: ~r0 = y:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y.b (dot of pointer)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from \u0026y.b (address-of)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":9},"end":{"line":4,"character":9}}},"message":"inlineLoc"},`+
- `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from ~R0 = \u0026y.b (assign-pair)"},`+
- `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r0 = ~R0:"},`+
- `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: from return ~R0 (return)"}]}`)
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from ~r0 = \u0026y.b (assign-pair)"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r0 = ~r0:"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: from return ~r0 (return)"}]}`)
})
}
diff --git a/src/cmd/compile/internal/loong64/galign.go b/src/cmd/compile/internal/loong64/galign.go
index 99ab7bdfb5..a613165054 100644
--- a/src/cmd/compile/internal/loong64/galign.go
+++ b/src/cmd/compile/internal/loong64/galign.go
@@ -20,4 +20,6 @@ func Init(arch *ssagen.ArchInfo) {
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
+ arch.LoadRegResult = loadRegResult
+ arch.SpillArgReg = spillArgReg
}
diff --git a/src/cmd/compile/internal/loong64/ggen.go b/src/cmd/compile/internal/loong64/ggen.go
index 8a24d2f295..27d318a8bb 100644
--- a/src/cmd/compile/internal/loong64/ggen.go
+++ b/src/cmd/compile/internal/loong64/ggen.go
@@ -5,6 +5,7 @@
package loong64
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/types"
@@ -16,34 +17,38 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog
if cnt == 0 {
return p
}
+
+ // Adjust the frame to account for LR.
+ off += base.Ctxt.Arch.FixedFrameSize
+
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
- p = pp.Append(p, loong64.AMOVV, obj.TYPE_REG, loong64.REGZERO, 0, obj.TYPE_MEM, loong64.REGSP, 8+off+i)
+ p = pp.Append(p, loong64.AMOVV, obj.TYPE_REG, loong64.REGZERO, 0, obj.TYPE_MEM, loong64.REGSP, off+i)
}
} else if cnt <= int64(128*types.PtrSize) {
- p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, loong64.REGRT1, 0)
+ p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, off, obj.TYPE_REG, loong64.REGRT1, 0)
p.Reg = loong64.REGSP
p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
} else {
- // ADDV $(8+frame+lo-8), SP, r1
+ // ADDV $(off), SP, r1
// ADDV $cnt, r1, r2
// loop:
- // MOVV R0, (Widthptr)r1
+ // MOVV R0, (r1)
// ADDV $Widthptr, r1
- // BNE r1, r2, loop
- p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, loong64.REGRT1, 0)
+ // BNE r1, r2, loop
+ p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, off, obj.TYPE_REG, loong64.REGRT1, 0)
p.Reg = loong64.REGSP
p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, loong64.REGRT2, 0)
p.Reg = loong64.REGRT1
- p = pp.Append(p, loong64.AMOVV, obj.TYPE_REG, loong64.REGZERO, 0, obj.TYPE_MEM, loong64.REGRT1, int64(types.PtrSize))
- p1 := p
+ p = pp.Append(p, loong64.AMOVV, obj.TYPE_REG, loong64.REGZERO, 0, obj.TYPE_MEM, loong64.REGRT1, 0)
+ loop := p
p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, loong64.REGRT1, 0)
p = pp.Append(p, loong64.ABNE, obj.TYPE_REG, loong64.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
p.Reg = loong64.REGRT2
- p.To.SetTarget(p1)
+ p.To.SetTarget(loop)
}
return p
diff --git a/src/cmd/compile/internal/loong64/ssa.go b/src/cmd/compile/internal/loong64/ssa.go
index 8193b4e321..e7298bdb9f 100644
--- a/src/cmd/compile/internal/loong64/ssa.go
+++ b/src/cmd/compile/internal/loong64/ssa.go
@@ -10,6 +10,7 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
@@ -80,6 +81,28 @@ func storeByType(t *types.Type, r int16) obj.As {
panic("bad store type")
}
+// largestMove returns the largest move instruction possible and its size,
+// given the alignment of the total size of the move.
+//
+// e.g., a 16-byte move may use MOVV, but an 11-byte move must use MOVB.
+//
+// Note that the moves may not be on naturally aligned addresses depending on
+// the source and destination.
+//
+// This matches the calculation in ssa.moveSize.
+func largestMove(alignment int64) (obj.As, int64) {
+ switch {
+ case alignment%8 == 0:
+ return loong64.AMOVV, 8
+ case alignment%4 == 0:
+ return loong64.AMOVW, 4
+ case alignment%2 == 0:
+ return loong64.AMOVH, 2
+ default:
+ return loong64.AMOVB, 1
+ }
+}
+
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy, ssa.OpLOONG64MOVVreg:
@@ -122,6 +145,18 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Type = obj.TYPE_REG
p.From.Reg = r
ssagen.AddrAuto(&p.To, v)
+ case ssa.OpArgIntReg, ssa.OpArgFloatReg:
+ // The assembler needs to wrap the entry safepoint/stack growth code
+ // with spill/unspill of the register args. The loop only runs once.
+ for _, a := range v.Block.Func.RegArgs {
+ // Pass the spill/unspill information along to the assembler, offset by size of
+ // the saved LR slot.
+ addr := ssagen.SpillSlotAddr(a, loong64.REGSP, base.Ctxt.Arch.FixedFrameSize)
+ s.FuncInfo().AddSpill(
+ obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type, a.Reg), Spill: storeByType(a.Type, a.Reg)})
+ }
+ v.Block.Func.RegArgs = nil
+ ssagen.CheckArgReg(v)
case ssa.OpLOONG64ADDV,
ssa.OpLOONG64SUBV,
ssa.OpLOONG64AND,
@@ -340,62 +375,36 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpLOONG64DUFFZERO:
- // runtime.duffzero expects start address - 8 in R19
- p := s.Prog(loong64.ASUBVU)
- p.From.Type = obj.TYPE_CONST
- p.From.Offset = 8
- p.Reg = v.Args[0].Reg()
- p.To.Type = obj.TYPE_REG
- p.To.Reg = loong64.REG_R19
- p = s.Prog(obj.ADUFFZERO)
+ // runtime.duffzero expects start address in R20
+ p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = v.AuxInt
case ssa.OpLOONG64LoweredZero:
- // SUBV $8, R19
- // MOVV R0, 8(R19)
- // ADDV $8, R19
- // BNE Rarg1, R19, -2(PC)
- // arg1 is the address of the last element to zero
- var sz int64
- var mov obj.As
- switch {
- case v.AuxInt%8 == 0:
- sz = 8
- mov = loong64.AMOVV
- case v.AuxInt%4 == 0:
- sz = 4
- mov = loong64.AMOVW
- case v.AuxInt%2 == 0:
- sz = 2
- mov = loong64.AMOVH
- default:
- sz = 1
- mov = loong64.AMOVB
- }
- p := s.Prog(loong64.ASUBVU)
- p.From.Type = obj.TYPE_CONST
- p.From.Offset = sz
- p.To.Type = obj.TYPE_REG
- p.To.Reg = loong64.REG_R19
- p2 := s.Prog(mov)
- p2.From.Type = obj.TYPE_REG
- p2.From.Reg = loong64.REGZERO
- p2.To.Type = obj.TYPE_MEM
- p2.To.Reg = loong64.REG_R19
- p2.To.Offset = sz
- p3 := s.Prog(loong64.AADDVU)
- p3.From.Type = obj.TYPE_CONST
- p3.From.Offset = sz
- p3.To.Type = obj.TYPE_REG
- p3.To.Reg = loong64.REG_R19
- p4 := s.Prog(loong64.ABNE)
- p4.From.Type = obj.TYPE_REG
- p4.From.Reg = v.Args[1].Reg()
- p4.Reg = loong64.REG_R19
- p4.To.Type = obj.TYPE_BRANCH
- p4.To.SetTarget(p2)
+ // MOVx R0, (Rarg0)
+ // ADDV $sz, Rarg0
+ // BGEU Rarg1, Rarg0, -2(PC)
+ mov, sz := largestMove(v.AuxInt)
+ p := s.Prog(mov)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = loong64.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+
+ p2 := s.Prog(loong64.AADDVU)
+ p2.From.Type = obj.TYPE_CONST
+ p2.From.Offset = sz
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(loong64.ABGEU)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = v.Args[1].Reg()
+ p3.Reg = v.Args[0].Reg()
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+
case ssa.OpLOONG64DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
@@ -403,61 +412,43 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Sym = ir.Syms.Duffcopy
p.To.Offset = v.AuxInt
case ssa.OpLOONG64LoweredMove:
- // SUBV $8, R19
- // MOVV 8(R19), Rtmp
- // MOVV Rtmp, (R4)
- // ADDV $8, R19
- // ADDV $8, R4
- // BNE Rarg2, R19, -4(PC)
- // arg2 is the address of the last element of src
- var sz int64
- var mov obj.As
- switch {
- case v.AuxInt%8 == 0:
- sz = 8
- mov = loong64.AMOVV
- case v.AuxInt%4 == 0:
- sz = 4
- mov = loong64.AMOVW
- case v.AuxInt%2 == 0:
- sz = 2
- mov = loong64.AMOVH
- default:
- sz = 1
- mov = loong64.AMOVB
- }
- p := s.Prog(loong64.ASUBVU)
- p.From.Type = obj.TYPE_CONST
- p.From.Offset = sz
+ // MOVx (Rarg1), Rtmp
+ // MOVx Rtmp, (Rarg0)
+ // ADDV $sz, Rarg1
+ // ADDV $sz, Rarg0
+ // BGEU Rarg2, Rarg0, -4(PC)
+ mov, sz := largestMove(v.AuxInt)
+ p := s.Prog(mov)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG
- p.To.Reg = loong64.REG_R19
+ p.To.Reg = loong64.REGTMP
+
p2 := s.Prog(mov)
- p2.From.Type = obj.TYPE_MEM
- p2.From.Reg = loong64.REG_R19
- p2.From.Offset = sz
- p2.To.Type = obj.TYPE_REG
- p2.To.Reg = loong64.REGTMP
- p3 := s.Prog(mov)
- p3.From.Type = obj.TYPE_REG
- p3.From.Reg = loong64.REGTMP
- p3.To.Type = obj.TYPE_MEM
- p3.To.Reg = loong64.REG_R4
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = loong64.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(loong64.AADDVU)
+ p3.From.Type = obj.TYPE_CONST
+ p3.From.Offset = sz
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = v.Args[1].Reg()
+
p4 := s.Prog(loong64.AADDVU)
p4.From.Type = obj.TYPE_CONST
p4.From.Offset = sz
p4.To.Type = obj.TYPE_REG
- p4.To.Reg = loong64.REG_R19
- p5 := s.Prog(loong64.AADDVU)
- p5.From.Type = obj.TYPE_CONST
- p5.From.Offset = sz
- p5.To.Type = obj.TYPE_REG
- p5.To.Reg = loong64.REG_R4
- p6 := s.Prog(loong64.ABNE)
- p6.From.Type = obj.TYPE_REG
- p6.From.Reg = v.Args[2].Reg()
- p6.Reg = loong64.REG_R19
- p6.To.Type = obj.TYPE_BRANCH
- p6.To.SetTarget(p2)
+ p4.To.Reg = v.Args[0].Reg()
+
+ p5 := s.Prog(loong64.ABGEU)
+ p5.From.Type = obj.TYPE_REG
+ p5.From.Reg = v.Args[2].Reg()
+ p5.Reg = v.Args[1].Reg()
+ p5.To.Type = obj.TYPE_BRANCH
+ p5.To.SetTarget(p)
+
case ssa.OpLOONG64CALLstatic, ssa.OpLOONG64CALLclosure, ssa.OpLOONG64CALLinter:
s.Call(v)
case ssa.OpLOONG64CALLtail:
@@ -818,3 +809,22 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
b.Fatalf("branch not implemented: %s", b.LongString())
}
}
+
+func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p := s.Prog(loadByType(t, reg))
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_AUTO
+ p.From.Sym = n.Linksym()
+ p.From.Offset = n.FrameOffset() + off
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = reg
+ return p
+}
+
+func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p = pp.Append(p, storeByType(t, reg), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
+ p.To.Name = obj.NAME_PARAM
+ p.To.Sym = n.Linksym()
+ p.Pos = p.Pos.WithNotStmt()
+ return p
+}
diff --git a/src/cmd/compile/internal/loopvar/loopvar.go b/src/cmd/compile/internal/loopvar/loopvar.go
index 43f081c10a..030fc04c13 100644
--- a/src/cmd/compile/internal/loopvar/loopvar.go
+++ b/src/cmd/compile/internal/loopvar/loopvar.go
@@ -107,7 +107,7 @@ func ForCapture(fn *ir.Func) []VarAndLoop {
if base.LoopVarHash.MatchPos(n.Pos(), desc) {
// Rename the loop key, prefix body with assignment from loop key
transformed = append(transformed, VarAndLoop{n, x, lastPos})
- tk := typecheck.Temp(n.Type())
+ tk := typecheck.TempAt(base.Pos, fn, n.Type())
tk.SetTypecheck(1)
as := ir.NewAssignStmt(x.Pos(), n, tk)
as.Def = true
@@ -298,7 +298,7 @@ func ForCapture(fn *ir.Func) []VarAndLoop {
for _, z := range leaked {
transformed = append(transformed, VarAndLoop{z, x, lastPos})
- tz := typecheck.Temp(z.Type())
+ tz := typecheck.TempAt(base.Pos, fn, z.Type())
tz.SetTypecheck(1)
zPrimeForZ[z] = tz
@@ -355,26 +355,17 @@ func ForCapture(fn *ir.Func) []VarAndLoop {
})
postNotNil := x.Post != nil
- var tmpFirstDcl *ir.AssignStmt
+ var tmpFirstDcl ir.Node
if postNotNil {
// body' = prebody +
// (6) if tmp_first {tmp_first = false} else {Post} +
// if !cond {break} + ...
- tmpFirst := typecheck.Temp(types.Types[types.TBOOL])
-
- // tmpFirstAssign assigns val to tmpFirst
- tmpFirstAssign := func(val bool) *ir.AssignStmt {
- s := ir.NewAssignStmt(x.Pos(), tmpFirst, typecheck.OrigBool(tmpFirst, val))
- s.SetTypecheck(1)
- return s
- }
-
- tmpFirstDcl = tmpFirstAssign(true)
- tmpFirstDcl.Def = true // also declares tmpFirst
- tmpFirstSetFalse := tmpFirstAssign(false)
+ tmpFirst := typecheck.TempAt(base.Pos, fn, types.Types[types.TBOOL])
+ tmpFirstDcl = typecheck.Stmt(ir.NewAssignStmt(x.Pos(), tmpFirst, ir.NewBool(base.Pos, true)))
+ tmpFirstSetFalse := typecheck.Stmt(ir.NewAssignStmt(x.Pos(), tmpFirst, ir.NewBool(base.Pos, false)))
ifTmpFirst := ir.NewIfStmt(x.Pos(), tmpFirst, ir.Nodes{tmpFirstSetFalse}, ir.Nodes{x.Post})
- ifTmpFirst.SetTypecheck(1)
- preBody.Append(ifTmpFirst)
+ ifTmpFirst.PtrInit().Append(typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, tmpFirst))) // declares tmpFirst
+ preBody.Append(typecheck.Stmt(ifTmpFirst))
}
// body' = prebody +
@@ -496,8 +487,6 @@ func rewriteNodes(fn *ir.Func, editNodes func(c ir.Nodes) ir.Nodes) {
switch x := n.(type) {
case *ir.Func:
x.Body = editNodes(x.Body)
- x.Enter = editNodes(x.Enter)
- x.Exit = editNodes(x.Exit)
case *ir.InlinedCallExpr:
x.Body = editNodes(x.Body)
@@ -605,7 +594,7 @@ func LogTransformations(transformed []VarAndLoop) {
// Intended to help with performance debugging, we record whole loop ranges
logopt.LogOptRange(pos, last, "loop-modified-"+loopKind, "loopvar", ir.FuncName(l.curfn))
}
- if print && 3 <= base.Debug.LoopVar {
+ if print && 4 <= base.Debug.LoopVar {
// TODO decide if we want to keep this, or not. It was helpful for validating logopt, otherwise, eh.
inner := base.Ctxt.InnermostPos(pos)
outer := base.Ctxt.OutermostPos(pos)
diff --git a/src/cmd/compile/internal/loopvar/loopvar_test.go b/src/cmd/compile/internal/loopvar/loopvar_test.go
index 03e6eec437..64cfdb77d9 100644
--- a/src/cmd/compile/internal/loopvar/loopvar_test.go
+++ b/src/cmd/compile/internal/loopvar/loopvar_test.go
@@ -51,7 +51,7 @@ var cases = []testcase{
}
// TestLoopVar checks that the GOEXPERIMENT and debug flags behave as expected.
-func TestLoopVar(t *testing.T) {
+func TestLoopVarGo1_21(t *testing.T) {
switch runtime.GOOS {
case "linux", "darwin":
default:
@@ -71,7 +71,7 @@ func TestLoopVar(t *testing.T) {
for i, tc := range cases {
for _, f := range tc.files {
source := f
- cmd := testenv.Command(t, gocmd, "build", "-o", output, "-gcflags=-d=loopvar="+tc.lvFlag, source)
+ cmd := testenv.Command(t, gocmd, "build", "-o", output, "-gcflags=-lang=go1.21 -d=loopvar="+tc.lvFlag, source)
cmd.Env = append(cmd.Env, "GOEXPERIMENT=loopvar", "HOME="+tmpdir)
cmd.Dir = "testdata"
t.Logf("File %s loopvar=%s expect '%s' exit code %d", f, tc.lvFlag, tc.buildExpect, tc.expectRC)
@@ -103,7 +103,7 @@ func TestLoopVar(t *testing.T) {
}
}
-func TestLoopVarInlines(t *testing.T) {
+func TestLoopVarInlinesGo1_21(t *testing.T) {
switch runtime.GOOS {
case "linux", "darwin":
default:
@@ -125,7 +125,7 @@ func TestLoopVarInlines(t *testing.T) {
// This disables the loopvar change, except for the specified package.
// The effect should follow the package, even though everything (except "c")
// is inlined.
- cmd := testenv.Command(t, gocmd, "run", "-gcflags="+pkg+"=-d=loopvar=1", root)
+ cmd := testenv.Command(t, gocmd, "run", "-gcflags="+root+"/...=-lang=go1.21", "-gcflags="+pkg+"=-d=loopvar=1", root)
cmd.Env = append(cmd.Env, "GOEXPERIMENT=noloopvar", "HOME="+tmpdir)
cmd.Dir = filepath.Join("testdata", "inlines")
@@ -166,6 +166,7 @@ func countMatches(s, re string) int {
}
func TestLoopVarHashes(t *testing.T) {
+ // This behavior does not depend on Go version (1.21 or greater)
switch runtime.GOOS {
case "linux", "darwin":
default:
@@ -187,7 +188,7 @@ func TestLoopVarHashes(t *testing.T) {
// This disables the loopvar change, except for the specified hash pattern.
// -trimpath is necessary so we get the same answer no matter where the
// Go repository is checked out. This is not normally a concern since people
- // do not rely on the meaning of specific hashes.
+ // do not normally rely on the meaning of specific hashes.
cmd := testenv.Command(t, gocmd, "run", "-trimpath", root)
cmd.Env = append(cmd.Env, "GOCOMPILEDEBUG=loopvarhash="+hash, "HOME="+tmpdir)
cmd.Dir = filepath.Join("testdata", "inlines")
@@ -225,7 +226,8 @@ func TestLoopVarHashes(t *testing.T) {
}
}
-func TestLoopVarOpt(t *testing.T) {
+// TestLoopVarVersionEnableFlag checks for loopvar transformation enabled by command line flag (1.22).
+func TestLoopVarVersionEnableFlag(t *testing.T) {
switch runtime.GOOS {
case "linux", "darwin":
default:
@@ -240,7 +242,8 @@ func TestLoopVarOpt(t *testing.T) {
testenv.MustHaveGoBuild(t)
gocmd := testenv.GoToolPath(t)
- cmd := testenv.Command(t, gocmd, "run", "-gcflags=-d=loopvar=2", "opt.go")
+ // loopvar=3 logs info but does not change loopvarness
+ cmd := testenv.Command(t, gocmd, "run", "-gcflags=-lang=go1.22 -d=loopvar=3", "opt.go")
cmd.Dir = filepath.Join("testdata")
b, err := cmd.CombinedOutput()
@@ -248,7 +251,7 @@ func TestLoopVarOpt(t *testing.T) {
t.Logf(m)
- yCount := strings.Count(m, "opt.go:16:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt.go:30)")
+ yCount := strings.Count(m, "opt.go:16:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt.go:29)")
nCount := strings.Count(m, "shared")
if yCount != 1 {
@@ -260,5 +263,121 @@ func TestLoopVarOpt(t *testing.T) {
if err != nil {
t.Errorf("err=%v != nil", err)
}
+}
+
+// TestLoopVarVersionEnableGoBuild checks for loopvar transformation enabled by go:build version (1.22).
+func TestLoopVarVersionEnableGoBuild(t *testing.T) {
+ switch runtime.GOOS {
+ case "linux", "darwin":
+ default:
+ t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS)
+ }
+ switch runtime.GOARCH {
+ case "amd64", "arm64":
+ default:
+ t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH)
+ }
+
+ testenv.MustHaveGoBuild(t)
+ gocmd := testenv.GoToolPath(t)
+
+ // loopvar=3 logs info but does not change loopvarness
+ cmd := testenv.Command(t, gocmd, "run", "-gcflags=-lang=go1.21 -d=loopvar=3", "opt-122.go")
+ cmd.Dir = filepath.Join("testdata")
+
+ b, err := cmd.CombinedOutput()
+ m := string(b)
+
+ t.Logf(m)
+
+ yCount := strings.Count(m, "opt-122.go:18:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt-122.go:31)")
+ nCount := strings.Count(m, "shared")
+
+ if yCount != 1 {
+ t.Errorf("yCount=%d != 1", yCount)
+ }
+ if nCount > 0 {
+ t.Errorf("nCount=%d > 0", nCount)
+ }
+ if err != nil {
+ t.Errorf("err=%v != nil", err)
+ }
+}
+
+// TestLoopVarVersionDisableFlag checks for loopvar transformation DISABLED by command line version (1.21).
+func TestLoopVarVersionDisableFlag(t *testing.T) {
+ switch runtime.GOOS {
+ case "linux", "darwin":
+ default:
+ t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS)
+ }
+ switch runtime.GOARCH {
+ case "amd64", "arm64":
+ default:
+ t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH)
+ }
+ testenv.MustHaveGoBuild(t)
+ gocmd := testenv.GoToolPath(t)
+
+ // loopvar=3 logs info but does not change loopvarness
+ cmd := testenv.Command(t, gocmd, "run", "-gcflags=-lang=go1.21 -d=loopvar=3", "opt.go")
+ cmd.Dir = filepath.Join("testdata")
+
+ b, err := cmd.CombinedOutput()
+ m := string(b)
+
+ t.Logf(m) // expect error
+
+ yCount := strings.Count(m, "opt.go:16:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt.go:29)")
+ nCount := strings.Count(m, "shared")
+
+ if yCount != 0 {
+ t.Errorf("yCount=%d != 0", yCount)
+ }
+ if nCount > 0 {
+ t.Errorf("nCount=%d > 0", nCount)
+ }
+ if err == nil { // expect error
+ t.Errorf("err=%v == nil", err)
+ }
+}
+
+// TestLoopVarVersionDisableGoBuild checks for loopvar transformation DISABLED by go:build version (1.21).
+func TestLoopVarVersionDisableGoBuild(t *testing.T) {
+ switch runtime.GOOS {
+ case "linux", "darwin":
+ default:
+ t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS)
+ }
+ switch runtime.GOARCH {
+ case "amd64", "arm64":
+ default:
+ t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH)
+ }
+
+ testenv.MustHaveGoBuild(t)
+ gocmd := testenv.GoToolPath(t)
+
+ // loopvar=3 logs info but does not change loopvarness
+ cmd := testenv.Command(t, gocmd, "run", "-gcflags=-lang=go1.22 -d=loopvar=3", "opt-121.go")
+ cmd.Dir = filepath.Join("testdata")
+
+ b, err := cmd.CombinedOutput()
+ m := string(b)
+
+ t.Logf(m) // expect error
+
+ yCount := strings.Count(m, "opt-121.go:18:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt-121.go:31)")
+ nCount := strings.Count(m, "shared")
+
+ if yCount != 0 {
+ t.Errorf("yCount=%d != 0", yCount)
+ }
+ if nCount > 0 {
+ t.Errorf("nCount=%d > 0", nCount)
+ }
+ if err == nil { // expect error
+ t.Errorf("err=%v == nil", err)
+ }
}
diff --git a/src/cmd/compile/internal/loopvar/testdata/opt-121.go b/src/cmd/compile/internal/loopvar/testdata/opt-121.go
new file mode 100644
index 0000000000..4afb658fc8
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/opt-121.go
@@ -0,0 +1,43 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+var is []func() int
+
+func inline(j, k int) []*int {
+ var a []*int
+ for private := j; private < k; private++ {
+ a = append(a, &private)
+ }
+ return a
+}
+
+//go:noinline
+func notinline(j, k int) ([]*int, *int) {
+ for shared := j; shared < k; shared++ {
+ if shared == k/2 {
+ // want the call inlined, want "private" in that inline to be transformed,
+ // (believe it ends up on init node of the return).
+ // but do not want "shared" transformed,
+ return inline(j, k), &shared
+ }
+ }
+ return nil, &j
+}
+
+func main() {
+ a, p := notinline(2, 9)
+ fmt.Printf("a[0]=%d,*p=%d\n", *a[0], *p)
+ if *a[0] != 2 {
+ os.Exit(1)
+ }
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/opt-122.go b/src/cmd/compile/internal/loopvar/testdata/opt-122.go
new file mode 100644
index 0000000000..9dceab9175
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/opt-122.go
@@ -0,0 +1,43 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.22
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+var is []func() int
+
+func inline(j, k int) []*int {
+ var a []*int
+ for private := j; private < k; private++ {
+ a = append(a, &private)
+ }
+ return a
+}
+
+//go:noinline
+func notinline(j, k int) ([]*int, *int) {
+ for shared := j; shared < k; shared++ {
+ if shared == k/2 {
+ // want the call inlined, want "private" in that inline to be transformed,
+ // (believe it ends up on init node of the return).
+ // but do not want "shared" transformed,
+ return inline(j, k), &shared
+ }
+ }
+ return nil, &j
+}
+
+func main() {
+ a, p := notinline(2, 9)
+ fmt.Printf("a[0]=%d,*p=%d\n", *a[0], *p)
+ if *a[0] != 2 {
+ os.Exit(1)
+ }
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/opt.go b/src/cmd/compile/internal/loopvar/testdata/opt.go
index 1bcd73614d..82c8616bcd 100644
--- a/src/cmd/compile/internal/loopvar/testdata/opt.go
+++ b/src/cmd/compile/internal/loopvar/testdata/opt.go
@@ -17,7 +17,6 @@ func inline(j, k int) []*int {
a = append(a, &private)
}
return a
-
}
//go:noinline
diff --git a/src/cmd/compile/internal/mips/ggen.go b/src/cmd/compile/internal/mips/ggen.go
index a18440e7b3..e235ef9567 100644
--- a/src/cmd/compile/internal/mips/ggen.go
+++ b/src/cmd/compile/internal/mips/ggen.go
@@ -46,10 +46,6 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog
}
func ginsnop(pp *objw.Progs) *obj.Prog {
- p := pp.Prog(mips.ANOR)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = mips.REG_R0
- p.To.Type = obj.TYPE_REG
- p.To.Reg = mips.REG_R0
+ p := pp.Prog(mips.ANOOP)
return p
}
diff --git a/src/cmd/compile/internal/mips64/ggen.go b/src/cmd/compile/internal/mips64/ggen.go
index 37bb871958..5f3f3e64d9 100644
--- a/src/cmd/compile/internal/mips64/ggen.go
+++ b/src/cmd/compile/internal/mips64/ggen.go
@@ -50,10 +50,6 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog
}
func ginsnop(pp *objw.Progs) *obj.Prog {
- p := pp.Prog(mips.ANOR)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = mips.REG_R0
- p.To.Type = obj.TYPE_REG
- p.To.Reg = mips.REG_R0
+ p := pp.Prog(mips.ANOOP)
return p
}
diff --git a/src/cmd/compile/internal/noder/codes.go b/src/cmd/compile/internal/noder/codes.go
index c1ee8d15c5..8bdbfc9a88 100644
--- a/src/cmd/compile/internal/noder/codes.go
+++ b/src/cmd/compile/internal/noder/codes.go
@@ -55,10 +55,14 @@ const (
exprConvert
exprNew
exprMake
- exprNil
+ exprSizeof
+ exprAlignof
+ exprOffsetof
+ exprZero
exprFuncInst
exprRecv
exprReshape
+ exprRuntimeBuiltin // a reference to a runtime function from transformed syntax. Followed by string name, e.g., "panicrangeexit"
)
type codeAssign int
diff --git a/src/cmd/compile/internal/noder/decl.go b/src/cmd/compile/internal/noder/decl.go
deleted file mode 100644
index 8e23fcefa2..0000000000
--- a/src/cmd/compile/internal/noder/decl.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package noder
-
-import (
- "cmd/compile/internal/syntax"
- "cmd/compile/internal/types2"
-)
-
-// pkgNameOf returns the PkgName associated with the given ImportDecl.
-func pkgNameOf(info *types2.Info, decl *syntax.ImportDecl) *types2.PkgName {
- if name := decl.LocalPkgName; name != nil {
- return info.Defs[name].(*types2.PkgName)
- }
- return info.Implicits[decl].(*types2.PkgName)
-}
diff --git a/src/cmd/compile/internal/noder/expr.go b/src/cmd/compile/internal/noder/expr.go
deleted file mode 100644
index 51b0656385..0000000000
--- a/src/cmd/compile/internal/noder/expr.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package noder
-
-import (
- "fmt"
-
- "cmd/compile/internal/ir"
- "cmd/compile/internal/syntax"
-)
-
-func unpackListExpr(expr syntax.Expr) []syntax.Expr {
- switch expr := expr.(type) {
- case nil:
- return nil
- case *syntax.ListExpr:
- return expr.ElemList
- default:
- return []syntax.Expr{expr}
- }
-}
-
-// constExprOp returns an ir.Op that represents the outermost
-// operation of the given constant expression. It's intended for use
-// with ir.RawOrigExpr.
-func constExprOp(expr syntax.Expr) ir.Op {
- switch expr := expr.(type) {
- default:
- panic(fmt.Sprintf("%s: unexpected expression: %T", expr.Pos(), expr))
-
- case *syntax.BasicLit:
- return ir.OLITERAL
- case *syntax.Name, *syntax.SelectorExpr:
- return ir.ONAME
- case *syntax.CallExpr:
- return ir.OCALL
- case *syntax.Operation:
- if expr.Y == nil {
- return unOps[expr.Op]
- }
- return binOps[expr.Op]
- }
-}
-
-func unparen(expr syntax.Expr) syntax.Expr {
- for {
- paren, ok := expr.(*syntax.ParenExpr)
- if !ok {
- return expr
- }
- expr = paren.X
- }
-}
diff --git a/src/cmd/compile/internal/noder/helpers.go b/src/cmd/compile/internal/noder/helpers.go
index ff2d50fcc8..0bff71e658 100644
--- a/src/cmd/compile/internal/noder/helpers.go
+++ b/src/cmd/compile/internal/noder/helpers.go
@@ -7,7 +7,6 @@ package noder
import (
"go/constant"
- "cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/compile/internal/typecheck"
@@ -41,11 +40,6 @@ func typed(typ *types.Type, n ir.Node) ir.Node {
// Values
-func OrigConst(pos src.XPos, typ *types.Type, val constant.Value, op ir.Op, raw string) ir.Node {
- orig := ir.NewRawOrigExpr(pos, op, raw)
- return ir.NewConstExpr(val, typed(typ, orig))
-}
-
// FixValue returns val after converting and truncating it as
// appropriate for typ.
func FixValue(typ *types.Type, val constant.Value) constant.Value {
@@ -59,16 +53,12 @@ func FixValue(typ *types.Type, val constant.Value) constant.Value {
val = constant.ToComplex(val)
}
if !typ.IsUntyped() {
- val = typecheck.DefaultLit(ir.NewBasicLit(src.NoXPos, val), typ).Val()
+ val = typecheck.ConvertVal(val, typ, false)
}
ir.AssertValidTypeForConst(typ, val)
return val
}
-func Nil(pos src.XPos, typ *types.Type) ir.Node {
- return typed(typ, ir.NewNilExpr(pos))
-}
-
// Expressions
func Addr(pos src.XPos, x ir.Node) *ir.AddrExpr {
@@ -77,165 +67,40 @@ func Addr(pos src.XPos, x ir.Node) *ir.AddrExpr {
return n
}
-func Assert(pos src.XPos, x ir.Node, typ *types.Type) ir.Node {
- return typed(typ, ir.NewTypeAssertExpr(pos, x, nil))
-}
-
-func Binary(pos src.XPos, op ir.Op, typ *types.Type, x, y ir.Node) *ir.BinaryExpr {
- switch op {
- case ir.OADD:
- n := ir.NewBinaryExpr(pos, op, x, y)
- typed(typ, n)
- return n
- default:
- n := ir.NewBinaryExpr(pos, op, x, y)
- typed(x.Type(), n)
- return n
- }
-}
-
-func Compare(pos src.XPos, typ *types.Type, op ir.Op, x, y ir.Node) *ir.BinaryExpr {
- n := ir.NewBinaryExpr(pos, op, x, y)
- typed(typ, n)
- return n
-}
-
func Deref(pos src.XPos, typ *types.Type, x ir.Node) *ir.StarExpr {
n := ir.NewStarExpr(pos, x)
typed(typ, n)
return n
}
-func DotField(pos src.XPos, x ir.Node, index int) *ir.SelectorExpr {
- op, typ := ir.ODOT, x.Type()
- if typ.IsPtr() {
- op, typ = ir.ODOTPTR, typ.Elem()
- }
- if !typ.IsStruct() {
- base.FatalfAt(pos, "DotField of non-struct: %L", x)
- }
-
- // TODO(mdempsky): This is the backend's responsibility.
- types.CalcSize(typ)
-
- field := typ.Field(index)
- return dot(pos, field.Type, op, x, field)
-}
-
-func DotMethod(pos src.XPos, x ir.Node, index int) *ir.SelectorExpr {
- method := method(x.Type(), index)
-
- // Method value.
- typ := typecheck.NewMethodType(method.Type, nil)
- return dot(pos, typ, ir.OMETHVALUE, x, method)
-}
-
-// MethodExpr returns a OMETHEXPR node with the indicated index into the methods
-// of typ. The receiver type is set from recv, which is different from typ if the
-// method was accessed via embedded fields. Similarly, the X value of the
-// ir.SelectorExpr is recv, the original OTYPE node before passing through the
-// embedded fields.
-func MethodExpr(pos src.XPos, recv ir.Node, embed *types.Type, index int) *ir.SelectorExpr {
- method := method(embed, index)
- typ := typecheck.NewMethodType(method.Type, recv.Type())
- // The method expression T.m requires a wrapper when T
- // is different from m's declared receiver type. We
- // normally generate these wrappers while writing out
- // runtime type descriptors, which is always done for
- // types declared at package scope. However, we need
- // to make sure to generate wrappers for anonymous
- // receiver types too.
- if recv.Sym() == nil {
- typecheck.NeedRuntimeType(recv.Type())
- }
- return dot(pos, typ, ir.OMETHEXPR, recv, method)
-}
-
-func dot(pos src.XPos, typ *types.Type, op ir.Op, x ir.Node, selection *types.Field) *ir.SelectorExpr {
- n := ir.NewSelectorExpr(pos, op, x, selection.Sym)
- n.Selection = selection
- typed(typ, n)
- return n
-}
-
-// TODO(mdempsky): Move to package types.
-func method(typ *types.Type, index int) *types.Field {
- if typ.IsInterface() {
- return typ.AllMethods().Index(index)
- }
- return types.ReceiverBaseType(typ).Methods().Index(index)
-}
-
-func Index(pos src.XPos, typ *types.Type, x, index ir.Node) *ir.IndexExpr {
- n := ir.NewIndexExpr(pos, x, index)
- typed(typ, n)
- return n
-}
-
-func Slice(pos src.XPos, typ *types.Type, x, low, high, max ir.Node) *ir.SliceExpr {
- op := ir.OSLICE
- if max != nil {
- op = ir.OSLICE3
- }
- n := ir.NewSliceExpr(pos, op, x, low, high, max)
- typed(typ, n)
- return n
-}
-
-func Unary(pos src.XPos, typ *types.Type, op ir.Op, x ir.Node) ir.Node {
- switch op {
- case ir.OADDR:
- return Addr(pos, x)
- case ir.ODEREF:
- return Deref(pos, typ, x)
- }
-
- if op == ir.ORECV {
- if typ.IsFuncArgStruct() && typ.NumFields() == 2 {
- // Remove the second boolean type (if provided by type2),
- // since that works better with the rest of the compiler
- // (which will add it back in later).
- assert(typ.Field(1).Type.Kind() == types.TBOOL)
- typ = typ.Field(0).Type
- }
- }
- return typed(typ, ir.NewUnaryExpr(pos, op, x))
-}
-
// Statements
-var one = constant.MakeInt64(1)
-
-func IncDec(pos src.XPos, op ir.Op, x ir.Node) *ir.AssignOpStmt {
- assert(x.Type() != nil)
- bl := ir.NewBasicLit(pos, one)
- bl = typecheck.DefaultLit(bl, x.Type())
- return ir.NewAssignOpStmt(pos, op, x, bl)
-}
-
func idealType(tv syntax.TypeAndValue) types2.Type {
// The gc backend expects all expressions to have a concrete type, and
// types2 mostly satisfies this expectation already. But there are a few
// cases where the Go spec doesn't require converting to concrete type,
// and so types2 leaves them untyped. So we need to fix those up here.
- typ := tv.Type
+ typ := types2.Unalias(tv.Type)
if basic, ok := typ.(*types2.Basic); ok && basic.Info()&types2.IsUntyped != 0 {
switch basic.Kind() {
case types2.UntypedNil:
// ok; can appear in type switch case clauses
// TODO(mdempsky): Handle as part of type switches instead?
case types2.UntypedInt, types2.UntypedFloat, types2.UntypedComplex:
- // Untyped rhs of non-constant shift, e.g. x << 1.0.
- // If we have a constant value, it must be an int >= 0.
+ typ = types2.Typ[types2.Uint]
if tv.Value != nil {
s := constant.ToInt(tv.Value)
- assert(s.Kind() == constant.Int && constant.Sign(s) >= 0)
+ assert(s.Kind() == constant.Int)
+ if constant.Sign(s) < 0 {
+ typ = types2.Typ[types2.Int]
+ }
}
- typ = types2.Typ[types2.Uint]
case types2.UntypedBool:
typ = types2.Typ[types2.Bool] // expression in "if" or "for" condition
case types2.UntypedString:
typ = types2.Typ[types2.String] // argument to "append" or "copy" calls
+ case types2.UntypedRune:
+ typ = types2.Typ[types2.Int32] // range over rune
default:
return nil
}
@@ -244,13 +109,14 @@ func idealType(tv syntax.TypeAndValue) types2.Type {
}
func isTypeParam(t types2.Type) bool {
- _, ok := t.(*types2.TypeParam)
+ _, ok := types2.Unalias(t).(*types2.TypeParam)
return ok
}
// isNotInHeap reports whether typ is or contains an element of type
// runtime/internal/sys.NotInHeap.
func isNotInHeap(typ types2.Type) bool {
+ typ = types2.Unalias(typ)
if named, ok := typ.(*types2.Named); ok {
if obj := named.Obj(); obj.Name() == "nih" && obj.Pkg().Path() == "runtime/internal/sys" {
return true
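A small example of the shift-count rule that idealType handles above: the right operand of a shift may be an untyped constant as long as it is representable as an integer shift count, so a value like 1.0 is accepted and defaults to an integer type. The rejected cases are shown commented out; exact compiler error wording may differ by version.

	package main

	import "fmt"

	func main() {
		x := 2
		fmt.Println(x << 1.0) // untyped float constant 1.0 is a valid shift count: prints 4
		const s = 3
		fmt.Println(x << s) // untyped int constant: prints 16
		// fmt.Println(x << -1)  // rejected: negative shift count
		// fmt.Println(x << 1.5) // rejected: 1.5 is not representable as an integer
	}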
diff --git a/src/cmd/compile/internal/noder/import.go b/src/cmd/compile/internal/noder/import.go
index b7008ac5e8..e9bb1e313b 100644
--- a/src/cmd/compile/internal/noder/import.go
+++ b/src/cmd/compile/internal/noder/import.go
@@ -133,7 +133,10 @@ func resolveImportPath(path string) (string, error) {
return "", errors.New("cannot import \"main\"")
}
- if base.Ctxt.Pkgpath != "" && path == base.Ctxt.Pkgpath {
+ if base.Ctxt.Pkgpath == "" {
+ panic("missing pkgpath")
+ }
+ if path == base.Ctxt.Pkgpath {
return "", fmt.Errorf("import %q while compiling that package (import cycle)", path)
}
diff --git a/src/cmd/compile/internal/noder/irgen.go b/src/cmd/compile/internal/noder/irgen.go
index df5de63620..e0b7bb946d 100644
--- a/src/cmd/compile/internal/noder/irgen.go
+++ b/src/cmd/compile/internal/noder/irgen.go
@@ -6,11 +6,13 @@ package noder
import (
"fmt"
+ "internal/buildcfg"
"internal/types/errors"
"regexp"
"sort"
"cmd/compile/internal/base"
+ "cmd/compile/internal/rangefunc"
"cmd/compile/internal/syntax"
"cmd/compile/internal/types2"
"cmd/internal/src"
@@ -27,8 +29,12 @@ func checkFiles(m posMap, noders []*noder) (*types2.Package, *types2.Info) {
// setup and syntax error reporting
files := make([]*syntax.File, len(noders))
+ // posBaseMap maps all file pos bases back to *syntax.File
+ // for checking Go version mismatches.
+ posBaseMap := make(map[*syntax.PosBase]*syntax.File)
for i, p := range noders {
files[i] = p.file
+ posBaseMap[p.file.Pos().Base()] = p.file
}
// typechecking
@@ -41,17 +47,8 @@ func checkFiles(m posMap, noders []*noder) (*types2.Package, *types2.Info) {
Context: ctxt,
GoVersion: base.Flag.Lang,
IgnoreBranchErrors: true, // parser already checked via syntax.CheckBranches mode
- Error: func(err error) {
- terr := err.(types2.Error)
- msg := terr.Msg
- // if we have a version error, hint at the -lang setting
- if versionErrorRx.MatchString(msg) {
- msg = fmt.Sprintf("%s (-lang was set to %s; check go.mod)", msg, base.Flag.Lang)
- }
- base.ErrorfAt(m.makeXPos(terr.Pos), terr.Code, "%s", msg)
- },
- Importer: &importer,
- Sizes: &gcSizes{},
+ Importer: &importer,
+ Sizes: types2.SizesFor("gc", buildcfg.GOARCH),
}
if base.Flag.ErrorURL {
conf.ErrorURL = " [go.dev/e/%s]"
@@ -64,30 +61,55 @@ func checkFiles(m posMap, noders []*noder) (*types2.Package, *types2.Info) {
Implicits: make(map[syntax.Node]types2.Object),
Scopes: make(map[syntax.Node]*types2.Scope),
Instances: make(map[*syntax.Name]types2.Instance),
+ FileVersions: make(map[*syntax.PosBase]string),
// expand as needed
}
+ conf.Error = func(err error) {
+ terr := err.(types2.Error)
+ msg := terr.Msg
+ if versionErrorRx.MatchString(msg) {
+ posBase := terr.Pos.Base()
+ for !posBase.IsFileBase() { // line directive base
+ posBase = posBase.Pos().Base()
+ }
+ fileVersion := info.FileVersions[posBase]
+ file := posBaseMap[posBase]
+ if file.GoVersion == fileVersion {
+ // If we have a version error caused by //go:build, report it.
+ msg = fmt.Sprintf("%s (file declares //go:build %s)", msg, fileVersion)
+ } else {
+ // Otherwise, hint at the -lang setting.
+ msg = fmt.Sprintf("%s (-lang was set to %s; check go.mod)", msg, base.Flag.Lang)
+ }
+ }
+ base.ErrorfAt(m.makeXPos(terr.Pos), terr.Code, "%s", msg)
+ }
pkg, err := conf.Check(base.Ctxt.Pkgpath, files, info)
+ base.ExitIfErrors()
+ if err != nil {
+ base.FatalfAt(src.NoXPos, "conf.Check error: %v", err)
+ }
// Check for anonymous interface cycles (#56103).
- if base.Debug.InterfaceCycles == 0 {
- var f cycleFinder
- for _, file := range files {
- syntax.Inspect(file, func(n syntax.Node) bool {
- if n, ok := n.(*syntax.InterfaceType); ok {
- if f.hasCycle(n.GetTypeInfo().Type.(*types2.Interface)) {
- base.ErrorfAt(m.makeXPos(n.Pos()), errors.InvalidTypeCycle, "invalid recursive type: anonymous interface refers to itself (see https://go.dev/issue/56103)")
-
- for typ := range f.cyclic {
- f.cyclic[typ] = false // suppress duplicate errors
- }
+ // TODO(gri) move this code into the type checkers (types2 and go/types)
+ var f cycleFinder
+ for _, file := range files {
+ syntax.Inspect(file, func(n syntax.Node) bool {
+ if n, ok := n.(*syntax.InterfaceType); ok {
+ if f.hasCycle(types2.Unalias(n.GetTypeInfo().Type).(*types2.Interface)) {
+ base.ErrorfAt(m.makeXPos(n.Pos()), errors.InvalidTypeCycle, "invalid recursive type: anonymous interface refers to itself (see https://go.dev/issue/56103)")
+
+ for typ := range f.cyclic {
+ f.cyclic[typ] = false // suppress duplicate errors
}
- return false
}
- return true
- })
- }
+ return false
+ }
+ return true
+ })
}
+ base.ExitIfErrors()
// Implementation restriction: we don't allow not-in-heap types to
// be used as type arguments (#54765).
@@ -113,11 +135,16 @@ func checkFiles(m posMap, noders []*noder) (*types2.Package, *types2.Info) {
base.ErrorfAt(targ.pos, 0, "cannot use incomplete (or unallocatable) type as a type argument: %v", targ.typ)
}
}
-
base.ExitIfErrors()
- if err != nil {
- base.FatalfAt(src.NoXPos, "conf.Check error: %v", err)
- }
+
+ // Rewrite range over function to explicit function calls
+ // with the loop bodies converted into new implicit closures.
+ // We do this now, before serialization to unified IR, so that if the
+ // implicit closures are inlined, we will have the unified IR form.
+ // If we do the rewrite in the back end, like between typecheck and walk,
+ // then the new implicit closure will not have a unified IR inline body,
+ // and bodyReaderFor will fail.
+ rangefunc.Rewrite(pkg, info, files)
return pkg, info
}
@@ -144,7 +171,7 @@ func (f *cycleFinder) hasCycle(typ *types2.Interface) bool {
// visit recursively walks typ0 to check any referenced interface types.
func (f *cycleFinder) visit(typ0 types2.Type) bool {
for { // loop for tail recursion
- switch typ := typ0.(type) {
+ switch typ := types2.Unalias(typ0).(type) {
default:
base.Fatalf("unexpected type: %T", typ)
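A minimal sketch of the range-over-func form that rangefunc.Rewrite lowers (experimental behind GOEXPERIMENT=rangefunc at the time of this change, enabled by default in later releases): the loop body becomes an implicit closure passed as the yield argument, roughly seq(func(v int) bool { body; return true }), with break and return mapped onto paths where yield reports false.

	package main

	import "fmt"

	// ints returns a function iterator that yields 0..n-1 until yield reports false.
	func ints(n int) func(yield func(int) bool) {
		return func(yield func(int) bool) {
			for i := 0; i < n; i++ {
				if !yield(i) {
					return
				}
			}
		}
	}

	func main() {
		for v := range ints(3) { // rewritten into a call to ints(3) with the body as the yield closure
			fmt.Println(v)
		}
	}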
diff --git a/src/cmd/compile/internal/noder/linker.go b/src/cmd/compile/internal/noder/linker.go
index 0efe6b630b..f5667f57ab 100644
--- a/src/cmd/compile/internal/noder/linker.go
+++ b/src/cmd/compile/internal/noder/linker.go
@@ -201,7 +201,7 @@ func (l *linker) relocObj(pr *pkgReader, idx pkgbits.Index) pkgbits.Index {
if obj.Op() == ir.OTYPE && !obj.Alias() {
if typ := obj.Type(); !typ.IsInterface() {
- for _, method := range typ.Methods().Slice() {
+ for _, method := range typ.Methods() {
l.exportBody(method.Nname.(*ir.Name), local)
}
}
@@ -233,7 +233,7 @@ func (l *linker) exportBody(obj *ir.Name, local bool) {
//
// TODO(mdempsky): Reimplement the reachable method crawling logic
// from typecheck/crawler.go.
- exportBody := local || fn.Inl.Body != nil
+ exportBody := local || fn.Inl.HaveDcl
if !exportBody {
return
}
@@ -289,15 +289,16 @@ func (l *linker) relocFuncExt(w *pkgbits.Encoder, name *ir.Name) {
w.Uint64(uint64(name.Func.ABI))
// Escape analysis.
- for _, fs := range &types.RecvsParams {
- for _, f := range fs(name.Type()).FieldSlice() {
- w.String(f.Note)
- }
+ for _, f := range name.Type().RecvParams() {
+ w.String(f.Note)
}
if inl := name.Func.Inl; w.Bool(inl != nil) {
w.Len(int(inl.Cost))
w.Bool(inl.CanDelayResults)
+ if buildcfg.Experiment.NewInliner {
+ w.String(inl.Properties)
+ }
}
w.Sync(pkgbits.SyncEOF)
@@ -315,7 +316,7 @@ func (l *linker) relocTypeExt(w *pkgbits.Encoder, name *ir.Name) {
l.lsymIdx(w, "", reflectdata.TypeLinksym(typ.PtrTo()))
if typ.Kind() != types.TINTER {
- for _, method := range typ.Methods().Slice() {
+ for _, method := range typ.Methods() {
l.relocFuncExt(w, method.Nname.(*ir.Name))
}
}
diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go
index 94071581fe..1652dc6618 100644
--- a/src/cmd/compile/internal/noder/noder.go
+++ b/src/cmd/compile/internal/noder/noder.go
@@ -265,8 +265,7 @@ func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.P
// user didn't provide one.
target = objabi.PathToPrefix(base.Ctxt.Pkgpath) + "." + f[1]
} else {
- p.error(syntax.Error{Pos: pos, Msg: "//go:linkname requires linkname argument or -p compiler flag"})
- break
+ panic("missing pkgpath")
}
p.linknames = append(p.linknames, linkname{pos, f[1], target})
diff --git a/src/cmd/compile/internal/noder/quirks.go b/src/cmd/compile/internal/noder/quirks.go
index a22577f965..dd9cec9250 100644
--- a/src/cmd/compile/internal/noder/quirks.go
+++ b/src/cmd/compile/internal/noder/quirks.go
@@ -62,7 +62,7 @@ func typeExprEndPos(expr0 syntax.Expr) syntax.Pos {
}
case *syntax.IndexExpr: // explicit type instantiation
- targs := unpackListExpr(expr.Index)
+ targs := syntax.UnpackListExpr(expr.Index)
expr0 = targs[len(targs)-1]
default:
diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go
index 610d02c07c..2dddd20165 100644
--- a/src/cmd/compile/internal/noder/reader.go
+++ b/src/cmd/compile/internal/noder/reader.go
@@ -5,6 +5,7 @@
package noder
import (
+ "encoding/hex"
"fmt"
"go/constant"
"internal/buildcfg"
@@ -13,15 +14,16 @@ import (
"strings"
"cmd/compile/internal/base"
- "cmd/compile/internal/deadcode"
"cmd/compile/internal/dwarfgen"
"cmd/compile/internal/inline"
+ "cmd/compile/internal/inline/interleaved"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/staticinit"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
+ "cmd/internal/notsha256"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
@@ -107,6 +109,11 @@ type reader struct {
locals []*ir.Name
closureVars []*ir.Name
+ // funarghack is used during inlining to suppress setting
+ // Field.Nname to the inlined copies of the parameters. This is
+ // necessary because we reuse the same types.Type as the original
+ // function, and most of the compiler still relies on field.Nname to
+ // find parameters/results.
funarghack bool
// methodSym is the name of method's name, if reading a method.
@@ -145,14 +152,6 @@ type reader struct {
// Label to return to.
retlabel *types.Sym
-
- // inlvars is the list of variables that the inlinee's arguments are
- // assigned to, one for each receiver and normal parameter, in order.
- inlvars ir.Nodes
-
- // retvars is the list of variables that the inlinee's results are
- // assigned to, one for each result parameter, in order.
- retvars ir.Nodes
}
// A readerDict represents an instantiated "compile-time dictionary,"
@@ -572,10 +571,7 @@ func (r *reader) interfaceType() *types.Type {
methods, embeddeds := fields[:nmethods], fields[nmethods:]
for i := range methods {
- pos := r.pos()
- _, sym := r.selector()
- mtyp := r.signature(types.FakeRecv())
- methods[i] = types.NewField(pos, sym, mtyp)
+ methods[i] = types.NewField(r.pos(), r.selector(), r.signature(types.FakeRecv()))
}
for i := range embeddeds {
embeddeds[i] = types.NewField(src.NoXPos, nil, r.typ())
@@ -590,18 +586,12 @@ func (r *reader) interfaceType() *types.Type {
func (r *reader) structType() *types.Type {
fields := make([]*types.Field, r.Len())
for i := range fields {
- pos := r.pos()
- _, sym := r.selector()
- ftyp := r.typ()
- tag := r.String()
- embedded := r.Bool()
-
- f := types.NewField(pos, sym, ftyp)
- f.Note = tag
- if embedded {
- f.Embedded = 1
+ field := types.NewField(r.pos(), r.selector(), r.typ())
+ field.Note = r.String()
+ if r.Bool() {
+ field.Embedded = 1
}
- fields[i] = f
+ fields[i] = field
}
return types.NewStruct(fields)
}
@@ -620,21 +610,16 @@ func (r *reader) signature(recv *types.Field) *types.Type {
func (r *reader) params() []*types.Field {
r.Sync(pkgbits.SyncParams)
- fields := make([]*types.Field, r.Len())
- for i := range fields {
- _, fields[i] = r.param()
+ params := make([]*types.Field, r.Len())
+ for i := range params {
+ params[i] = r.param()
}
- return fields
+ return params
}
-func (r *reader) param() (*types.Pkg, *types.Field) {
+func (r *reader) param() *types.Field {
r.Sync(pkgbits.SyncParam)
-
- pos := r.pos()
- pkg, sym := r.localIdent()
- typ := r.typ()
-
- return pkg, types.NewField(pos, sym, typ)
+ return types.NewField(r.pos(), r.localIdent(), r.typ())
}
// @@@ Objects
@@ -678,9 +663,24 @@ func (pr *pkgReader) objInstIdx(info objInfo, dict *readerDict, shaped bool) ir.
}
// objIdx returns the specified object, instantiated with the given
-// type arguments, if any. If shaped is true, then the shaped variant
-// of the object is returned instead.
+// type arguments, if any.
+// If shaped is true, then the shaped variant of the object is returned
+// instead.
func (pr *pkgReader) objIdx(idx pkgbits.Index, implicits, explicits []*types.Type, shaped bool) ir.Node {
+ n, err := pr.objIdxMayFail(idx, implicits, explicits, shaped)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ return n
+}
+
+// objIdxMayFail is equivalent to objIdx, but returns an error rather than
+// failing the build if this object requires type arguments and the incorrect
+// number of type arguments were passed.
+//
+// Other sources of internal failure (such as duplicate definitions) still fail
+// the build.
+func (pr *pkgReader) objIdxMayFail(idx pkgbits.Index, implicits, explicits []*types.Type, shaped bool) (ir.Node, error) {
rname := pr.newReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
_, sym := rname.qualifiedIdent()
tag := pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj))
@@ -689,19 +689,25 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index, implicits, explicits []*types.Typ
assert(!sym.IsBlank())
switch sym.Pkg {
case types.BuiltinPkg, types.UnsafePkg:
- return sym.Def.(ir.Node)
+ return sym.Def.(ir.Node), nil
}
if pri, ok := objReader[sym]; ok {
- return pri.pr.objIdx(pri.idx, nil, explicits, shaped)
+ return pri.pr.objIdxMayFail(pri.idx, nil, explicits, shaped)
+ }
+ if sym.Pkg.Path == "runtime" {
+ return typecheck.LookupRuntime(sym.Name), nil
}
base.Fatalf("unresolved stub: %v", sym)
}
- dict := pr.objDictIdx(sym, idx, implicits, explicits, shaped)
+ dict, err := pr.objDictIdx(sym, idx, implicits, explicits, shaped)
+ if err != nil {
+ return nil, err
+ }
sym = dict.baseSym
if !sym.IsBlank() && sym.Def != nil {
- return sym.Def.(*ir.Name)
+ return sym.Def.(*ir.Name), nil
}
r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1)
@@ -737,7 +743,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index, implicits, explicits []*types.Typ
name := do(ir.OTYPE, false)
setType(name, r.typ())
name.SetAlias(true)
- return name
+ return name, nil
case pkgbits.ObjConst:
name := do(ir.OLITERAL, false)
@@ -745,17 +751,28 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index, implicits, explicits []*types.Typ
val := FixValue(typ, r.Value())
setType(name, typ)
setValue(name, val)
- return name
+ return name, nil
case pkgbits.ObjFunc:
if sym.Name == "init" {
sym = Renameinit()
}
- name := do(ir.ONAME, true)
- setType(name, r.signature(nil))
- name.Func = ir.NewFunc(r.pos())
- name.Func.Nname = name
+ npos := r.pos()
+ setBasePos(npos)
+ r.typeParamNames()
+ typ := r.signature(nil)
+ fpos := r.pos()
+
+ fn := ir.NewFunc(fpos, npos, sym, typ)
+ name := fn.Nname
+ if !sym.IsBlank() {
+ if sym.Def != nil {
+ base.FatalfAt(name.Pos(), "already have a definition for %v", name)
+ }
+ assert(sym.Def == nil)
+ sym.Def = name
+ }
if r.hasTypeParams() {
name.Func.SetDupok(true)
@@ -769,7 +786,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index, implicits, explicits []*types.Typ
}
rext.funcExt(name, nil)
- return name
+ return name, nil
case pkgbits.ObjType:
name := do(ir.OTYPE, true)
@@ -799,20 +816,20 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index, implicits, explicits []*types.Typ
methods[i] = r.method(rext)
}
if len(methods) != 0 {
- typ.Methods().Set(methods)
+ typ.SetMethods(methods)
}
if !r.dict.shaped {
r.needWrapper(typ)
}
- return name
+ return name, nil
case pkgbits.ObjVar:
name := do(ir.ONAME, false)
setType(name, r.typ())
rext.varExt(name)
- return name
+ return name, nil
}
}
@@ -886,7 +903,16 @@ func shapify(targ *types.Type, basic bool) *types.Type {
under = types.NewPtr(types.Types[types.TUINT8])
}
- sym := types.ShapePkg.Lookup(under.LinkString())
+ // Hash long type names to bound symbol name length seen by users,
+ // particularly for large protobuf structs (#65030).
+ uls := under.LinkString()
+ if base.Debug.MaxShapeLen != 0 &&
+ len(uls) > base.Debug.MaxShapeLen {
+ h := notsha256.Sum256([]byte(uls))
+ uls = hex.EncodeToString(h[:])
+ }
+
+ sym := types.ShapePkg.Lookup(uls)
if sym.Def == nil {
name := ir.NewDeclNameAt(under.Pos(), ir.OTYPE, sym)
typ := types.NewNamed(name)
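A rough, standalone illustration of the name-bounding idea in shapify above; the compiler uses cmd/internal/notsha256 internally, while this sketch substitutes crypto/sha256 and a hypothetical maxLen threshold.

	package main

	import (
		"crypto/sha256"
		"encoding/hex"
		"fmt"
	)

	// boundName replaces a linker-visible name with a fixed-length hash
	// whenever it exceeds maxLen (0 disables the bounding).
	func boundName(name string, maxLen int) string {
		if maxLen != 0 && len(name) > maxLen {
			h := sha256.Sum256([]byte(name))
			return hex.EncodeToString(h[:])
		}
		return name
	}

	func main() {
		long := "struct { A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P int }"
		fmt.Println(boundName(long, 32)) // 64 hex characters instead of the long struct spelling
	}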
@@ -900,7 +926,7 @@ func shapify(targ *types.Type, basic bool) *types.Type {
}
// objDictIdx reads and returns the specified object dictionary.
-func (pr *pkgReader) objDictIdx(sym *types.Sym, idx pkgbits.Index, implicits, explicits []*types.Type, shaped bool) *readerDict {
+func (pr *pkgReader) objDictIdx(sym *types.Sym, idx pkgbits.Index, implicits, explicits []*types.Type, shaped bool) (*readerDict, error) {
r := pr.newReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
dict := readerDict{
@@ -911,7 +937,7 @@ func (pr *pkgReader) objDictIdx(sym *types.Sym, idx pkgbits.Index, implicits, ex
nexplicits := r.Len()
if nimplicits > len(implicits) || nexplicits != len(explicits) {
- base.Fatalf("%v has %v+%v params, but instantiated with %v+%v args", sym, nimplicits, nexplicits, len(implicits), len(explicits))
+ return nil, fmt.Errorf("%v has %v+%v params, but instantiated with %v+%v args", sym, nimplicits, nexplicits, len(implicits), len(explicits))
}
dict.targs = append(implicits[:nimplicits:nimplicits], explicits...)
@@ -956,7 +982,7 @@ func (pr *pkgReader) objDictIdx(sym *types.Sym, idx pkgbits.Index, implicits, ex
dict.typeParamMethodExprs = make([]readerMethodExprInfo, r.Len())
for i := range dict.typeParamMethodExprs {
typeParamIdx := r.Len()
- _, method := r.selector()
+ method := r.selector()
dict.typeParamMethodExprs[i] = readerMethodExprInfo{typeParamIdx, method}
}
@@ -976,7 +1002,7 @@ func (pr *pkgReader) objDictIdx(sym *types.Sym, idx pkgbits.Index, implicits, ex
dict.itabs[i] = itabInfo{typ: r.typInfo(), iface: r.typInfo()}
}
- return &dict
+ return &dict, nil
}
func (r *reader) typeParamNames() {
@@ -990,17 +1016,15 @@ func (r *reader) typeParamNames() {
func (r *reader) method(rext *reader) *types.Field {
r.Sync(pkgbits.SyncMethod)
- pos := r.pos()
- _, sym := r.selector()
+ npos := r.pos()
+ sym := r.selector()
r.typeParamNames()
- _, recv := r.param()
+ recv := r.param()
typ := r.signature(recv)
- name := ir.NewNameAt(pos, ir.MethodSym(recv.Type, sym))
- setType(name, typ)
-
- name.Func = ir.NewFunc(r.pos())
- name.Func.Nname = name
+ fpos := r.pos()
+ fn := ir.NewFunc(fpos, npos, ir.MethodSym(recv.Type, sym), typ)
+ name := fn.Nname
if r.hasTypeParams() {
name.Func.SetDupok(true)
@@ -1028,25 +1052,23 @@ func (r *reader) qualifiedIdent() (pkg *types.Pkg, sym *types.Sym) {
return
}
-func (r *reader) localIdent() (pkg *types.Pkg, sym *types.Sym) {
+func (r *reader) localIdent() *types.Sym {
r.Sync(pkgbits.SyncLocalIdent)
- pkg = r.pkg()
+ pkg := r.pkg()
if name := r.String(); name != "" {
- sym = pkg.Lookup(name)
+ return pkg.Lookup(name)
}
- return
+ return nil
}
-func (r *reader) selector() (origPkg *types.Pkg, sym *types.Sym) {
+func (r *reader) selector() *types.Sym {
r.Sync(pkgbits.SyncSelector)
- origPkg = r.pkg()
+ pkg := r.pkg()
name := r.String()
- pkg := origPkg
if types.IsExported(name) {
pkg = types.LocalPkg
}
- sym = pkg.Lookup(name)
- return
+ return pkg.Lookup(name)
}
func (r *reader) hasTypeParams() bool {
@@ -1062,9 +1084,6 @@ func (dict *readerDict) hasTypeParams() bool {
func (r *reader) funcExt(name *ir.Name, method *types.Sym) {
r.Sync(pkgbits.SyncFuncExt)
- name.Class = 0 // so MarkFunc doesn't complain
- ir.MarkFunc(name)
-
fn := name.Func
// XXX: Workaround because linker doesn't know how to copy Pos.
@@ -1098,18 +1117,14 @@ func (r *reader) funcExt(name *ir.Name, method *types.Sym) {
}
}
- typecheck.Func(fn)
-
if r.Bool() {
assert(name.Defn == nil)
fn.ABI = obj.ABI(r.Uint64())
// Escape analysis.
- for _, fs := range &types.RecvsParams {
- for _, f := range fs(name.Type()).FieldSlice() {
- f.Note = r.String()
- }
+ for _, f := range name.Type().RecvParams() {
+ f.Note = r.String()
}
if r.Bool() {
@@ -1117,6 +1132,9 @@ func (r *reader) funcExt(name *ir.Name, method *types.Sym) {
Cost: int32(r.Len()),
CanDelayResults: r.Bool(),
}
+ if buildcfg.Experiment.NewInliner {
+ fn.Inl.Properties = r.String()
+ }
}
} else {
r.addBody(name.Func, method)
@@ -1232,7 +1250,7 @@ func (r *reader) funcBody(fn *ir.Func) {
}
ir.WithFunc(fn, func() {
- r.funcargs(fn)
+ r.declareParams()
if r.syntheticBody(fn.Pos()) {
return
@@ -1289,7 +1307,7 @@ func (r *reader) callShaped(pos src.XPos) {
shapedFn = shapedMethodExpr(pos, shapedObj, r.methodSym)
}
- recvs, params := r.syntheticArgs(pos)
+ params := r.syntheticArgs()
// Construct the arguments list: receiver (if any), then runtime
// dictionary, and finally normal parameters.
@@ -1301,7 +1319,10 @@ func (r *reader) callShaped(pos src.XPos) {
// putting the dictionary parameter after that is the least invasive
// solution at the moment.
var args ir.Nodes
- args.Append(recvs...)
+ if r.methodSym != nil {
+ args.Append(params[0])
+ params = params[1:]
+ }
args.Append(typecheck.Expr(ir.NewAddrExpr(pos, r.p.dictNameOf(r.dict))))
args.Append(params...)
@@ -1310,51 +1331,9 @@ func (r *reader) callShaped(pos src.XPos) {
// syntheticArgs returns the recvs and params arguments passed to the
// current function.
-func (r *reader) syntheticArgs(pos src.XPos) (recvs, params ir.Nodes) {
+func (r *reader) syntheticArgs() ir.Nodes {
sig := r.curfn.Nname.Type()
-
- inlVarIdx := 0
- addParams := func(out *ir.Nodes, params []*types.Field) {
- for _, param := range params {
- var arg ir.Node
- if param.Nname != nil {
- name := param.Nname.(*ir.Name)
- if !ir.IsBlank(name) {
- if r.inlCall != nil {
- // During inlining, we want the respective inlvar where we
- // assigned the callee's arguments.
- arg = r.inlvars[inlVarIdx]
- } else {
- // Otherwise, we can use the parameter itself directly.
- base.AssertfAt(name.Curfn == r.curfn, name.Pos(), "%v has curfn %v, but want %v", name, name.Curfn, r.curfn)
- arg = name
- }
- }
- }
-
- // For anonymous and blank parameters, we don't have an *ir.Name
- // to use as the argument. However, since we know the shaped
- // function won't use the value either, we can just pass the
- // zero value. (Also unfortunately, we don't have an easy
- // zero-value IR node; so we use a default-initialized temporary
- // variable.)
- if arg == nil {
- tmp := typecheck.TempAt(pos, r.curfn, param.Type)
- r.curfn.Body.Append(
- typecheck.Stmt(ir.NewDecl(pos, ir.ODCL, tmp)),
- typecheck.Stmt(ir.NewAssignStmt(pos, tmp, nil)),
- )
- arg = tmp
- }
-
- out.Append(arg)
- inlVarIdx++
- }
- }
-
- addParams(&recvs, sig.Recvs().FieldSlice())
- addParams(&params, sig.Params().FieldSlice())
- return
+ return ir.ToNodes(r.curfn.Dcl[:sig.NumRecvs()+sig.NumParams()])
}
// syntheticTailCall emits a tail call to fn, passing the given
@@ -1387,7 +1366,7 @@ func (pr *pkgReader) dictNameOf(dict *readerDict) *ir.Name {
return sym.Def.(*ir.Name)
}
- name := ir.NewNameAt(pos, sym)
+ name := ir.NewNameAt(pos, sym, dict.varType())
name.Class = ir.PEXTERN
sym.Def = name // break cycles with mutual subdictionaries
@@ -1401,8 +1380,7 @@ func (pr *pkgReader) dictNameOf(dict *readerDict) *ir.Name {
assertOffset("type param method exprs", dict.typeParamMethodExprsOffset())
for _, info := range dict.typeParamMethodExprs {
typeParam := dict.targs[info.typeParamIdx]
- method := typecheck.Expr(ir.NewSelectorExpr(pos, ir.OXDOT, ir.TypeNode(typeParam), info.method)).(*ir.SelectorExpr)
- assert(method.Op() == ir.OMETHEXPR)
+ method := typecheck.NewMethodExpr(pos, typeParam, info.method)
rsym := method.FuncName().Linksym()
assert(rsym.ABI() == obj.ABIInternal) // must be ABIInternal; see ir.OCFUNC in ssagen/ssa.go
@@ -1454,9 +1432,6 @@ func (pr *pkgReader) dictNameOf(dict *readerDict) *ir.Name {
objw.Global(lsym, int32(ot), obj.DUPOK|obj.RODATA)
- name.SetType(dict.varType())
- name.SetTypecheck(1)
-
return name
}
@@ -1495,104 +1470,32 @@ func (dict *readerDict) varType() *types.Type {
return types.NewArray(types.Types[types.TUINTPTR], dict.numWords())
}
-func (r *reader) funcargs(fn *ir.Func) {
- sig := fn.Nname.Type()
-
- if recv := sig.Recv(); recv != nil {
- r.funcarg(recv, recv.Sym, ir.PPARAM)
- }
- for _, param := range sig.Params().FieldSlice() {
- r.funcarg(param, param.Sym, ir.PPARAM)
- }
-
- for i, param := range sig.Results().FieldSlice() {
- sym := types.OrigSym(param.Sym)
+func (r *reader) declareParams() {
+ r.curfn.DeclareParams(!r.funarghack)
- if sym == nil || sym.IsBlank() {
- prefix := "~r"
- if r.inlCall != nil {
- prefix = "~R"
- } else if sym != nil {
- prefix = "~b"
- }
- sym = typecheck.LookupNum(prefix, i)
- }
-
- r.funcarg(param, sym, ir.PPARAMOUT)
- }
-}
-
-func (r *reader) funcarg(param *types.Field, sym *types.Sym, ctxt ir.Class) {
- if sym == nil {
- assert(ctxt == ir.PPARAM)
- if r.inlCall != nil {
- r.inlvars.Append(ir.BlankNode)
+ for _, name := range r.curfn.Dcl {
+ if name.Sym().Name == dictParamName {
+ r.dictParam = name
+ continue
}
- return
- }
- name := ir.NewNameAt(r.inlPos(param.Pos), sym)
- setType(name, param.Type)
- r.addLocal(name, ctxt)
-
- if r.inlCall == nil {
- if !r.funarghack {
- param.Sym = sym
- param.Nname = name
- }
- } else {
- if ctxt == ir.PPARAMOUT {
- r.retvars.Append(name)
- } else {
- r.inlvars.Append(name)
- }
+ r.addLocal(name)
}
}
-func (r *reader) addLocal(name *ir.Name, ctxt ir.Class) {
- assert(ctxt == ir.PAUTO || ctxt == ir.PPARAM || ctxt == ir.PPARAMOUT)
-
- if name.Sym().Name == dictParamName {
- r.dictParam = name
- } else {
- if r.synthetic == nil {
- r.Sync(pkgbits.SyncAddLocal)
- if r.p.SyncMarkers() {
- want := r.Int()
- if have := len(r.locals); have != want {
- base.FatalfAt(name.Pos(), "locals table has desynced")
- }
+func (r *reader) addLocal(name *ir.Name) {
+ if r.synthetic == nil {
+ r.Sync(pkgbits.SyncAddLocal)
+ if r.p.SyncMarkers() {
+ want := r.Int()
+ if have := len(r.locals); have != want {
+ base.FatalfAt(name.Pos(), "locals table has desynced")
}
- r.varDictIndex(name)
- }
-
- r.locals = append(r.locals, name)
- }
-
- name.SetUsed(true)
-
- // TODO(mdempsky): Move earlier.
- if ir.IsBlank(name) {
- return
- }
-
- if r.inlCall != nil {
- if ctxt == ir.PAUTO {
- name.SetInlLocal(true)
- } else {
- name.SetInlFormal(true)
- ctxt = ir.PAUTO
}
+ r.varDictIndex(name)
}
- name.Class = ctxt
- name.Curfn = r.curfn
-
- r.curfn.Dcl = append(r.curfn.Dcl, name)
-
- if ctxt == ir.PAUTO {
- name.SetFrameOffset(0)
- }
+ r.locals = append(r.locals, name)
}
func (r *reader) useLocal() *ir.Name {
@@ -1678,7 +1581,11 @@ func (r *reader) closeAnotherScope() {
// @@@ Statements
func (r *reader) stmt() ir.Node {
- switch stmts := r.stmts(); len(stmts) {
+ return block(r.stmts())
+}
+
+func block(stmts []ir.Node) ir.Node {
+ switch len(stmts) {
case 0:
return nil
case 1:
@@ -1688,7 +1595,7 @@ func (r *reader) stmt() ir.Node {
}
}
-func (r *reader) stmts() []ir.Node {
+func (r *reader) stmts() ir.Nodes {
assert(ir.CurFunc == r.curfn)
var res ir.Nodes
@@ -1753,7 +1660,7 @@ func (r *reader) stmt1(tag codeStmt, out *ir.Nodes) ir.Node {
op := r.op()
lhs := r.expr()
pos := r.pos()
- n := ir.NewAssignOpStmt(pos, op, lhs, ir.NewBasicLit(pos, one))
+ n := ir.NewAssignOpStmt(pos, op, lhs, ir.NewOne(pos, lhs.Type()))
n.IncDec = true
return n
@@ -1771,7 +1678,14 @@ func (r *reader) stmt1(tag codeStmt, out *ir.Nodes) ir.Node {
pos := r.pos()
op := r.op()
call := r.expr()
- return ir.NewGoDeferStmt(pos, op, call)
+ stmt := ir.NewGoDeferStmt(pos, op, call)
+ if op == ir.ODEFER {
+ x := r.optExpr()
+ if x != nil {
+ stmt.DeferAt = x.(ir.Expr)
+ }
+ }
+ return stmt
case stmtExpr:
return r.expr()
@@ -1833,13 +1747,9 @@ func (r *reader) assign() (ir.Node, bool) {
case assignDef:
pos := r.pos()
- setBasePos(pos)
- _, sym := r.localIdent()
- typ := r.typ()
-
- name := ir.NewNameAt(pos, sym)
- setType(name, typ)
- r.addLocal(name, ir.PAUTO)
+ setBasePos(pos) // test/fixedbugs/issue49767.go depends on base.Pos being set for the r.typ() call here, ugh
+ name := r.curfn.NewLocal(pos, r.localIdent(), r.typ())
+ r.addLocal(name)
return name, true
case assignExpr:
@@ -1897,10 +1807,14 @@ func (r *reader) forStmt(label *types.Sym) ir.Node {
cond := r.optExpr()
post := r.stmt()
body := r.blockStmt()
- dv := r.Bool()
+ perLoopVars := r.Bool()
r.closeAnotherScope()
- stmt := ir.NewForStmt(pos, init, cond, post, body, dv)
+ if ir.IsConst(cond, constant.Bool) && !ir.BoolVal(cond) {
+ return init // simplify "for init; false; post { ... }" into "init"
+ }
+
+ stmt := ir.NewForStmt(pos, init, cond, post, body, perLoopVars)
stmt.Label = label
return stmt
}
@@ -1911,11 +1825,33 @@ func (r *reader) ifStmt() ir.Node {
pos := r.pos()
init := r.stmts()
cond := r.expr()
- then := r.blockStmt()
- els := r.stmts()
+ staticCond := r.Int()
+ var then, els []ir.Node
+ if staticCond >= 0 {
+ then = r.blockStmt()
+ } else {
+ r.lastCloseScopePos = r.pos()
+ }
+ if staticCond <= 0 {
+ els = r.stmts()
+ }
+ r.closeAnotherScope()
+
+ if staticCond != 0 {
+ // We may have removed a dead return statement, which can trip up
+ // later passes (#62211). To avoid confusion, we instead flatten
+ // the if statement into a block.
+
+ if cond.Op() != ir.OLITERAL {
+ init.Append(typecheck.Stmt(ir.NewAssignStmt(pos, ir.BlankNode, cond))) // for side effects
+ }
+ init.Append(then...)
+ init.Append(els...)
+ return block(init)
+ }
+
n := ir.NewIfStmt(pos, cond, then, els)
n.SetInit(init)
- r.closeAnotherScope()
return n
}
@@ -1995,9 +1931,7 @@ func (r *reader) switchStmt(label *types.Sym) ir.Node {
if r.Bool() {
pos := r.pos()
if r.Bool() {
- pos := r.pos()
- _, sym := r.localIdent()
- ident = ir.NewIdent(pos, sym)
+ ident = ir.NewIdent(r.pos(), r.localIdent())
}
x := r.expr()
iface = x.Type()
@@ -2053,12 +1987,8 @@ func (r *reader) switchStmt(label *types.Sym) ir.Node {
clause.RTypes = rtypes
if ident != nil {
- pos := r.pos()
- typ := r.typ()
-
- name := ir.NewNameAt(pos, ident.Sym())
- setType(name, typ)
- r.addLocal(name, ir.PAUTO)
+ name := r.curfn.NewLocal(r.pos(), ident.Sym(), r.typ())
+ r.addLocal(name)
clause.Var = name
name.Defn = tag
}
@@ -2147,14 +2077,12 @@ func (r *reader) expr() (res ir.Node) {
pos := r.pos()
typ := r.typ()
val := FixValue(typ, r.Value())
- op := r.op()
- orig := r.String()
- return typecheck.Expr(OrigConst(pos, typ, val, op, orig))
+ return ir.NewBasicLit(pos, typ, val)
- case exprNil:
+ case exprZero:
pos := r.pos()
typ := r.typ()
- return Nil(pos, typ)
+ return ir.NewZero(pos, typ)
case exprCompLit:
return r.compLit()
@@ -2165,9 +2093,9 @@ func (r *reader) expr() (res ir.Node) {
case exprFieldVal:
x := r.expr()
pos := r.pos()
- _, sym := r.selector()
+ sym := r.selector()
- return typecheck.Expr(ir.NewSelectorExpr(pos, ir.OXDOT, x, sym)).(*ir.SelectorExpr)
+ return typecheck.XDotField(pos, x, sym)
case exprMethodVal:
recv := r.expr()
@@ -2195,14 +2123,14 @@ func (r *reader) expr() (res ir.Node) {
// interface method values).
//
if recv.Type().HasShape() {
- typ := wrapperFn.Type().Params().Field(0).Type
+ typ := wrapperFn.Type().Param(0).Type
if !types.Identical(typ, recv.Type()) {
base.FatalfAt(wrapperFn.Pos(), "receiver %L does not match %L", recv, wrapperFn)
}
recv = typecheck.Expr(ir.NewConvExpr(recv.Pos(), ir.OCONVNOP, typ, recv))
}
- n := typecheck.Expr(ir.NewSelectorExpr(pos, ir.OXDOT, recv, wrapperFn.Sel)).(*ir.SelectorExpr)
+ n := typecheck.XDotMethod(pos, recv, wrapperFn.Sel, false)
// As a consistency check here, we make sure "n" selected the
// same method (represented by a types.Field) that wrapperFn
@@ -2257,7 +2185,7 @@ func (r *reader) expr() (res ir.Node) {
// rather than types.Identical, because the latter can be confused
// by tricky promoted methods (e.g., typeparam/mdempsky/21.go).
if wrapperFn != nil && len(implicits) == 0 && !deref && !addr {
- if !types.Identical(recv, wrapperFn.Type().Params().Field(0).Type) {
+ if !types.Identical(recv, wrapperFn.Type().Param(0).Type) {
base.FatalfAt(pos, "want receiver type %v, but have method %L", recv, wrapperFn)
}
return wrapperFn
@@ -2267,7 +2195,7 @@ func (r *reader) expr() (res ir.Node) {
// expression (OMETHEXPR) and the receiver type is unshaped, then
// we can rely on a statically generated wrapper being available.
if method, ok := wrapperFn.(*ir.SelectorExpr); ok && method.Op() == ir.OMETHEXPR && !recv.HasShape() {
- return typecheck.Expr(ir.NewSelectorExpr(pos, ir.OXDOT, ir.TypeNode(recv), method.Sel)).(*ir.SelectorExpr)
+ return typecheck.NewMethodExpr(pos, recv, method.Sel)
}
return r.methodExprWrap(origPos, recv, implicits, deref, addr, baseFn, dictPtr)
@@ -2334,6 +2262,13 @@ func (r *reader) expr() (res ir.Node) {
switch op {
case ir.OANDAND, ir.OOROR:
return typecheck.Expr(ir.NewLogicalExpr(pos, op, x, y))
+ case ir.OLSH, ir.ORSH:
+ // Untyped rhs of non-constant shift, e.g. x << 1.0.
+ // If we have a constant value, it must be an int >= 0.
+ if ir.IsConstNode(y) {
+ val := constant.ToInt(y.Val())
+ assert(val.Kind() == constant.Int && constant.Sign(val) >= 0)
+ }
}
return typecheck.Expr(ir.NewBinaryExpr(pos, op, x, y))
@@ -2341,7 +2276,7 @@ func (r *reader) expr() (res ir.Node) {
x := r.expr()
pos := r.pos()
for i, n := 0, r.Len(); i < n; i++ {
- x = Implicit(DotField(pos, x, r.Len()))
+ x = Implicit(typecheck.DotField(pos, x, r.Len()))
}
if r.Bool() { // needs deref
x = Implicit(Deref(pos, x.Type().Elem(), x))
@@ -2368,7 +2303,7 @@ func (r *reader) expr() (res ir.Node) {
// There are also corner cases where semantically it's perhaps
// significant; e.g., fixedbugs/issue15975.go, #38634, #52025.
- fun = typecheck.Callee(ir.NewSelectorExpr(method.Pos(), ir.OXDOT, recv, method.Sel))
+ fun = typecheck.XDotMethod(method.Pos(), recv, method.Sel, true)
} else {
if recv.Type().IsInterface() {
// N.B., this happens currently for typeparam/issue51521.go
@@ -2432,6 +2367,26 @@ func (r *reader) expr() (res ir.Node) {
typ := r.exprType()
return typecheck.Expr(ir.NewUnaryExpr(pos, ir.ONEW, typ))
+ case exprSizeof:
+ return ir.NewUintptr(r.pos(), r.typ().Size())
+
+ case exprAlignof:
+ return ir.NewUintptr(r.pos(), r.typ().Alignment())
+
+ case exprOffsetof:
+ pos := r.pos()
+ typ := r.typ()
+ types.CalcSize(typ)
+
+ var offset int64
+ for i := r.Len(); i >= 0; i-- {
+ field := typ.Field(r.Len())
+ offset += field.Offset
+ typ = field.Type
+ }
+
+ return ir.NewUintptr(pos, offset)
+
case exprReshape:
typ := r.typ()
x := r.expr()
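A brief example of the offset summation that the exprOffsetof case performs over the selector's field path: unsafe.Offsetof of a field reached through embedded structs equals the sum of the offsets along that path.

	package main

	import (
		"fmt"
		"unsafe"
	)

	type Inner struct {
		A int32
		B int64
	}

	type Outer struct {
		X int64
		Inner
	}

	func main() {
		var o Outer
		// o.B is promoted through the embedded Inner; its offset relative to &o
		// is the offset of Inner within Outer plus the offset of B within Inner.
		fmt.Println(unsafe.Offsetof(o.B) ==
			unsafe.Offsetof(o.Inner)+unsafe.Offsetof(o.Inner.B)) // true
	}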
@@ -2516,6 +2471,10 @@ func (r *reader) expr() (res ir.Node) {
n.SetTypecheck(1)
}
return n
+
+ case exprRuntimeBuiltin:
+ builtin := typecheck.LookupRuntime(r.String())
+ return builtin
}
}
@@ -2557,7 +2516,7 @@ func (r *reader) funcInst(pos src.XPos) (wrapperFn, baseFn, dictPtr ir.Node) {
// TODO(mdempsky): Is there a more robust way to get the
// dictionary pointer type here?
- dictPtrType := baseFn.Type().Params().Field(0).Type
+ dictPtrType := baseFn.Type().Param(0).Type
dictPtr = typecheck.Expr(ir.NewConvExpr(pos, ir.OCONVNOP, dictPtrType, r.dictWord(pos, r.dict.subdictsOffset()+idx)))
return
@@ -2588,7 +2547,10 @@ func (pr *pkgReader) objDictName(idx pkgbits.Index, implicits, explicits []*type
base.Fatalf("unresolved stub: %v", sym)
}
- dict := pr.objDictIdx(sym, idx, implicits, explicits, false)
+ dict, err := pr.objDictIdx(sym, idx, implicits, explicits, false)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
return pr.dictNameOf(dict)
}
@@ -2612,14 +2574,11 @@ func (r *reader) curry(origPos src.XPos, ifaceHack bool, fun ir.Node, arg0, arg1
typ := types.NewSignature(nil, params, results)
addBody := func(pos src.XPos, r *reader, captured []ir.Node) {
- recvs, params := r.syntheticArgs(pos)
- assert(len(recvs) == 0)
-
fun := captured[0]
var args ir.Nodes
args.Append(captured[1:]...)
- args.Append(params...)
+ args.Append(r.syntheticArgs()...)
r.syntheticTailCall(pos, fun, args)
}
@@ -2650,16 +2609,14 @@ func (r *reader) methodExprWrap(origPos src.XPos, recv *types.Type, implicits []
typ := types.NewSignature(nil, params, results)
addBody := func(pos src.XPos, r *reader, captured []ir.Node) {
- recvs, args := r.syntheticArgs(pos)
- assert(len(recvs) == 0)
-
fn := captured[0]
+ args := r.syntheticArgs()
// Rewrite first argument based on implicits/deref/addr.
{
arg := args[0]
for _, ix := range implicits {
- arg = Implicit(DotField(pos, arg, ix))
+ arg = Implicit(typecheck.DotField(pos, arg, ix))
}
if deref {
arg = Implicit(Deref(pos, arg.Type().Elem(), arg))
@@ -2715,20 +2672,11 @@ func (r *reader) syntheticClosure(origPos src.XPos, typ *types.Type, ifaceHack b
return false
}
- // The ODCLFUNC and its body need to use the original position, but
- // the OCLOSURE node and any Init statements should use the inlined
- // position instead. See also the explanation in reader.funcLit.
- inlPos := r.inlPos(origPos)
-
- fn := ir.NewClosureFunc(origPos, r.curfn != nil)
+ fn := r.inlClosureFunc(origPos, typ)
fn.SetWrapper(true)
- clo := fn.OClosure
- clo.SetPos(inlPos)
- ir.NameClosure(clo, r.curfn)
- setType(fn.Nname, typ)
- typecheck.Func(fn)
- setType(clo, fn.Type())
+ clo := fn.OClosure
+ inlPos := clo.Pos()
var init ir.Nodes
for i, n := range captures {
@@ -2765,8 +2713,7 @@ func (r *reader) syntheticClosure(origPos src.XPos, typ *types.Type, ifaceHack b
bodyReader[fn] = pri
pri.funcBody(fn)
- // TODO(mdempsky): Remove hard-coding of typecheck.Target.
- return ir.InitExpr(init, ir.UseClosure(clo, typecheck.Target))
+ return ir.InitExpr(init, clo)
}
// syntheticSig duplicates and returns the params and results lists
@@ -2776,23 +2723,19 @@ func syntheticSig(sig *types.Type) (params, results []*types.Field) {
clone := func(params []*types.Field) []*types.Field {
res := make([]*types.Field, len(params))
for i, param := range params {
- sym := param.Sym
- if sym == nil || sym.Name == "_" {
- sym = typecheck.LookupNum(".anon", i)
- }
// TODO(mdempsky): It would be nice to preserve the original
// parameter positions here instead, but at least
// typecheck.NewMethodType replaces them with base.Pos, making
// them useless. Worse, the positions copied from base.Pos may
// have inlining contexts, which we definitely don't want here
// (e.g., #54625).
- res[i] = types.NewField(base.AutogeneratedPos, sym, param.Type)
+ res[i] = types.NewField(base.AutogeneratedPos, param.Sym, param.Type)
res[i].SetIsDDD(param.IsDDD())
}
return res
}
- return clone(sig.Params().FieldSlice()), clone(sig.Results().FieldSlice())
+ return clone(sig.Params()), clone(sig.Results())
}
func (r *reader) optExpr() ir.Node {
@@ -2829,7 +2772,7 @@ func (r *reader) methodExpr() (wrapperFn, baseFn, dictPtr ir.Node) {
recv := r.typ()
sig0 := r.typ()
pos := r.pos()
- _, sym := r.selector()
+ sym := r.selector()
// Signature type to return (i.e., recv prepended to the method's
// normal parameters list).
@@ -2870,7 +2813,7 @@ func (r *reader) methodExpr() (wrapperFn, baseFn, dictPtr ir.Node) {
// TODO(mdempsky): Is there a more robust way to get the
// dictionary pointer type here?
- dictPtrType := shapedFn.Type().Params().Field(1).Type
+ dictPtrType := shapedFn.Type().Param(1).Type
dictPtr := typecheck.Expr(ir.NewConvExpr(pos, ir.OCONVNOP, dictPtrType, r.dictWord(pos, r.dict.subdictsOffset()+idx)))
return nil, shapedFn, dictPtr
@@ -2887,14 +2830,14 @@ func (r *reader) methodExpr() (wrapperFn, baseFn, dictPtr ir.Node) {
dictPtr := typecheck.Expr(ir.NewAddrExpr(pos, dict))
// Check that dictPtr matches shapedFn's dictionary parameter.
- if !types.Identical(dictPtr.Type(), shapedFn.Type().Params().Field(1).Type) {
+ if !types.Identical(dictPtr.Type(), shapedFn.Type().Param(1).Type) {
base.FatalfAt(pos, "dict %L, but shaped method %L", dict, shapedFn)
}
// For statically known instantiations, we can take advantage of
// the stenciled wrapper.
base.AssertfAt(!recv.HasShape(), pos, "shaped receiver %v", recv)
- wrapperFn := typecheck.Expr(ir.NewSelectorExpr(pos, ir.OXDOT, ir.TypeNode(recv), sym)).(*ir.SelectorExpr)
+ wrapperFn := typecheck.NewMethodExpr(pos, recv, sym)
base.AssertfAt(types.Identical(sig, wrapperFn.Type()), pos, "wrapper %L does not have type %v", wrapperFn, sig)
return wrapperFn, shapedFn, dictPtr
@@ -2902,7 +2845,7 @@ func (r *reader) methodExpr() (wrapperFn, baseFn, dictPtr ir.Node) {
// Simple method expression; no dictionary needed.
base.AssertfAt(!recv.HasShape() || recv.IsInterface(), pos, "shaped receiver %v", recv)
- fn := typecheck.Expr(ir.NewSelectorExpr(pos, ir.OXDOT, ir.TypeNode(recv), sym)).(*ir.SelectorExpr)
+ fn := typecheck.NewMethodExpr(pos, recv, sym)
return fn, fn, nil
}
@@ -2915,7 +2858,7 @@ func shapedMethodExpr(pos src.XPos, obj *ir.Name, sym *types.Sym) *ir.SelectorEx
assert(typ.HasShape())
method := func() *types.Field {
- for _, method := range typ.Methods().Slice() {
+ for _, method := range typ.Methods() {
if method.Sym == sym {
return method
}
@@ -2927,7 +2870,7 @@ func shapedMethodExpr(pos src.XPos, obj *ir.Name, sym *types.Sym) *ir.SelectorEx
// Construct an OMETHEXPR node.
recv := method.Type.Recv().Type
- return typecheck.Expr(ir.NewSelectorExpr(pos, ir.OXDOT, ir.TypeNode(recv), sym)).(*ir.SelectorExpr)
+ return typecheck.NewMethodExpr(pos, recv, sym)
}
func (r *reader) multiExpr() []ir.Node {
@@ -2973,45 +2916,12 @@ func (r *reader) multiExpr() []ir.Node {
// temp returns a new autotemp of the specified type.
func (r *reader) temp(pos src.XPos, typ *types.Type) *ir.Name {
- // See typecheck.typecheckargs.
- curfn := r.curfn
- if curfn == nil {
- curfn = typecheck.InitTodoFunc
- }
-
- return typecheck.TempAt(pos, curfn, typ)
+ return typecheck.TempAt(pos, r.curfn, typ)
}
// tempCopy declares and returns a new autotemp initialized to the
// value of expr.
func (r *reader) tempCopy(pos src.XPos, expr ir.Node, init *ir.Nodes) *ir.Name {
- if r.curfn == nil {
- // Escape analysis doesn't know how to handle package-scope
- // function literals with free variables (i.e., that capture
- // temporary variables added to typecheck.InitTodoFunc).
- //
- // stencil.go works around this limitation by spilling values to
- // global variables instead, but that causes the value to stay
- // alive indefinitely; see go.dev/issue/54343.
- //
- // This code path (which implements the same workaround) isn't
- // actually needed by unified IR, because it creates uses normal
- // OMETHEXPR/OMETHVALUE nodes when statically-known instantiated
- // types are used. But it's kept around for now because it's handy
- // for testing that the generic fallback paths work correctly.
- base.Fatalf("tempCopy called at package scope")
-
- tmp := staticinit.StaticName(expr.Type())
-
- assign := ir.NewAssignStmt(pos, tmp, expr)
- assign.Def = true
- tmp.Defn = assign
-
- typecheck.Target.Decls = append(typecheck.Target.Decls, typecheck.Stmt(assign))
-
- return tmp
- }
-
tmp := r.temp(pos, expr.Type())
init.Append(typecheck.Stmt(ir.NewDecl(pos, ir.ODCL, tmp)))
@@ -3073,7 +2983,7 @@ func (r *reader) compLit() ir.Node {
func wrapName(pos src.XPos, x ir.Node) ir.Node {
// These nodes do not carry line numbers.
// Introduce a wrapper node to give them the correct line.
- switch ir.Orig(x).Op() {
+ switch x.Op() {
case ir.OTYPE, ir.OLITERAL:
if x.Sym() == nil {
break
@@ -3107,21 +3017,16 @@ func (r *reader) funcLit() ir.Node {
// OCLOSURE node, because that position represents where any heap
// allocation of the closure is credited (#49171).
r.suppressInlPos++
- pos := r.pos()
- xtype2 := r.signature(nil)
+ origPos := r.pos()
+ sig := r.signature(nil)
r.suppressInlPos--
- fn := ir.NewClosureFunc(pos, r.curfn != nil)
- clo := fn.OClosure
- clo.SetPos(r.inlPos(pos)) // see comment above
- ir.NameClosure(clo, r.curfn)
-
- setType(fn.Nname, xtype2)
- typecheck.Func(fn)
- setType(clo, fn.Type())
+ fn := r.inlClosureFunc(origPos, sig)
fn.ClosureVars = make([]*ir.Name, 0, r.Len())
for len(fn.ClosureVars) < cap(fn.ClosureVars) {
+ // TODO(mdempsky): I think these should be original positions too
+ // (i.e., not inline-adjusted).
ir.NewClosureVar(r.pos(), fn, r.useLocal())
}
if param := r.dictParam; param != nil {
@@ -3132,8 +3037,24 @@ func (r *reader) funcLit() ir.Node {
r.addBody(fn, nil)
+ // Un-hide closures belonging to init functions.
+ if (r.curfn.IsPackageInit() || strings.HasPrefix(r.curfn.Sym().Name, "init.")) && ir.IsTrivialClosure(fn.OClosure) {
+ fn.SetIsHiddenClosure(false)
+ }
+
+ return fn.OClosure
+}
+
+// inlClosureFunc constructs a new closure function, but correctly
+// handles inlining.
+func (r *reader) inlClosureFunc(origPos src.XPos, sig *types.Type) *ir.Func {
+ curfn := r.inlCaller
+ if curfn == nil {
+ curfn = r.curfn
+ }
+
// TODO(mdempsky): Remove hard-coding of typecheck.Target.
- return ir.UseClosure(clo, typecheck.Target)
+ return ir.NewClosureFunc(origPos, r.inlPos(origPos), ir.OCLOSURE, sig, curfn, typecheck.Target)
}
func (r *reader) exprList() []ir.Node {
@@ -3157,7 +3078,7 @@ func (r *reader) exprs() []ir.Node {
// uintptr-typed word from the dictionary parameter.
func (r *reader) dictWord(pos src.XPos, idx int) ir.Node {
base.AssertfAt(r.dictParam != nil, pos, "expected dictParam in %v", r.curfn)
- return typecheck.Expr(ir.NewIndexExpr(pos, r.dictParam, ir.NewBasicLit(pos, constant.MakeInt64(int64(idx)))))
+ return typecheck.Expr(ir.NewIndexExpr(pos, r.dictParam, ir.NewInt(pos, int64(idx))))
}
// rttiWord is like dictWord, but converts it to *byte (the type used
@@ -3277,10 +3198,7 @@ func (r *reader) exprType() ir.Node {
typ, rtype = r.rtype0(pos)
if !r.Bool() { // not derived
- // TODO(mdempsky): ir.TypeNode should probably return a typecheck'd node.
- n := ir.TypeNode(typ)
- n.SetTypecheck(1)
- return n
+ return ir.TypeNode(typ)
}
}
@@ -3303,11 +3221,66 @@ func (r *reader) pkgInit(self *types.Pkg, target *ir.Package) {
}
target.CgoPragmas = cgoPragmas
+ r.pkgInitOrder(target)
+
r.pkgDecls(target)
r.Sync(pkgbits.SyncEOF)
}
+// pkgInitOrder creates a synthetic init function to handle any
+// package-scope initialization statements.
+func (r *reader) pkgInitOrder(target *ir.Package) {
+ initOrder := make([]ir.Node, r.Len())
+ if len(initOrder) == 0 {
+ return
+ }
+
+ // Make a function that contains all the initialization statements.
+ pos := base.AutogeneratedPos
+ base.Pos = pos
+
+ fn := ir.NewFunc(pos, pos, typecheck.Lookup("init"), types.NewSignature(nil, nil, nil))
+ fn.SetIsPackageInit(true)
+ fn.SetInlinabilityChecked(true) // suppress useless "can inline" diagnostics
+
+ typecheck.DeclFunc(fn)
+ r.curfn = fn
+
+ for i := range initOrder {
+ lhs := make([]ir.Node, r.Len())
+ for j := range lhs {
+ lhs[j] = r.obj()
+ }
+ rhs := r.expr()
+ pos := lhs[0].Pos()
+
+ var as ir.Node
+ if len(lhs) == 1 {
+ as = typecheck.Stmt(ir.NewAssignStmt(pos, lhs[0], rhs))
+ } else {
+ as = typecheck.Stmt(ir.NewAssignListStmt(pos, ir.OAS2, lhs, []ir.Node{rhs}))
+ }
+
+ for _, v := range lhs {
+ v.(*ir.Name).Defn = as
+ }
+
+ initOrder[i] = as
+ }
+
+ fn.Body = initOrder
+
+ typecheck.FinishFuncBody()
+ r.curfn = nil
+ r.locals = nil
+
+ // Outline (if legal/profitable) global map inits.
+ staticinit.OutlineMapInits(fn)
+
+ target.Inits = append(target.Inits, fn)
+}
+
func (r *reader) pkgDecls(target *ir.Package) {
r.Sync(pkgbits.SyncDecls)
for {
@@ -3321,37 +3294,17 @@ func (r *reader) pkgDecls(target *ir.Package) {
case declFunc:
names := r.pkgObjs(target)
assert(len(names) == 1)
- target.Decls = append(target.Decls, names[0].Func)
+ target.Funcs = append(target.Funcs, names[0].Func)
case declMethod:
typ := r.typ()
- _, sym := r.selector()
+ sym := r.selector()
method := typecheck.Lookdot1(nil, sym, typ, typ.Methods(), 0)
- target.Decls = append(target.Decls, method.Nname.(*ir.Name).Func)
+ target.Funcs = append(target.Funcs, method.Nname.(*ir.Name).Func)
case declVar:
- pos := r.pos()
names := r.pkgObjs(target)
- values := r.exprList()
-
- if len(names) > 1 && len(values) == 1 {
- as := ir.NewAssignListStmt(pos, ir.OAS2, nil, values)
- for _, name := range names {
- as.Lhs.Append(name)
- name.Defn = as
- }
- target.Decls = append(target.Decls, as)
- } else {
- for i, name := range names {
- as := ir.NewAssignStmt(pos, name, nil)
- if i < len(values) {
- as.Y = values[i]
- }
- name.Defn = as
- target.Decls = append(target.Decls, as)
- }
- }
if n := r.Len(); n > 0 {
assert(len(names) == 1)
@@ -3399,15 +3352,15 @@ func (r *reader) pkgObjs(target *ir.Package) []*ir.Name {
}
}
- if types.IsExported(sym.Name) {
+ if base.Ctxt.Flag_dynlink && types.LocalPkg.Name == "main" && types.IsExported(sym.Name) && name.Op() == ir.ONAME {
assert(!sym.OnExportList())
- target.Exports = append(target.Exports, name)
+ target.PluginExports = append(target.PluginExports, name)
sym.SetOnExportList(true)
}
- if base.Flag.AsmHdr != "" {
+ if base.Flag.AsmHdr != "" && (name.Op() == ir.OLITERAL || name.Op() == ir.OTYPE) {
assert(!sym.Asm())
- target.Asms = append(target.Asms, name)
+ target.AsmHdrDecls = append(target.AsmHdrDecls, name)
sym.SetAsm(true)
}
}
@@ -3432,28 +3385,20 @@ var inlgen = 0
// unifiedInlineCall implements inline.NewInline by re-reading the function
// body from its Unified IR export data.
-func unifiedInlineCall(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
- // TODO(mdempsky): Turn callerfn into an explicit parameter.
- callerfn := ir.CurFunc
-
+func unifiedInlineCall(callerfn *ir.Func, call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
pri, ok := bodyReaderFor(fn)
if !ok {
base.FatalfAt(call.Pos(), "cannot inline call to %v: missing inline body", fn)
}
- if fn.Inl.Body == nil {
+ if !fn.Inl.HaveDcl {
expandInline(fn, pri)
}
r := pri.asReader(pkgbits.RelocBody, pkgbits.SyncFuncBody)
- // TODO(mdempsky): This still feels clumsy. Can we do better?
- tmpfn := ir.NewFunc(fn.Pos())
- tmpfn.Nname = ir.NewNameAt(fn.Nname.Pos(), callerfn.Sym())
- tmpfn.Closgen = callerfn.Closgen
- defer func() { callerfn.Closgen = tmpfn.Closgen }()
+ tmpfn := ir.NewFunc(fn.Pos(), fn.Nname.Pos(), callerfn.Sym(), fn.Type())
- setType(tmpfn.Nname, fn.Type())
r.curfn = tmpfn
r.inlCaller = callerfn
@@ -3461,16 +3406,32 @@ func unifiedInlineCall(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.Inlined
r.inlFunc = fn
r.inlTreeIndex = inlIndex
r.inlPosBases = make(map[*src.PosBase]*src.PosBase)
+ r.funarghack = true
r.closureVars = make([]*ir.Name, len(r.inlFunc.ClosureVars))
for i, cv := range r.inlFunc.ClosureVars {
+ // TODO(mdempsky): It should be possible to support this case, but
+ // for now we rely on the inliner avoiding it.
+ if cv.Outer.Curfn != callerfn {
+ base.FatalfAt(call.Pos(), "inlining closure call across frames")
+ }
r.closureVars[i] = cv.Outer
}
if len(r.closureVars) != 0 && r.hasTypeParams() {
r.dictParam = r.closureVars[len(r.closureVars)-1] // dictParam is last; see reader.funcLit
}
- r.funcargs(fn)
+ r.declareParams()
+
+ var inlvars, retvars []*ir.Name
+ {
+ sig := r.curfn.Type()
+ endParams := sig.NumRecvs() + sig.NumParams()
+ endResults := endParams + sig.NumResults()
+
+ inlvars = r.curfn.Dcl[:endParams]
+ retvars = r.curfn.Dcl[endParams:endResults]
+ }
r.delayResults = fn.Inl.CanDelayResults
@@ -3483,7 +3444,7 @@ func unifiedInlineCall(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.Inlined
// may contain side effects. Make sure to preserve these,
// if necessary (#42703).
if call.Op() == ir.OCALLFUNC {
- inline.CalleeEffects(&init, call.X)
+ inline.CalleeEffects(&init, call.Fun)
}
var args ir.Nodes
@@ -3493,15 +3454,14 @@ func unifiedInlineCall(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.Inlined
args.Append(call.Args...)
// Create assignment to declare and initialize inlvars.
- as2 := ir.NewAssignListStmt(call.Pos(), ir.OAS2, r.inlvars, args)
+ as2 := ir.NewAssignListStmt(call.Pos(), ir.OAS2, ir.ToNodes(inlvars), args)
as2.Def = true
var as2init ir.Nodes
- for _, name := range r.inlvars {
+ for _, name := range inlvars {
if ir.IsBlank(name) {
continue
}
// TODO(mdempsky): Use inlined position of name.Pos() instead?
- name := name.(*ir.Name)
as2init.Append(ir.NewDecl(call.Pos(), ir.ODCL, name))
name.Defn = as2
}
@@ -3511,9 +3471,8 @@ func unifiedInlineCall(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.Inlined
if !r.delayResults {
// If not delaying retvars, declare and zero initialize the
// result variables now.
- for _, name := range r.retvars {
+ for _, name := range retvars {
// TODO(mdempsky): Use inlined position of name.Pos() instead?
- name := name.(*ir.Name)
init.Append(ir.NewDecl(call.Pos(), ir.ODCL, name))
ras := ir.NewAssignStmt(call.Pos(), name, nil)
init.Append(typecheck.Stmt(ras))
@@ -3527,8 +3486,6 @@ func unifiedInlineCall(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.Inlined
// Note issue 28603.
init.Append(ir.NewInlineMarkStmt(call.Pos().WithIsStmt(), int64(r.inlTreeIndex)))
- nparams := len(r.curfn.Dcl)
-
ir.WithFunc(r.curfn, func() {
if !r.syntheticBody(call.Pos()) {
assert(r.Bool()) // have body
@@ -3544,13 +3501,11 @@ func unifiedInlineCall(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.Inlined
// themselves. But currently it's an easy fix to #50552.
readBodies(typecheck.Target, true)
- deadcode.Func(r.curfn)
-
// Replace any "return" statements within the function body.
var edit func(ir.Node) ir.Node
edit = func(n ir.Node) ir.Node {
if ret, ok := n.(*ir.ReturnStmt); ok {
- n = typecheck.Stmt(r.inlReturn(ret))
+ n = typecheck.Stmt(r.inlReturn(ret, retvars))
}
ir.EditChildren(n, edit)
return n
@@ -3560,28 +3515,23 @@ func unifiedInlineCall(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.Inlined
body := ir.Nodes(r.curfn.Body)
- // Quirkish: We need to eagerly prune variables added during
- // inlining, but removed by deadcode.FuncBody above. Unused
- // variables will get removed during stack frame layout anyway, but
- // len(fn.Dcl) ends up influencing things like autotmp naming.
+ // Reparent any declarations into the caller function.
+ for _, name := range r.curfn.Dcl {
+ name.Curfn = callerfn
- used := usedLocals(body)
-
- for i, name := range r.curfn.Dcl {
- if i < nparams || used.Has(name) {
- name.Curfn = callerfn
- callerfn.Dcl = append(callerfn.Dcl, name)
-
- if name.AutoTemp() {
- name.SetEsc(ir.EscUnknown)
- name.SetInlLocal(true)
- }
+ if name.Class != ir.PAUTO {
+ name.SetPos(r.inlPos(name.Pos()))
+ name.SetInlFormal(true)
+ name.Class = ir.PAUTO
+ } else {
+ name.SetInlLocal(true)
}
}
+ callerfn.Dcl = append(callerfn.Dcl, r.curfn.Dcl...)
body.Append(ir.NewLabelStmt(call.Pos(), r.retlabel))
- res := ir.NewInlinedCallExpr(call.Pos(), body, append([]ir.Node(nil), r.retvars...))
+ res := ir.NewInlinedCallExpr(call.Pos(), body, ir.ToNodes(retvars))
res.SetInit(init)
res.SetType(call.Type())
res.SetTypecheck(1)
@@ -3594,20 +3544,19 @@ func unifiedInlineCall(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.Inlined
// inlReturn returns a statement that can substitute for the given
// return statement when inlining.
-func (r *reader) inlReturn(ret *ir.ReturnStmt) *ir.BlockStmt {
+func (r *reader) inlReturn(ret *ir.ReturnStmt, retvars []*ir.Name) *ir.BlockStmt {
pos := r.inlCall.Pos()
block := ir.TakeInit(ret)
if results := ret.Results; len(results) != 0 {
- assert(len(r.retvars) == len(results))
+ assert(len(retvars) == len(results))
- as2 := ir.NewAssignListStmt(pos, ir.OAS2, append([]ir.Node(nil), r.retvars...), ret.Results)
+ as2 := ir.NewAssignListStmt(pos, ir.OAS2, ir.ToNodes(retvars), ret.Results)
if r.delayResults {
- for _, name := range r.retvars {
+ for _, name := range retvars {
// TODO(mdempsky): Use inlined position of name.Pos() instead?
- name := name.(*ir.Name)
block.Append(ir.NewDecl(pos, ir.ODCL, name))
name.Defn = as2
}
@@ -3621,7 +3570,7 @@ func (r *reader) inlReturn(ret *ir.ReturnStmt) *ir.BlockStmt {
}
// expandInline reads in an extra copy of IR to populate
-// fn.Inl.{Dcl,Body}.
+// fn.Inl.Dcl.
func expandInline(fn *ir.Func, pri pkgReaderIndex) {
// TODO(mdempsky): Remove this function. It's currently needed by
// dwarfgen/dwarf.go:preInliningDcls, which requires fn.Inl.Dcl to
@@ -3629,35 +3578,26 @@ func expandInline(fn *ir.Func, pri pkgReaderIndex) {
// with the same information some other way.
fndcls := len(fn.Dcl)
- topdcls := len(typecheck.Target.Decls)
+ topdcls := len(typecheck.Target.Funcs)
- tmpfn := ir.NewFunc(fn.Pos())
- tmpfn.Nname = ir.NewNameAt(fn.Nname.Pos(), fn.Sym())
+ tmpfn := ir.NewFunc(fn.Pos(), fn.Nname.Pos(), fn.Sym(), fn.Type())
tmpfn.ClosureVars = fn.ClosureVars
{
r := pri.asReader(pkgbits.RelocBody, pkgbits.SyncFuncBody)
- setType(tmpfn.Nname, fn.Type())
// Don't change parameter's Sym/Nname fields.
r.funarghack = true
r.funcBody(tmpfn)
-
- ir.WithFunc(tmpfn, func() {
- deadcode.Func(tmpfn)
- })
}
- used := usedLocals(tmpfn.Body)
-
+ // Move tmpfn's params to fn.Inl.Dcl, and reparent under fn.
for _, name := range tmpfn.Dcl {
- if name.Class != ir.PAUTO || used.Has(name) {
- name.Curfn = fn
- fn.Inl.Dcl = append(fn.Inl.Dcl, name)
- }
+ name.Curfn = fn
}
- fn.Inl.Body = tmpfn.Body
+ fn.Inl.Dcl = tmpfn.Dcl
+ fn.Inl.HaveDcl = true
// Double check that we didn't change fn.Dcl by accident.
assert(fndcls == len(fn.Dcl))
@@ -3665,7 +3605,7 @@ func expandInline(fn *ir.Func, pri pkgReaderIndex) {
// typecheck.Stmts may have added function literals to
// typecheck.Target.Decls. Remove them again so we don't risk trying
// to compile them multiple times.
- typecheck.Target.Decls = typecheck.Target.Decls[:topdcls]
+ typecheck.Target.Funcs = typecheck.Target.Funcs[:topdcls]
}
// usedLocals returns a set of local variables that are used within body.
@@ -3781,7 +3721,7 @@ func wrapType(typ *types.Type, target *ir.Package, seen map[string]*types.Type,
if !typ.IsInterface() {
typecheck.CalcMethods(typ)
}
- for _, meth := range typ.AllMethods().Slice() {
+ for _, meth := range typ.AllMethods() {
if meth.Sym.IsBlank() || !meth.IsMethod() {
base.FatalfAt(meth.Pos, "invalid method: %v", meth)
}
@@ -3861,7 +3801,6 @@ func wrapMethodValue(recvType *types.Type, method *types.Field, target *ir.Packa
recv := ir.NewHiddenParam(pos, fn, typecheck.Lookup(".this"), recvType)
if !needed {
- typecheck.Func(fn)
return
}
@@ -3871,42 +3810,16 @@ func wrapMethodValue(recvType *types.Type, method *types.Field, target *ir.Packa
}
func newWrapperFunc(pos src.XPos, sym *types.Sym, wrapper *types.Type, method *types.Field) *ir.Func {
- fn := ir.NewFunc(pos)
- fn.SetDupok(true) // TODO(mdempsky): Leave unset for local, non-generic wrappers?
-
- name := ir.NewNameAt(pos, sym)
- ir.MarkFunc(name)
- name.Func = fn
- name.Defn = fn
- fn.Nname = name
-
sig := newWrapperType(wrapper, method)
- setType(name, sig)
-
- // TODO(mdempsky): De-duplicate with similar logic in funcargs.
- defParams := func(class ir.Class, params *types.Type) {
- for _, param := range params.FieldSlice() {
- name := ir.NewNameAt(param.Pos, param.Sym)
- name.Class = class
- setType(name, param.Type)
-
- name.Curfn = fn
- fn.Dcl = append(fn.Dcl, name)
- param.Nname = name
- }
- }
-
- defParams(ir.PPARAM, sig.Recvs())
- defParams(ir.PPARAM, sig.Params())
- defParams(ir.PPARAMOUT, sig.Results())
+ fn := ir.NewFunc(pos, pos, sym, sig)
+ fn.DeclareParams(true)
+ fn.SetDupok(true) // TODO(mdempsky): Leave unset for local, non-generic wrappers?
return fn
}
func finishWrapperFunc(fn *ir.Func, target *ir.Package) {
- typecheck.Func(fn)
-
ir.WithFunc(fn, func() {
typecheck.Stmts(fn.Body)
})
@@ -3914,7 +3827,7 @@ func finishWrapperFunc(fn *ir.Func, target *ir.Package) {
// We generate wrappers after the global inlining pass,
// so we're responsible for applying inlining ourselves here.
// TODO(prattmic): plumb PGO.
- inline.InlineCalls(fn, nil)
+ interleaved.DevirtualizeAndInlineFunc(fn, nil)
// The body of wrapper function after inlining may reveal new ir.OMETHVALUE node,
// we don't know whether wrapper function has been generated for it or not, so
@@ -3929,7 +3842,8 @@ func finishWrapperFunc(fn *ir.Func, target *ir.Package) {
}
})
- target.Decls = append(target.Decls, fn)
+ fn.Nname.Defn = fn
+ target.Funcs = append(target.Funcs, fn)
}
// newWrapperType returns a copy of the given signature type, but with
@@ -3940,11 +3854,7 @@ func newWrapperType(recvType *types.Type, method *types.Field) *types.Type {
clone := func(params []*types.Field) []*types.Field {
res := make([]*types.Field, len(params))
for i, param := range params {
- sym := param.Sym
- if sym == nil || sym.Name == "_" {
- sym = typecheck.LookupNum(".anon", i)
- }
- res[i] = types.NewField(param.Pos, sym, param.Type)
+ res[i] = types.NewField(param.Pos, param.Sym, param.Type)
res[i].SetIsDDD(param.IsDDD())
}
return res
@@ -3954,10 +3864,10 @@ func newWrapperType(recvType *types.Type, method *types.Field) *types.Type {
var recv *types.Field
if recvType != nil {
- recv = types.NewField(sig.Recv().Pos, typecheck.Lookup(".this"), recvType)
+ recv = types.NewField(sig.Recv().Pos, sig.Recv().Sym, recvType)
}
- params := clone(sig.Params().FieldSlice())
- results := clone(sig.Results().FieldSlice())
+ params := clone(sig.Params())
+ results := clone(sig.Results())
return types.NewSignature(recv, params, results)
}
@@ -3965,7 +3875,7 @@ func newWrapperType(recvType *types.Type, method *types.Field) *types.Type {
func addTailCall(pos src.XPos, fn *ir.Func, recv ir.Node, method *types.Field) {
sig := fn.Nname.Type()
args := make([]ir.Node, sig.NumParams())
- for i, param := range sig.Params().FieldSlice() {
+ for i, param := range sig.Params() {
args[i] = param.Nname.(*ir.Name)
}
@@ -3974,7 +3884,7 @@ func addTailCall(pos src.XPos, fn *ir.Func, recv ir.Node, method *types.Field) {
fn.SetWrapper(true) // TODO(mdempsky): Leave unset for tail calls?
- dot := ir.NewSelectorExpr(pos, ir.OXDOT, recv, method.Sym)
+ dot := typecheck.XDotMethod(pos, recv, method.Sym, true)
call := typecheck.Call(pos, dot, args, method.Type.IsVariadic()).(*ir.CallExpr)
if method.Type.NumResults() == 0 {
@@ -4014,16 +3924,16 @@ func shapeSig(fn *ir.Func, dict *readerDict) *types.Type {
recv = types.NewField(oldRecv.Pos, oldRecv.Sym, oldRecv.Type)
}
- params := make([]*types.Field, 1+sig.Params().Fields().Len())
+ params := make([]*types.Field, 1+sig.NumParams())
params[0] = types.NewField(fn.Pos(), fn.Sym().Pkg.Lookup(dictParamName), types.NewPtr(dict.varType()))
- for i, param := range sig.Params().Fields().Slice() {
+ for i, param := range sig.Params() {
d := types.NewField(param.Pos, param.Sym, param.Type)
d.SetIsDDD(param.IsDDD())
params[1+i] = d
}
- results := make([]*types.Field, sig.Results().Fields().Len())
- for i, result := range sig.Results().Fields().Slice() {
+ results := make([]*types.Field, sig.NumResults())
+ for i, result := range sig.Results() {
results[i] = types.NewField(result.Pos, result.Sym, result.Type)
}
diff --git a/src/cmd/compile/internal/noder/sizes.go b/src/cmd/compile/internal/noder/sizes.go
deleted file mode 100644
index dff8d7bb9a..0000000000
--- a/src/cmd/compile/internal/noder/sizes.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package noder
-
-import (
- "fmt"
-
- "cmd/compile/internal/types"
- "cmd/compile/internal/types2"
-)
-
-// Code below based on go/types.StdSizes.
-// Intentional differences are marked with "gc:".
-
-type gcSizes struct{}
-
-func (s *gcSizes) Alignof(T types2.Type) int64 {
- // For arrays and structs, alignment is defined in terms
- // of alignment of the elements and fields, respectively.
- switch t := T.Underlying().(type) {
- case *types2.Array:
- // spec: "For a variable x of array type: unsafe.Alignof(x)
- // is the same as unsafe.Alignof(x[0]), but at least 1."
- return s.Alignof(t.Elem())
- case *types2.Struct:
- if t.NumFields() == 0 && types2.IsSyncAtomicAlign64(T) {
- // Special case: sync/atomic.align64 is an
- // empty struct we recognize as a signal that
- // the struct it contains must be
- // 64-bit-aligned.
- //
- // This logic is equivalent to the logic in
- // cmd/compile/internal/types/size.go:calcStructOffset
- return 8
- }
-
- // spec: "For a variable x of struct type: unsafe.Alignof(x)
- // is the largest of the values unsafe.Alignof(x.f) for each
- // field f of x, but at least 1."
- max := int64(1)
- for i, nf := 0, t.NumFields(); i < nf; i++ {
- if a := s.Alignof(t.Field(i).Type()); a > max {
- max = a
- }
- }
- return max
- case *types2.Slice, *types2.Interface:
- // Multiword data structures are effectively structs
- // in which each element has size PtrSize.
- return int64(types.PtrSize)
- case *types2.Basic:
- // Strings are like slices and interfaces.
- if t.Info()&types2.IsString != 0 {
- return int64(types.PtrSize)
- }
- }
- a := s.Sizeof(T) // may be 0
- // spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1."
- if a < 1 {
- return 1
- }
- // complex{64,128} are aligned like [2]float{32,64}.
- if isComplex(T) {
- a /= 2
- }
- if a > int64(types.RegSize) {
- return int64(types.RegSize)
- }
- return a
-}
-
-func isComplex(T types2.Type) bool {
- basic, ok := T.Underlying().(*types2.Basic)
- return ok && basic.Info()&types2.IsComplex != 0
-}
-
-func (s *gcSizes) Offsetsof(fields []*types2.Var) []int64 {
- offsets := make([]int64, len(fields))
- var offs int64
- for i, f := range fields {
- if offs < 0 {
- // all remaining offsets are too large
- offsets[i] = -1
- continue
- }
- // offs >= 0
- typ := f.Type()
- a := s.Alignof(typ)
- offs = types.RoundUp(offs, a) // possibly < 0 if align overflows
- offsets[i] = offs
- if d := s.Sizeof(typ); d >= 0 && offs >= 0 {
- offs += d // ok to overflow to < 0
- } else {
- offs = -1
- }
- }
- return offsets
-}
-
-func (s *gcSizes) Sizeof(T types2.Type) int64 {
- switch t := T.Underlying().(type) {
- case *types2.Basic:
- k := t.Kind()
- if int(k) < len(basicSizes) {
- if s := basicSizes[k]; s > 0 {
- return int64(s)
- }
- }
- switch k {
- case types2.String:
- return int64(types.PtrSize) * 2
- case types2.Int, types2.Uint, types2.Uintptr, types2.UnsafePointer:
- return int64(types.PtrSize)
- }
- panic(fmt.Sprintf("unimplemented basic: %v (kind %v)", T, k))
- case *types2.Array:
- n := t.Len()
- if n <= 0 {
- return 0
- }
- // n > 0
- // gc: Size includes alignment padding.
- esize := s.Sizeof(t.Elem())
- if esize < 0 {
- return -1 // array element too large
- }
- if esize == 0 {
- return 0 // 0-size element
- }
- // esize > 0
- // Final size is esize * n; and size must be <= maxInt64.
- const maxInt64 = 1<<63 - 1
- if esize > maxInt64/n {
- return -1 // esize * n overflows
- }
- return esize * n
- case *types2.Slice:
- return int64(types.PtrSize) * 3
- case *types2.Struct:
- n := t.NumFields()
- if n == 0 {
- return 0
- }
- fields := make([]*types2.Var, n)
- for i := range fields {
- fields[i] = t.Field(i)
- }
- offsets := s.Offsetsof(fields)
-
- // gc: The last field of a non-zero-sized struct is not allowed to
- // have size 0.
- last := s.Sizeof(fields[n-1].Type())
- if last == 0 && offsets[n-1] > 0 {
- last = 1
- }
-
- // gc: Size includes alignment padding.
- return types.RoundUp(offsets[n-1]+last, s.Alignof(t)) // may overflow to < 0 which is ok
- case *types2.Interface:
- return int64(types.PtrSize) * 2
- case *types2.Chan, *types2.Map, *types2.Pointer, *types2.Signature:
- return int64(types.PtrSize)
- default:
- panic(fmt.Sprintf("unimplemented type: %T", t))
- }
-}
-
-var basicSizes = [...]byte{
- types2.Invalid: 1,
- types2.Bool: 1,
- types2.Int8: 1,
- types2.Int16: 2,
- types2.Int32: 4,
- types2.Int64: 8,
- types2.Uint8: 1,
- types2.Uint16: 2,
- types2.Uint32: 4,
- types2.Uint64: 8,
- types2.Float32: 4,
- types2.Float64: 8,
- types2.Complex64: 8,
- types2.Complex128: 16,
-}
diff --git a/src/cmd/compile/internal/noder/stmt.go b/src/cmd/compile/internal/noder/stmt.go
index aa82274f03..04f92d2cf5 100644
--- a/src/cmd/compile/internal/noder/stmt.go
+++ b/src/cmd/compile/internal/noder/stmt.go
@@ -22,33 +22,3 @@ var callOps = [...]ir.Op{
syntax.Defer: ir.ODEFER,
syntax.Go: ir.OGO,
}
-
-// initDefn marks the given names as declared by defn and populates
-// its Init field with ODCL nodes. It then reports whether any names
-// were so declared, which can be used to initialize defn.Def.
-func initDefn(defn ir.InitNode, names []*ir.Name) bool {
- if len(names) == 0 {
- return false
- }
-
- init := make([]ir.Node, len(names))
- for i, name := range names {
- name.Defn = defn
- init[i] = ir.NewDecl(name.Pos(), ir.ODCL, name)
- }
- defn.SetInit(init)
- return true
-}
-
-// unpackTwo returns the first two nodes in list. If list has fewer
-// than 2 nodes, then the missing nodes are replaced with nils.
-func unpackTwo(list []ir.Node) (fst, snd ir.Node) {
- switch len(list) {
- case 0:
- return nil, nil
- case 1:
- return list[0], nil
- default:
- return list[0], list[1]
- }
-}
diff --git a/src/cmd/compile/internal/noder/types.go b/src/cmd/compile/internal/noder/types.go
index 6caf158c7b..76c6d15dd8 100644
--- a/src/cmd/compile/internal/noder/types.go
+++ b/src/cmd/compile/internal/noder/types.go
@@ -9,8 +9,6 @@ import (
"cmd/compile/internal/types2"
)
-var universeAny = types2.Universe.Lookup("any").Type()
-
var basics = [...]**types.Type{
types2.Invalid: new(*types.Type),
types2.Bool: &types.Types[types.TBOOL],
diff --git a/src/cmd/compile/internal/noder/unified.go b/src/cmd/compile/internal/noder/unified.go
index 6c4ac66e3d..492b00d256 100644
--- a/src/cmd/compile/internal/noder/unified.go
+++ b/src/cmd/compile/internal/noder/unified.go
@@ -6,7 +6,6 @@ package noder
import (
"fmt"
- "internal/goversion"
"internal/pkgbits"
"io"
"runtime"
@@ -16,6 +15,7 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/inline"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/pgo"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/compile/internal/types2"
@@ -27,6 +27,110 @@ import (
// later.
var localPkgReader *pkgReader
+// LookupFunc returns the ir.Func for an arbitrary full symbol name if
+// that function exists in the set of available export data.
+//
+// This allows lookup of arbitrary functions and methods that aren't otherwise
+// referenced by the local package and thus haven't been read yet.
+//
+// TODO(prattmic): Does not handle instantiation of generic types. Currently
+// profiles don't contain the original type arguments, so we won't be able to
+// create the runtime dictionaries.
+//
+// TODO(prattmic): Hit rate of this function is usually fairly low, and errors
+// are only used when debug logging is enabled. Consider constructing cheaper
+// errors by default.
+func LookupFunc(fullName string) (*ir.Func, error) {
+ pkgPath, symName, err := ir.ParseLinkFuncName(fullName)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing symbol name %q: %v", fullName, err)
+ }
+
+ pkg, ok := types.PkgMap()[pkgPath]
+ if !ok {
+ return nil, fmt.Errorf("pkg %s doesn't exist in %v", pkgPath, types.PkgMap())
+ }
+
+ // Symbol naming is ambiguous. We can't necessarily distinguish between
+ // a method and a closure. For example, is foo.Bar.func1 a closure defined in
+ // function Bar, or a method on type Bar? Thus we must simply attempt
+ // to look up both.
+
+ fn, err := lookupFunction(pkg, symName)
+ if err == nil {
+ return fn, nil
+ }
+
+ fn, mErr := lookupMethod(pkg, symName)
+ if mErr == nil {
+ return fn, nil
+ }
+
+ return nil, fmt.Errorf("%s is not a function (%v) or method (%v)", fullName, err, mErr)
+}
+
+func lookupFunction(pkg *types.Pkg, symName string) (*ir.Func, error) {
+ sym := pkg.Lookup(symName)
+
+ // TODO(prattmic): Enclosed functions (e.g., foo.Bar.func1) are not
+ // present in objReader, only as OCLOSURE nodes in the enclosing
+ // function.
+ pri, ok := objReader[sym]
+ if !ok {
+ return nil, fmt.Errorf("func sym %v missing objReader", sym)
+ }
+
+ node, err := pri.pr.objIdxMayFail(pri.idx, nil, nil, false)
+ if err != nil {
+ return nil, fmt.Errorf("func sym %v lookup error: %w", sym, err)
+ }
+ name := node.(*ir.Name)
+ if name.Op() != ir.ONAME || name.Class != ir.PFUNC {
+ return nil, fmt.Errorf("func sym %v refers to non-function name: %v", sym, name)
+ }
+ return name.Func, nil
+}
+
+func lookupMethod(pkg *types.Pkg, symName string) (*ir.Func, error) {
+ // N.B. readPackage creates a Sym for every object in the package to
+ // initialize objReader and importBodyReader, even if the object isn't
+ // read.
+ //
+ // However, objReader is only initialized for top-level objects, so we
+ // must first look up the type and use that to find the method rather
+ // than looking for the method directly.
+ typ, meth, err := ir.LookupMethodSelector(pkg, symName)
+ if err != nil {
+ return nil, fmt.Errorf("error looking up method symbol %q: %v", symName, err)
+ }
+
+ pri, ok := objReader[typ]
+ if !ok {
+ return nil, fmt.Errorf("type sym %v missing objReader", typ)
+ }
+
+ node, err := pri.pr.objIdxMayFail(pri.idx, nil, nil, false)
+ if err != nil {
+ return nil, fmt.Errorf("func sym %v lookup error: %w", typ, err)
+ }
+ name := node.(*ir.Name)
+ if name.Op() != ir.OTYPE {
+ return nil, fmt.Errorf("type sym %v refers to non-type name: %v", typ, name)
+ }
+ if name.Alias() {
+ return nil, fmt.Errorf("type sym %v refers to alias", typ)
+ }
+
+ for _, m := range name.Type().Methods() {
+ if m.Sym == meth {
+ fn := m.Nname.(*ir.Name).Func
+ return fn, nil
+ }
+ }
+
+ return nil, fmt.Errorf("method %s missing from method set of %v", symName, typ)
+}
+
// unified constructs the local package's Internal Representation (IR)
// from its syntax tree (AST).
//
@@ -71,53 +175,43 @@ var localPkgReader *pkgReader
func unified(m posMap, noders []*noder) {
inline.InlineCall = unifiedInlineCall
typecheck.HaveInlineBody = unifiedHaveInlineBody
+ pgo.LookupFunc = LookupFunc
data := writePkgStub(m, noders)
- // We already passed base.Flag.Lang to types2 to handle validating
- // the user's source code. Bump it up now to the current version and
- // re-parse, so typecheck doesn't complain if we construct IR that
- // utilizes newer Go features.
- base.Flag.Lang = fmt.Sprintf("go1.%d", goversion.Version)
- types.ParseLangFlag()
-
target := typecheck.Target
- typecheck.TypecheckAllowed = true
-
localPkgReader = newPkgReader(pkgbits.NewPkgDecoder(types.LocalPkg.Path, data))
readPackage(localPkgReader, types.LocalPkg, true)
r := localPkgReader.newReader(pkgbits.RelocMeta, pkgbits.PrivateRootIdx, pkgbits.SyncPrivate)
r.pkgInit(types.LocalPkg, target)
- // Type-check any top-level assignments. We ignore non-assignments
- // here because other declarations are typechecked as they're
- // constructed.
- for i, ndecls := 0, len(target.Decls); i < ndecls; i++ {
- switch n := target.Decls[i]; n.Op() {
- case ir.OAS, ir.OAS2:
- target.Decls[i] = typecheck.Stmt(n)
- }
- }
-
readBodies(target, false)
// Check that nothing snuck past typechecking.
- for _, n := range target.Decls {
- if n.Typecheck() == 0 {
- base.FatalfAt(n.Pos(), "missed typecheck: %v", n)
+ for _, fn := range target.Funcs {
+ if fn.Typecheck() == 0 {
+ base.FatalfAt(fn.Pos(), "missed typecheck: %v", fn)
}
// For functions, check that at least their first statement (if
// any) was typechecked too.
- if fn, ok := n.(*ir.Func); ok && len(fn.Body) != 0 {
+ if len(fn.Body) != 0 {
if stmt := fn.Body[0]; stmt.Typecheck() == 0 {
base.FatalfAt(stmt.Pos(), "missed typecheck: %v", stmt)
}
}
}
+ // For functions that originally came from package runtime,
+ // mark them as norace to prevent instrumenting; see issue #60439.
+ for _, fn := range target.Funcs {
+ if !base.Flag.CompilingRuntime && types.RuntimeSymName(fn.Sym()) != "" {
+ fn.Pragma |= ir.Norace
+ }
+ }
+
base.ExitIfErrors() // just in case
}
@@ -128,7 +222,7 @@ func unified(m posMap, noders []*noder) {
// necessary on instantiations of imported generic functions, so their
// inlining costs can be computed.
func readBodies(target *ir.Package, duringInlining bool) {
- var inlDecls []ir.Node
+ var inlDecls []*ir.Func
// Don't use range--bodyIdx can add closures to todoBodies.
for {
@@ -165,7 +259,7 @@ func readBodies(target *ir.Package, duringInlining bool) {
if duringInlining && canSkipNonGenericMethod {
inlDecls = append(inlDecls, fn)
} else {
- target.Decls = append(target.Decls, fn)
+ target.Funcs = append(target.Funcs, fn)
}
}
@@ -194,11 +288,11 @@ func readBodies(target *ir.Package, duringInlining bool) {
oldLowerM := base.Flag.LowerM
base.Flag.LowerM = 0
- inline.InlineDecls(nil, inlDecls, false)
+ inline.CanInlineFuncs(inlDecls, nil)
base.Flag.LowerM = oldLowerM
for _, fn := range inlDecls {
- fn.(*ir.Func).Body = nil // free memory
+ fn.Body = nil // free memory
}
}
}
@@ -321,7 +415,7 @@ func readPackage(pr *pkgReader, importpkg *types.Pkg, localStub bool) {
if r.Bool() {
sym := importpkg.Lookup(".inittask")
- task := ir.NewNameAt(src.NoXPos, sym)
+ task := ir.NewNameAt(src.NoXPos, sym, nil)
task.Class = ir.PEXTERN
sym.Def = task
}
diff --git a/src/cmd/compile/internal/noder/writer.go b/src/cmd/compile/internal/noder/writer.go
index 178c3eb1a9..e5894c9505 100644
--- a/src/cmd/compile/internal/noder/writer.go
+++ b/src/cmd/compile/internal/noder/writer.go
@@ -6,8 +6,12 @@ package noder
import (
"fmt"
+ "go/constant"
+ "go/token"
+ "go/version"
"internal/buildcfg"
"internal/pkgbits"
+ "os"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
@@ -213,7 +217,7 @@ type itabInfo struct {
// generic function or method.
func (dict *writerDict) typeParamIndex(typ *types2.TypeParam) int {
for idx, implicit := range dict.implicits {
- if implicit.Type().(*types2.TypeParam) == typ {
+ if types2.Unalias(implicit.Type()).(*types2.TypeParam) == typ {
return idx
}
}
@@ -494,7 +498,7 @@ func (pw *pkgWriter) typIdx(typ types2.Type, dict *writerDict) typeInfo {
w := pw.newWriter(pkgbits.RelocType, pkgbits.SyncTypeIdx)
w.dict = dict
- switch typ := typ.(type) {
+ switch typ := types2.Unalias(typ).(type) {
default:
base.Fatalf("unexpected type: %v (%T)", typ, typ)
@@ -885,7 +889,7 @@ func (w *writer) objDict(obj types2.Object, dict *writerDict) {
// parameter is constrained to `int | uint` but then never used in
// arithmetic/conversions/etc, we could shape those together.
for _, implicit := range dict.implicits {
- tparam := implicit.Type().(*types2.TypeParam)
+ tparam := types2.Unalias(implicit.Type()).(*types2.TypeParam)
w.Bool(tparam.Underlying().(*types2.Interface).IsMethodSet())
}
for i := 0; i < ntparams; i++ {
@@ -1053,6 +1057,9 @@ func (w *writer) funcExt(obj *types2.Func) {
sig, block := obj.Type().(*types2.Signature), decl.Body
body, closureVars := w.p.bodyIdx(sig, block, w.dict)
+ if len(closureVars) > 0 {
+ fmt.Fprintln(os.Stderr, "CLOSURE", closureVars)
+ }
assert(len(closureVars) == 0)
w.Sync(pkgbits.SyncFuncExt)
@@ -1112,7 +1119,7 @@ func (pw *pkgWriter) bodyIdx(sig *types2.Signature, block *syntax.BlockStmt, dic
w.sig = sig
w.dict = dict
- w.funcargs(sig)
+ w.declareParams(sig)
if w.Bool(block != nil) {
w.stmts(block.List)
w.pos(block.Rbrace)
@@ -1121,24 +1128,18 @@ func (pw *pkgWriter) bodyIdx(sig *types2.Signature, block *syntax.BlockStmt, dic
return w.Flush(), w.closureVars
}
-func (w *writer) funcargs(sig *types2.Signature) {
- do := func(params *types2.Tuple, result bool) {
+func (w *writer) declareParams(sig *types2.Signature) {
+ addLocals := func(params *types2.Tuple) {
for i := 0; i < params.Len(); i++ {
- w.funcarg(params.At(i), result)
+ w.addLocal(params.At(i))
}
}
if recv := sig.Recv(); recv != nil {
- w.funcarg(recv, false)
- }
- do(sig.Params(), false)
- do(sig.Results(), true)
-}
-
-func (w *writer) funcarg(param *types2.Var, result bool) {
- if param.Name() != "" || result {
- w.addLocal(param)
+ w.addLocal(recv)
}
+ addLocals(sig.Params())
+ addLocals(sig.Results())
}
// addLocal records the declaration of a new local variable.
@@ -1206,9 +1207,18 @@ func (w *writer) stmt(stmt syntax.Stmt) {
}
func (w *writer) stmts(stmts []syntax.Stmt) {
+ dead := false
w.Sync(pkgbits.SyncStmts)
for _, stmt := range stmts {
+ if dead {
+ // Any statements after a terminating statement are safe to
+ // omit, at least until the next labeled statement.
+ if _, ok := stmt.(*syntax.LabeledStmt); !ok {
+ continue
+ }
+ }
w.stmt1(stmt)
+ dead = w.p.terminates(stmt)
}
w.Code(stmtEnd)
w.Sync(pkgbits.SyncStmtsEnd)
@@ -1261,6 +1271,9 @@ func (w *writer) stmt1(stmt syntax.Stmt) {
w.pos(stmt)
w.op(callOps[stmt.Tok])
w.expr(stmt.Call)
+ if stmt.Tok == syntax.Defer {
+ w.optExpr(stmt.DeferAt)
+ }
case *syntax.DeclStmt:
for _, decl := range stmt.DeclList {
@@ -1293,7 +1306,7 @@ func (w *writer) stmt1(stmt syntax.Stmt) {
dstType := func(i int) types2.Type {
return resultTypes.At(i).Type()
}
- w.multiExpr(stmt, dstType, unpackListExpr(stmt.Results))
+ w.multiExpr(stmt, dstType, syntax.UnpackListExpr(stmt.Results))
case *syntax.SelectStmt:
w.Code(stmtSelect)
@@ -1314,7 +1327,7 @@ func (w *writer) stmt1(stmt syntax.Stmt) {
}
func (w *writer) assignList(expr syntax.Expr) {
- exprs := unpackListExpr(expr)
+ exprs := syntax.UnpackListExpr(expr)
w.Len(len(exprs))
for _, expr := range exprs {
@@ -1323,7 +1336,7 @@ func (w *writer) assignList(expr syntax.Expr) {
}
func (w *writer) assign(expr syntax.Expr) {
- expr = unparen(expr)
+ expr = syntax.Unparen(expr)
if name, ok := expr.(*syntax.Name); ok {
if name.Value == "_" {
@@ -1364,8 +1377,8 @@ func (w *writer) declStmt(decl syntax.Decl) {
// assignStmt writes out an assignment for "lhs = rhs".
func (w *writer) assignStmt(pos poser, lhs0, rhs0 syntax.Expr) {
- lhs := unpackListExpr(lhs0)
- rhs := unpackListExpr(rhs0)
+ lhs := syntax.UnpackListExpr(lhs0)
+ rhs := syntax.UnpackListExpr(rhs0)
w.Code(stmtAssign)
w.pos(pos)
@@ -1382,7 +1395,7 @@ func (w *writer) assignStmt(pos poser, lhs0, rhs0 syntax.Expr) {
// Finding dstType is somewhat involved, because for VarDecl
// statements, the Names are only added to the info.{Defs,Uses}
// maps, not to info.Types.
- if name, ok := unparen(dst).(*syntax.Name); ok {
+ if name, ok := syntax.Unparen(dst).(*syntax.Name); ok {
if name.Value == "_" {
return nil // ok: no implicit conversion
} else if def, ok := w.p.info.Defs[name].(*types2.Var); ok {
@@ -1421,12 +1434,12 @@ func (w *writer) forStmt(stmt *syntax.ForStmt) {
w.rtype(xtyp)
}
{
- lhs := unpackListExpr(rang.Lhs)
+ lhs := syntax.UnpackListExpr(rang.Lhs)
assign := func(i int, src types2.Type) {
if i >= len(lhs) {
return
}
- dst := unparen(lhs[i])
+ dst := syntax.Unparen(lhs[i])
if name, ok := dst.(*syntax.Name); ok && name.Value == "_" {
return
}
@@ -1443,12 +1456,17 @@ func (w *writer) forStmt(stmt *syntax.ForStmt) {
w.convRTTI(src, dstType)
}
- keyType, valueType := w.p.rangeTypes(rang.X)
+ keyType, valueType := types2.RangeKeyVal(w.p.typeOf(rang.X))
assign(0, keyType)
assign(1, valueType)
}
} else {
+ if stmt.Cond != nil && w.p.staticBool(&stmt.Cond) < 0 { // always false
+ stmt.Post = nil
+ stmt.Body.List = nil
+ }
+
w.pos(stmt)
w.stmt(stmt.Init)
w.optExpr(stmt.Cond)
@@ -1456,42 +1474,46 @@ func (w *writer) forStmt(stmt *syntax.ForStmt) {
}
w.blockStmt(stmt.Body)
- w.Bool(base.Debug.LoopVar > 0)
+ w.Bool(w.distinctVars(stmt))
w.closeAnotherScope()
}
-// rangeTypes returns the types of values produced by ranging over
-// expr.
-func (pw *pkgWriter) rangeTypes(expr syntax.Expr) (key, value types2.Type) {
- typ := pw.typeOf(expr)
- switch typ := types2.CoreType(typ).(type) {
- case *types2.Pointer: // must be pointer to array
- return types2.Typ[types2.Int], types2.CoreType(typ.Elem()).(*types2.Array).Elem()
- case *types2.Array:
- return types2.Typ[types2.Int], typ.Elem()
- case *types2.Slice:
- return types2.Typ[types2.Int], typ.Elem()
- case *types2.Basic:
- if typ.Info()&types2.IsString != 0 {
- return types2.Typ[types2.Int], runeTypeName.Type()
- }
- case *types2.Map:
- return typ.Key(), typ.Elem()
- case *types2.Chan:
- return typ.Elem(), nil
- }
- pw.fatalf(expr, "unexpected range type: %v", typ)
- panic("unreachable")
+func (w *writer) distinctVars(stmt *syntax.ForStmt) bool {
+ lv := base.Debug.LoopVar
+ fileVersion := w.p.info.FileVersions[stmt.Pos().Base()]
+ is122 := fileVersion == "" || version.Compare(fileVersion, "go1.22") >= 0
+
+ // Turning off loopvar for 1.22 is only possible with loopvarhash=qn
+ //
+ // Debug.LoopVar values to be preserved for 1.21 compatibility are 1 and 2,
+ // which are also set (=1) by GOEXPERIMENT=loopvar. The knobs for turning on
+ // the new, unshared, loopvar behavior apply to versions less than 1.21 because
+ // (1) 1.21 also did that and (2) this is believed to be the likely use case;
+ // anyone checking to see if it affects their code will just run the GOEXPERIMENT
+ // but will not also update all their go.mod files to 1.21.
+ //
+ // -gcflags=-d=loopvar=3 enables logging for 1.22 but does not turn loopvar on for <= 1.21.
+
+ return is122 || lv > 0 && lv != 3
}
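For context, a small program showing what distinctVars decides (assumed example): with a go1.22 or newer file version each iteration gets its own copy of the loop variable and the closures print 0 1 2; with the older shared variable they print 3 3 3.

	package main

	func main() {
		var fns []func()
		for i := 0; i < 3; i++ {
			fns = append(fns, func() { println(i) })
		}
		for _, f := range fns {
			f()
		}
	}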
func (w *writer) ifStmt(stmt *syntax.IfStmt) {
+ cond := w.p.staticBool(&stmt.Cond)
+
w.Sync(pkgbits.SyncIfStmt)
w.openScope(stmt.Pos())
w.pos(stmt)
w.stmt(stmt.Init)
w.expr(stmt.Cond)
- w.blockStmt(stmt.Then)
- w.stmt(stmt.Else)
+ w.Int(cond)
+ if cond >= 0 {
+ w.blockStmt(stmt.Then)
+ } else {
+ w.pos(stmt.Then.Rbrace)
+ }
+ if cond <= 0 {
+ w.stmt(stmt.Else)
+ }
w.closeAnotherScope()
}
@@ -1539,10 +1561,56 @@ func (w *writer) switchStmt(stmt *syntax.SwitchStmt) {
} else {
tag := stmt.Tag
+ var tagValue constant.Value
if tag != nil {
- tagType = w.p.typeOf(tag)
+ tv := w.p.typeAndValue(tag)
+ tagType = tv.Type
+ tagValue = tv.Value
} else {
tagType = types2.Typ[types2.Bool]
+ tagValue = constant.MakeBool(true)
+ }
+
+ if tagValue != nil {
+ // If the switch tag has a constant value, look for a case
+ // clause that we always branch to.
+ func() {
+ var target *syntax.CaseClause
+ Outer:
+ for _, clause := range stmt.Body {
+ if clause.Cases == nil {
+ target = clause
+ }
+ for _, cas := range syntax.UnpackListExpr(clause.Cases) {
+ tv := w.p.typeAndValue(cas)
+ if tv.Value == nil {
+ return // non-constant case; give up
+ }
+ if constant.Compare(tagValue, token.EQL, tv.Value) {
+ target = clause
+ break Outer
+ }
+ }
+ }
+ // We've found the target clause, if any.
+
+ if target != nil {
+ if hasFallthrough(target.Body) {
+ return // fallthrough is tricky; give up
+ }
+
+ // Rewrite as single "default" case.
+ target.Cases = nil
+ stmt.Body = []*syntax.CaseClause{target}
+ } else {
+ stmt.Body = nil
+ }
+
+ // Clear switch tag (i.e., replace with implicit "true").
+ tag = nil
+ stmt.Tag = nil
+ tagType = types2.Typ[types2.Bool]
+ }()
}
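Sketch of the kind of switch this folding rewrites (assumed example): because the tag is a compile-time constant, only the matching clause is kept and it is re-emitted as a lone default case.

	package p

	const mode = "fast"

	func pick() int {
		switch mode {
		case "fast":
			return 1 // the only clause kept; rewritten as a default case
		case "slow":
			return 2
		default:
			return 0
		}
	}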
// Walk is going to emit comparisons between the tag value and
@@ -1552,7 +1620,7 @@ func (w *writer) switchStmt(stmt *syntax.SwitchStmt) {
// `any` instead.
Outer:
for _, clause := range stmt.Body {
- for _, cas := range unpackListExpr(clause.Cases) {
+ for _, cas := range syntax.UnpackListExpr(clause.Cases) {
if casType := w.p.typeOf(cas); !types2.AssignableTo(casType, tagType) {
tagType = types2.NewInterfaceType(nil, nil)
break Outer
@@ -1574,7 +1642,7 @@ func (w *writer) switchStmt(stmt *syntax.SwitchStmt) {
w.pos(clause)
- cases := unpackListExpr(clause.Cases)
+ cases := syntax.UnpackListExpr(clause.Cases)
if iface != nil {
w.Len(len(cases))
for _, cas := range cases {
@@ -1602,7 +1670,7 @@ func (w *writer) switchStmt(stmt *syntax.SwitchStmt) {
// instead just set the variable's DWARF scoping info earlier so
// we can give it the correct position information.
pos := clause.Pos()
- if typs := unpackListExpr(clause.Cases); len(typs) != 0 {
+ if typs := syntax.UnpackListExpr(clause.Cases); len(typs) != 0 {
pos = typeExprEndPos(typs[len(typs)-1])
}
w.pos(pos)
@@ -1641,12 +1709,21 @@ func (w *writer) optLabel(label *syntax.Name) {
func (w *writer) expr(expr syntax.Expr) {
base.Assertf(expr != nil, "missing expression")
- expr = unparen(expr) // skip parens; unneeded after typecheck
+ expr = syntax.Unparen(expr) // skip parens; unneeded after typecheck
obj, inst := lookupObj(w.p, expr)
targs := inst.TypeArgs
if tv, ok := w.p.maybeTypeAndValue(expr); ok {
+ if tv.IsRuntimeHelper() {
+ if pkg := obj.Pkg(); pkg != nil && pkg.Name() == "runtime" {
+ objName := obj.Name()
+ w.Code(exprRuntimeBuiltin)
+ w.String(objName)
+ return
+ }
+ }
+
if tv.IsType() {
w.p.fatalf(expr, "unexpected type expression %v", syntax.String(expr))
}
@@ -1658,16 +1735,11 @@ func (w *writer) expr(expr syntax.Expr) {
assert(typ != nil)
w.typ(typ)
w.Value(tv.Value)
-
- // TODO(mdempsky): These details are only important for backend
- // diagnostics. Explore writing them out separately.
- w.op(constExprOp(expr))
- w.String(syntax.String(expr))
return
}
if _, isNil := obj.(*types2.Nil); isNil {
- w.Code(exprNil)
+ w.Code(exprZero)
w.pos(expr)
w.typ(tv.Type)
return
@@ -1847,7 +1919,7 @@ func (w *writer) expr(expr syntax.Expr) {
var rtype types2.Type
if tv.IsBuiltin() {
- switch obj, _ := lookupObj(w.p, expr.Fun); obj.Name() {
+ switch obj, _ := lookupObj(w.p, syntax.Unparen(expr.Fun)); obj.Name() {
case "make":
assert(len(expr.ArgList) >= 1)
assert(!expr.HasDots)
@@ -1880,6 +1952,39 @@ func (w *writer) expr(expr syntax.Expr) {
w.exprType(nil, expr.ArgList[0])
return
+ case "Sizeof":
+ assert(len(expr.ArgList) == 1)
+ assert(!expr.HasDots)
+
+ w.Code(exprSizeof)
+ w.pos(expr)
+ w.typ(w.p.typeOf(expr.ArgList[0]))
+ return
+
+ case "Alignof":
+ assert(len(expr.ArgList) == 1)
+ assert(!expr.HasDots)
+
+ w.Code(exprAlignof)
+ w.pos(expr)
+ w.typ(w.p.typeOf(expr.ArgList[0]))
+ return
+
+ case "Offsetof":
+ assert(len(expr.ArgList) == 1)
+ assert(!expr.HasDots)
+ selector := syntax.Unparen(expr.ArgList[0]).(*syntax.SelectorExpr)
+ index := w.p.info.Selections[selector].Index()
+
+ w.Code(exprOffsetof)
+ w.pos(expr)
+ w.typ(deref2(w.p.typeOf(selector.X)))
+ w.Len(len(index) - 1)
+ for _, idx := range index {
+ w.Len(idx)
+ }
+ return
+
case "append":
rtype = sliceElem(w.p.typeOf(expr))
case "copy":
@@ -1900,7 +2005,7 @@ func (w *writer) expr(expr syntax.Expr) {
}
writeFunExpr := func() {
- fun := unparen(expr.Fun)
+ fun := syntax.Unparen(expr.Fun)
if selector, ok := fun.(*syntax.SelectorExpr); ok {
if sel, ok := w.p.info.Selections[selector]; ok && sel.Kind() == types2.MethodVal {
@@ -2019,7 +2124,7 @@ func (w *writer) methodExpr(expr *syntax.SelectorExpr, recv types2.Type, sel *ty
// Method on a type parameter. These require an indirect call
// through the current function's runtime dictionary.
- if typeParam, ok := recv.(*types2.TypeParam); w.Bool(ok) {
+ if typeParam, ok := types2.Unalias(recv).(*types2.TypeParam); w.Bool(ok) {
typeParamIdx := w.dict.typeParamIndex(typeParam)
methodInfo := w.p.selectorIdx(fun)
@@ -2032,7 +2137,7 @@ func (w *writer) methodExpr(expr *syntax.SelectorExpr, recv types2.Type, sel *ty
}
if !isInterface(recv) {
- if named, ok := deref2(recv).(*types2.Named); ok {
+ if named, ok := types2.Unalias(deref2(recv)).(*types2.Named); ok {
obj, targs := splitNamed(named)
info := w.p.objInstIdx(obj, targs, w.dict)
@@ -2212,9 +2317,13 @@ type posVar struct {
var_ *types2.Var
}
+func (p posVar) String() string {
+ return p.pos.String() + ":" + p.var_.String()
+}
+
func (w *writer) exprList(expr syntax.Expr) {
w.Sync(pkgbits.SyncExprList)
- w.exprs(unpackListExpr(expr))
+ w.exprs(syntax.UnpackListExpr(expr))
}
func (w *writer) exprs(exprs []syntax.Expr) {
@@ -2254,7 +2363,7 @@ func (w *writer) varDictIndex(obj *types2.Var) {
}
func isUntyped(typ types2.Type) bool {
- basic, ok := typ.(*types2.Basic)
+ basic, ok := types2.Unalias(typ).(*types2.Basic)
return ok && basic.Info()&types2.IsUntyped != 0
}
@@ -2307,7 +2416,7 @@ func (w *writer) exprType(iface types2.Type, typ syntax.Expr) {
// If typ is a type parameter, then isInterface reports an internal
// compiler error instead.
func isInterface(typ types2.Type) bool {
- if _, ok := typ.(*types2.TypeParam); ok {
+ if _, ok := types2.Unalias(typ).(*types2.TypeParam); ok {
// typ is a type parameter and may be instantiated as either a
// concrete or interface type, so the writer can't depend on
// knowing this.
@@ -2384,7 +2493,7 @@ func (c *declCollector) Visit(n syntax.Node) syntax.Visitor {
case *syntax.ImportDecl:
pw.checkPragmas(n.Pragma, 0, false)
- switch pkgNameOf(pw.info, n).Imported().Path() {
+ switch pw.info.PkgNameOf(n).Imported().Path() {
case "embed":
c.file.importedEmbed = true
case "unsafe":
@@ -2507,6 +2616,8 @@ func (w *writer) pkgInit(noders []*noder) {
w.Strings(cgoPragma)
}
+ w.pkgInitOrder()
+
w.Sync(pkgbits.SyncDecls)
for _, p := range noders {
for _, decl := range p.file.DeclList {
@@ -2518,6 +2629,18 @@ func (w *writer) pkgInit(noders []*noder) {
w.Sync(pkgbits.SyncEOF)
}
+func (w *writer) pkgInitOrder() {
+ // TODO(mdempsky): Write as a function body instead?
+ w.Len(len(w.p.info.InitOrder))
+ for _, init := range w.p.info.InitOrder {
+ w.Len(len(init.Lhs))
+ for _, v := range init.Lhs {
+ w.obj(v, nil)
+ }
+ w.expr(init.Rhs)
+ }
+}
+
func (w *writer) pkgDecl(decl syntax.Decl) {
switch decl := decl.(type) {
default:
@@ -2572,16 +2695,8 @@ func (w *writer) pkgDecl(decl syntax.Decl) {
case *syntax.VarDecl:
w.Code(declVar)
- w.pos(decl)
w.pkgObjs(decl.NameList...)
- // TODO(mdempsky): It would make sense to use multiExpr here, but
- // that results in IR that confuses pkginit/initorder.go. So we
- // continue using exprList, and let typecheck handle inserting any
- // implicit conversions. That's okay though, because package-scope
- // assignments never require dictionaries.
- w.exprList(decl.Values)
-
var embeds []pragmaEmbed
if p, ok := decl.Pragma.(*pragmas); ok {
embeds = p.Embeds
@@ -2609,12 +2724,67 @@ func (w *writer) pkgObjs(names ...*syntax.Name) {
// @@@ Helpers
+// staticBool analyzes a boolean expression and reports whether it's
+// always true (positive result), always false (negative result), or
+// unknown (zero).
+//
+// It also simplifies the expression while preserving semantics, if
+// possible.
+func (pw *pkgWriter) staticBool(ep *syntax.Expr) int {
+ if val := pw.typeAndValue(*ep).Value; val != nil {
+ if constant.BoolVal(val) {
+ return +1
+ } else {
+ return -1
+ }
+ }
+
+ if e, ok := (*ep).(*syntax.Operation); ok {
+ switch e.Op {
+ case syntax.Not:
+ return pw.staticBool(&e.X)
+
+ case syntax.AndAnd:
+ x := pw.staticBool(&e.X)
+ if x < 0 {
+ *ep = e.X
+ return x
+ }
+
+ y := pw.staticBool(&e.Y)
+ if x > 0 || y < 0 {
+ if pw.typeAndValue(e.X).Value != nil {
+ *ep = e.Y
+ }
+ return y
+ }
+
+ case syntax.OrOr:
+ x := pw.staticBool(&e.X)
+ if x > 0 {
+ *ep = e.X
+ return x
+ }
+
+ y := pw.staticBool(&e.Y)
+ if x < 0 || y > 0 {
+ if pw.typeAndValue(e.X).Value != nil {
+ *ep = e.Y
+ }
+ return y
+ }
+ }
+ }
+
+ return 0
+}
+
// hasImplicitTypeParams reports whether obj is a defined type with
// implicit type parameters (e.g., declared within a generic function
// or method).
-func (p *pkgWriter) hasImplicitTypeParams(obj *types2.TypeName) bool {
- if obj.Pkg() == p.curpkg {
- decl, ok := p.typDecls[obj]
+func (pw *pkgWriter) hasImplicitTypeParams(obj *types2.TypeName) bool {
+ if obj.Pkg() == pw.curpkg {
+ decl, ok := pw.typDecls[obj]
assert(ok)
if len(decl.implicits) != 0 {
return true
@@ -2643,7 +2813,7 @@ func isGlobal(obj types2.Object) bool {
// object is returned as well.
func lookupObj(p *pkgWriter, expr syntax.Expr) (obj types2.Object, inst types2.Instance) {
if index, ok := expr.(*syntax.IndexExpr); ok {
- args := unpackListExpr(index.Index)
+ args := syntax.UnpackListExpr(index.Index)
if len(args) == 1 {
tv := p.typeAndValue(args[0])
if tv.IsValue() {
@@ -2686,9 +2856,18 @@ func isNil(p *pkgWriter, expr syntax.Expr) bool {
return tv.IsNil()
}
+// isBuiltin reports whether expr is a (possibly parenthesized)
+// reference to the specified built-in function.
+func (pw *pkgWriter) isBuiltin(expr syntax.Expr, builtin string) bool {
+ if name, ok := syntax.Unparen(expr).(*syntax.Name); ok && name.Value == builtin {
+ return pw.typeAndValue(name).IsBuiltin()
+ }
+ return false
+}
+
// recvBase returns the base type for the given receiver parameter.
func recvBase(recv *types2.Var) *types2.Named {
- typ := recv.Type()
+ typ := types2.Unalias(recv.Type())
if ptr, ok := typ.(*types2.Pointer); ok {
typ = ptr.Elem()
}
@@ -2766,6 +2945,59 @@ func asWasmImport(p syntax.Pragma) *WasmImport {
// isPtrTo reports whether from is the type *to.
func isPtrTo(from, to types2.Type) bool {
- ptr, ok := from.(*types2.Pointer)
+ ptr, ok := types2.Unalias(from).(*types2.Pointer)
return ok && types2.Identical(ptr.Elem(), to)
}
+
+// hasFallthrough reports whether stmts ends in a fallthrough
+// statement.
+func hasFallthrough(stmts []syntax.Stmt) bool {
+ last, ok := lastNonEmptyStmt(stmts).(*syntax.BranchStmt)
+ return ok && last.Tok == syntax.Fallthrough
+}
+
+// lastNonEmptyStmt returns the last non-empty statement in list, if
+// any.
+func lastNonEmptyStmt(stmts []syntax.Stmt) syntax.Stmt {
+ for i := len(stmts) - 1; i >= 0; i-- {
+ stmt := stmts[i]
+ if _, ok := stmt.(*syntax.EmptyStmt); !ok {
+ return stmt
+ }
+ }
+ return nil
+}
+
+// terminates reports whether stmt terminates normal control flow
+// (i.e., does not merely advance to the following statement).
+func (pw *pkgWriter) terminates(stmt syntax.Stmt) bool {
+ switch stmt := stmt.(type) {
+ case *syntax.BranchStmt:
+ if stmt.Tok == syntax.Goto {
+ return true
+ }
+ case *syntax.ReturnStmt:
+ return true
+ case *syntax.ExprStmt:
+ if call, ok := syntax.Unparen(stmt.X).(*syntax.CallExpr); ok {
+ if pw.isBuiltin(call.Fun, "panic") {
+ return true
+ }
+ }
+
+ // The handling of BlockStmt here is approximate, but it serves to
+ // allow dead-code elimination for:
+ //
+ // if true {
+ // return x
+ // }
+ // unreachable
+ case *syntax.IfStmt:
+ cond := pw.staticBool(&stmt.Cond)
+ return (cond < 0 || pw.terminates(stmt.Then)) && (cond > 0 || pw.terminates(stmt.Else))
+ case *syntax.BlockStmt:
+ return pw.terminates(lastNonEmptyStmt(stmt.List))
+ }
+
+ return false
+}
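A small sketch of the source-level effect of the new writer-side pruning (assumed example, not from this change): staticBool resolves the constant condition, terminates sees that the then-branch returns, and w.stmts stops writing the statements that follow.

	package p

	const debug = false

	func f(x int) int {
		if !debug { // condition is constant true
			return x
		}
		trace(x) // dead: skipped once w.stmts sets its dead flag
		return -x
	}

	func trace(int) {}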
diff --git a/src/cmd/compile/internal/objw/objw.go b/src/cmd/compile/internal/objw/objw.go
index 4189337b8f..77744672c1 100644
--- a/src/cmd/compile/internal/objw/objw.go
+++ b/src/cmd/compile/internal/objw/objw.go
@@ -9,6 +9,7 @@ import (
"cmd/compile/internal/bitvec"
"cmd/compile/internal/types"
"cmd/internal/obj"
+ "encoding/binary"
)
// Uint8 writes an unsigned byte v into s at offset off,
@@ -29,6 +30,22 @@ func Uintptr(s *obj.LSym, off int, v uint64) int {
return UintN(s, off, v, types.PtrSize)
}
+// Uvarint writes a varint v into s at offset off,
+// and returns the next unused offset.
+func Uvarint(s *obj.LSym, off int, v uint64) int {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], v)
+ return int(s.WriteBytes(base.Ctxt, int64(off), buf[:n]))
+}
+
+func Bool(s *obj.LSym, off int, v bool) int {
+ w := 0
+ if v {
+ w = 1
+ }
+ return UintN(s, off, uint64(w), 1)
+}
+
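For reference, a standalone sketch of the varint encoding Uvarint emits (illustrative only; the helper above writes the bytes into an obj.LSym via WriteBytes rather than printing them).

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	func main() {
		var buf [binary.MaxVarintLen64]byte
		n := binary.PutUvarint(buf[:], 300)
		fmt.Println(buf[:n]) // [172 2]: 7 payload bits per byte, least-significant group first
	}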
// UintN writes an unsigned integer v of size wid bytes into s at offset off,
// and returns the next unused offset.
func UintN(s *obj.LSym, off int, v uint64, wid int) int {
diff --git a/src/cmd/compile/internal/objw/prog.go b/src/cmd/compile/internal/objw/prog.go
index 3175123e6e..84fb996723 100644
--- a/src/cmd/compile/internal/objw/prog.go
+++ b/src/cmd/compile/internal/objw/prog.go
@@ -57,8 +57,9 @@ func NewProgs(fn *ir.Func, worker int) *Progs {
pp.Pos = fn.Pos()
pp.SetText(fn)
// PCDATA tables implicitly start with index -1.
- pp.PrevLive = LivenessIndex{-1, false}
+ pp.PrevLive = -1
pp.NextLive = pp.PrevLive
+ pp.NextUnsafe = pp.PrevUnsafe
return pp
}
@@ -72,38 +73,25 @@ type Progs struct {
Cache []obj.Prog // local progcache
CacheIndex int // first free element of progcache
- NextLive LivenessIndex // liveness index for the next Prog
- PrevLive LivenessIndex // last emitted liveness index
-}
+ NextLive StackMapIndex // liveness index for the next Prog
+ PrevLive StackMapIndex // last emitted liveness index
-// LivenessIndex stores the liveness map information for a Value.
-type LivenessIndex struct {
- StackMapIndex int
-
- // IsUnsafePoint indicates that this is an unsafe-point.
- //
- // Note that it's possible for a call Value to have a stack
- // map while also being an unsafe-point. This means it cannot
- // be preempted at this instruction, but that a preemption or
- // stack growth may happen in the called function.
- IsUnsafePoint bool
+ NextUnsafe bool // unsafe mark for the next Prog
+ PrevUnsafe bool // last emitted unsafe mark
}
+type StackMapIndex int
+
// StackMapDontCare indicates that the stack map index at a Value
// doesn't matter.
//
// This is a sentinel value that should never be emitted to the PCDATA
// stream. We use -1000 because that's obviously never a valid stack
// index (but -1 is).
-const StackMapDontCare = -1000
-
-// LivenessDontCare indicates that the liveness information doesn't
-// matter. Currently it is used in deferreturn liveness when we don't
-// actually need it. It should never be emitted to the PCDATA stream.
-var LivenessDontCare = LivenessIndex{StackMapDontCare, true}
+const StackMapDontCare StackMapIndex = -1000
-func (idx LivenessIndex) StackMapValid() bool {
- return idx.StackMapIndex != StackMapDontCare
+func (s StackMapIndex) StackMapValid() bool {
+ return s != StackMapDontCare
}
func (pp *Progs) NewProg() *obj.Prog {
@@ -121,7 +109,7 @@ func (pp *Progs) NewProg() *obj.Prog {
// Flush converts from pp to machine code.
func (pp *Progs) Flush() {
plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.CurFunc}
- obj.Flushplist(base.Ctxt, plist, pp.NewProg, base.Ctxt.Pkgpath)
+ obj.Flushplist(base.Ctxt, plist, pp.NewProg)
}
// Free clears pp and any associated resources.
@@ -139,20 +127,20 @@ func (pp *Progs) Free() {
// Prog adds a Prog with instruction As to pp.
func (pp *Progs) Prog(as obj.As) *obj.Prog {
- if pp.NextLive.StackMapValid() && pp.NextLive.StackMapIndex != pp.PrevLive.StackMapIndex {
+ if pp.NextLive != StackMapDontCare && pp.NextLive != pp.PrevLive {
// Emit stack map index change.
- idx := pp.NextLive.StackMapIndex
- pp.PrevLive.StackMapIndex = idx
+ idx := pp.NextLive
+ pp.PrevLive = idx
p := pp.Prog(obj.APCDATA)
p.From.SetConst(abi.PCDATA_StackMapIndex)
p.To.SetConst(int64(idx))
}
- if pp.NextLive.IsUnsafePoint != pp.PrevLive.IsUnsafePoint {
+ if pp.NextUnsafe != pp.PrevUnsafe {
// Emit unsafe-point marker.
- pp.PrevLive.IsUnsafePoint = pp.NextLive.IsUnsafePoint
+ pp.PrevUnsafe = pp.NextUnsafe
p := pp.Prog(obj.APCDATA)
p.From.SetConst(abi.PCDATA_UnsafePoint)
- if pp.NextLive.IsUnsafePoint {
+ if pp.NextUnsafe {
p.To.SetConst(abi.UnsafePointUnsafe)
} else {
p.To.SetConst(abi.UnsafePointSafe)
diff --git a/src/cmd/compile/internal/pgo/internal/graph/graph.go b/src/cmd/compile/internal/pgo/internal/graph/graph.go
index 127529804f..4d89b1ba63 100644
--- a/src/cmd/compile/internal/pgo/internal/graph/graph.go
+++ b/src/cmd/compile/internal/pgo/internal/graph/graph.go
@@ -466,9 +466,9 @@ func (g *Graph) String() string {
// Sort returns a slice of the edges in the map, in a consistent
// order. The sort order is first based on the edge weight
// (higher-to-lower) and then by the node names to avoid flakiness.
-func (e EdgeMap) Sort() []*Edge {
- el := make(edgeList, 0, len(e))
- for _, w := range e {
+func (em EdgeMap) Sort() []*Edge {
+ el := make(edgeList, 0, len(em))
+ for _, w := range em {
el = append(el, w)
}
@@ -477,9 +477,9 @@ func (e EdgeMap) Sort() []*Edge {
}
// Sum returns the total weight of the edges in the map.
-func (e EdgeMap) Sum() int64 {
+func (em EdgeMap) Sum() int64 {
var ret int64
- for _, edge := range e {
+ for _, edge := range em {
ret += edge.Weight
}
return ret
diff --git a/src/cmd/compile/internal/pgo/irgraph.go b/src/cmd/compile/internal/pgo/irgraph.go
index 074f4a5a2f..96485e33ab 100644
--- a/src/cmd/compile/internal/pgo/irgraph.go
+++ b/src/cmd/compile/internal/pgo/irgraph.go
@@ -46,9 +46,11 @@ import (
"cmd/compile/internal/pgo/internal/graph"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
+ "errors"
"fmt"
"internal/profile"
"os"
+ "sort"
)
// IRGraph is a call graph with nodes pointing to IRs of functions and edges
@@ -62,7 +64,8 @@ import (
// TODO(prattmic): Consider merging this data structure with Graph. This is
// effectively a copy of Graph aggregated to line number and pointing to IR.
type IRGraph struct {
- // Nodes of the graph
+ // Nodes of the graph. Each node represents a function, keyed by linker
+ // symbol name.
IRNodes map[string]*IRNode
}
@@ -76,7 +79,7 @@ type IRNode struct {
// Set of out-edges in the callgraph. The map uniquely identifies each
// edge based on the callsite and callee, for fast lookup.
- OutEdges map[NodeMapKey]*IREdge
+ OutEdges map[NamedCallEdge]*IREdge
}
// Name returns the symbol name of this function.
@@ -96,21 +99,21 @@ type IREdge struct {
CallSiteOffset int // Line offset from function start line.
}
-// NodeMapKey represents a hash key to identify unique call-edges in profile
-// and in IR. Used for deduplication of call edges found in profile.
-//
-// TODO(prattmic): rename to something more descriptive.
-type NodeMapKey struct {
+// NamedCallEdge identifies a call edge by linker symbol names and call site
+// offset.
+type NamedCallEdge struct {
CallerName string
CalleeName string
CallSiteOffset int // Line offset from function start line.
}
-// Weights capture both node weight and edge weight.
-type Weights struct {
- NFlat int64
- NCum int64
- EWeight int64
+// NamedEdgeMap contains all unique call edges in the profile and their
+// edge weight.
+type NamedEdgeMap struct {
+ Weight map[NamedCallEdge]int64
+
+ // ByWeight lists all keys in Weight, sorted by edge weight.
+ ByWeight []NamedCallEdge
}
// CallSiteInfo captures call-site information and its caller/callee.
@@ -123,15 +126,13 @@ type CallSiteInfo struct {
// Profile contains the processed PGO profile and weighted call graph used for
// PGO optimizations.
type Profile struct {
- // Aggregated NodeWeights and EdgeWeights across the profile. This
- // helps us determine the percentage threshold for hot/cold
- // partitioning.
- TotalNodeWeight int64
- TotalEdgeWeight int64
+ // Aggregated edge weights across the profile. This helps us determine
+ // the percentage threshold for hot/cold partitioning.
+ TotalWeight int64
- // NodeMap contains all unique call-edges in the profile and their
- // aggregated weight.
- NodeMap map[NodeMapKey]*Weights
+ // NamedEdgeMap contains all unique call edges in the profile and their
+ // edge weight.
+ NamedEdgeMap NamedEdgeMap
// WeightedCG represents the IRGraph built from profile, which we will
// update as part of inlining.
@@ -145,18 +146,22 @@ func New(profileFile string) (*Profile, error) {
return nil, fmt.Errorf("error opening profile: %w", err)
}
defer f.Close()
- profile, err := profile.Parse(f)
- if err != nil {
+ p, err := profile.Parse(f)
+ if errors.Is(err, profile.ErrNoData) {
+ // Treat a completely empty file the same as a profile with no
+ // samples: nothing to do.
+ return nil, nil
+ } else if err != nil {
return nil, fmt.Errorf("error parsing profile: %w", err)
}
- if len(profile.Sample) == 0 {
+ if len(p.Sample) == 0 {
// We accept empty profiles, but there is nothing to do.
return nil, nil
}
valueIndex := -1
- for i, s := range profile.SampleType {
+ for i, s := range p.SampleType {
// Samples count is the raw data collected, and CPU nanoseconds is just
// a scaled version of it, so either one we can find is fine.
if (s.Type == "samples" && s.Unit == "count") ||
@@ -170,100 +175,106 @@ func New(profileFile string) (*Profile, error) {
return nil, fmt.Errorf(`profile does not contain a sample index with value/type "samples/count" or "cpu/nanoseconds"`)
}
- g := graph.NewGraph(profile, &graph.Options{
+ g := graph.NewGraph(p, &graph.Options{
SampleValue: func(v []int64) int64 { return v[valueIndex] },
})
- p := &Profile{
- NodeMap: make(map[NodeMapKey]*Weights),
- WeightedCG: &IRGraph{
- IRNodes: make(map[string]*IRNode),
- },
- }
-
- // Build the node map and totals from the profile graph.
- if err := p.processprofileGraph(g); err != nil {
+ namedEdgeMap, totalWeight, err := createNamedEdgeMap(g)
+ if err != nil {
return nil, err
}
- if p.TotalNodeWeight == 0 || p.TotalEdgeWeight == 0 {
+ if totalWeight == 0 {
return nil, nil // accept but ignore profile with no samples.
}
// Create package-level call graph with weights from profile and IR.
- p.initializeIRGraph()
+ wg := createIRGraph(namedEdgeMap)
- return p, nil
+ return &Profile{
+ TotalWeight: totalWeight,
+ NamedEdgeMap: namedEdgeMap,
+ WeightedCG: wg,
+ }, nil
}
-// processprofileGraph builds various maps from the profile-graph.
-//
-// It initializes NodeMap and Total{Node,Edge}Weight based on the name and
-// callsite to compute node and edge weights which will be used later on to
-// create edges for WeightedCG.
+// createNamedEdgeMap builds a map of callsite-callee edge weights from the
+// profile-graph.
//
-// Caller should ignore the profile if p.TotalNodeWeight == 0 || p.TotalEdgeWeight == 0.
-func (p *Profile) processprofileGraph(g *graph.Graph) error {
- nFlat := make(map[string]int64)
- nCum := make(map[string]int64)
+// Caller should ignore the profile if totalWeight == 0.
+func createNamedEdgeMap(g *graph.Graph) (edgeMap NamedEdgeMap, totalWeight int64, err error) {
seenStartLine := false
- // Accummulate weights for the same node.
- for _, n := range g.Nodes {
- canonicalName := n.Info.Name
- nFlat[canonicalName] += n.FlatValue()
- nCum[canonicalName] += n.CumValue()
- }
-
// Process graph and build various node and edge maps which will
// be consumed by AST walk.
+ weight := make(map[NamedCallEdge]int64)
for _, n := range g.Nodes {
seenStartLine = seenStartLine || n.Info.StartLine != 0
- p.TotalNodeWeight += n.FlatValue()
canonicalName := n.Info.Name
// Create the key to the nodeMapKey.
- nodeinfo := NodeMapKey{
+ namedEdge := NamedCallEdge{
CallerName: canonicalName,
CallSiteOffset: n.Info.Lineno - n.Info.StartLine,
}
for _, e := range n.Out {
- p.TotalEdgeWeight += e.WeightValue()
- nodeinfo.CalleeName = e.Dest.Info.Name
- if w, ok := p.NodeMap[nodeinfo]; ok {
- w.EWeight += e.WeightValue()
- } else {
- weights := new(Weights)
- weights.NFlat = nFlat[canonicalName]
- weights.NCum = nCum[canonicalName]
- weights.EWeight = e.WeightValue()
- p.NodeMap[nodeinfo] = weights
- }
+ totalWeight += e.WeightValue()
+ namedEdge.CalleeName = e.Dest.Info.Name
+ // Create new entry or increment existing entry.
+ weight[namedEdge] += e.WeightValue()
}
}
- if p.TotalNodeWeight == 0 || p.TotalEdgeWeight == 0 {
- return nil // accept but ignore profile with no samples.
+ if totalWeight == 0 {
+ return NamedEdgeMap{}, 0, nil // accept but ignore profile with no samples.
}
if !seenStartLine {
// TODO(prattmic): If Function.start_line is missing we could
// fall back to using absolute line numbers, which is better
// than nothing.
- return fmt.Errorf("profile missing Function.start_line data (Go version of profiled application too old? Go 1.20+ automatically adds this to profiles)")
+ return NamedEdgeMap{}, 0, fmt.Errorf("profile missing Function.start_line data (Go version of profiled application too old? Go 1.20+ automatically adds this to profiles)")
}
- return nil
+ byWeight := make([]NamedCallEdge, 0, len(weight))
+ for namedEdge := range weight {
+ byWeight = append(byWeight, namedEdge)
+ }
+ sort.Slice(byWeight, func(i, j int) bool {
+ ei, ej := byWeight[i], byWeight[j]
+ if wi, wj := weight[ei], weight[ej]; wi != wj {
+ return wi > wj // want larger weight first
+ }
+ // same weight, order by name/line number
+ if ei.CallerName != ej.CallerName {
+ return ei.CallerName < ej.CallerName
+ }
+ if ei.CalleeName != ej.CalleeName {
+ return ei.CalleeName < ej.CalleeName
+ }
+ return ei.CallSiteOffset < ej.CallSiteOffset
+ })
+
+ edgeMap = NamedEdgeMap{
+ Weight: weight,
+ ByWeight: byWeight,
+ }
+
+ return edgeMap, totalWeight, nil
}
// initializeIRGraph builds the IRGraph by visiting all the ir.Func in decl list
// of a package.
-func (p *Profile) initializeIRGraph() {
+func createIRGraph(namedEdgeMap NamedEdgeMap) *IRGraph {
+ g := &IRGraph{
+ IRNodes: make(map[string]*IRNode),
+ }
+
// Bottomup walk over the function to create IRGraph.
- ir.VisitFuncsBottomUp(typecheck.Target.Decls, func(list []*ir.Func, recursive bool) {
+ ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) {
for _, fn := range list {
- p.VisitIR(fn)
+ visitIR(fn, namedEdgeMap, g)
}
})
@@ -271,24 +282,20 @@ func (p *Profile) initializeIRGraph() {
// that IRNodes is fully populated (see the dummy node TODO in
// addIndirectEdges).
//
- // TODO(prattmic): VisitIR above populates the graph via direct calls
+ // TODO(prattmic): visitIR above populates the graph via direct calls
// discovered via the IR. addIndirectEdges populates the graph via
// calls discovered via the profile. This combination of opposite
// approaches is a bit awkward, particularly because direct calls are
// discoverable via the profile as well. Unify these into a single
// approach.
- p.addIndirectEdges()
-}
-
-// VisitIR traverses the body of each ir.Func and use NodeMap to determine if
-// we need to add an edge from ir.Func and any node in the ir.Func body.
-func (p *Profile) VisitIR(fn *ir.Func) {
- g := p.WeightedCG
+ addIndirectEdges(g, namedEdgeMap)
- if g.IRNodes == nil {
- g.IRNodes = make(map[string]*IRNode)
- }
+ return g
+}
+// visitIR traverses the body of each ir.Func and adds edges to g from the
+// ir.Func to any called function in the body.
+func visitIR(fn *ir.Func, namedEdgeMap NamedEdgeMap, g *IRGraph) {
name := ir.LinkFuncName(fn)
node, ok := g.IRNodes[name]
if !ok {
@@ -299,7 +306,29 @@ func (p *Profile) VisitIR(fn *ir.Func) {
}
// Recursively walk over the body of the function to create IRGraph edges.
- p.createIRGraphEdge(fn, node, name)
+ createIRGraphEdge(fn, node, name, namedEdgeMap, g)
+}
+
+// createIRGraphEdge traverses the nodes in the body of ir.Func and adds edges
+// between the callernode which points to the ir.Func and the nodes in the
+// body.
+func createIRGraphEdge(fn *ir.Func, callernode *IRNode, name string, namedEdgeMap NamedEdgeMap, g *IRGraph) {
+ ir.VisitList(fn.Body, func(n ir.Node) {
+ switch n.Op() {
+ case ir.OCALLFUNC:
+ call := n.(*ir.CallExpr)
+ // Find the callee function from the call site and add the edge.
+ callee := DirectCallee(call.Fun)
+ if callee != nil {
+ addIREdge(callernode, name, n, callee, namedEdgeMap, g)
+ }
+ case ir.OCALLMETH:
+ call := n.(*ir.CallExpr)
+ // Find the callee method from the call site and add the edge.
+ callee := ir.MethodExprName(call.Fun).Func
+ addIREdge(callernode, name, n, callee, namedEdgeMap, g)
+ }
+ })
}
// NodeLineOffset returns the line offset of n in fn.
@@ -312,9 +341,7 @@ func NodeLineOffset(n ir.Node, fn *ir.Func) int {
// addIREdge adds an edge between caller and new node that points to `callee`
// based on the profile-graph and NodeMap.
-func (p *Profile) addIREdge(callerNode *IRNode, callerName string, call ir.Node, callee *ir.Func) {
- g := p.WeightedCG
-
+func addIREdge(callerNode *IRNode, callerName string, call ir.Node, callee *ir.Func, namedEdgeMap NamedEdgeMap, g *IRGraph) {
calleeName := ir.LinkFuncName(callee)
calleeNode, ok := g.IRNodes[calleeName]
if !ok {
@@ -324,40 +351,36 @@ func (p *Profile) addIREdge(callerNode *IRNode, callerName string, call ir.Node,
g.IRNodes[calleeName] = calleeNode
}
- nodeinfo := NodeMapKey{
+ namedEdge := NamedCallEdge{
CallerName: callerName,
CalleeName: calleeName,
CallSiteOffset: NodeLineOffset(call, callerNode.AST),
}
- var weight int64
- if weights, ok := p.NodeMap[nodeinfo]; ok {
- weight = weights.EWeight
- }
-
// Add edge in the IRGraph from caller to callee.
edge := &IREdge{
Src: callerNode,
Dst: calleeNode,
- Weight: weight,
- CallSiteOffset: nodeinfo.CallSiteOffset,
+ Weight: namedEdgeMap.Weight[namedEdge],
+ CallSiteOffset: namedEdge.CallSiteOffset,
}
if callerNode.OutEdges == nil {
- callerNode.OutEdges = make(map[NodeMapKey]*IREdge)
+ callerNode.OutEdges = make(map[NamedCallEdge]*IREdge)
}
- callerNode.OutEdges[nodeinfo] = edge
+ callerNode.OutEdges[namedEdge] = edge
+}
+
+// LookupFunc looks up a function or method in export data. It is expected to
+// be overridden by package noder, to break a dependency cycle.
+var LookupFunc = func(fullName string) (*ir.Func, error) {
+ base.Fatalf("pgo.LookupFunc not overridden")
+ panic("unreachable")
}
// addIndirectEdges adds indirect call edges found in the profile to the graph,
// to be used for devirtualization.
//
-// targetDeclFuncs is the set of functions in typecheck.Target.Decls. Only
-// edges from these functions will be added.
-//
-// Devirtualization is only applied to typecheck.Target.Decls functions, so there
-// is no need to add edges from other functions.
-//
// N.B. despite the name, addIndirectEdges will add any edges discovered via
// the profile. We don't know for sure that they are indirect, but assume they
// are since direct calls would already be added. (e.g., direct calls that have
@@ -366,9 +389,7 @@ func (p *Profile) addIREdge(callerNode *IRNode, callerName string, call ir.Node,
// TODO(prattmic): Devirtualization runs before inlining, so we can't devirtualize
// calls inside inlined call bodies. If we did add that, we'd need edges from
// inlined bodies as well.
-func (p *Profile) addIndirectEdges() {
- g := p.WeightedCG
-
+func addIndirectEdges(g *IRGraph, namedEdgeMap NamedEdgeMap) {
// g.IRNodes is populated with the set of functions in the local
// package build by VisitIR. We want to filter for local functions
// below, but we also add unknown callees to IRNodes as we go. So make
@@ -378,7 +399,15 @@ func (p *Profile) addIndirectEdges() {
localNodes[k] = v
}
- for key, weights := range p.NodeMap {
+ // N.B. We must consider edges in a stable order because export data
+ // lookup order (LookupMethodFunc, below) can impact the export data of
+ // this package, which must be stable across different invocations for
+ // reproducibility.
+ //
+ // The weight ordering of ByWeight is irrelevant, it just happens to be
+ // an ordered list of edges that is already available.
+ for _, key := range namedEdgeMap.ByWeight {
+ weight := namedEdgeMap.Weight[key]
// All callers in the local package build were added to IRNodes
// in VisitIR. If a caller isn't in the local package build we
// can skip adding edges, since we won't be devirtualizing in
@@ -395,25 +424,55 @@ func (p *Profile) addIndirectEdges() {
calleeNode, ok := g.IRNodes[key.CalleeName]
if !ok {
- // IR is missing for this callee. Most likely this is
- // because the callee isn't in the transitive deps of
- // this package.
+ // IR is missing for this callee. visitIR populates
+ // IRNodes with all functions discovered via local
+ // package function declarations and calls. This
+ // function may still be available from export data of
+ // a transitive dependency.
//
- // Record this call anyway. If this is the hottest,
- // then we want to skip devirtualization rather than
- // devirtualizing to the second most common callee.
+ // TODO(prattmic): Parameterized types/functions are
+ // not supported.
//
- // TODO(prattmic): VisitIR populates IRNodes with all
- // of the functions discovered via local package
- // function declarations and calls. Thus we could miss
- // functions that are available in export data of
- // transitive deps, but aren't directly reachable. We
- // need to do a lookup directly from package export
- // data to get complete coverage.
- calleeNode = &IRNode{
- LinkerSymbolName: key.CalleeName,
- // TODO: weights? We don't need them.
+ // TODO(prattmic): This eager lookup during graph load
+ // is simple, but wasteful. We are likely to load many
+ // functions that we never need. We could delay load
+ // until we actually need the method in
+ // devirtualization. Instantiation of generic functions
+ // will likely need to be done at the devirtualization
+ // site, if at all.
+ fn, err := LookupFunc(key.CalleeName)
+ if err == nil {
+ if base.Debug.PGODebug >= 3 {
+ fmt.Printf("addIndirectEdges: %s found in export data\n", key.CalleeName)
+ }
+ calleeNode = &IRNode{AST: fn}
+
+ // N.B. we could call createIRGraphEdge to add
+ // direct calls in this newly-imported
+ // function's body to the graph. Similarly, we
+ // could add to this function's queue to add
+ // indirect calls. However, those would be
+ // useless given the visit order of inlining,
+ // and the ordering of PGO devirtualization and
+ // inlining. This function can only be used as
+ // an inlined body. We will never do PGO
+ // devirtualization inside an inlined call. Nor
+ // will we perform inlining inside an inlined
+ // call.
+ } else {
+ // Still not found. Most likely this is because
+ // the callee isn't in the transitive deps of
+ // this package.
+ //
+ // Record this call anyway. If this is the hottest,
+ // then we want to skip devirtualization rather than
+ // devirtualizing to the second most common callee.
+ if base.Debug.PGODebug >= 3 {
+ fmt.Printf("addIndirectEdges: %s not found in export data: %v\n", key.CalleeName, err)
+ }
+ calleeNode = &IRNode{LinkerSymbolName: key.CalleeName}
}
+
// Add dummy node back to IRNodes. We don't need this
// directly, but PrintWeightedCallGraphDOT uses these
// to print nodes.
@@ -422,39 +481,17 @@ func (p *Profile) addIndirectEdges() {
edge := &IREdge{
Src: callerNode,
Dst: calleeNode,
- Weight: weights.EWeight,
+ Weight: weight,
CallSiteOffset: key.CallSiteOffset,
}
if callerNode.OutEdges == nil {
- callerNode.OutEdges = make(map[NodeMapKey]*IREdge)
+ callerNode.OutEdges = make(map[NamedCallEdge]*IREdge)
}
callerNode.OutEdges[key] = edge
}
}
-// createIRGraphEdge traverses the nodes in the body of ir.Func and adds edges
-// between the callernode which points to the ir.Func and the nodes in the
-// body.
-func (p *Profile) createIRGraphEdge(fn *ir.Func, callernode *IRNode, name string) {
- ir.VisitList(fn.Body, func(n ir.Node) {
- switch n.Op() {
- case ir.OCALLFUNC:
- call := n.(*ir.CallExpr)
- // Find the callee function from the call site and add the edge.
- callee := DirectCallee(call.X)
- if callee != nil {
- p.addIREdge(callernode, name, n, callee)
- }
- case ir.OCALLMETH:
- call := n.(*ir.CallExpr)
- // Find the callee method from the call site and add the edge.
- callee := ir.MethodExprName(call.X).Func
- p.addIREdge(callernode, name, n, callee)
- }
- })
-}
-
// WeightInPercentage converts profile weights to a percentage.
func WeightInPercentage(value int64, total int64) float64 {
return (float64(value) / float64(total)) * 100
@@ -467,7 +504,7 @@ func (p *Profile) PrintWeightedCallGraphDOT(edgeThreshold float64) {
// List of functions in this package.
funcs := make(map[string]struct{})
- ir.VisitFuncsBottomUp(typecheck.Target.Decls, func(list []*ir.Func, recursive bool) {
+ ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) {
for _, f := range list {
name := ir.LinkFuncName(f)
funcs[name] = struct{}{}
@@ -511,7 +548,7 @@ func (p *Profile) PrintWeightedCallGraphDOT(edgeThreshold float64) {
}
}
// Print edges.
- ir.VisitFuncsBottomUp(typecheck.Target.Decls, func(list []*ir.Func, recursive bool) {
+ ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) {
for _, f := range list {
name := ir.LinkFuncName(f)
if n, ok := p.WeightedCG.IRNodes[name]; ok {
@@ -521,7 +558,7 @@ func (p *Profile) PrintWeightedCallGraphDOT(edgeThreshold float64) {
style = "dashed"
}
color := "black"
- edgepercent := WeightInPercentage(e.Weight, p.TotalEdgeWeight)
+ edgepercent := WeightInPercentage(e.Weight, p.TotalWeight)
if edgepercent > edgeThreshold {
color = "red"
}
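
To see how the reworked profile data is meant to be consumed, here is a self-contained sketch. The NamedCallEdge and NamedEdgeMap types below only mirror the definitions added in this patch — this is not the cmd/compile/internal/pgo package — and the example weights are made up. It shows the two properties the patch leans on: ByWeight gives a deterministic, hottest-first traversal (which addIndirectEdges needs for reproducible export data), and an edge's share of the total weight falls out directly, in the spirit of WeightInPercentage.

package main

import (
	"fmt"
	"sort"
)

// Local mirrors of the types added in irgraph.go, for illustration only.
type NamedCallEdge struct {
	CallerName     string
	CalleeName     string
	CallSiteOffset int
}

type NamedEdgeMap struct {
	Weight   map[NamedCallEdge]int64
	ByWeight []NamedCallEdge // all keys in Weight, hottest first
}

// build sorts the keys of weight the same way createNamedEdgeMap does:
// larger weight first, then caller/callee names and call-site offset.
func build(weight map[NamedCallEdge]int64) NamedEdgeMap {
	byWeight := make([]NamedCallEdge, 0, len(weight))
	for e := range weight {
		byWeight = append(byWeight, e)
	}
	sort.Slice(byWeight, func(i, j int) bool {
		ei, ej := byWeight[i], byWeight[j]
		if wi, wj := weight[ei], weight[ej]; wi != wj {
			return wi > wj
		}
		if ei.CallerName != ej.CallerName {
			return ei.CallerName < ej.CallerName
		}
		if ei.CalleeName != ej.CalleeName {
			return ei.CalleeName < ej.CalleeName
		}
		return ei.CallSiteOffset < ej.CallSiteOffset
	})
	return NamedEdgeMap{Weight: weight, ByWeight: byWeight}
}

func main() {
	// Hypothetical edge weights; a real profile would supply these.
	weight := map[NamedCallEdge]int64{
		{"main.main", "main.hot", 3}:  900,
		{"main.main", "main.cold", 7}: 100,
	}
	em := build(weight)

	var total int64
	for _, w := range weight {
		total += w
	}
	for _, e := range em.ByWeight { // deterministic, hottest first
		pct := float64(em.Weight[e]) / float64(total) * 100
		fmt.Printf("%s -> %s (offset %d): %.1f%%\n", e.CallerName, e.CalleeName, e.CallSiteOffset, pct)
	}
	// Output:
	// main.main -> main.hot (offset 3): 90.0%
	// main.main -> main.cold (offset 7): 10.0%
}
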
diff --git a/src/cmd/compile/internal/pkginit/init.go b/src/cmd/compile/internal/pkginit/init.go
index edb0d6a533..9278890b63 100644
--- a/src/cmd/compile/internal/pkginit/init.go
+++ b/src/cmd/compile/internal/pkginit/init.go
@@ -15,82 +15,15 @@ import (
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
- "fmt"
- "os"
)
-// MakeInit creates a synthetic init function to handle any
-// package-scope initialization statements.
-//
-// TODO(mdempsky): Move into noder, so that the types2-based frontends
-// can use Info.InitOrder instead.
-func MakeInit() {
- nf := initOrder(typecheck.Target.Decls)
- if len(nf) == 0 {
- return
- }
-
- // Make a function that contains all the initialization statements.
- base.Pos = nf[0].Pos() // prolog/epilog gets line number of first init stmt
- initializers := typecheck.Lookup("init")
- fn := typecheck.DeclFunc(initializers, nil, nil, nil)
- for _, dcl := range typecheck.InitTodoFunc.Dcl {
- dcl.Curfn = fn
- }
- fn.Dcl = append(fn.Dcl, typecheck.InitTodoFunc.Dcl...)
- typecheck.InitTodoFunc.Dcl = nil
- fn.SetIsPackageInit(true)
-
- // Outline (if legal/profitable) global map inits.
- newfuncs := []*ir.Func{}
- nf, newfuncs = staticinit.OutlineMapInits(nf)
-
- // Suppress useless "can inline" diagnostics.
- // Init functions are only called dynamically.
- fn.SetInlinabilityChecked(true)
- for _, nfn := range newfuncs {
- nfn.SetInlinabilityChecked(true)
- }
-
- fn.Body = nf
- typecheck.FinishFuncBody()
-
- typecheck.Func(fn)
- ir.WithFunc(fn, func() {
- typecheck.Stmts(nf)
- })
- typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
- if base.Debug.WrapGlobalMapDbg > 1 {
- fmt.Fprintf(os.Stderr, "=-= len(newfuncs) is %d for %v\n",
- len(newfuncs), fn)
- }
- for _, nfn := range newfuncs {
- if base.Debug.WrapGlobalMapDbg > 1 {
- fmt.Fprintf(os.Stderr, "=-= add to target.decls %v\n", nfn)
- }
- typecheck.Target.Decls = append(typecheck.Target.Decls, ir.Node(nfn))
- }
-
- // Prepend to Inits, so it runs first, before any user-declared init
- // functions.
- typecheck.Target.Inits = append([]*ir.Func{fn}, typecheck.Target.Inits...)
-
- if typecheck.InitTodoFunc.Dcl != nil {
- // We only generate temps using InitTodoFunc if there
- // are package-scope initialization statements, so
- // something's weird if we get here.
- base.Fatalf("InitTodoFunc still has declarations")
- }
- typecheck.InitTodoFunc = nil
-}
-
-// Task makes and returns an initialization record for the package.
+// MakeTask makes an initialization record for the package, if necessary.
// See runtime/proc.go:initTask for its layout.
// The 3 tasks for initialization are:
// 1. Initialize all of the packages the current package depends on.
// 2. Initialize all the variables that have initializers.
// 3. Run any init functions.
-func Task() *ir.Name {
+func MakeTask() {
var deps []*obj.LSym // initTask records for packages the current package depends on
var fns []*obj.LSym // functions to call for package initialization
@@ -125,35 +58,29 @@ func Task() *ir.Name {
ni := len(InstrumentGlobalsMap)
if ni != 0 {
// Make an init._ function.
- base.Pos = base.AutogeneratedPos
- typecheck.DeclContext = ir.PEXTERN
- name := noder.Renameinit()
- fnInit := typecheck.DeclFunc(name, nil, nil, nil)
+ pos := base.AutogeneratedPos
+ base.Pos = pos
+
+ sym := noder.Renameinit()
+ fnInit := ir.NewFunc(pos, pos, sym, types.NewSignature(nil, nil, nil))
+ typecheck.DeclFunc(fnInit)
// Get an array of instrumented global variables.
globals := instrumentGlobals(fnInit)
// Call runtime.asanregisterglobals function to poison redzones.
// runtime.asanregisterglobals(unsafe.Pointer(&globals[0]), ni)
- asanf := typecheck.NewName(ir.Pkgs.Runtime.Lookup("asanregisterglobals"))
- ir.MarkFunc(asanf)
- asanf.SetType(types.NewSignature(nil, []*types.Field{
- types.NewField(base.Pos, nil, types.Types[types.TUNSAFEPTR]),
- types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
- }, nil))
- asancall := ir.NewCallExpr(base.Pos, ir.OCALL, asanf, nil)
+ asancall := ir.NewCallExpr(base.Pos, ir.OCALL, typecheck.LookupRuntime("asanregisterglobals"), nil)
asancall.Args.Append(typecheck.ConvNop(typecheck.NodAddr(
ir.NewIndexExpr(base.Pos, globals, ir.NewInt(base.Pos, 0))), types.Types[types.TUNSAFEPTR]))
asancall.Args.Append(typecheck.DefaultLit(ir.NewInt(base.Pos, int64(ni)), types.Types[types.TUINTPTR]))
fnInit.Body.Append(asancall)
typecheck.FinishFuncBody()
- typecheck.Func(fnInit)
ir.CurFunc = fnInit
typecheck.Stmts(fnInit.Body)
ir.CurFunc = nil
- typecheck.Target.Decls = append(typecheck.Target.Decls, fnInit)
typecheck.Target.Inits = append(typecheck.Target.Inits, fnInit)
}
}
@@ -191,13 +118,12 @@ func Task() *ir.Name {
}
if len(deps) == 0 && len(fns) == 0 && types.LocalPkg.Path != "main" && types.LocalPkg.Path != "runtime" {
- return nil // nothing to initialize
+ return // nothing to initialize
}
// Make an .inittask structure.
sym := typecheck.Lookup(".inittask")
- task := typecheck.NewName(sym)
- task.SetType(types.Types[types.TUINT8]) // fake type
+ task := ir.NewNameAt(base.Pos, sym, types.Types[types.TUINT8]) // fake type
task.Class = ir.PEXTERN
sym.Def = task
lsym := task.Linksym()
@@ -219,20 +145,4 @@ func Task() *ir.Name {
// An initTask has pointers, but none into the Go heap.
// It's not quite read only, the state field must be modifiable.
objw.Global(lsym, int32(ot), obj.NOPTR)
- return task
-}
-
-// initRequiredForCoverage returns TRUE if we need to force creation
-// of an init function for the package so as to insert a coverage
-// runtime registration call.
-func initRequiredForCoverage(l []ir.Node) bool {
- if base.Flag.Cfg.CoverageInfo == nil {
- return false
- }
- for _, n := range l {
- if n.Op() == ir.ODCLFUNC {
- return true
- }
- }
- return false
}
diff --git a/src/cmd/compile/internal/pkginit/initAsanGlobals.go b/src/cmd/compile/internal/pkginit/initAsanGlobals.go
index ce26cbf189..42db0eaf1b 100644
--- a/src/cmd/compile/internal/pkginit/initAsanGlobals.go
+++ b/src/cmd/compile/internal/pkginit/initAsanGlobals.go
@@ -23,8 +23,7 @@ func instrumentGlobals(fn *ir.Func) *ir.Name {
// var asanglobals []asanGlobalStruct
arraytype := types.NewArray(asanGlobalStruct, int64(len(InstrumentGlobalsMap)))
symG := lname(".asanglobals")
- globals := typecheck.NewName(symG)
- globals.SetType(arraytype)
+ globals := ir.NewNameAt(base.Pos, symG, arraytype)
globals.Class = ir.PEXTERN
symG.Def = globals
typecheck.Target.Externs = append(typecheck.Target.Externs, globals)
@@ -32,8 +31,7 @@ func instrumentGlobals(fn *ir.Func) *ir.Name {
// var asanL []asanLocationStruct
arraytype = types.NewArray(asanLocationStruct, int64(len(InstrumentGlobalsMap)))
symL := lname(".asanL")
- asanlocation := typecheck.NewName(symL)
- asanlocation.SetType(arraytype)
+ asanlocation := ir.NewNameAt(base.Pos, symL, arraytype)
asanlocation.Class = ir.PEXTERN
symL.Def = asanlocation
typecheck.Target.Externs = append(typecheck.Target.Externs, asanlocation)
@@ -43,22 +41,19 @@ func instrumentGlobals(fn *ir.Func) *ir.Name {
// var asanModulename string
// var asanFilename string
symL = lname(".asanName")
- asanName := typecheck.NewName(symL)
- asanName.SetType(types.Types[types.TSTRING])
+ asanName := ir.NewNameAt(base.Pos, symL, types.Types[types.TSTRING])
asanName.Class = ir.PEXTERN
symL.Def = asanName
typecheck.Target.Externs = append(typecheck.Target.Externs, asanName)
symL = lname(".asanModulename")
- asanModulename := typecheck.NewName(symL)
- asanModulename.SetType(types.Types[types.TSTRING])
+ asanModulename := ir.NewNameAt(base.Pos, symL, types.Types[types.TSTRING])
asanModulename.Class = ir.PEXTERN
symL.Def = asanModulename
typecheck.Target.Externs = append(typecheck.Target.Externs, asanModulename)
symL = lname(".asanFilename")
- asanFilename := typecheck.NewName(symL)
- asanFilename.SetType(types.Types[types.TSTRING])
+ asanFilename := ir.NewNameAt(base.Pos, symL, types.Types[types.TSTRING])
asanFilename.Class = ir.PEXTERN
symL.Def = asanFilename
typecheck.Target.Externs = append(typecheck.Target.Externs, asanFilename)
diff --git a/src/cmd/compile/internal/pkginit/initorder.go b/src/cmd/compile/internal/pkginit/initorder.go
deleted file mode 100644
index 9416470ca1..0000000000
--- a/src/cmd/compile/internal/pkginit/initorder.go
+++ /dev/null
@@ -1,369 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package pkginit
-
-import (
- "container/heap"
- "fmt"
- "internal/types/errors"
- "strings"
-
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
-)
-
-// Package initialization
-//
-// Here we implement the algorithm for ordering package-level variable
-// initialization. The spec is written in terms of variable
-// initialization, but multiple variables initialized by a single
-// assignment are handled together, so here we instead focus on
-// ordering initialization assignments. Conveniently, this maps well
-// to how we represent package-level initializations using the Node
-// AST.
-//
-// Assignments are in one of three phases: NotStarted, Pending, or
-// Done. For assignments in the Pending phase, we use Xoffset to
-// record the number of unique variable dependencies whose
-// initialization assignment is not yet Done. We also maintain a
-// "blocking" map that maps assignments back to all of the assignments
-// that depend on it.
-//
-// For example, for an initialization like:
-//
-// var x = f(a, b, b)
-// var a, b = g()
-//
-// the "x = f(a, b, b)" assignment depends on two variables (a and b),
-// so its Xoffset will be 2. Correspondingly, the "a, b = g()"
-// assignment's "blocking" entry will have two entries back to x's
-// assignment.
-//
-// Logically, initialization works by (1) taking all NotStarted
-// assignments, calculating their dependencies, and marking them
-// Pending; (2) adding all Pending assignments with Xoffset==0 to a
-// "ready" priority queue (ordered by variable declaration position);
-// and (3) iteratively processing the next Pending assignment from the
-// queue, decreasing the Xoffset of assignments it's blocking, and
-// adding them to the queue if decremented to 0.
-//
-// As an optimization, we actually apply each of these three steps for
-// each assignment. This yields the same order, but keeps queue size
-// down and thus also heap operation costs.
-
-// Static initialization phase.
-// These values are stored in two bits in Node.flags.
-const (
- InitNotStarted = iota
- InitDone
- InitPending
-)
-
-type InitOrder struct {
- // blocking maps initialization assignments to the assignments
- // that depend on it.
- blocking map[ir.Node][]ir.Node
-
- // ready is the queue of Pending initialization assignments
- // that are ready for initialization.
- ready declOrder
-
- order map[ir.Node]int
-}
-
-// initOrder computes initialization order for a list l of
-// package-level declarations (in declaration order) and outputs the
-// corresponding list of statements to include in the init() function
-// body.
-func initOrder(l []ir.Node) []ir.Node {
- var res ir.Nodes
- o := InitOrder{
- blocking: make(map[ir.Node][]ir.Node),
- order: make(map[ir.Node]int),
- }
-
- // Process all package-level assignment in declaration order.
- for _, n := range l {
- switch n.Op() {
- case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
- o.processAssign(n)
- o.flushReady(func(n ir.Node) { res.Append(n) })
- case ir.ODCLCONST, ir.ODCLFUNC, ir.ODCLTYPE:
- // nop
- default:
- base.Fatalf("unexpected package-level statement: %v", n)
- }
- }
-
- // Check that all assignments are now Done; if not, there must
- // have been a dependency cycle.
- for _, n := range l {
- switch n.Op() {
- case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
- if o.order[n] != orderDone {
- // If there have already been errors
- // printed, those errors may have
- // confused us and there might not be
- // a loop. Let the user fix those
- // first.
- base.ExitIfErrors()
-
- o.findInitLoopAndExit(firstLHS(n), new([]*ir.Name), new(ir.NameSet))
- base.Fatalf("initialization unfinished, but failed to identify loop")
- }
- }
- }
-
- // Invariant consistency check. If this is non-zero, then we
- // should have found a cycle above.
- if len(o.blocking) != 0 {
- base.Fatalf("expected empty map: %v", o.blocking)
- }
-
- return res
-}
-
-func (o *InitOrder) processAssign(n ir.Node) {
- if _, ok := o.order[n]; ok {
- base.Fatalf("unexpected state: %v, %v", n, o.order[n])
- }
- o.order[n] = 0
-
- // Compute number of variable dependencies and build the
- // inverse dependency ("blocking") graph.
- for dep := range collectDeps(n, true) {
- defn := dep.Defn
- // Skip dependencies on functions (PFUNC) and
- // variables already initialized (InitDone).
- if dep.Class != ir.PEXTERN || o.order[defn] == orderDone {
- continue
- }
- o.order[n]++
- o.blocking[defn] = append(o.blocking[defn], n)
- }
-
- if o.order[n] == 0 {
- heap.Push(&o.ready, n)
- }
-}
-
-const orderDone = -1000
-
-// flushReady repeatedly applies initialize to the earliest (in
-// declaration order) assignment ready for initialization and updates
-// the inverse dependency ("blocking") graph.
-func (o *InitOrder) flushReady(initialize func(ir.Node)) {
- for o.ready.Len() != 0 {
- n := heap.Pop(&o.ready).(ir.Node)
- if order, ok := o.order[n]; !ok || order != 0 {
- base.Fatalf("unexpected state: %v, %v, %v", n, ok, order)
- }
-
- initialize(n)
- o.order[n] = orderDone
-
- blocked := o.blocking[n]
- delete(o.blocking, n)
-
- for _, m := range blocked {
- if o.order[m]--; o.order[m] == 0 {
- heap.Push(&o.ready, m)
- }
- }
- }
-}
-
-// findInitLoopAndExit searches for an initialization loop involving variable
-// or function n. If one is found, it reports the loop as an error and exits.
-//
-// path points to a slice used for tracking the sequence of
-// variables/functions visited. Using a pointer to a slice allows the
-// slice capacity to grow and limit reallocations.
-func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Name, ok *ir.NameSet) {
- for i, x := range *path {
- if x == n {
- reportInitLoopAndExit((*path)[i:])
- return
- }
- }
-
- // There might be multiple loops involving n; by sorting
- // references, we deterministically pick the one reported.
- refers := collectDeps(n.Defn, false).Sorted(func(ni, nj *ir.Name) bool {
- return ni.Pos().Before(nj.Pos())
- })
-
- *path = append(*path, n)
- for _, ref := range refers {
- // Short-circuit variables that were initialized.
- if ref.Class == ir.PEXTERN && o.order[ref.Defn] == orderDone || ok.Has(ref) {
- continue
- }
-
- o.findInitLoopAndExit(ref, path, ok)
- }
-
- // n is not involved in a cycle.
- // Record that fact to avoid checking it again when reached another way,
- // or else this traversal will take exponential time traversing all paths
- // through the part of the package's call graph implicated in the cycle.
- ok.Add(n)
-
- *path = (*path)[:len(*path)-1]
-}
-
-// reportInitLoopAndExit reports and initialization loop as an error
-// and exits. However, if l is not actually an initialization loop, it
-// simply returns instead.
-func reportInitLoopAndExit(l []*ir.Name) {
- // Rotate loop so that the earliest variable declaration is at
- // the start.
- i := -1
- for j, n := range l {
- if n.Class == ir.PEXTERN && (i == -1 || n.Pos().Before(l[i].Pos())) {
- i = j
- }
- }
- if i == -1 {
- // False positive: loop only involves recursive
- // functions. Return so that findInitLoop can continue
- // searching.
- return
- }
- l = append(l[i:], l[:i]...)
-
- // TODO(mdempsky): Method values are printed as "T.m-fm"
- // rather than "T.m". Figure out how to avoid that.
-
- var msg strings.Builder
- fmt.Fprintf(&msg, "initialization loop:\n")
- for _, n := range l {
- fmt.Fprintf(&msg, "\t%v: %v refers to\n", ir.Line(n), n)
- }
- fmt.Fprintf(&msg, "\t%v: %v", ir.Line(l[0]), l[0])
-
- base.ErrorfAt(l[0].Pos(), errors.InvalidInitCycle, msg.String())
- base.ErrorExit()
-}
-
-// collectDeps returns all of the package-level functions and
-// variables that declaration n depends on. If transitive is true,
-// then it also includes the transitive dependencies of any depended
-// upon functions (but not variables).
-func collectDeps(n ir.Node, transitive bool) ir.NameSet {
- d := initDeps{transitive: transitive}
- switch n.Op() {
- case ir.OAS:
- n := n.(*ir.AssignStmt)
- d.inspect(n.Y)
- case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
- n := n.(*ir.AssignListStmt)
- d.inspect(n.Rhs[0])
- case ir.ODCLFUNC:
- n := n.(*ir.Func)
- d.inspectList(n.Body)
- default:
- base.Fatalf("unexpected Op: %v", n.Op())
- }
- return d.seen
-}
-
-type initDeps struct {
- transitive bool
- seen ir.NameSet
- cvisit func(ir.Node)
-}
-
-func (d *initDeps) cachedVisit() func(ir.Node) {
- if d.cvisit == nil {
- d.cvisit = d.visit // cache closure
- }
- return d.cvisit
-}
-
-func (d *initDeps) inspect(n ir.Node) { ir.Visit(n, d.cachedVisit()) }
-func (d *initDeps) inspectList(l ir.Nodes) { ir.VisitList(l, d.cachedVisit()) }
-
-// visit calls foundDep on any package-level functions or variables
-// referenced by n, if any.
-func (d *initDeps) visit(n ir.Node) {
- switch n.Op() {
- case ir.ONAME:
- n := n.(*ir.Name)
- switch n.Class {
- case ir.PEXTERN, ir.PFUNC:
- d.foundDep(n)
- }
-
- case ir.OCLOSURE:
- n := n.(*ir.ClosureExpr)
- d.inspectList(n.Func.Body)
-
- case ir.ODOTMETH, ir.OMETHVALUE, ir.OMETHEXPR:
- d.foundDep(ir.MethodExprName(n))
- }
-}
-
-// foundDep records that we've found a dependency on n by adding it to
-// seen.
-func (d *initDeps) foundDep(n *ir.Name) {
- // Can happen with method expressions involving interface
- // types; e.g., fixedbugs/issue4495.go.
- if n == nil {
- return
- }
-
- // Names without definitions aren't interesting as far as
- // initialization ordering goes.
- if n.Defn == nil {
- return
- }
-
- if d.seen.Has(n) {
- return
- }
- d.seen.Add(n)
- if d.transitive && n.Class == ir.PFUNC {
- d.inspectList(n.Defn.(*ir.Func).Body)
- }
-}
-
-// declOrder implements heap.Interface, ordering assignment statements
-// by the position of their first LHS expression.
-//
-// N.B., the Pos of the first LHS expression is used because because
-// an OAS node's Pos may not be unique. For example, given the
-// declaration "var a, b = f(), g()", "a" must be ordered before "b",
-// but both OAS nodes use the "=" token's position as their Pos.
-type declOrder []ir.Node
-
-func (s declOrder) Len() int { return len(s) }
-func (s declOrder) Less(i, j int) bool {
- return firstLHS(s[i]).Pos().Before(firstLHS(s[j]).Pos())
-}
-func (s declOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(ir.Node)) }
-func (s *declOrder) Pop() interface{} {
- n := (*s)[len(*s)-1]
- *s = (*s)[:len(*s)-1]
- return n
-}
-
-// firstLHS returns the first expression on the left-hand side of
-// assignment n.
-func firstLHS(n ir.Node) *ir.Name {
- switch n.Op() {
- case ir.OAS:
- n := n.(*ir.AssignStmt)
- return n.X.Name()
- case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2RECV, ir.OAS2MAPR:
- n := n.(*ir.AssignListStmt)
- return n.Lhs[0].Name()
- }
-
- base.Fatalf("unexpected Op: %v", n.Op())
- return nil
-}
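
For readers skimming the deleted initorder.go, the scheme its header comment describes — a pending-dependency count per assignment, a blocking map from each assignment to its dependents, and a ready queue drained in declaration order — is a topological sort. The rough, standalone sketch below models only that idea with strings; it deliberately ignores the compiler's IR, the heap-based priority queue, and the cycle-reporting path.

package main

import "fmt"

// initOrder is a simplified model of the deleted algorithm: each assignment
// carries a count of not-yet-initialized dependencies, blocking maps an
// assignment to the assignments waiting on it, and an assignment is emitted
// once its count reaches zero. Declaration order stands in for the priority
// queue used by the real code.
func initOrder(decls []string, deps map[string][]string) []string {
	pending := map[string]int{}
	blocking := map[string][]string{}
	for _, d := range decls {
		for _, dep := range deps[d] {
			pending[d]++
			blocking[dep] = append(blocking[dep], d)
		}
	}

	var order []string
	done := map[string]bool{}
	for {
		progressed := false
		for _, d := range decls { // declaration order approximates the ready queue
			if done[d] || pending[d] != 0 {
				continue
			}
			done[d] = true
			order = append(order, d)
			for _, m := range blocking[d] {
				pending[m]--
			}
			progressed = true
		}
		if !progressed {
			break // anything still pending here would be an initialization cycle
		}
	}
	return order
}

func main() {
	// The header's example: var x = f(a, b, b); var a, b = g().
	// x's count is 2 (a and b), so the a,b assignment blocks x twice.
	decls := []string{"x = f(a, b, b)", "a, b = g()"}
	deps := map[string][]string{"x = f(a, b, b)": {"a, b = g()", "a, b = g()"}}
	fmt.Println(initOrder(decls, deps)) // [a, b = g() x = f(a, b, b)]
}
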
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
index 23df7eefad..d20a31e38a 100644
--- a/src/cmd/compile/internal/ppc64/ssa.go
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -347,9 +347,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// BNE end
// STDCCC Rarg2, (Rarg0)
// BNE loop
- // LWSYNC // Only for sequential consistency; not required in CasRel.
// MOVD $1, Rout
// end:
+ // LWSYNC // Only for sequential consistency; not required in CasRel.
ld := ppc64.ALDAR
st := ppc64.ASTDCCC
cmp := ppc64.ACMP
@@ -402,22 +402,24 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p4 := s.Prog(ppc64.ABNE)
p4.To.Type = obj.TYPE_BRANCH
p4.To.SetTarget(p0)
+ // return value true
+ p5 := s.Prog(ppc64.AMOVD)
+ p5.From.Type = obj.TYPE_CONST
+ p5.From.Offset = 1
+ p5.To.Type = obj.TYPE_REG
+ p5.To.Reg = out
// LWSYNC - Assuming shared data not write-through-required nor
// caching-inhibited. See Appendix B.2.1.1 in the ISA 2.07b.
// If the operation is a CAS-Release, then synchronization is not necessary.
if v.AuxInt != 0 {
plwsync2 := s.Prog(ppc64.ALWSYNC)
plwsync2.To.Type = obj.TYPE_NONE
+ p2.To.SetTarget(plwsync2)
+ } else {
+ // done (label)
+ p6 := s.Prog(obj.ANOP)
+ p2.To.SetTarget(p6)
}
- // return value true
- p5 := s.Prog(ppc64.AMOVD)
- p5.From.Type = obj.TYPE_CONST
- p5.From.Offset = 1
- p5.To.Type = obj.TYPE_REG
- p5.To.Reg = out
- // done (label)
- p6 := s.Prog(obj.ANOP)
- p2.To.SetTarget(p6)
case ssa.OpPPC64LoweredPubBarrier:
// LWSYNC
@@ -573,18 +575,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = r
- // Mask has been set as sh
- case ssa.OpPPC64RLDICL:
- r := v.Reg()
- r1 := v.Args[0].Reg()
- shifts := v.AuxInt
- p := s.Prog(v.Op.Asm())
- p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)}
- p.AddRestSourceConst(ssa.GetPPC64Shiftmb(shifts))
- p.Reg = r1
- p.To.Type = obj.TYPE_REG
- p.To.Reg = r
-
case ssa.OpPPC64ADD, ssa.OpPPC64FADD, ssa.OpPPC64FADDS, ssa.OpPPC64SUB, ssa.OpPPC64FSUB, ssa.OpPPC64FSUBS,
ssa.OpPPC64MULLD, ssa.OpPPC64MULLW, ssa.OpPPC64DIVDU, ssa.OpPPC64DIVWU,
ssa.OpPPC64SRAD, ssa.OpPPC64SRAW, ssa.OpPPC64SRD, ssa.OpPPC64SRW, ssa.OpPPC64SLD, ssa.OpPPC64SLW,
@@ -603,7 +593,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = r
- case ssa.OpPPC64ANDCC, ssa.OpPPC64ORCC, ssa.OpPPC64XORCC:
+ case ssa.OpPPC64ADDCC, ssa.OpPPC64ANDCC, ssa.OpPPC64SUBCC, ssa.OpPPC64ORCC, ssa.OpPPC64XORCC, ssa.OpPPC64NORCC,
+ ssa.OpPPC64ANDNCC:
r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg()
p := s.Prog(v.Op.Asm())
@@ -613,6 +604,13 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
+ case ssa.OpPPC64NEGCC, ssa.OpPPC64CNTLZDCC:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+
case ssa.OpPPC64ROTLconst, ssa.OpPPC64ROTLWconst:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
@@ -623,13 +621,27 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// Auxint holds encoded rotate + mask
case ssa.OpPPC64RLWINM, ssa.OpPPC64RLWMI:
- rot, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt)
+ sh, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt)
p := s.Prog(v.Op.Asm())
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
p.Reg = v.Args[0].Reg()
- p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(rot)}
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(sh)}
p.AddRestSourceArgs([]obj.Addr{{Type: obj.TYPE_CONST, Offset: mb}, {Type: obj.TYPE_CONST, Offset: me}})
// Auxint holds mask
+
+ case ssa.OpPPC64RLDICL, ssa.OpPPC64RLDICR:
+ sh, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt)
+ p := s.Prog(v.Op.Asm())
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: sh}
+ switch v.Op {
+ case ssa.OpPPC64RLDICL:
+ p.AddRestSourceConst(mb)
+ case ssa.OpPPC64RLDICR:
+ p.AddRestSourceConst(me)
+ }
+ p.Reg = v.Args[0].Reg()
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
+
case ssa.OpPPC64RLWNM:
_, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt)
p := s.Prog(v.Op.Asm())
@@ -730,13 +742,12 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
- case ssa.OpPPC64ANDCCconst:
+ case ssa.OpPPC64ADDCCconst, ssa.OpPPC64ANDCCconst:
p := s.Prog(v.Op.Asm())
p.Reg = v.Args[0].Reg()
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
- // p.To.Reg = ppc64.REGTMP // discard result
p.To.Reg = v.Reg0()
case ssa.OpPPC64MOVDaddr:
diff --git a/src/cmd/compile/internal/rangefunc/rangefunc_test.go b/src/cmd/compile/internal/rangefunc/rangefunc_test.go
new file mode 100644
index 0000000000..16856c648c
--- /dev/null
+++ b/src/cmd/compile/internal/rangefunc/rangefunc_test.go
@@ -0,0 +1,1297 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.rangefunc
+
+package rangefunc_test
+
+import (
+ "slices"
+ "testing"
+)
+
+type Seq2[T1, T2 any] func(yield func(T1, T2) bool)
+
+// OfSliceIndex returns a Seq2 over the elements of s. It is equivalent
+// to range s.
+func OfSliceIndex[T any, S ~[]T](s S) Seq2[int, T] {
+ return func(yield func(int, T) bool) {
+ for i, v := range s {
+ if !yield(i, v) {
+ return
+ }
+ }
+ return
+ }
+}
+
+// BadOfSliceIndex is "bad" because it ignores the return value from yield
+// and just keeps on iterating.
+func BadOfSliceIndex[T any, S ~[]T](s S) Seq2[int, T] {
+ return func(yield func(int, T) bool) {
+ for i, v := range s {
+ yield(i, v)
+ }
+ return
+ }
+}
+
+// VeryBadOfSliceIndex is "very bad" because it ignores the return value from yield
+// and just keeps on iterating, and also wraps that call in a defer-recover so it can
+// keep on trying after the first panic.
+func VeryBadOfSliceIndex[T any, S ~[]T](s S) Seq2[int, T] {
+ return func(yield func(int, T) bool) {
+ for i, v := range s {
+ func() {
+ defer func() {
+ recover()
+ }()
+ yield(i, v)
+ }()
+ }
+ return
+ }
+}
+
+// CooperativeBadOfSliceIndex calls the loop body from a goroutine after
+// a ping on a channel, and returns recover() on that same channel.
+func CooperativeBadOfSliceIndex[T any, S ~[]T](s S, proceed chan any) Seq2[int, T] {
+ return func(yield func(int, T) bool) {
+ for i, v := range s {
+ if !yield(i, v) {
+ // if the body breaks, call yield just once in a goroutine
+ go func() {
+ <-proceed
+ defer func() {
+ proceed <- recover()
+ }()
+ yield(0, s[0])
+ }()
+ return
+ }
+ }
+ return
+ }
+}
+
+// TrickyIterator is a type intended to test whether an iterator that
+// calls a yield function after loop exit must inevitably escape the
+// closure; this might be relevant to future checking/optimization.
+type TrickyIterator struct {
+ yield func(int, int) bool
+}
+
+func (ti *TrickyIterator) iterAll(s []int) Seq2[int, int] {
+ return func(yield func(int, int) bool) {
+ ti.yield = yield // Save yield for future abuse
+ for i, v := range s {
+ if !yield(i, v) {
+ return
+ }
+ }
+ return
+ }
+}
+
+func (ti *TrickyIterator) iterOne(s []int) Seq2[int, int] {
+ return func(yield func(int, int) bool) {
+ ti.yield = yield // Save yield for future abuse
+ if len(s) > 0 { // Not in a loop might escape differently
+ yield(0, s[0])
+ }
+ return
+ }
+}
+
+func (ti *TrickyIterator) iterZero(s []int) Seq2[int, int] {
+ return func(yield func(int, int) bool) {
+ ti.yield = yield // Save yield for future abuse
+ // Don't call it at all, maybe it won't escape
+ return
+ }
+}
+
+func (ti *TrickyIterator) fail() {
+ if ti.yield != nil {
+ ti.yield(1, 1)
+ }
+}
+
+// Check wraps the function body passed to iterator forall
+// in code that ensures that it cannot (successfully) be called
+// either after the body returns false (control flow out of the loop) or
+// after forall itself returns (the iteration is now done).
+//
+// Note that this can catch errors before the inserted checks.
+func Check[U, V any](forall Seq2[U, V]) Seq2[U, V] {
+ return func(body func(U, V) bool) {
+ ret := true
+ forall(func(u U, v V) bool {
+ if !ret {
+ panic("Checked iterator access after exit")
+ }
+ ret = body(u, v)
+ return ret
+ })
+ ret = false
+ }
+}
+
+func TestCheck(t *testing.T) {
+ i := 0
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ }()
+ for _, x := range Check(BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})) {
+ i += x
+ if i > 4*9 {
+ break
+ }
+ }
+}
+
+func TestCooperativeBadOfSliceIndex(t *testing.T) {
+ i := 0
+ proceed := make(chan any)
+ for _, x := range CooperativeBadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, proceed) {
+ i += x
+ if i >= 36 {
+ break
+ }
+ }
+ proceed <- true
+ if r := <-proceed; r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ if i != 36 {
+ t.Errorf("Expected i == 36, saw %d instead", i)
+ } else {
+ t.Logf("i = %d", i)
+ }
+}
+
+func TestCheckCooperativeBadOfSliceIndex(t *testing.T) {
+ i := 0
+ proceed := make(chan any)
+ for _, x := range Check(CooperativeBadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, proceed)) {
+ i += x
+ if i >= 36 {
+ break
+ }
+ }
+ proceed <- true
+ if r := <-proceed; r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ if i != 36 {
+ t.Errorf("Expected i == 36, saw %d instead", i)
+ } else {
+ t.Logf("i = %d", i)
+ }
+}
+
+func TestTrickyIterAll(t *testing.T) {
+ trickItAll := TrickyIterator{}
+ i := 0
+ for _, x := range trickItAll.iterAll([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ i += x
+ if i >= 36 {
+ break
+ }
+ }
+
+ if i != 36 {
+ t.Errorf("Expected i == 36, saw %d instead", i)
+ } else {
+ t.Logf("i = %d", i)
+ }
+
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ }()
+
+ trickItAll.fail()
+}
+
+func TestTrickyIterOne(t *testing.T) {
+ trickItOne := TrickyIterator{}
+ i := 0
+ for _, x := range trickItOne.iterOne([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ i += x
+ if i >= 36 {
+ break
+ }
+ }
+
+ // Don't care about value, ought to be 36 anyhow.
+ t.Logf("i = %d", i)
+
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ }()
+
+ trickItOne.fail()
+}
+
+func TestTrickyIterZero(t *testing.T) {
+ trickItZero := TrickyIterator{}
+ i := 0
+ for _, x := range trickItZero.iterZero([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ i += x
+ if i >= 36 {
+ break
+ }
+ }
+
+ // Don't care about value, ought to be 0 anyhow.
+ t.Logf("i = %d", i)
+
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ }()
+
+ trickItZero.fail()
+}
+
+func TestCheckTrickyIterZero(t *testing.T) {
+ trickItZero := TrickyIterator{}
+ i := 0
+ for _, x := range Check(trickItZero.iterZero([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})) {
+ i += x
+ if i >= 36 {
+ break
+ }
+ }
+
+ // Don't care about value, ought to be 0 anyhow.
+ t.Logf("i = %d", i)
+
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ }()
+
+ trickItZero.fail()
+}
+
+// TestBreak1 should just work, with well-behaved iterators.
+// (The misbehaving iterator detector should not trigger.)
+func TestBreak1(t *testing.T) {
+ var result []int
+ var expect = []int{1, 2, -1, 1, 2, -2, 1, 2, -3}
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4}) {
+ if x == -4 {
+ break
+ }
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ break
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestBreak2 should just work, with well-behaved iterators.
+// (The misbehaving iterator detector should not trigger.)
+func TestBreak2(t *testing.T) {
+ var result []int
+ var expect = []int{1, 2, -1, 1, 2, -2, 1, 2, -3}
+outer:
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4}) {
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ break
+ }
+ if x == -4 {
+ break outer
+ }
+
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestContinue should just work, with well-behaved iterators.
+// (The misbehaving iterator detector should not trigger.)
+func TestContinue(t *testing.T) {
+ var result []int
+ var expect = []int{-1, 1, 2, -2, 1, 2, -3, 1, 2, -4}
+outer:
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4}) {
+ result = append(result, x)
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ continue outer
+ }
+ if x == -4 {
+ break outer
+ }
+
+ result = append(result, y)
+ }
+ result = append(result, x-10)
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestBreak3 should just work, with well-behaved iterators.
+// (The misbehaving iterator detector should not trigger.)
+func TestBreak3(t *testing.T) {
+ var result []int
+ var expect = []int{100, 10, 2, 4, 200, 10, 2, 4, 20, 2, 4, 300, 10, 2, 4, 20, 2, 4, 30}
+X:
+ for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) {
+ Y:
+ for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+ if 10*y >= x {
+ break
+ }
+ result = append(result, y)
+ if y == 30 {
+ continue X
+ }
+ Z:
+ for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if z&1 == 1 {
+ continue Z
+ }
+ result = append(result, z)
+ if z >= 4 {
+ continue Y
+ }
+ }
+ result = append(result, -y) // should never be executed
+ }
+ result = append(result, x)
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestBreak1BadA should end in a panic when the outer-loop's
+// single-level break is ignored by BadOfSliceIndex
+func TestBreak1BadA(t *testing.T) {
+ var result []int
+ var expect = []int{1, 2, -1, 1, 2, -2, 1, 2, -3}
+
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ }()
+
+ for _, x := range BadOfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ if x == -4 {
+ break
+ }
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ break
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+}
+
+// TestBreak1BadB should end in a panic, sooner, when the inner-loop's
+// (nested) single-level break is ignored by BadOfSliceIndex
+func TestBreak1BadB(t *testing.T) {
+ var result []int
+ var expect = []int{1, 2} // inner loop breaks, then panics, before the outer append runs
+
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ }()
+
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ if x == -4 {
+ break
+ }
+ for _, y := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ break
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+}
+
+// TestMultiCont0 tests multilevel continue with no bad iterators
+// (it should just work)
+func TestMultiCont0(t *testing.T) {
+ var result []int
+ var expect = []int{1000, 10, 2, 4, 2000}
+
+W:
+ for _, w := range OfSliceIndex([]int{1000, 2000}) {
+ result = append(result, w)
+ if w == 2000 {
+ break
+ }
+ for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) {
+ for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+ result = append(result, y)
+ for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if z&1 == 1 {
+ continue
+ }
+ result = append(result, z)
+ if z >= 4 {
+ continue W // modified to be multilevel
+ }
+ }
+ result = append(result, -y) // should never be executed
+ }
+ result = append(result, x)
+ }
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestMultiCont1 tests multilevel continue with a bad iterator
+// in the outermost loop exited by the continue.
+func TestMultiCont1(t *testing.T) {
+ var result []int
+ var expect = []int{1000, 10, 2, 4}
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Errorf("Wanted to see a failure, result was %v", result)
+ }
+ }()
+
+W:
+ for _, w := range OfSliceIndex([]int{1000, 2000}) {
+ result = append(result, w)
+ if w == 2000 {
+ break
+ }
+ for _, x := range BadOfSliceIndex([]int{100, 200, 300, 400}) {
+ for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+ result = append(result, y)
+ for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if z&1 == 1 {
+ continue
+ }
+ result = append(result, z)
+ if z >= 4 {
+ continue W
+ }
+ }
+ result = append(result, -y) // should never be executed
+ }
+ result = append(result, x)
+ }
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestMultiCont2 tests multilevel continue with a bad iterator
+// in a middle loop exited by the continue.
+func TestMultiCont2(t *testing.T) {
+ var result []int
+ var expect = []int{1000, 10, 2, 4}
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Errorf("Wanted to see a failure, result was %v", result)
+ }
+ }()
+
+W:
+ for _, w := range OfSliceIndex([]int{1000, 2000}) {
+ result = append(result, w)
+ if w == 2000 {
+ break
+ }
+ for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) {
+ for _, y := range BadOfSliceIndex([]int{10, 20, 30, 40}) {
+ result = append(result, y)
+ for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if z&1 == 1 {
+ continue
+ }
+ result = append(result, z)
+ if z >= 4 {
+ continue W
+ }
+ }
+ result = append(result, -y) // should never be executed
+ }
+ result = append(result, x)
+ }
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestMultiCont3 tests multilevel continue with a bad iterator
+// in the innermost loop exited by the continue.
+func TestMultiCont3(t *testing.T) {
+ var result []int
+ var expect = []int{1000, 10, 2, 4}
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Errorf("Wanted to see a failure, result was %v", result)
+ }
+ }()
+
+W:
+ for _, w := range OfSliceIndex([]int{1000, 2000}) {
+ result = append(result, w)
+ if w == 2000 {
+ break
+ }
+ for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) {
+ for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+ result = append(result, y)
+ for _, z := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if z&1 == 1 {
+ continue
+ }
+ result = append(result, z)
+ if z >= 4 {
+ continue W
+ }
+ }
+ result = append(result, -y) // should never be executed
+ }
+ result = append(result, x)
+ }
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestMultiBreak0 tests multilevel break with a bad iterator
+// in the loop exited by the break (which here is also the outermost loop).
+func TestMultiBreak0(t *testing.T) {
+ var result []int
+ var expect = []int{1000, 10, 2, 4}
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Errorf("Wanted to see a failure, result was %v", result)
+ }
+ }()
+
+W:
+ for _, w := range BadOfSliceIndex([]int{1000, 2000}) {
+ result = append(result, w)
+ if w == 2000 {
+ break
+ }
+ for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) {
+ for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+ result = append(result, y)
+ for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if z&1 == 1 {
+ continue
+ }
+ result = append(result, z)
+ if z >= 4 {
+ break W
+ }
+ }
+ result = append(result, -y) // should never be executed
+ }
+ result = append(result, x)
+ }
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestMultiBreak1 tests multilevel break with a bad iterator
+// in an intermediate loop exited by the break.
+func TestMultiBreak1(t *testing.T) {
+ var result []int
+ var expect = []int{1000, 10, 2, 4}
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Errorf("Wanted to see a failure, result was %v", result)
+ }
+ }()
+
+W:
+ for _, w := range OfSliceIndex([]int{1000, 2000}) {
+ result = append(result, w)
+ if w == 2000 {
+ break
+ }
+ for _, x := range BadOfSliceIndex([]int{100, 200, 300, 400}) {
+ for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+ result = append(result, y)
+ for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if z&1 == 1 {
+ continue
+ }
+ result = append(result, z)
+ if z >= 4 {
+ break W
+ }
+ }
+ result = append(result, -y) // should never be executed
+ }
+ result = append(result, x)
+ }
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestMultiBreak2 tests multilevel break with two bad iterators
+// in intermediate loops exited by the break.
+func TestMultiBreak2(t *testing.T) {
+ var result []int
+ var expect = []int{1000, 10, 2, 4}
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Errorf("Wanted to see a failure, result was %v", result)
+ }
+ }()
+
+W:
+ for _, w := range OfSliceIndex([]int{1000, 2000}) {
+ result = append(result, w)
+ if w == 2000 {
+ break
+ }
+ for _, x := range BadOfSliceIndex([]int{100, 200, 300, 400}) {
+ for _, y := range BadOfSliceIndex([]int{10, 20, 30, 40}) {
+ result = append(result, y)
+ for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if z&1 == 1 {
+ continue
+ }
+ result = append(result, z)
+ if z >= 4 {
+ break W
+ }
+ }
+ result = append(result, -y) // should never be executed
+ }
+ result = append(result, x)
+ }
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestMultiBreak3 tests multilevel break with the bad iterator
+// in the innermost loop exited by the break.
+func TestMultiBreak3(t *testing.T) {
+ var result []int
+ var expect = []int{1000, 10, 2, 4}
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Errorf("Wanted to see a failure, result was %v", result)
+ }
+ }()
+
+W:
+ for _, w := range OfSliceIndex([]int{1000, 2000}) {
+ result = append(result, w)
+ if w == 2000 {
+ break
+ }
+ for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) {
+ for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+ result = append(result, y)
+ for _, z := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if z&1 == 1 {
+ continue
+ }
+ result = append(result, z)
+ if z >= 4 {
+ break W
+ }
+ }
+ result = append(result, -y) // should never be executed
+ }
+ result = append(result, x)
+ }
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// veryBad tests that a loop nest behaves sensibly in the face of a
+// "very bad" iterator. In this case, "sensibly" means that the
+// break out of X still occurs after the very bad iterator finally
+// quits running (the control-flow breadcrumbs remain).
+func veryBad(s []int) []int {
+ var result []int
+X:
+ for _, x := range OfSliceIndex([]int{1, 2, 3}) {
+
+ result = append(result, x)
+
+ for _, y := range VeryBadOfSliceIndex(s) {
+ result = append(result, y)
+ break X
+ }
+ for _, z := range OfSliceIndex([]int{100, 200, 300}) {
+ result = append(result, z)
+ if z == 100 {
+ break
+ }
+ }
+ }
+ return result
+}
+
+// checkVeryBad wraps a "very bad" iterator with Check,
+// demonstrating that the very bad iterator also hides panics
+// thrown by Check.
+func checkVeryBad(s []int) []int {
+ var result []int
+X:
+ for _, x := range OfSliceIndex([]int{1, 2, 3}) {
+
+ result = append(result, x)
+
+ for _, y := range Check(VeryBadOfSliceIndex(s)) {
+ result = append(result, y)
+ break X
+ }
+ for _, z := range OfSliceIndex([]int{100, 200, 300}) {
+ result = append(result, z)
+ if z == 100 {
+ break
+ }
+ }
+ }
+ return result
+}
+
+// okay is the not-bad version of veryBad.
+// They should behave the same.
+func okay(s []int) []int {
+ var result []int
+X:
+ for _, x := range OfSliceIndex([]int{1, 2, 3}) {
+
+ result = append(result, x)
+
+ for _, y := range OfSliceIndex(s) {
+ result = append(result, y)
+ break X
+ }
+ for _, z := range OfSliceIndex([]int{100, 200, 300}) {
+ result = append(result, z)
+ if z == 100 {
+ break
+ }
+ }
+ }
+ return result
+}
+
+// TestVeryBad1 checks the behavior of an extremely poorly behaved iterator.
+func TestVeryBad1(t *testing.T) {
+ result := veryBad([]int{10, 20, 30, 40, 50}) // odd length
+ expect := []int{1, 10}
+
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestVeryBad2 checks the behavior of an extremely poorly behaved iterator.
+func TestVeryBad2(t *testing.T) {
+ result := veryBad([]int{10, 20, 30, 40}) // even length
+ expect := []int{1, 10}
+
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestCheckVeryBad checks the behavior of an extremely poorly behaved iterator,
+// which also suppresses the panics from "Check".
+func TestCheckVeryBad(t *testing.T) {
+ result := checkVeryBad([]int{10, 20, 30, 40}) // even length
+ expect := []int{1, 10}
+
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestOk checks the behavior of okay, the well-behaved counterpart of veryBad.
+func TestOk(t *testing.T) {
+ result := okay([]int{10, 20, 30, 40, 50}) // odd length
+ expect := []int{1, 10}
+
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// testBreak1BadDefer checks that defer behaves properly even in
+// the presence of loop bodies panicking out of bad iterators
+// (i.e., that the instrumentation does not break defer in these loops).
+func testBreak1BadDefer(t *testing.T) (result []int) {
+ var expect = []int{1, 2, -1, 1, 2, -2, 1, 2, -3, -30, -20, -10}
+
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ if !slices.Equal(expect, result) {
+ t.Errorf("(Inner) Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ }()
+
+ for _, x := range BadOfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ break
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+ return
+}
+
+func TestBreak1BadDefer(t *testing.T) {
+ var result []int
+ var expect = []int{1, 2, -1, 1, 2, -2, 1, 2, -3, -30, -20, -10}
+ result = testBreak1BadDefer(t)
+ if !slices.Equal(expect, result) {
+ t.Errorf("(Outer) Expected %v, got %v", expect, result)
+ }
+}
+
+// testReturn1 has no bad iterators.
+func testReturn1(t *testing.T) (result []int, err any) {
+ defer func() {
+ err = recover()
+ }()
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ result = append(result, x)
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ return
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+ return
+}
+
+// testReturn2 has a bad iterator in its outermost loop.
+func testReturn2(t *testing.T) (result []int, err any) {
+ defer func() {
+ err = recover()
+ }()
+ for _, x := range BadOfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ result = append(result, x)
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ return
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+ return
+}
+
+// testReturn3 has a bad iterator in its innermost loop.
+func testReturn3(t *testing.T) (result []int, err any) {
+ defer func() {
+ err = recover()
+ }()
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ result = append(result, x)
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ return
+ }
+ result = append(result, y)
+ }
+ }
+ return
+}
+
+// TestReturns checks that returns through bad iterators behave properly,
+// for inner and outer bad iterators.
+func TestReturns(t *testing.T) {
+ var result []int
+ var expect = []int{-1, 1, 2, -10}
+ var err any
+
+ result, err = testReturn1(t)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error %v", err)
+ }
+
+ result, err = testReturn2(t)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ if err == nil {
+ t.Errorf("Missing expected error")
+ } else {
+ t.Logf("Saw expected panic '%v'", err)
+ }
+
+ result, err = testReturn3(t)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ if err == nil {
+ t.Errorf("Missing expected error")
+ } else {
+ t.Logf("Saw expected panic '%v'", err)
+ }
+
+}
+
+// testGotoA1 tests loop-nest-internal goto, no bad iterators.
+func testGotoA1(t *testing.T) (result []int, err any) {
+ defer func() {
+ err = recover()
+ }()
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ result = append(result, x)
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ goto A
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ A:
+ }
+ return
+}
+
+// testGotoA2 tests loop-nest-internal goto, outer bad iterator.
+func testGotoA2(t *testing.T) (result []int, err any) {
+ defer func() {
+ err = recover()
+ }()
+ for _, x := range BadOfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ result = append(result, x)
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ goto A
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ A:
+ }
+ return
+}
+
+// testGotoA3 tests loop-nest-internal goto, inner bad iterator.
+func testGotoA3(t *testing.T) (result []int, err any) {
+ defer func() {
+ err = recover()
+ }()
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ result = append(result, x)
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ goto A
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ A:
+ }
+ return
+}
+
+func TestGotoA(t *testing.T) {
+ var result []int
+ var expect = []int{-1, 1, 2, -2, 1, 2, -3, 1, 2, -4, -30, -20, -10}
+ var expect3 = []int{-1, 1, 2, -10} // first goto becomes a panic
+ var err any
+
+ result, err = testGotoA1(t)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error %v", err)
+ }
+
+ result, err = testGotoA2(t)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ if err == nil {
+ t.Errorf("Missing expected error")
+ } else {
+ t.Logf("Saw expected panic '%v'", err)
+ }
+
+ result, err = testGotoA3(t)
+ if !slices.Equal(expect3, result) {
+ t.Errorf("Expected %v, got %v", expect3, result)
+ }
+ if err == nil {
+ t.Errorf("Missing expected error")
+ } else {
+ t.Logf("Saw expected panic '%v'", err)
+ }
+}
+
+// testGotoB1 tests loop-nest-exiting goto, no bad iterators.
+func testGotoB1(t *testing.T) (result []int, err any) {
+ defer func() {
+ err = recover()
+ }()
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ result = append(result, x)
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ goto B
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+B:
+ result = append(result, 999)
+ return
+}
+
+// testGotoB2 tests loop-nest-exiting goto, outer bad iterator.
+func testGotoB2(t *testing.T) (result []int, err any) {
+ defer func() {
+ err = recover()
+ }()
+ for _, x := range BadOfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ result = append(result, x)
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ goto B
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+B:
+ result = append(result, 999)
+ return
+}
+
+// testGotoB3 tests loop-nest-exiting goto, inner bad iterator.
+func testGotoB3(t *testing.T) (result []int, err any) {
+ defer func() {
+ err = recover()
+ }()
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ result = append(result, x)
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ goto B
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+B:
+ result = append(result, 999)
+ return
+}
+
+func TestGotoB(t *testing.T) {
+ var result []int
+ var expect = []int{-1, 1, 2, 999, -10}
+ var expectX = []int{-1, 1, 2, -10}
+ var err any
+
+ result, err = testGotoB1(t)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error %v", err)
+ }
+
+ result, err = testGotoB2(t)
+ if !slices.Equal(expectX, result) {
+ t.Errorf("Expected %v, got %v", expectX, result)
+ }
+ if err == nil {
+ t.Errorf("Missing expected error")
+ } else {
+ t.Logf("Saw expected panic '%v'", err)
+ }
+
+ result, err = testGotoB3(t)
+ if !slices.Equal(expectX, result) {
+ t.Errorf("Expected %v, got %v", expectX, result)
+ }
+ if err == nil {
+ t.Errorf("Missing expected error")
+ } else {
+ t.Logf("Saw expected panic '%v'", err)
+ }
+}
diff --git a/src/cmd/compile/internal/rangefunc/rewrite.go b/src/cmd/compile/internal/rangefunc/rewrite.go
new file mode 100644
index 0000000000..d439412ea8
--- /dev/null
+++ b/src/cmd/compile/internal/rangefunc/rewrite.go
@@ -0,0 +1,1334 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package rangefunc rewrites range-over-func loops into code that doesn't use range-over-funcs.
+Rewriting the construct in the front end, before noder, means the functions generated during
+the rewrite are available in a noder-generated representation for inlining by the back end.
+
+# Theory of Operation
+
+The basic idea is to rewrite
+
+ for x := range f {
+ ...
+ }
+
+into
+
+ f(func(x T) bool {
+ ...
+ })
+
+But it's not usually that easy.
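+
+For example (an illustrative sketch only; OfSlice is a hypothetical
+iterator, not something defined by this package), f might be
+
+	func OfSlice(s []int) func(yield func(int) bool) {
+		return func(yield func(int) bool) {
+			for _, v := range s {
+				if !yield(v) {
+					return
+				}
+			}
+		}
+	}
+
+so that "for x := range OfSlice(s) { ... }" corresponds to the call
+"OfSlice(s)(func(x int) bool { ...; return true })".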
+
+# Range variables
+
+For a range not using :=, the assigned variables cannot be function parameters
+in the generated body function. Instead, we allocate fake parameters and
+start the body with an assignment. For example:
+
+ for expr1, expr2 = range f {
+ ...
+ }
+
+becomes
+
+ f(func(#p1 T1, #p2 T2) bool {
+ expr1, expr2 = #p1, #p2
+ ...
+ })
+
+(All the generated variables have a # at the start to signal that they
+are internal variables when looking at the generated code in a
+debugger. Because variables have all been resolved to the specific
+objects they represent, there is no danger of using plain "p1" and
+colliding with a Go variable named "p1"; the # is just nice to have,
+not for correctness.)
+
+It can also happen that there are fewer range variables than function
+arguments, in which case we end up with something like
+
+ f(func(x T1, _ T2) bool {
+ ...
+ })
+
+or
+
+ f(func(#p1 T1, #p2 T2, _ T3) bool {
+ expr1, expr2 = #p1, #p2
+ ...
+ })
+
+# Return
+
+If the body contains a "break", that break turns into "return false",
+to tell f to stop. And if the body contains a "continue", that turns
+into "return true", to tell f to proceed with the next value.
+Those are the easy cases.
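+
+As a sketch of these easy cases (ignoring the #exitK checks described
+under "Checking" below),
+
+	for x := range f {
+		...
+		if ... { break }
+		...
+	}
+
+becomes
+
+	f(func(x T) bool {
+		...
+		if ... { return false }
+		...
+		return true
+	})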
+
+If the body contains a return or a break/continue/goto L, then we need
+to rewrite that into code that breaks out of the loop and then
+triggers that control flow. In general we rewrite
+
+ for x := range f {
+ ...
+ }
+
+into
+
+ {
+ var #next int
+ f(func(x T1) bool {
+ ...
+ return true
+ })
+ ... check #next ...
+ }
+
+The variable #next is an integer code that says what to do when f
+returns. Each difficult statement sets #next and then returns false to
+stop f.
+
+A plain "return" rewrites to {#next = -1; return false}.
+The return false breaks the loop. Then when f returns, the "check
+#next" section includes
+
+ if #next == -1 { return }
+
+which causes the return we want.
+
+Return with arguments is more involved. We need somewhere to store the
+arguments while we break out of f, so we add them to the var
+declaration, like:
+
+ {
+ var (
+ #next int
+ #r1 type1
+ #r2 type2
+ )
+ f(func(x T1) bool {
+ ...
+ {
+ // return a, b
+ #r1, #r2 = a, b
+ #next = -2
+ return false
+ }
+ ...
+ return true
+ })
+ if #next == -2 { return #r1, #r2 }
+ }
+
+TODO: What about:
+
+ func f() (x bool) {
+ for range g(&x) {
+ return true
+ }
+ }
+
+ func g(p *bool) func(func() bool) {
+ return func(yield func() bool) {
+ yield()
+ // Is *p true or false here?
+ }
+ }
+
+With this rewrite the "return true" is not visible after yield returns,
+but maybe it should be?
+
+# Checking
+
+To permit checking that an iterator is well-behaved -- that is, that
+it does not call the loop body again after it has returned false or
+after the entire loop has exited (it might retain a copy of the body
+function, or pass it to another goroutine) -- each generated loop has
+its own #exitK flag that is checked before each iteration, and set both
+at any early exit and after the iteration completes.
+
+For example:
+
+ for x := range f {
+ ...
+ if ... { break }
+ ...
+ }
+
+becomes
+
+ {
+ var #exit1 bool
+ f(func(x T1) bool {
+ if #exit1 { runtime.panicrangeexit() }
+ ...
+ if ... { #exit1 = true ; return false }
+ ...
+ return true
+ })
+ #exit1 = true
+ }
+
+# Nested Loops
+
+So far we've only considered a single loop. If a function contains a
+sequence of loops, each can be translated individually. But loops can
+be nested. It would work to translate the innermost loop and then
+translate the loop around it, and so on, except that there'd be a lot
+of rewriting of rewritten code and the overall traversals could end up
+taking time quadratic in the depth of the nesting. To avoid all that,
+we use a single rewriting pass that handles a top-most range-over-func
+loop and all the range-over-func loops it contains at the same time.
+
+If we need to return from inside a doubly-nested loop, the rewrites
+above stay the same, but the check after the inner loop only says
+
+ if #next < 0 { return false }
+
+to stop the outer loop so it can do the actual return. That is,
+
+ for range f {
+ for range g {
+ ...
+ return a, b
+ ...
+ }
+ }
+
+becomes
+
+ {
+ var (
+ #next int
+ #r1 type1
+ #r2 type2
+ )
+ var #exit1 bool
+ f(func() {
+ if #exit1 { runtime.panicrangeexit() }
+ var #exit2 bool
+ g(func() {
+ if #exit2 { runtime.panicrangeexit() }
+ ...
+ {
+ // return a, b
+ #r1, #r2 = a, b
+ #next = -2
+ #exit1, #exit2 = true, true
+ return false
+ }
+ ...
+ return true
+ })
+ #exit2 = true
+ if #next < 0 {
+ return false
+ }
+ return true
+ })
+ #exit1 = true
+ if #next == -2 {
+ return #r1, #r2
+ }
+ }
+
+Note that the #next < 0 after the inner loop handles both kinds of
+return with a single check.
+
+# Labeled break/continue of range-over-func loops
+
+For a labeled break or continue of an outer range-over-func, we
+use positive #next values. Any such labeled break or continue
+really means "do N breaks" or "do N breaks and 1 continue".
+We encode that as perLoopStep*N or perLoopStep*N+1 respectively.
+
+Loops that might need to propagate a labeled break or continue
+add one or both of these to the #next checks:
+
+ if #next >= 2 {
+ #next -= 2
+ return false
+ }
+
+ if #next == 1 {
+ #next = 0
+ return true
+ }
+
+For example
+
+ F: for range f {
+ for range g {
+ for range h {
+ ...
+ break F
+ ...
+ ...
+ continue F
+ ...
+ }
+ }
+ ...
+ }
+
+becomes
+
+ {
+ var #next int
+ var #exit1 bool
+ f(func() {
+ if #exit1 { runtime.panicrangeexit() }
+ var #exit2 bool
+ g(func() {
+ if #exit2 { runtime.panicrangeexit() }
+ var #exit3 bool
+ h(func() {
+ if #exit3 { runtime.panicrangeexit() }
+ ...
+ {
+ // break F
+ #next = 4
+ #exit1, #exit2, #exit3 = true, true, true
+ return false
+ }
+ ...
+ {
+ // continue F
+ #next = 3
+ #exit2, #exit3 = true, true
+ return false
+ }
+ ...
+ return true
+ })
+ #exit3 = true
+ if #next >= 2 {
+ #next -= 2
+ return false
+ }
+ return true
+ })
+ #exit2 = true
+ if #next >= 2 {
+ #next -= 2
+ return false
+ }
+ if #next == 1 {
+ #next = 0
+ return true
+ }
+ ...
+ return true
+ })
+ #exit1 = true
+ }
+
+Note that the post-h checks only consider a break,
+since no generated code tries to continue g.
+
+# Gotos and other labeled break/continue
+
+The final control flow translations are goto and break/continue of a
+non-range-over-func statement. In both cases, we may need to break out
+of one or more range-over-func loops before we can do the actual
+control flow statement. Each such break/continue/goto L statement is
+assigned a unique negative #next value (below -2, since -1 and -2 are
+for the two kinds of return). Then the post-checks for a given loop
+test for the specific codes that refer to labels directly targetable
+from that block. Otherwise, the generic
+
+ if #next < 0 { return false }
+
+check handles stopping the next loop to get one step closer to the label.
+
+For example
+
+ Top: print("start\n")
+ for range f {
+ for range g {
+ ...
+ for range h {
+ ...
+ goto Top
+ ...
+ }
+ }
+ }
+
+becomes
+
+ Top: print("start\n")
+ {
+ var #next int
+ var #exit1 bool
+ f(func() {
+ if #exit1 { runtime.panicrangeexit() }
+ var #exit2 bool
+ g(func() {
+ if #exit2 { runtime.panicrangeexit() }
+ ...
+ var #exit3 bool
+ h(func() {
+ if #exit3 { runtime.panicrangeexit() }
+ ...
+ {
+ // goto Top
+ #next = -3
+ #exit1, #exit2, #exit3 = true, true, true
+ return false
+ }
+ ...
+ return true
+ })
+ #exit3 = true
+ if #next < 0 {
+ return false
+ }
+ return true
+ })
+ #exit2 = true
+ if #next < 0 {
+ return false
+ }
+ return true
+ })
+ #exit1 = true
+ if #next == -3 {
+ #next = 0
+ goto Top
+ }
+ }
+
+Labeled break/continue to non-range-over-funcs are handled the same
+way as goto.
+
+# Defers
+
+The last wrinkle is handling defer statements. If we have
+
+ for range f {
+ defer print("A")
+ }
+
+we cannot rewrite that into
+
+ f(func() {
+ defer print("A")
+ })
+
+because the deferred code will run at the end of the iteration, not
+the end of the containing function. To fix that, the runtime provides
+a special hook that lets us obtain a defer "token" representing the
+outer function and then use it in a later defer to attach the deferred
+code to that outer function.
+
+Normally,
+
+ defer print("A")
+
+compiles to
+
+ runtime.deferproc(func() { print("A") })
+
+This changes in a range-over-func. For example:
+
+ for range f {
+ defer print("A")
+ }
+
+compiles to
+
+ var #defers = runtime.deferrangefunc()
+ f(func() {
+ runtime.deferprocat(func() { print("A") }, #defers)
+ })
+
+For this rewriting phase, we insert the explicit initialization of
+#defers and then attach the #defers variable to the CallStmt
+representing the defer. That variable will be propagated to the
+backend and will cause the backend to compile the defer using
+deferprocat instead of an ordinary deferproc.
+
+TODO: Could call runtime.deferrangefuncend after f.
+*/
+package rangefunc
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types2"
+ "fmt"
+ "go/constant"
+ "os"
+)
+
+// nopos is the zero syntax.Pos.
+var nopos syntax.Pos
+
+// A rewriter implements rewriting the range-over-funcs in a given function.
+type rewriter struct {
+ pkg *types2.Package
+ info *types2.Info
+ outer *syntax.FuncType
+ body *syntax.BlockStmt
+
+ // References to important types and values.
+ any types2.Object
+ bool types2.Object
+ int types2.Object
+ true types2.Object
+ false types2.Object
+
+ // Branch numbering, computed as needed.
+ branchNext map[branch]int // branch -> #next value
+ labelLoop map[string]*syntax.ForStmt // label -> innermost rangefunc loop it is declared inside (nil for no loop)
+
+ // Stack of nodes being visited.
+ stack []syntax.Node // all nodes
+ forStack []*forLoop // range-over-func loops
+
+ rewritten map[*syntax.ForStmt]syntax.Stmt
+
+ // Declared variables in generated code for outermost loop.
+ declStmt *syntax.DeclStmt
+ nextVar types2.Object
+ retVars []types2.Object
+ defers types2.Object
+ exitVarCount int // exitvars are referenced from their respective loops
+}
+
+// A branch is a single labeled branch.
+type branch struct {
+ tok syntax.Token
+ label string
+}
+
+// A forLoop describes a single range-over-func loop being processed.
+type forLoop struct {
+ nfor *syntax.ForStmt // actual syntax
+ exitFlag *types2.Var // #exit variable for this loop
+ exitFlagDecl *syntax.VarDecl
+
+ checkRet bool // add check for "return" after loop
+ checkRetArgs bool // add check for "return args" after loop
+ checkBreak bool // add check for "break" after loop
+ checkContinue bool // add check for "continue" after loop
+ checkBranch []branch // add check for labeled branch after loop
+}
+
+// Rewrite rewrites all the range-over-funcs in the files.
+func Rewrite(pkg *types2.Package, info *types2.Info, files []*syntax.File) {
+ for _, file := range files {
+ syntax.Inspect(file, func(n syntax.Node) bool {
+ switch n := n.(type) {
+ case *syntax.FuncDecl:
+ rewriteFunc(pkg, info, n.Type, n.Body)
+ return false
+ case *syntax.FuncLit:
+ rewriteFunc(pkg, info, n.Type, n.Body)
+ return false
+ }
+ return true
+ })
+ }
+}
+
+// rewriteFunc rewrites all the range-over-funcs in a single function (a top-level func or a func literal).
+// The typ and body are the function's type and body.
+func rewriteFunc(pkg *types2.Package, info *types2.Info, typ *syntax.FuncType, body *syntax.BlockStmt) {
+ if body == nil {
+ return
+ }
+ r := &rewriter{
+ pkg: pkg,
+ info: info,
+ outer: typ,
+ body: body,
+ }
+ syntax.Inspect(body, r.inspect)
+ if (base.Flag.W != 0) && r.forStack != nil {
+ syntax.Fdump(os.Stderr, body)
+ }
+}
+
+// checkFuncMisuse reports whether to check for misuse of iterator callback functions.
+func (r *rewriter) checkFuncMisuse() bool {
+ return base.Debug.RangeFuncCheck != 0
+}
+
+// inspect is a callback for syntax.Inspect that drives the actual rewriting.
+// If it sees a func literal, it kicks off a separate rewrite for that literal.
+// Otherwise, it maintains a stack of range-over-func loops and
+// converts each in turn.
+func (r *rewriter) inspect(n syntax.Node) bool {
+ switch n := n.(type) {
+ case *syntax.FuncLit:
+ rewriteFunc(r.pkg, r.info, n.Type, n.Body)
+ return false
+
+ default:
+ // Push n onto stack.
+ r.stack = append(r.stack, n)
+ if nfor, ok := forRangeFunc(n); ok {
+ loop := &forLoop{nfor: nfor}
+ r.forStack = append(r.forStack, loop)
+ r.startLoop(loop)
+ }
+
+ case nil:
+ // n == nil signals that we are done visiting
+ // the top-of-stack node's children. Find it.
+ n = r.stack[len(r.stack)-1]
+
+ // If we are inside a range-over-func,
+ // take this moment to replace any break/continue/goto/return
+ // statements directly contained in this node.
+ // Also replace any converted for statements
+ // with the rewritten block.
+ switch n := n.(type) {
+ case *syntax.BlockStmt:
+ for i, s := range n.List {
+ n.List[i] = r.editStmt(s)
+ }
+ case *syntax.CaseClause:
+ for i, s := range n.Body {
+ n.Body[i] = r.editStmt(s)
+ }
+ case *syntax.CommClause:
+ for i, s := range n.Body {
+ n.Body[i] = r.editStmt(s)
+ }
+ case *syntax.LabeledStmt:
+ n.Stmt = r.editStmt(n.Stmt)
+ }
+
+ // Pop n.
+ if len(r.forStack) > 0 && r.stack[len(r.stack)-1] == r.forStack[len(r.forStack)-1].nfor {
+ r.endLoop(r.forStack[len(r.forStack)-1])
+ r.forStack = r.forStack[:len(r.forStack)-1]
+ }
+ r.stack = r.stack[:len(r.stack)-1]
+ }
+ return true
+}
+
+// startLoop sets up for converting a range-over-func loop.
+func (r *rewriter) startLoop(loop *forLoop) {
+ // For the first loop in the function, allocate syntax for any, bool, int, true, and false.
+ if r.any == nil {
+ r.any = types2.Universe.Lookup("any")
+ r.bool = types2.Universe.Lookup("bool")
+ r.int = types2.Universe.Lookup("int")
+ r.true = types2.Universe.Lookup("true")
+ r.false = types2.Universe.Lookup("false")
+ r.rewritten = make(map[*syntax.ForStmt]syntax.Stmt)
+ }
+ if r.checkFuncMisuse() {
+ // declare the exit flag for this loop's body
+ loop.exitFlag, loop.exitFlagDecl = r.exitVar(loop.nfor.Pos())
+ }
+}
+
+// editStmt returns the replacement for the statement x,
+// or x itself if it should be left alone.
+// This includes the for loops we are converting,
+// as left in r.rewritten by r.endLoop.
+func (r *rewriter) editStmt(x syntax.Stmt) syntax.Stmt {
+ if x, ok := x.(*syntax.ForStmt); ok {
+ if s := r.rewritten[x]; s != nil {
+ return s
+ }
+ }
+
+ if len(r.forStack) > 0 {
+ switch x := x.(type) {
+ case *syntax.BranchStmt:
+ return r.editBranch(x)
+ case *syntax.CallStmt:
+ if x.Tok == syntax.Defer {
+ return r.editDefer(x)
+ }
+ case *syntax.ReturnStmt:
+ return r.editReturn(x)
+ }
+ }
+
+ return x
+}
+
+// editDefer returns the replacement for the defer statement x.
+// See the "Defers" section in the package doc comment above for more context.
+func (r *rewriter) editDefer(x *syntax.CallStmt) syntax.Stmt {
+ if r.defers == nil {
+ // Declare and initialize the #defers token.
+ init := &syntax.CallExpr{
+ Fun: runtimeSym(r.info, "deferrangefunc"),
+ }
+ tv := syntax.TypeAndValue{Type: r.any.Type()}
+ tv.SetIsValue()
+ init.SetTypeInfo(tv)
+ r.defers = r.declVar("#defers", r.any.Type(), init)
+ }
+
+ // Attach the token as an "extra" argument to the defer.
+ x.DeferAt = r.useVar(r.defers)
+ setPos(x.DeferAt, x.Pos())
+ return x
+}
+
+func (r *rewriter) exitVar(pos syntax.Pos) (*types2.Var, *syntax.VarDecl) {
+ r.exitVarCount++
+
+ name := fmt.Sprintf("#exit%d", r.exitVarCount)
+ typ := r.bool.Type()
+ obj := types2.NewVar(pos, r.pkg, name, typ)
+ n := syntax.NewName(pos, name)
+ setValueType(n, typ)
+ r.info.Defs[n] = obj
+
+ return obj, &syntax.VarDecl{NameList: []*syntax.Name{n}}
+}
+
+// editReturn returns the replacement for the return statement x.
+// See the "Return" section in the package doc comment above for more context.
+func (r *rewriter) editReturn(x *syntax.ReturnStmt) syntax.Stmt {
+ // #next = -1 is return with no arguments; -2 is return with arguments.
+ var next int
+ if x.Results == nil {
+ next = -1
+ r.forStack[0].checkRet = true
+ } else {
+ next = -2
+ r.forStack[0].checkRetArgs = true
+ }
+
+ // Tell the loops along the way to check for a return.
+ for _, loop := range r.forStack[1:] {
+ loop.checkRet = true
+ }
+
+ // Assign results, set #next, and return false.
+ bl := &syntax.BlockStmt{}
+ if x.Results != nil {
+ if r.retVars == nil {
+ for i, a := range r.outer.ResultList {
+ obj := r.declVar(fmt.Sprintf("#r%d", i+1), a.Type.GetTypeInfo().Type, nil)
+ r.retVars = append(r.retVars, obj)
+ }
+ }
+ bl.List = append(bl.List, &syntax.AssignStmt{Lhs: r.useList(r.retVars), Rhs: x.Results})
+ }
+ bl.List = append(bl.List, &syntax.AssignStmt{Lhs: r.next(), Rhs: r.intConst(next)})
+ if r.checkFuncMisuse() {
+ // mark all enclosing loop bodies as exited
+ for i := 0; i < len(r.forStack); i++ {
+ bl.List = append(bl.List, r.setExitedAt(i))
+ }
+ }
+ bl.List = append(bl.List, &syntax.ReturnStmt{Results: r.useVar(r.false)})
+ setPos(bl, x.Pos())
+ return bl
+}
+
+// perLoopStep is part of the encoding of loop-spanning control flow
+// for function range iterators. Each multiple of two encodes a "return false"
+// passing control to an enclosing iterator; a terminal value of 1 encodes
+// "return true" (i.e., local continue) from the body function, and a terminal
+// value of 0 encodes executing the remainder of the body function.
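+//
+// For example, in the labeled-break example in the package comment above,
+// "break F" from inside h is encoded as #next = 4 (after h's body returns
+// false, two more body functions, g's and then f's, must return false),
+// while "continue F" is encoded as #next = 3 (g's body returns false and
+// then f's body returns true).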
+const perLoopStep = 2
+
+// editBranch returns the replacement for the branch statement x,
+// or x itself if it should be left alone.
+// See the package doc comment above for more context.
+func (r *rewriter) editBranch(x *syntax.BranchStmt) syntax.Stmt {
+ if x.Tok == syntax.Fallthrough {
+ // Fallthrough is unaffected by the rewrite.
+ return x
+ }
+
+ // Find target of break/continue/goto in r.forStack.
+ // (The target may not be in r.forStack at all.)
+ targ := x.Target
+ i := len(r.forStack) - 1
+ if x.Label == nil && r.forStack[i].nfor != targ {
+ // An unlabeled break or continue whose target is not nfor must target something nested inside nfor. Leave it alone.
+ return x
+ }
+ for i >= 0 && r.forStack[i].nfor != targ {
+ i--
+ }
+ // exitFrom is the index of the loop interior to the target of the control flow,
+ // if such a loop exists (it does not if i == len(r.forStack) - 1).
+ exitFrom := i + 1
+
+ // Compute the value to assign to #next and the specific return to use.
+ var next int
+ var ret *syntax.ReturnStmt
+ if x.Tok == syntax.Goto || i < 0 {
+ // goto Label
+ // or break/continue of labeled non-range-over-func loop.
+ // We may be able to leave it alone, or we may have to break
+ // out of one or more nested loops and then use #next to signal
+ // to complete the break/continue/goto.
+ // Figure out which range-over-func loop contains the label.
+ r.computeBranchNext()
+ nfor := r.forStack[len(r.forStack)-1].nfor
+ label := x.Label.Value
+ targ := r.labelLoop[label]
+ if nfor == targ {
+ // Label is in the innermost range-over-func loop; use it directly.
+ return x
+ }
+
+ // Set #next to the code meaning break/continue/goto label.
+ next = r.branchNext[branch{x.Tok, label}]
+
+ // Break out of nested loops up to targ.
+ i := len(r.forStack) - 1
+ for i >= 0 && r.forStack[i].nfor != targ {
+ i--
+ }
+ exitFrom = i + 1
+
+ // Mark the loop we exit to reach targ, so it checks for that branch.
+ // When i == -1, that's the outermost func body.
+ top := r.forStack[i+1]
+ top.checkBranch = append(top.checkBranch, branch{x.Tok, label})
+
+ // Mark loops along the way to check for a plain return, so they break.
+ for j := i + 2; j < len(r.forStack); j++ {
+ r.forStack[j].checkRet = true
+ }
+
+ // In the innermost loop, use a plain "return false".
+ ret = &syntax.ReturnStmt{Results: r.useVar(r.false)}
+ } else {
+ // break/continue of labeled range-over-func loop.
+ depth := len(r.forStack) - 1 - i
+
+ // For continue of innermost loop, use "return true".
+ // Otherwise we are breaking the innermost loop, so "return false".
+
+ if depth == 0 && x.Tok == syntax.Continue {
+ ret = &syntax.ReturnStmt{Results: r.useVar(r.true)}
+ setPos(ret, x.Pos())
+ return ret
+ }
+ ret = &syntax.ReturnStmt{Results: r.useVar(r.false)}
+
+ // If this is a simple break, mark this loop as exited and return false.
+ // No adjustments to #next.
+ if depth == 0 {
+ var stmts []syntax.Stmt
+ if r.checkFuncMisuse() {
+ stmts = []syntax.Stmt{r.setExited(), ret}
+ } else {
+ stmts = []syntax.Stmt{ret}
+ }
+ bl := &syntax.BlockStmt{
+ List: stmts,
+ }
+ setPos(bl, x.Pos())
+ return bl
+ }
+
+ // The loop inside the one we are break/continue-ing
+ // needs to make that happen when we break out of it.
+ if x.Tok == syntax.Continue {
+ r.forStack[exitFrom].checkContinue = true
+ } else {
+ exitFrom = i
+ r.forStack[exitFrom].checkBreak = true
+ }
+
+ // The loops along the way just need to break.
+ for j := exitFrom + 1; j < len(r.forStack); j++ {
+ r.forStack[j].checkBreak = true
+ }
+
+ // Set next to break the appropriate number of times;
+ // the final time may be a continue, not a break.
+ next = perLoopStep * depth
+ if x.Tok == syntax.Continue {
+ next--
+ }
+ }
+
+ // Assign #next = next and do the return.
+ as := &syntax.AssignStmt{Lhs: r.next(), Rhs: r.intConst(next)}
+ bl := &syntax.BlockStmt{
+ List: []syntax.Stmt{as},
+ }
+
+ if r.checkFuncMisuse() {
+ // Set #exitK for this loop and those exited by the control flow.
+ for i := exitFrom; i < len(r.forStack); i++ {
+ bl.List = append(bl.List, r.setExitedAt(i))
+ }
+ }
+
+ bl.List = append(bl.List, ret)
+ setPos(bl, x.Pos())
+ return bl
+}
+
+// computeBranchNext computes the branchNext numbering
+// and determines which labels end up inside which range-over-func loop bodies.
+func (r *rewriter) computeBranchNext() {
+ if r.labelLoop != nil {
+ return
+ }
+
+ r.labelLoop = make(map[string]*syntax.ForStmt)
+ r.branchNext = make(map[branch]int)
+
+ var labels []string
+ var stack []syntax.Node
+ var forStack []*syntax.ForStmt
+ forStack = append(forStack, nil)
+ syntax.Inspect(r.body, func(n syntax.Node) bool {
+ if n != nil {
+ stack = append(stack, n)
+ if nfor, ok := forRangeFunc(n); ok {
+ forStack = append(forStack, nfor)
+ }
+ if n, ok := n.(*syntax.LabeledStmt); ok {
+ l := n.Label.Value
+ labels = append(labels, l)
+ f := forStack[len(forStack)-1]
+ r.labelLoop[l] = f
+ }
+ } else {
+ n := stack[len(stack)-1]
+ stack = stack[:len(stack)-1]
+ if n == forStack[len(forStack)-1] {
+ forStack = forStack[:len(forStack)-1]
+ }
+ }
+ return true
+ })
+
+ // Assign numbers to all the labels we observed.
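+ // The first label observed gets -5 (break), -4 (continue), and -3 (goto);
+ // the second gets -8, -7, and -6; and so on, keeping every code below -2
+ // so it cannot collide with the return encodings.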
+ used := -2
+ for _, l := range labels {
+ used -= 3
+ r.branchNext[branch{syntax.Break, l}] = used
+ r.branchNext[branch{syntax.Continue, l}] = used + 1
+ r.branchNext[branch{syntax.Goto, l}] = used + 2
+ }
+}
+
+// endLoop finishes the conversion of a range-over-func loop.
+// We have inspected and rewritten the body of the loop and can now
+// construct the body function and rewrite the for loop into a call
+// bracketed by any declarations and checks it requires.
+func (r *rewriter) endLoop(loop *forLoop) {
+ // Pick apart for range X { ... }
+ nfor := loop.nfor
+ start, end := nfor.Pos(), nfor.Body.Rbrace // start, end position of for loop
+ rclause := nfor.Init.(*syntax.RangeClause)
+ rfunc := types2.CoreType(rclause.X.GetTypeInfo().Type).(*types2.Signature) // type of X - func(func(...)bool)
+ if rfunc.Params().Len() != 1 {
+ base.Fatalf("invalid typecheck of range func")
+ }
+ ftyp := types2.CoreType(rfunc.Params().At(0).Type()).(*types2.Signature) // func(...) bool
+ if ftyp.Results().Len() != 1 {
+ base.Fatalf("invalid typecheck of range func")
+ }
+
+ // Build X(bodyFunc)
+ call := &syntax.ExprStmt{
+ X: &syntax.CallExpr{
+ Fun: rclause.X,
+ ArgList: []syntax.Expr{
+ r.bodyFunc(nfor.Body.List, syntax.UnpackListExpr(rclause.Lhs), rclause.Def, ftyp, start, end),
+ },
+ },
+ }
+ setPos(call, start)
+
+ // Build checks based on #next after X(bodyFunc)
+ checks := r.checks(loop, end)
+
+ // Rewrite for vars := range X { ... } to
+ //
+ // {
+ // r.declStmt
+ // call
+ // checks
+ // }
+ //
+ // The r.declStmt can be added to by this loop or any inner loop
+ // during the creation of r.bodyFunc; it is only emitted in the outermost
+ // converted range loop.
+ block := &syntax.BlockStmt{Rbrace: end}
+ setPos(block, start)
+ if len(r.forStack) == 1 && r.declStmt != nil {
+ setPos(r.declStmt, start)
+ block.List = append(block.List, r.declStmt)
+ }
+
+ // declare the exitFlag here so it has proper scope and zeroing
+ if r.checkFuncMisuse() {
+ exitFlagDecl := &syntax.DeclStmt{DeclList: []syntax.Decl{loop.exitFlagDecl}}
+ block.List = append(block.List, exitFlagDecl)
+ }
+
+ // iteratorFunc(bodyFunc)
+ block.List = append(block.List, call)
+
+ if r.checkFuncMisuse() {
+ // iteratorFunc has exited, mark the exit flag for the body
+ block.List = append(block.List, r.setExited())
+ }
+ block.List = append(block.List, checks...)
+
+ if len(r.forStack) == 1 { // ending an outermost loop
+ r.declStmt = nil
+ r.nextVar = nil
+ r.retVars = nil
+ r.defers = nil
+ }
+
+ r.rewritten[nfor] = block
+}
+
+func (r *rewriter) setExited() *syntax.AssignStmt {
+ return r.setExitedAt(len(r.forStack) - 1)
+}
+
+func (r *rewriter) setExitedAt(index int) *syntax.AssignStmt {
+ loop := r.forStack[index]
+ return &syntax.AssignStmt{
+ Lhs: r.useVar(loop.exitFlag),
+ Rhs: r.useVar(r.true),
+ }
+}
+
+// bodyFunc converts the loop body (control flow has already been updated)
+// to a func literal that can be passed to the range function.
+//
+// lhs is the list of range variables from the range statement.
+// def indicates whether this is a := range statement.
+// ftyp is the type of the function we are creating.
+// start and end are the syntax positions to use for new nodes
+// that should be at the start or end of the loop.
+func (r *rewriter) bodyFunc(body []syntax.Stmt, lhs []syntax.Expr, def bool, ftyp *types2.Signature, start, end syntax.Pos) *syntax.FuncLit {
+ // Starting X(bodyFunc); build up bodyFunc first.
+ var params, results []*types2.Var
+ results = append(results, types2.NewVar(start, nil, "", r.bool.Type()))
+ bodyFunc := &syntax.FuncLit{
+ // Note: Type is ignored but needs to be non-nil to avoid panic in syntax.Inspect.
+ Type: &syntax.FuncType{},
+ Body: &syntax.BlockStmt{
+ List: []syntax.Stmt{},
+ Rbrace: end,
+ },
+ }
+ setPos(bodyFunc, start)
+
+ for i := 0; i < ftyp.Params().Len(); i++ {
+ typ := ftyp.Params().At(i).Type()
+ var paramVar *types2.Var
+ if i < len(lhs) && def {
+ // Reuse range variable as parameter.
+ x := lhs[i]
+ paramVar = r.info.Defs[x.(*syntax.Name)].(*types2.Var)
+ } else {
+ // Declare new parameter and assign it to range expression.
+ paramVar = types2.NewVar(start, r.pkg, fmt.Sprintf("#p%d", 1+i), typ)
+ if i < len(lhs) {
+ x := lhs[i]
+ as := &syntax.AssignStmt{Lhs: x, Rhs: r.useVar(paramVar)}
+ as.SetPos(x.Pos())
+ setPos(as.Rhs, x.Pos())
+ bodyFunc.Body.List = append(bodyFunc.Body.List, as)
+ }
+ }
+ params = append(params, paramVar)
+ }
+
+ tv := syntax.TypeAndValue{
+ Type: types2.NewSignatureType(nil, nil, nil,
+ types2.NewTuple(params...),
+ types2.NewTuple(results...),
+ false),
+ }
+ tv.SetIsValue()
+ bodyFunc.SetTypeInfo(tv)
+
+ loop := r.forStack[len(r.forStack)-1]
+
+ if r.checkFuncMisuse() {
+ bodyFunc.Body.List = append(bodyFunc.Body.List, r.assertNotExited(start, loop))
+ }
+
+ // Original loop body (already rewritten by editStmt during inspect).
+ bodyFunc.Body.List = append(bodyFunc.Body.List, body...)
+
+ // return true to continue at end of loop body
+ ret := &syntax.ReturnStmt{Results: r.useVar(r.true)}
+ ret.SetPos(end)
+ bodyFunc.Body.List = append(bodyFunc.Body.List, ret)
+
+ return bodyFunc
+}
+
+// checks returns the post-call checks that need to be done for the given loop.
+func (r *rewriter) checks(loop *forLoop, pos syntax.Pos) []syntax.Stmt {
+ var list []syntax.Stmt
+ if len(loop.checkBranch) > 0 {
+ did := make(map[branch]bool)
+ for _, br := range loop.checkBranch {
+ if did[br] {
+ continue
+ }
+ did[br] = true
+ doBranch := &syntax.BranchStmt{Tok: br.tok, Label: &syntax.Name{Value: br.label}}
+ list = append(list, r.ifNext(syntax.Eql, r.branchNext[br], doBranch))
+ }
+ }
+ if len(r.forStack) == 1 {
+ if loop.checkRetArgs {
+ list = append(list, r.ifNext(syntax.Eql, -2, retStmt(r.useList(r.retVars))))
+ }
+ if loop.checkRet {
+ list = append(list, r.ifNext(syntax.Eql, -1, retStmt(nil)))
+ }
+ } else {
+ if loop.checkRetArgs || loop.checkRet {
+ // Note: next < 0 also handles gotos handled by outer loops.
+ // We set checkRet in that case to trigger this check.
+ list = append(list, r.ifNext(syntax.Lss, 0, retStmt(r.useVar(r.false))))
+ }
+ if loop.checkBreak {
+ list = append(list, r.ifNext(syntax.Geq, perLoopStep, retStmt(r.useVar(r.false))))
+ }
+ if loop.checkContinue {
+ list = append(list, r.ifNext(syntax.Eql, perLoopStep-1, retStmt(r.useVar(r.true))))
+ }
+ }
+
+ for _, j := range list {
+ setPos(j, pos)
+ }
+ return list
+}
+
+// retStmt returns a return statement returning the given return values.
+func retStmt(results syntax.Expr) *syntax.ReturnStmt {
+ return &syntax.ReturnStmt{Results: results}
+}
+
+// ifNext returns the statement:
+//
+// if #next op c { adjust; then }
+//
+// When op is >=, adjust is #next -= c.
+// When op is == and c is not -1 or -2, adjust is #next = 0.
+// Otherwise adjust is omitted.
+func (r *rewriter) ifNext(op syntax.Operator, c int, then syntax.Stmt) syntax.Stmt {
+ nif := &syntax.IfStmt{
+ Cond: &syntax.Operation{Op: op, X: r.next(), Y: r.intConst(c)},
+ Then: &syntax.BlockStmt{
+ List: []syntax.Stmt{then},
+ },
+ }
+ tv := syntax.TypeAndValue{Type: r.bool.Type()}
+ tv.SetIsValue()
+ nif.Cond.SetTypeInfo(tv)
+
+ if op == syntax.Geq {
+ sub := &syntax.AssignStmt{
+ Op: syntax.Sub,
+ Lhs: r.next(),
+ Rhs: r.intConst(c),
+ }
+ nif.Then.List = []syntax.Stmt{sub, then}
+ }
+ if op == syntax.Eql && c != -1 && c != -2 {
+ clr := &syntax.AssignStmt{
+ Lhs: r.next(),
+ Rhs: r.intConst(0),
+ }
+ nif.Then.List = []syntax.Stmt{clr, then}
+ }
+
+ return nif
+}
+
+// setValueType marks x as a value with type typ.
+func setValueType(x syntax.Expr, typ syntax.Type) {
+ tv := syntax.TypeAndValue{Type: typ}
+ tv.SetIsValue()
+ x.SetTypeInfo(tv)
+}
+
+// assertNotExited returns the statement:
+//
+// if #exitK { runtime.panicrangeexit() }
+//
+// where #exitK is the exit guard for loop.
+func (r *rewriter) assertNotExited(start syntax.Pos, loop *forLoop) syntax.Stmt {
+ callPanicExpr := &syntax.CallExpr{
+ Fun: runtimeSym(r.info, "panicrangeexit"),
+ }
+ setValueType(callPanicExpr, nil) // no result type
+
+ callPanic := &syntax.ExprStmt{X: callPanicExpr}
+
+ nif := &syntax.IfStmt{
+ Cond: r.useVar(loop.exitFlag),
+ Then: &syntax.BlockStmt{
+ List: []syntax.Stmt{callPanic},
+ },
+ }
+ setPos(nif, start)
+ return nif
+}
+
+// next returns a reference to the #next variable.
+func (r *rewriter) next() *syntax.Name {
+ if r.nextVar == nil {
+ r.nextVar = r.declVar("#next", r.int.Type(), nil)
+ }
+ return r.useVar(r.nextVar)
+}
+
+// forRangeFunc checks whether n is a range-over-func.
+// If so, it returns n.(*syntax.ForStmt), true.
+// Otherwise it returns nil, false.
+func forRangeFunc(n syntax.Node) (*syntax.ForStmt, bool) {
+ nfor, ok := n.(*syntax.ForStmt)
+ if !ok {
+ return nil, false
+ }
+ nrange, ok := nfor.Init.(*syntax.RangeClause)
+ if !ok {
+ return nil, false
+ }
+ _, ok = types2.CoreType(nrange.X.GetTypeInfo().Type).(*types2.Signature)
+ if !ok {
+ return nil, false
+ }
+ return nfor, true
+}
+
+// intConst returns syntax for an integer literal with the given value.
+func (r *rewriter) intConst(c int) *syntax.BasicLit {
+ lit := &syntax.BasicLit{
+ Value: fmt.Sprint(c),
+ Kind: syntax.IntLit,
+ }
+ tv := syntax.TypeAndValue{Type: r.int.Type(), Value: constant.MakeInt64(int64(c))}
+ tv.SetIsValue()
+ lit.SetTypeInfo(tv)
+ return lit
+}
+
+// useVar returns syntax for a reference to obj, which should be a declared object.
+func (r *rewriter) useVar(obj types2.Object) *syntax.Name {
+ n := syntax.NewName(nopos, obj.Name())
+ tv := syntax.TypeAndValue{Type: obj.Type()}
+ tv.SetIsValue()
+ n.SetTypeInfo(tv)
+ r.info.Uses[n] = obj
+ return n
+}
+
+// useList is useVar for a list of decls.
+func (r *rewriter) useList(vars []types2.Object) syntax.Expr {
+ var new []syntax.Expr
+ for _, obj := range vars {
+ new = append(new, r.useVar(obj))
+ }
+ if len(new) == 1 {
+ return new[0]
+ }
+ return &syntax.ListExpr{ElemList: new}
+}
+
+// declVar declares a variable with a given name, type, and initializer value.
+func (r *rewriter) declVar(name string, typ types2.Type, init syntax.Expr) *types2.Var {
+ if r.declStmt == nil {
+ r.declStmt = &syntax.DeclStmt{}
+ }
+ stmt := r.declStmt
+ obj := types2.NewVar(stmt.Pos(), r.pkg, name, typ)
+ n := syntax.NewName(stmt.Pos(), name)
+ tv := syntax.TypeAndValue{Type: typ}
+ tv.SetIsValue()
+ n.SetTypeInfo(tv)
+ r.info.Defs[n] = obj
+ stmt.DeclList = append(stmt.DeclList, &syntax.VarDecl{
+ NameList: []*syntax.Name{n},
+ // Note: Type is ignored
+ Values: init,
+ })
+ return obj
+}
+
+// declType declares a type with the given name and type.
+// This is more like "type name = typ" than "type name typ".
+func declType(pos syntax.Pos, name string, typ types2.Type) *syntax.Name {
+ n := syntax.NewName(pos, name)
+ n.SetTypeInfo(syntax.TypeAndValue{Type: typ})
+ return n
+}
+
+// runtimePkg is a fake runtime package that contains what we need to refer to in package runtime.
+var runtimePkg = func() *types2.Package {
+ var nopos syntax.Pos
+ pkg := types2.NewPackage("runtime", "runtime")
+ anyType := types2.Universe.Lookup("any").Type()
+
+ // func deferrangefunc() unsafe.Pointer
+ obj := types2.NewFunc(nopos, pkg, "deferrangefunc", types2.NewSignatureType(nil, nil, nil, nil, types2.NewTuple(types2.NewParam(nopos, pkg, "extra", anyType)), false))
+ pkg.Scope().Insert(obj)
+
+ // func panicrangeexit()
+ obj = types2.NewFunc(nopos, pkg, "panicrangeexit", types2.NewSignatureType(nil, nil, nil, nil, nil, false))
+ pkg.Scope().Insert(obj)
+
+ return pkg
+}()
+
+// runtimeSym returns a reference to a symbol in the fake runtime package.
+func runtimeSym(info *types2.Info, name string) *syntax.Name {
+ obj := runtimePkg.Scope().Lookup(name)
+ n := syntax.NewName(nopos, "runtime."+name)
+ tv := syntax.TypeAndValue{Type: obj.Type()}
+ tv.SetIsValue()
+ tv.SetIsRuntimeHelper()
+ n.SetTypeInfo(tv)
+ info.Uses[n] = obj
+ return n
+}
+
+// setPos walks the top structure of x that has no position assigned
+// and assigns it all to have position pos.
+// When setPos encounters a syntax node with a position assigned,
+// setPos does not look inside that node.
+// setPos only needs to handle syntax we create in this package;
+// all other syntax should have positions assigned already.
+func setPos(x syntax.Node, pos syntax.Pos) {
+ if x == nil {
+ return
+ }
+ syntax.Inspect(x, func(n syntax.Node) bool {
+ if n == nil || n.Pos() != nopos {
+ return false
+ }
+ n.SetPos(pos)
+ switch n := n.(type) {
+ case *syntax.BlockStmt:
+ if n.Rbrace == nopos {
+ n.Rbrace = pos
+ }
+ }
+ return true
+ })
+}
diff --git a/src/cmd/compile/internal/reflectdata/alg.go b/src/cmd/compile/internal/reflectdata/alg.go
index 69de685ca0..a0f5522153 100644
--- a/src/cmd/compile/internal/reflectdata/alg.go
+++ b/src/cmd/compile/internal/reflectdata/alg.go
@@ -115,7 +115,7 @@ func genhash(t *types.Type) *obj.LSym {
case types.TARRAY:
genhash(t.Elem())
case types.TSTRUCT:
- for _, f := range t.FieldSlice() {
+ for _, f := range t.Fields() {
genhash(f.Type)
}
}
@@ -140,20 +140,25 @@ func hashFunc(t *types.Type) *ir.Func {
return sym.Def.(*ir.Name).Func
}
- base.Pos = base.AutogeneratedPos // less confusing than end of input
- typecheck.DeclContext = ir.PEXTERN
+ pos := base.AutogeneratedPos // less confusing than end of input
+ base.Pos = pos
// func sym(p *T, h uintptr) uintptr
- args := []*ir.Field{
- ir.NewField(base.Pos, typecheck.Lookup("p"), types.NewPtr(t)),
- ir.NewField(base.Pos, typecheck.Lookup("h"), types.Types[types.TUINTPTR]),
- }
- results := []*ir.Field{ir.NewField(base.Pos, nil, types.Types[types.TUINTPTR])}
-
- fn := typecheck.DeclFunc(sym, nil, args, results)
+ fn := ir.NewFunc(pos, pos, sym, types.NewSignature(nil,
+ []*types.Field{
+ types.NewField(pos, typecheck.Lookup("p"), types.NewPtr(t)),
+ types.NewField(pos, typecheck.Lookup("h"), types.Types[types.TUINTPTR]),
+ },
+ []*types.Field{
+ types.NewField(pos, nil, types.Types[types.TUINTPTR]),
+ },
+ ))
sym.Def = fn.Nname
- np := ir.AsNode(fn.Type().Params().Field(0).Nname)
- nh := ir.AsNode(fn.Type().Params().Field(1).Nname)
+ fn.Pragma |= ir.Noinline // TODO(mdempsky): We need to emit this during the unified frontend instead, to allow inlining.
+
+ typecheck.DeclFunc(fn)
+ np := fn.Dcl[0]
+ nh := fn.Dcl[1]
switch t.Kind() {
case types.TARRAY:
@@ -163,7 +168,7 @@ func hashFunc(t *types.Type) *ir.Func {
hashel := hashfor(t.Elem())
// for i := 0; i < nelem; i++
- ni := typecheck.Temp(types.Types[types.TINT])
+ ni := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
init := ir.NewAssignStmt(base.Pos, ni, ir.NewInt(base.Pos, 0))
cond := ir.NewBinaryExpr(base.Pos, ir.OLT, ni, ir.NewInt(base.Pos, t.NumElem()))
post := ir.NewAssignStmt(base.Pos, ni, ir.NewBinaryExpr(base.Pos, ir.OADD, ni, ir.NewInt(base.Pos, 1)))
@@ -185,7 +190,7 @@ func hashFunc(t *types.Type) *ir.Func {
case types.TSTRUCT:
// Walk the struct using memhash for runs of AMEM
// and calling specific hash functions for the others.
- for i, fields := 0, t.FieldSlice(); i < len(fields); {
+ for i, fields := 0, t.Fields(); i < len(fields); {
f := fields[i]
// Skip blank fields.
@@ -198,8 +203,7 @@ func hashFunc(t *types.Type) *ir.Func {
if !compare.IsRegularMemory(f.Type) {
hashel := hashfor(f.Type)
call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
- nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
- na := typecheck.NodAddr(nx)
+ na := typecheck.NodAddr(typecheck.DotField(base.Pos, np, i))
call.Args.Append(na)
call.Args.Append(nh)
fn.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))
@@ -213,8 +217,7 @@ func hashFunc(t *types.Type) *ir.Func {
// h = hashel(&p.first, size, h)
hashel := hashmem(f.Type)
call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
- nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
- na := typecheck.NodAddr(nx)
+ na := typecheck.NodAddr(typecheck.DotField(base.Pos, np, i))
call.Args.Append(na)
call.Args.Append(nh)
call.Args.Append(ir.NewInt(base.Pos, size))
@@ -235,22 +238,18 @@ func hashFunc(t *types.Type) *ir.Func {
typecheck.FinishFuncBody()
fn.SetDupok(true)
- typecheck.Func(fn)
ir.WithFunc(fn, func() {
typecheck.Stmts(fn.Body)
})
fn.SetNilCheckDisabled(true)
- typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
return fn
}
func runtimeHashFor(name string, t *types.Type) *ir.Name {
- n := typecheck.LookupRuntime(name)
- n = typecheck.SubstArgTypes(n, t)
- return n
+ return typecheck.LookupRuntime(name, t)
}
// hashfor returns the function to compute the hash of a value of type t.
@@ -366,18 +365,27 @@ func eqFunc(t *types.Type) *ir.Func {
if sym.Def != nil {
return sym.Def.(*ir.Name).Func
}
- base.Pos = base.AutogeneratedPos // less confusing than end of input
- typecheck.DeclContext = ir.PEXTERN
+
+ pos := base.AutogeneratedPos // less confusing than end of input
+ base.Pos = pos
// func sym(p, q *T) bool
- fn := typecheck.DeclFunc(sym, nil,
- []*ir.Field{ir.NewField(base.Pos, typecheck.Lookup("p"), types.NewPtr(t)), ir.NewField(base.Pos, typecheck.Lookup("q"), types.NewPtr(t))},
- []*ir.Field{ir.NewField(base.Pos, typecheck.Lookup("r"), types.Types[types.TBOOL])},
- )
+ fn := ir.NewFunc(pos, pos, sym, types.NewSignature(nil,
+ []*types.Field{
+ types.NewField(pos, typecheck.Lookup("p"), types.NewPtr(t)),
+ types.NewField(pos, typecheck.Lookup("q"), types.NewPtr(t)),
+ },
+ []*types.Field{
+ types.NewField(pos, typecheck.Lookup("r"), types.Types[types.TBOOL]),
+ },
+ ))
sym.Def = fn.Nname
- np := ir.AsNode(fn.Type().Params().Field(0).Nname)
- nq := ir.AsNode(fn.Type().Params().Field(1).Nname)
- nr := ir.AsNode(fn.Type().Results().Field(0).Nname)
+ fn.Pragma |= ir.Noinline // TODO(mdempsky): We need to emit this during the unified frontend instead, to allow inlining.
+
+ typecheck.DeclFunc(fn)
+ np := fn.Dcl[0]
+ nq := fn.Dcl[1]
+ nr := fn.Dcl[2]
// Label to jump to if an equality test fails.
neq := typecheck.AutoLabel(".neq")
@@ -440,7 +448,7 @@ func eqFunc(t *types.Type) *ir.Func {
if iterateTo > 0 {
// Generate an unrolled for loop.
// for i := 0; i < nelem/unroll*unroll; i += unroll
- i := typecheck.Temp(types.Types[types.TINT])
+ i := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
init := ir.NewAssignStmt(base.Pos, i, ir.NewInt(base.Pos, 0))
cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(base.Pos, iterateTo))
loop := ir.NewForStmt(base.Pos, nil, cond, nil, nil, false)
@@ -619,7 +627,6 @@ func eqFunc(t *types.Type) *ir.Func {
typecheck.FinishFuncBody()
fn.SetDupok(true)
- typecheck.Func(fn)
ir.WithFunc(fn, func() {
typecheck.Stmts(fn.Body)
@@ -630,7 +637,6 @@ func eqFunc(t *types.Type) *ir.Func {
// neither of which can be nil, and our comparisons
// are shallow.
fn.SetNilCheckDisabled(true)
- typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
return fn
}
@@ -639,9 +645,7 @@ func eqFunc(t *types.Type) *ir.Func {
func EqFor(t *types.Type) (ir.Node, bool) {
switch a, _ := types.AlgType(t); a {
case types.AMEM:
- n := typecheck.LookupRuntime("memequal")
- n = typecheck.SubstArgTypes(n, t, t)
- return n, true
+ return typecheck.LookupRuntime("memequal", t, t), true
case types.ASPECIAL:
fn := eqFunc(t)
return fn.Nname, false
@@ -659,7 +663,5 @@ func anyCall(fn *ir.Func) bool {
}
func hashmem(t *types.Type) ir.Node {
- n := typecheck.LookupRuntime("memhash")
- n = typecheck.SubstArgTypes(n, t)
- return n
+ return typecheck.LookupRuntime("memhash", t)
}
diff --git a/src/cmd/compile/internal/reflectdata/helpers.go b/src/cmd/compile/internal/reflectdata/helpers.go
index f2d69cd256..9ba62d6a29 100644
--- a/src/cmd/compile/internal/reflectdata/helpers.go
+++ b/src/cmd/compile/internal/reflectdata/helpers.go
@@ -16,16 +16,6 @@ func hasRType(n, rtype ir.Node, fieldName string) bool {
return true
}
- // We make an exception for `init`, because we still depend on
- // pkginit for sorting package initialization statements, and it
- // gets confused by implicit conversions. Also, because
- // package-scope statements can never be generic, so they'll never
- // require dictionary lookups.
- if ir.CurFunc.Nname.Sym().Name != "init" {
- ir.Dump("CurFunc", ir.CurFunc)
- base.FatalfAt(n.Pos(), "missing %s in %v: %+v", fieldName, ir.CurFunc, n)
- }
-
return false
}
@@ -126,11 +116,11 @@ func ConvIfaceTypeWord(pos src.XPos, n *ir.ConvExpr) ir.Node {
}
// ConvIfaceSrcRType asserts that n is a conversion from
-// non-interface type to interface type (or OCONVIDATA operation), and
+// non-interface type to interface type, and
// returns an expression that yields the *runtime._type for copying
// the convertee value to the heap.
func ConvIfaceSrcRType(pos src.XPos, n *ir.ConvExpr) ir.Node {
- assertOp2(n, ir.OCONVIFACE, ir.OCONVIDATA)
+ assertOp(n, ir.OCONVIFACE)
if hasRType(n, n.SrcRType, "SrcRType") {
return n.SrcRType
}
diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
index 6ef40cb84c..c2407af017 100644
--- a/src/cmd/compile/internal/reflectdata/reflect.go
+++ b/src/cmd/compile/internal/reflectdata/reflect.go
@@ -18,6 +18,8 @@ import (
"cmd/compile/internal/compare"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
+ "cmd/compile/internal/rttype"
+ "cmd/compile/internal/staticdata"
"cmd/compile/internal/typebits"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
@@ -32,10 +34,6 @@ type ptabEntry struct {
t *types.Type
}
-func CountPTabs() int {
- return len(ptabs)
-}
-
// runtime interface and reflection data structures
var (
// protects signatset and signatslice
@@ -47,8 +45,6 @@ var (
gcsymmu sync.Mutex // protects gcsymset and gcsymslice
gcsymset = make(map[*types.Type]struct{})
-
- ptabs []*ir.Name
)
type typeSig struct {
@@ -77,15 +73,13 @@ const (
MAXELEMSIZE = abi.MapMaxElemBytes
)
-func structfieldSize() int { return abi.StructFieldSize(types.PtrSize) } // Sizeof(runtime.structfield{})
-func imethodSize() int { return abi.IMethodSize(types.PtrSize) } // Sizeof(runtime.imethod{})
-func commonSize() int { return abi.CommonSize(types.PtrSize) } // Sizeof(runtime._type{})
+func commonSize() int { return int(rttype.Type.Size()) } // Sizeof(runtime._type{})
func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{})
if t.Sym() == nil && len(methods(t)) == 0 {
return 0
}
- return int(abi.UncommonSize())
+ return int(rttype.UncommonType.Size())
}
func makefield(name string, t *types.Type) *types.Field {
@@ -201,15 +195,15 @@ func MapBucketType(t *types.Type) *types.Type {
return bucket
}
-// MapType builds a type representing a Hmap structure for the given map type.
+var hmapType *types.Type
+
+// MapType returns a type interchangeable with runtime.hmap.
// Make sure this stays in sync with runtime/map.go.
-func MapType(t *types.Type) *types.Type {
- if t.MapType().Hmap != nil {
- return t.MapType().Hmap
+func MapType() *types.Type {
+ if hmapType != nil {
+ return hmapType
}
- bmap := MapBucketType(t)
-
// build a struct:
// type hmap struct {
// count int
@@ -217,8 +211,8 @@ func MapType(t *types.Type) *types.Type {
// B uint8
// noverflow uint16
// hash0 uint32
- // buckets *bmap
- // oldbuckets *bmap
+ // buckets unsafe.Pointer
+ // oldbuckets unsafe.Pointer
// nevacuate uintptr
// extra unsafe.Pointer // *mapextra
// }
@@ -228,15 +222,19 @@ func MapType(t *types.Type) *types.Type {
makefield("flags", types.Types[types.TUINT8]),
makefield("B", types.Types[types.TUINT8]),
makefield("noverflow", types.Types[types.TUINT16]),
- makefield("hash0", types.Types[types.TUINT32]), // Used in walk.go for OMAKEMAP.
- makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for OMAKEMAP.
- makefield("oldbuckets", types.NewPtr(bmap)),
+ makefield("hash0", types.Types[types.TUINT32]), // Used in walk.go for OMAKEMAP.
+ makefield("buckets", types.Types[types.TUNSAFEPTR]), // Used in walk.go for OMAKEMAP.
+ makefield("oldbuckets", types.Types[types.TUNSAFEPTR]),
makefield("nevacuate", types.Types[types.TUINTPTR]),
makefield("extra", types.Types[types.TUNSAFEPTR]),
}
- hmap := types.NewStruct(fields)
- hmap.SetNoalg(true)
+ n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hmap"))
+ hmap := types.NewNamed(n)
+ n.SetType(hmap)
+ n.SetTypecheck(1)
+
+ hmap.SetUnderlying(types.NewStruct(fields))
types.CalcSize(hmap)
// The size of hmap should be 48 bytes on 64 bit
@@ -245,29 +243,29 @@ func MapType(t *types.Type) *types.Type {
base.Fatalf("hmap size not correct: got %d, want %d", hmap.Size(), size)
}
- t.MapType().Hmap = hmap
- hmap.StructType().Map = t
+ hmapType = hmap
return hmap
}
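As a quick cross-check of the 48-byte assertion guarded just above (a back-of-the-envelope sum for a 64-bit target; not part of the patch):

    count       int             8
    flags       uint8           1
    B           uint8           1
    noverflow   uint16          2
    hash0       uint32          4   (the four small fields pack into one 8-byte word)
    buckets     unsafe.Pointer  8
    oldbuckets  unsafe.Pointer  8
    nevacuate   uintptr         8
    extra       unsafe.Pointer  8
                         total 48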
-// MapIterType builds a type representing an Hiter structure for the given map type.
+var hiterType *types.Type
+
+// MapIterType returns a type interchangeable with runtime.hiter.
// Make sure this stays in sync with runtime/map.go.
-func MapIterType(t *types.Type) *types.Type {
- if t.MapType().Hiter != nil {
- return t.MapType().Hiter
+func MapIterType() *types.Type {
+ if hiterType != nil {
+ return hiterType
}
- hmap := MapType(t)
- bmap := MapBucketType(t)
+ hmap := MapType()
// build a struct:
// type hiter struct {
- // key *Key
- // elem *Elem
+ // key unsafe.Pointer // *Key
+ // elem unsafe.Pointer // *Elem
// t unsafe.Pointer // *MapType
// h *hmap
- // buckets *bmap
- // bptr *bmap
+ // buckets unsafe.Pointer
+ // bptr unsafe.Pointer // *bmap
// overflow unsafe.Pointer // *[]*bmap
// oldoverflow unsafe.Pointer // *[]*bmap
// startBucket uintptr
@@ -280,12 +278,12 @@ func MapIterType(t *types.Type) *types.Type {
// }
// must match runtime/map.go:hiter.
fields := []*types.Field{
- makefield("key", types.NewPtr(t.Key())), // Used in range.go for TMAP.
- makefield("elem", types.NewPtr(t.Elem())), // Used in range.go for TMAP.
+ makefield("key", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
+ makefield("elem", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
makefield("t", types.Types[types.TUNSAFEPTR]),
makefield("h", types.NewPtr(hmap)),
- makefield("buckets", types.NewPtr(bmap)),
- makefield("bptr", types.NewPtr(bmap)),
+ makefield("buckets", types.Types[types.TUNSAFEPTR]),
+ makefield("bptr", types.Types[types.TUNSAFEPTR]),
makefield("overflow", types.Types[types.TUNSAFEPTR]),
makefield("oldoverflow", types.Types[types.TUNSAFEPTR]),
makefield("startBucket", types.Types[types.TUINTPTR]),
@@ -298,14 +296,18 @@ func MapIterType(t *types.Type) *types.Type {
}
// build iterator struct holding the above fields
- hiter := types.NewStruct(fields)
- hiter.SetNoalg(true)
+ n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hiter"))
+ hiter := types.NewNamed(n)
+ n.SetType(hiter)
+ n.SetTypecheck(1)
+
+ hiter.SetUnderlying(types.NewStruct(fields))
types.CalcSize(hiter)
if hiter.Size() != int64(12*types.PtrSize) {
base.Fatalf("hash_iter size not correct %d %d", hiter.Size(), 12*types.PtrSize)
}
- t.MapType().Hiter = hiter
- hiter.StructType().Map = t
+
+ hiterType = hiter
return hiter
}
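The hash_iter size check a few lines up is the same kind of guard: the iterator must pack into exactly

    12 * PtrSize  =  12 * 8 = 96 bytes on 64-bit targets (12 * 4 = 48 bytes on 32-bit)

so any field added to runtime/map.go's hiter without updating this builder trips the Fatalf.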
@@ -327,7 +329,7 @@ func methods(t *types.Type) []*typeSig {
// make list of methods for t,
// generating code if necessary.
var ms []*typeSig
- for _, f := range mt.AllMethods().Slice() {
+ for _, f := range mt.AllMethods() {
if f.Sym == nil {
base.Fatalf("method with no sym on %v", mt)
}
@@ -374,7 +376,7 @@ func methods(t *types.Type) []*typeSig {
// imethods returns the methods of the interface type t, sorted by name.
func imethods(t *types.Type) []*typeSig {
var methods []*typeSig
- for _, f := range t.AllMethods().Slice() {
+ for _, f := range t.AllMethods() {
if f.Type.Kind() != types.TFUNC || f.Sym == nil {
continue
}
@@ -410,6 +412,10 @@ func dimportpath(p *types.Pkg) {
return
}
+ if p == types.LocalPkg && base.Ctxt.Pkgpath == "" {
+ panic("missing pkgpath")
+ }
+
// If we are compiling the runtime package, there are two runtime packages around
// -- localpkg and Pkgs.Runtime. We don't want to produce import path symbols for
// both of them, so just produce one for localpkg.
@@ -424,51 +430,35 @@ func dimportpath(p *types.Pkg) {
p.Pathsym = s
}
-func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int {
+func dgopkgpath(c rttype.Cursor, pkg *types.Pkg) {
+ c = c.Field("Bytes")
if pkg == nil {
- return objw.Uintptr(s, ot, 0)
- }
-
- if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" {
- // If we don't know the full import path of the package being compiled
- // (i.e. -p was not passed on the compiler command line), emit a reference to
- // type:.importpath.""., which the linker will rewrite using the correct import path.
- // Every package that imports this one directly defines the symbol.
- // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
- ns := base.Ctxt.Lookup(`type:.importpath."".`)
- return objw.SymPtr(s, ot, ns, 0)
+ c.WritePtr(nil)
+ return
}
dimportpath(pkg)
- return objw.SymPtr(s, ot, pkg.Pathsym, 0)
+ c.WritePtr(pkg.Pathsym)
}
-// dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol.
-func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
+// dgopkgpathOff writes an offset relocation to the pkg path symbol at c.
+func dgopkgpathOff(c rttype.Cursor, pkg *types.Pkg) {
if pkg == nil {
- return objw.Uint32(s, ot, 0)
- }
- if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" {
- // If we don't know the full import path of the package being compiled
- // (i.e. -p was not passed on the compiler command line), emit a reference to
- // type:.importpath.""., which the linker will rewrite using the correct import path.
- // Every package that imports this one directly defines the symbol.
- // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
- ns := base.Ctxt.Lookup(`type:.importpath."".`)
- return objw.SymPtrOff(s, ot, ns)
+ c.WriteInt32(0)
+ return
}
dimportpath(pkg)
- return objw.SymPtrOff(s, ot, pkg.Pathsym)
+ c.WriteSymPtrOff(pkg.Pathsym, false)
}
// dnameField dumps a reflect.name for a struct field.
-func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
+func dnameField(c rttype.Cursor, spkg *types.Pkg, ft *types.Field) {
if !types.IsExported(ft.Sym.Name) && ft.Sym.Pkg != spkg {
base.Fatalf("package mismatch for %v", ft.Sym)
}
nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name), ft.Embedded != 0)
- return objw.SymPtr(lsym, ot, nsym, 0)
+ c.Field("Bytes").WritePtr(nsym)
}
// dnameData writes the contents of a reflect.name into s at offset ot.
@@ -513,7 +503,9 @@ func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported,
ot = int(s.WriteBytes(base.Ctxt, int64(ot), b))
if pkg != nil {
- ot = dgopkgpathOff(s, ot, pkg)
+ c := rttype.NewCursor(s, int64(ot), types.Types[types.TUINT32])
+ dgopkgpathOff(c, pkg)
+ ot += 4
}
return ot
@@ -544,7 +536,9 @@ func dname(name, tag string, pkg *types.Pkg, exported, embedded bool) *obj.LSym
}
}
} else {
- sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
+ // TODO(mdempsky): We should be able to share these too (except
+ // maybe when dynamic linking).
+ sname = fmt.Sprintf("%s%s.%d", sname, types.LocalPkg.Prefix, dnameCount)
dnameCount++
}
if embedded {
@@ -562,14 +556,14 @@ func dname(name, tag string, pkg *types.Pkg, exported, embedded bool) *obj.LSym
// dextratype dumps the fields of a runtime.uncommontype.
// dataAdd is the offset in bytes after the header where the
-// backing array of the []method field is written (by dextratypeData).
-func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
+// backing array of the []method field should be written.
+func dextratype(lsym *obj.LSym, off int64, t *types.Type, dataAdd int) {
m := methods(t)
if t.Sym() == nil && len(m) == 0 {
- return ot
+ base.Fatalf("extra requested of type with no extra info %v", t)
}
- noff := int(types.RoundUp(int64(ot), int64(types.PtrSize)))
- if noff != ot {
+ noff := types.RoundUp(off, int64(types.PtrSize))
+ if noff != off {
base.Fatalf("unexpected alignment in dextratype for %v", t)
}
@@ -577,7 +571,8 @@ func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
writeType(a.type_)
}
- ot = dgopkgpathOff(lsym, ot, typePkg(t))
+ c := rttype.NewCursor(lsym, off, rttype.UncommonType)
+ dgopkgpathOff(c.Field("PkgPath"), typePkg(t))
dataAdd += uncommonSize(t)
mcount := len(m)
@@ -589,11 +584,27 @@ func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
base.Fatalf("methods are too far away on %v: %d", t, dataAdd)
}
- ot = objw.Uint16(lsym, ot, uint16(mcount))
- ot = objw.Uint16(lsym, ot, uint16(xcount))
- ot = objw.Uint32(lsym, ot, uint32(dataAdd))
- ot = objw.Uint32(lsym, ot, 0)
- return ot
+ c.Field("Mcount").WriteUint16(uint16(mcount))
+ c.Field("Xcount").WriteUint16(uint16(xcount))
+ c.Field("Moff").WriteUint32(uint32(dataAdd))
+ // Note: there is an unused uint32 field here.
+
+ // Write the backing array for the []method field.
+ array := rttype.NewArrayCursor(lsym, off+int64(dataAdd), rttype.Method, mcount)
+ for i, a := range m {
+ exported := types.IsExported(a.name.Name)
+ var pkg *types.Pkg
+ if !exported && a.name.Pkg != typePkg(t) {
+ pkg = a.name.Pkg
+ }
+ nsym := dname(a.name.Name, "", pkg, exported, false)
+
+ e := array.Elem(i)
+ e.Field("Name").WriteSymPtrOff(nsym, false)
+ dmethodptrOff(e.Field("Mtyp"), writeType(a.mtype))
+ dmethodptrOff(e.Field("Ifn"), a.isym)
+ dmethodptrOff(e.Field("Tfn"), a.tsym)
+ }
}
func typePkg(t *types.Type) *types.Pkg {
@@ -612,34 +623,11 @@ func typePkg(t *types.Type) *types.Pkg {
return nil
}
-// dextratypeData dumps the backing array for the []method field of
-// runtime.uncommontype.
-func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
- for _, a := range methods(t) {
- // ../../../../runtime/type.go:/method
- exported := types.IsExported(a.name.Name)
- var pkg *types.Pkg
- if !exported && a.name.Pkg != typePkg(t) {
- pkg = a.name.Pkg
- }
- nsym := dname(a.name.Name, "", pkg, exported, false)
-
- ot = objw.SymPtrOff(lsym, ot, nsym)
- ot = dmethodptrOff(lsym, ot, writeType(a.mtype))
- ot = dmethodptrOff(lsym, ot, a.isym)
- ot = dmethodptrOff(lsym, ot, a.tsym)
- }
- return ot
-}
-
-func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int {
- objw.Uint32(s, ot, 0)
- r := obj.Addrel(s)
- r.Off = int32(ot)
- r.Siz = 4
+func dmethodptrOff(c rttype.Cursor, x *obj.LSym) {
+ c.WriteInt32(0)
+ r := c.Reloc()
r.Sym = x
r.Type = objabi.R_METHODOFF
- return ot + 4
}
var kinds = []int{
@@ -676,8 +664,8 @@ var (
memequalvarlen *obj.LSym
)
-// dcommontype dumps the contents of a reflect.rtype (runtime._type).
-func dcommontype(lsym *obj.LSym, t *types.Type) int {
+// dcommontype dumps the contents of a reflect.rtype (runtime._type) to c.
+func dcommontype(c rttype.Cursor, t *types.Type) {
types.CalcSize(t)
eqfunc := geneq(t)
@@ -709,10 +697,9 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
// str nameOff
// ptrToThis typeOff
// }
- ot := 0
- ot = objw.Uintptr(lsym, ot, uint64(t.Size()))
- ot = objw.Uintptr(lsym, ot, uint64(ptrdata))
- ot = objw.Uint32(lsym, ot, types.TypeHash(t))
+ c.Field("Size_").WriteUintptr(uint64(t.Size()))
+ c.Field("PtrBytes").WriteUintptr(uint64(ptrdata))
+ c.Field("Hash").WriteUint32(types.TypeHash(t))
var tflag abi.TFlag
if uncommonSize(t) != 0 {
@@ -748,7 +735,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
// this should optimize away completely
panic("Unexpected change in size of abi.TFlag")
}
- ot = objw.Uint8(lsym, ot, uint8(tflag))
+ c.Field("TFlag").WriteUint8(uint8(tflag))
// runtime (and common sense) expects alignment to be a power of two.
i := int(uint8(t.Alignment()))
@@ -759,8 +746,8 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
if i&(i-1) != 0 {
base.Fatalf("invalid alignment %d for %v", uint8(t.Alignment()), t)
}
- ot = objw.Uint8(lsym, ot, uint8(t.Alignment())) // align
- ot = objw.Uint8(lsym, ot, uint8(t.Alignment())) // fieldAlign
+ c.Field("Align_").WriteUint8(uint8(t.Alignment()))
+ c.Field("FieldAlign_").WriteUint8(uint8(t.Alignment()))
i = kinds[t.Kind()]
if types.IsDirectIface(t) {
@@ -769,26 +756,14 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
if useGCProg {
i |= objabi.KindGCProg
}
- ot = objw.Uint8(lsym, ot, uint8(i)) // kind
- if eqfunc != nil {
- ot = objw.SymPtr(lsym, ot, eqfunc, 0) // equality function
- } else {
- ot = objw.Uintptr(lsym, ot, 0) // type we can't do == with
- }
- ot = objw.SymPtr(lsym, ot, gcsym, 0) // gcdata
+ c.Field("Kind_").WriteUint8(uint8(i))
- nsym := dname(p, "", nil, exported, false)
- ot = objw.SymPtrOff(lsym, ot, nsym) // str
- // ptrToThis
- if sptr == nil {
- ot = objw.Uint32(lsym, ot, 0)
- } else if sptrWeak {
- ot = objw.SymPtrWeakOff(lsym, ot, sptr)
- } else {
- ot = objw.SymPtrOff(lsym, ot, sptr)
- }
+ c.Field("Equal").WritePtr(eqfunc)
+ c.Field("GCData").WritePtr(gcsym)
- return ot
+ nsym := dname(p, "", nil, exported, false)
+ c.Field("Str").WriteSymPtrOff(nsym, false)
+ c.Field("PtrToThis").WriteSymPtrOff(sptr, sptrWeak)
}
// TrackSym returns the symbol for tracking use of field/method f, assumed
@@ -845,11 +820,6 @@ func TypeLinksym(t *types.Type) *obj.LSym {
return lsym
}
-// Deprecated: Use TypePtrAt instead.
-func TypePtr(t *types.Type) *ir.AddrExpr {
- return TypePtrAt(base.Pos, t)
-}
-
// TypePtrAt returns an expression that evaluates to the
// *runtime._type value for t.
func TypePtrAt(pos src.XPos, t *types.Type) *ir.AddrExpr {
@@ -873,11 +843,6 @@ func ITabLsym(typ, iface *types.Type) *obj.LSym {
return lsym
}
-// Deprecated: Use ITabAddrAt instead.
-func ITabAddr(typ, iface *types.Type) *ir.AddrExpr {
- return ITabAddrAt(base.Pos, typ, iface)
-}
-
// ITabAddrAt returns an expression that evaluates to the
// *runtime.itab value for concrete type typ implementing interface
// iface.
@@ -909,7 +874,7 @@ func needkeyupdate(t *types.Type) bool {
return needkeyupdate(t.Elem())
case types.TSTRUCT:
- for _, t1 := range t.Fields().Slice() {
+ for _, t1 := range t.Fields() {
if needkeyupdate(t1.Type) {
return true
}
@@ -932,7 +897,7 @@ func hashMightPanic(t *types.Type) bool {
return hashMightPanic(t.Elem())
case types.TSTRUCT:
- for _, t1 := range t.Fields().Slice() {
+ for _, t1 := range t.Fields() {
if hashMightPanic(t1.Type) {
return true
}
@@ -963,16 +928,11 @@ func writeType(t *types.Type) *obj.LSym {
s := types.TypeSym(t)
lsym := s.Linksym()
- if s.Siggen() {
- return lsym
- }
- s.SetSiggen(true)
// special case (look for runtime below):
// when compiling package runtime,
// emit the type structures for int, float, etc.
tbase := t
-
if t.IsPtr() && t.Sym() == nil && t.Elem().Sym() != nil {
tbase = t.Elem()
}
@@ -980,6 +940,19 @@ func writeType(t *types.Type) *obj.LSym {
base.Fatalf("unresolved defined type: %v", tbase)
}
+ // This is a fake type we generated for our builtin pseudo-runtime
+ // package. We'll emit a description for the real type while
+ // compiling package runtime, so we don't need or want to emit one
+ // from this fake type.
+ if sym := tbase.Sym(); sym != nil && sym.Pkg == ir.Pkgs.Runtime {
+ return lsym
+ }
+
+ if s.Siggen() {
+ return lsym
+ }
+ s.SetSiggen(true)
+
if !NeedEmit(tbase) {
if i := typecheck.BaseTypeIndex(t); i >= 0 {
lsym.Pkg = tbase.Sym().Pkg.Prefix
@@ -994,101 +967,137 @@ func writeType(t *types.Type) *obj.LSym {
return lsym
}
- ot := 0
+ // Type layout Written by Marker
+ // +--------------------------------+ - 0
+ // | internal/abi.Type | dcommontype
+ // +--------------------------------+ - A
+ // | additional type-dependent | code in the switch below
+ // | fields, e.g. |
+ // | internal/abi.ArrayType.Len |
+ // +--------------------------------+ - B
+ // | internal/abi.UncommonType | dextratype
+ // | This section is optional, |
+ // | if type has a name or methods |
+ // +--------------------------------+ - C
+ // | variable-length data | code in the switch below
+ // | referenced by |
+ // | type-dependent fields, e.g. |
+ // | internal/abi.StructType.Fields |
+ // | dataAdd = size of this section |
+ // +--------------------------------+ - D
+ // | method list, if any | dextratype
+ // +--------------------------------+ - E
+
+ // UncommonType section is included if we have a name or a method.
+ extra := t.Sym() != nil || len(methods(t)) != 0
+
+ // Decide the underlying type of the descriptor, and remember
+ // the size we need for variable-length data.
+ var rt *types.Type
+ dataAdd := 0
switch t.Kind() {
default:
- ot = dcommontype(lsym, t)
- ot = dextratype(lsym, ot, t, 0)
+ rt = rttype.Type
+ case types.TARRAY:
+ rt = rttype.ArrayType
+ case types.TSLICE:
+ rt = rttype.SliceType
+ case types.TCHAN:
+ rt = rttype.ChanType
+ case types.TFUNC:
+ rt = rttype.FuncType
+ dataAdd = (t.NumRecvs() + t.NumParams() + t.NumResults()) * types.PtrSize
+ case types.TINTER:
+ rt = rttype.InterfaceType
+ dataAdd = len(imethods(t)) * int(rttype.IMethod.Size())
+ case types.TMAP:
+ rt = rttype.MapType
+ case types.TPTR:
+ rt = rttype.PtrType
+ // TODO: use rttype.Type when Elem() is ANY?
+ case types.TSTRUCT:
+ rt = rttype.StructType
+ dataAdd = t.NumFields() * int(rttype.StructField.Size())
+ }
+
+ // Compute offsets of each section.
+ B := rt.Size()
+ C := B
+ if extra {
+ C = B + rttype.UncommonType.Size()
+ }
+ D := C + int64(dataAdd)
+ E := D + int64(len(methods(t)))*rttype.Method.Size()
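To make the section offsets concrete, here is how they work out for a named struct type (an illustrative walk-through of the values computed above, not additional patch code):

    rt      = rttype.StructType                    // descriptor layout chosen for TSTRUCT
    extra   = true                                 // the type has a name
    dataAdd = NumFields * rttype.StructField.Size()

    B = rttype.StructType.Size()                   // end of the fixed descriptor
    C = B + rttype.UncommonType.Size()             // end of the UncommonType section
    D = C + dataAdd                                // end of the per-field StructField array
    E = D + len(methods(t)) * rttype.Method.Size() // end of the method table; objw.Global is later sized to E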
+ // Write the runtime._type
+ c := rttype.NewCursor(lsym, 0, rt)
+ if rt == rttype.Type {
+ dcommontype(c, t)
+ } else {
+ dcommontype(c.Field("Type"), t)
+ }
+
+ // Write additional type-specific data
+ // (Both the fixed-size and variable-sized sections.)
+ switch t.Kind() {
case types.TARRAY:
- // ../../../../runtime/type.go:/arrayType
+ // internal/abi.ArrayType
s1 := writeType(t.Elem())
t2 := types.NewSlice(t.Elem())
s2 := writeType(t2)
- ot = dcommontype(lsym, t)
- ot = objw.SymPtr(lsym, ot, s1, 0)
- ot = objw.SymPtr(lsym, ot, s2, 0)
- ot = objw.Uintptr(lsym, ot, uint64(t.NumElem()))
- ot = dextratype(lsym, ot, t, 0)
+ c.Field("Elem").WritePtr(s1)
+ c.Field("Slice").WritePtr(s2)
+ c.Field("Len").WriteUintptr(uint64(t.NumElem()))
case types.TSLICE:
- // ../../../../runtime/type.go:/sliceType
+ // internal/abi.SliceType
s1 := writeType(t.Elem())
- ot = dcommontype(lsym, t)
- ot = objw.SymPtr(lsym, ot, s1, 0)
- ot = dextratype(lsym, ot, t, 0)
+ c.Field("Elem").WritePtr(s1)
case types.TCHAN:
- // ../../../../runtime/type.go:/chanType
+ // internal/abi.ChanType
s1 := writeType(t.Elem())
- ot = dcommontype(lsym, t)
- ot = objw.SymPtr(lsym, ot, s1, 0)
- ot = objw.Uintptr(lsym, ot, uint64(t.ChanDir()))
- ot = dextratype(lsym, ot, t, 0)
+ c.Field("Elem").WritePtr(s1)
+ c.Field("Dir").WriteInt(int64(t.ChanDir()))
case types.TFUNC:
- for _, t1 := range t.Recvs().Fields().Slice() {
- writeType(t1.Type)
- }
- isddd := false
- for _, t1 := range t.Params().Fields().Slice() {
- isddd = t1.IsDDD()
- writeType(t1.Type)
- }
- for _, t1 := range t.Results().Fields().Slice() {
+ // internal/abi.FuncType
+ for _, t1 := range t.RecvParamsResults() {
writeType(t1.Type)
}
-
- ot = dcommontype(lsym, t)
inCount := t.NumRecvs() + t.NumParams()
outCount := t.NumResults()
- if isddd {
+ if t.IsVariadic() {
outCount |= 1 << 15
}
- ot = objw.Uint16(lsym, ot, uint16(inCount))
- ot = objw.Uint16(lsym, ot, uint16(outCount))
- if types.PtrSize == 8 {
- ot += 4 // align for *rtype
- }
- dataAdd := (inCount + t.NumResults()) * types.PtrSize
- ot = dextratype(lsym, ot, t, dataAdd)
+ c.Field("InCount").WriteUint16(uint16(inCount))
+ c.Field("OutCount").WriteUint16(uint16(outCount))
// Array of rtype pointers follows funcType.
- for _, t1 := range t.Recvs().Fields().Slice() {
- ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0)
- }
- for _, t1 := range t.Params().Fields().Slice() {
- ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0)
- }
- for _, t1 := range t.Results().Fields().Slice() {
- ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0)
+ typs := t.RecvParamsResults()
+ array := rttype.NewArrayCursor(lsym, C, types.Types[types.TUNSAFEPTR], len(typs))
+ for i, t1 := range typs {
+ array.Elem(i).WritePtr(writeType(t1.Type))
}
case types.TINTER:
+ // internal/abi.InterfaceType
m := imethods(t)
n := len(m)
for _, a := range m {
writeType(a.type_)
}
- // ../../../../runtime/type.go:/interfaceType
- ot = dcommontype(lsym, t)
-
var tpkg *types.Pkg
if t.Sym() != nil && t != types.Types[t.Kind()] && t != types.ErrorType {
tpkg = t.Sym().Pkg
}
- ot = dgopkgpath(lsym, ot, tpkg)
-
- ot = objw.SymPtr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t))
- ot = objw.Uintptr(lsym, ot, uint64(n))
- ot = objw.Uintptr(lsym, ot, uint64(n))
- dataAdd := imethodSize() * n
- ot = dextratype(lsym, ot, t, dataAdd)
+ dgopkgpath(c.Field("PkgPath"), tpkg)
+ c.Field("Methods").WriteSlice(lsym, C, int64(n), int64(n))
- for _, a := range m {
- // ../../../../runtime/type.go:/imethod
+ array := rttype.NewArrayCursor(lsym, C, rttype.IMethod, n)
+ for i, a := range m {
exported := types.IsExported(a.name.Name)
var pkg *types.Pkg
if !exported && a.name.Pkg != tpkg {
@@ -1096,39 +1105,39 @@ func writeType(t *types.Type) *obj.LSym {
}
nsym := dname(a.name.Name, "", pkg, exported, false)
- ot = objw.SymPtrOff(lsym, ot, nsym)
- ot = objw.SymPtrOff(lsym, ot, writeType(a.type_))
+ e := array.Elem(i)
+ e.Field("Name").WriteSymPtrOff(nsym, false)
+ e.Field("Typ").WriteSymPtrOff(writeType(a.type_), false)
}
- // ../../../../runtime/type.go:/mapType
case types.TMAP:
+ // internal/abi.MapType
s1 := writeType(t.Key())
s2 := writeType(t.Elem())
s3 := writeType(MapBucketType(t))
hasher := genhash(t.Key())
- ot = dcommontype(lsym, t)
- ot = objw.SymPtr(lsym, ot, s1, 0)
- ot = objw.SymPtr(lsym, ot, s2, 0)
- ot = objw.SymPtr(lsym, ot, s3, 0)
- ot = objw.SymPtr(lsym, ot, hasher, 0)
+ c.Field("Key").WritePtr(s1)
+ c.Field("Elem").WritePtr(s2)
+ c.Field("Bucket").WritePtr(s3)
+ c.Field("Hasher").WritePtr(hasher)
var flags uint32
// Note: flags must match maptype accessors in ../../../../runtime/type.go
// and maptype builder in ../../../../reflect/type.go:MapOf.
if t.Key().Size() > MAXKEYSIZE {
- ot = objw.Uint8(lsym, ot, uint8(types.PtrSize))
+ c.Field("KeySize").WriteUint8(uint8(types.PtrSize))
flags |= 1 // indirect key
} else {
- ot = objw.Uint8(lsym, ot, uint8(t.Key().Size()))
+ c.Field("KeySize").WriteUint8(uint8(t.Key().Size()))
}
if t.Elem().Size() > MAXELEMSIZE {
- ot = objw.Uint8(lsym, ot, uint8(types.PtrSize))
+ c.Field("ValueSize").WriteUint8(uint8(types.PtrSize))
flags |= 2 // indirect value
} else {
- ot = objw.Uint8(lsym, ot, uint8(t.Elem().Size()))
+ c.Field("ValueSize").WriteUint8(uint8(t.Elem().Size()))
}
- ot = objw.Uint16(lsym, ot, uint16(MapBucketType(t).Size()))
+ c.Field("BucketSize").WriteUint16(uint16(MapBucketType(t).Size()))
if types.IsReflexive(t.Key()) {
flags |= 4 // reflexive key
}
@@ -1138,8 +1147,8 @@ func writeType(t *types.Type) *obj.LSym {
if hashMightPanic(t.Key()) {
flags |= 16 // hash might panic
}
- ot = objw.Uint32(lsym, ot, flags)
- ot = dextratype(lsym, ot, t, 0)
+ c.Field("Flags").WriteUint32(flags)
+
if u := t.Underlying(); u != t {
// If t is a named map type, also keep the underlying map
// type live in the binary. This is important to make sure that
@@ -1151,25 +1160,17 @@ func writeType(t *types.Type) *obj.LSym {
}
case types.TPTR:
+ // internal/abi.PtrType
if t.Elem().Kind() == types.TANY {
- // ../../../../runtime/type.go:/UnsafePointerType
- ot = dcommontype(lsym, t)
- ot = dextratype(lsym, ot, t, 0)
-
- break
+ base.Fatalf("bad pointer base type")
}
- // ../../../../runtime/type.go:/ptrType
s1 := writeType(t.Elem())
+ c.Field("Elem").WritePtr(s1)
- ot = dcommontype(lsym, t)
- ot = objw.SymPtr(lsym, ot, s1, 0)
- ot = dextratype(lsym, ot, t, 0)
-
- // ../../../../runtime/type.go:/structType
- // for security, only the exported fields.
case types.TSTRUCT:
- fields := t.Fields().Slice()
+ // internal/abi.StructType
+ fields := t.Fields()
for _, t1 := range fields {
writeType(t1.Type)
}
@@ -1187,23 +1188,23 @@ func writeType(t *types.Type) *obj.LSym {
}
}
- ot = dcommontype(lsym, t)
- ot = dgopkgpath(lsym, ot, spkg)
- ot = objw.SymPtr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t))
- ot = objw.Uintptr(lsym, ot, uint64(len(fields)))
- ot = objw.Uintptr(lsym, ot, uint64(len(fields)))
+ dgopkgpath(c.Field("PkgPath"), spkg)
+ c.Field("Fields").WriteSlice(lsym, C, int64(len(fields)), int64(len(fields)))
- dataAdd := len(fields) * structfieldSize()
- ot = dextratype(lsym, ot, t, dataAdd)
-
- for _, f := range fields {
- // ../../../../runtime/type.go:/structField
- ot = dnameField(lsym, ot, spkg, f)
- ot = objw.SymPtr(lsym, ot, writeType(f.Type), 0)
- ot = objw.Uintptr(lsym, ot, uint64(f.Offset))
+ array := rttype.NewArrayCursor(lsym, C, rttype.StructField, len(fields))
+ for i, f := range fields {
+ e := array.Elem(i)
+ dnameField(e.Field("Name"), spkg, f)
+ e.Field("Typ").WritePtr(writeType(f.Type))
+ e.Field("Offset").WriteUintptr(uint64(f.Offset))
}
}
+ // Write the extra info, if any.
+ if extra {
+ dextratype(lsym, B, t, dataAdd)
+ }
+
// Note: DUPOK is required to ensure that we don't end up with more
// than one type descriptor for a given type, if the type descriptor
// can be defined in multiple packages, that is, unnamed types,
@@ -1213,8 +1214,7 @@ func writeType(t *types.Type) *obj.LSym {
dupok = obj.DUPOK
}
- ot = dextratypeData(lsym, ot, t)
- objw.Global(lsym, int32(ot), int16(dupok|obj.RODATA))
+ objw.Global(lsym, int32(E), int16(dupok|obj.RODATA))
// The linker will leave a table of all the typelinks for
// types in the binary, so the runtime can find them.
@@ -1224,7 +1224,7 @@ func writeType(t *types.Type) *obj.LSym {
keep := base.Ctxt.Flag_dynlink
if !keep && t.Sym() == nil {
// For an unnamed type, we only need the link if the type can
- // be created at run time by reflect.PtrTo and similar
+ // be created at run time by reflect.PointerTo and similar
// functions. If the type exists in the program, those
// functions must return the existing type structure rather
// than creating a new one.
@@ -1280,7 +1280,9 @@ func WriteRuntimeTypes() {
}
signatslice = signatslice[len(signats):]
}
+}
+func WriteGCSymbols() {
// Emit GC data symbols.
gcsyms := make([]typeAndStr, 0, len(gcsymset))
for t := range gcsymset {
@@ -1305,7 +1307,7 @@ func writeITab(lsym *obj.LSym, typ, iface *types.Type, allowNonImplement bool) {
base.Fatalf("writeITab(%v, %v)", typ, iface)
}
- sigs := iface.AllMethods().Slice()
+ sigs := iface.AllMethods()
entries := make([]*obj.LSym, 0, len(sigs))
// both sigs and methods are sorted by name,
@@ -1352,58 +1354,52 @@ func writeITab(lsym *obj.LSym, typ, iface *types.Type, allowNonImplement bool) {
lsym.Set(obj.AttrContentAddressable, true)
}
-func WriteTabs() {
- // process ptabs
- if types.LocalPkg.Name == "main" && len(ptabs) > 0 {
- ot := 0
- s := base.Ctxt.Lookup("go:plugin.tabs")
- for _, p := range ptabs {
- // Dump ptab symbol into go.pluginsym package.
- //
- // type ptab struct {
- // name nameOff
- // typ typeOff // pointer to symbol
- // }
- nsym := dname(p.Sym().Name, "", nil, true, false)
- t := p.Type()
- if p.Class != ir.PFUNC {
- t = types.NewPtr(t)
- }
- tsym := writeType(t)
- ot = objw.SymPtrOff(s, ot, nsym)
- ot = objw.SymPtrOff(s, ot, tsym)
- // Plugin exports symbols as interfaces. Mark their types
- // as UsedInIface.
- tsym.Set(obj.AttrUsedInIface, true)
- }
- objw.Global(s, int32(ot), int16(obj.RODATA))
+func WritePluginTable() {
+ ptabs := typecheck.Target.PluginExports
+ if len(ptabs) == 0 {
+ return
+ }
- ot = 0
- s = base.Ctxt.Lookup("go:plugin.exports")
- for _, p := range ptabs {
- ot = objw.SymPtr(s, ot, p.Linksym(), 0)
+ lsym := base.Ctxt.Lookup("go:plugin.tabs")
+ ot := 0
+ for _, p := range ptabs {
+ // Dump ptab symbol into go.pluginsym package.
+ //
+ // type ptab struct {
+ // name nameOff
+ // typ typeOff // pointer to symbol
+ // }
+ nsym := dname(p.Sym().Name, "", nil, true, false)
+ t := p.Type()
+ if p.Class != ir.PFUNC {
+ t = types.NewPtr(t)
}
- objw.Global(s, int32(ot), int16(obj.RODATA))
+ tsym := writeType(t)
+ ot = objw.SymPtrOff(lsym, ot, nsym)
+ ot = objw.SymPtrOff(lsym, ot, tsym)
+ // Plugin exports symbols as interfaces. Mark their types
+ // as UsedInIface.
+ tsym.Set(obj.AttrUsedInIface, true)
}
-}
+ objw.Global(lsym, int32(ot), int16(obj.RODATA))
-func WriteImportStrings() {
- // generate import strings for imported packages
- for _, p := range types.ImportedPkgList() {
- dimportpath(p)
+ lsym = base.Ctxt.Lookup("go:plugin.exports")
+ ot = 0
+ for _, p := range ptabs {
+ ot = objw.SymPtr(lsym, ot, p.Linksym(), 0)
}
+ objw.Global(lsym, int32(ot), int16(obj.RODATA))
}
// writtenByWriteBasicTypes reports whether typ is written by WriteBasicTypes.
// WriteBasicTypes always writes pointer types; any pointer has been stripped off typ already.
func writtenByWriteBasicTypes(typ *types.Type) bool {
if typ.Sym() == nil && typ.Kind() == types.TFUNC {
- f := typ.FuncType()
// func(error) string
- if f.Receiver.NumFields() == 0 &&
- f.Params.NumFields() == 1 && f.Results.NumFields() == 1 &&
- f.Params.FieldType(0) == types.ErrorType &&
- f.Results.FieldType(0) == types.Types[types.TSTRING] {
+ if typ.NumRecvs() == 0 &&
+ typ.NumParams() == 1 && typ.NumResults() == 1 &&
+ typ.Param(0).Type == types.ErrorType &&
+ typ.Result(0).Type == types.Types[types.TSTRING] {
return true
}
}
@@ -1431,45 +1427,32 @@ func WriteBasicTypes() {
// another possible choice would be package main,
// but using runtime means fewer copies in object files.
// The code here needs to be in sync with writtenByWriteBasicTypes above.
- if base.Ctxt.Pkgpath == "runtime" {
- // Note: always write NewPtr(t) because NeedEmit's caller strips the pointer.
- var list []*types.Type
- for i := types.Kind(1); i <= types.TBOOL; i++ {
- list = append(list, types.Types[i])
- }
- list = append(list,
- types.Types[types.TSTRING],
- types.Types[types.TUNSAFEPTR],
- types.AnyType,
- types.ErrorType)
- for _, t := range list {
- writeType(types.NewPtr(t))
- writeType(types.NewPtr(types.NewSlice(t)))
- }
-
- // emit type for func(error) string,
- // which is the type of an auto-generated wrapper.
- writeType(types.NewPtr(types.NewSignature(nil, []*types.Field{
- types.NewField(base.Pos, nil, types.ErrorType),
- }, []*types.Field{
- types.NewField(base.Pos, nil, types.Types[types.TSTRING]),
- })))
-
- // add paths for runtime and main, which 6l imports implicitly.
- dimportpath(ir.Pkgs.Runtime)
-
- if base.Flag.Race {
- dimportpath(types.NewPkg("runtime/race", ""))
- }
- if base.Flag.MSan {
- dimportpath(types.NewPkg("runtime/msan", ""))
- }
- if base.Flag.ASan {
- dimportpath(types.NewPkg("runtime/asan", ""))
- }
+ if base.Ctxt.Pkgpath != "runtime" {
+ return
+ }
- dimportpath(types.NewPkg("main", ""))
+ // Note: always write NewPtr(t) because NeedEmit's caller strips the pointer.
+ var list []*types.Type
+ for i := types.Kind(1); i <= types.TBOOL; i++ {
+ list = append(list, types.Types[i])
+ }
+ list = append(list,
+ types.Types[types.TSTRING],
+ types.Types[types.TUNSAFEPTR],
+ types.AnyType,
+ types.ErrorType)
+ for _, t := range list {
+ writeType(types.NewPtr(t))
+ writeType(types.NewPtr(types.NewSlice(t)))
}
+
+ // emit type for func(error) string,
+ // which is the type of an auto-generated wrapper.
+ writeType(types.NewPtr(types.NewSignature(nil, []*types.Field{
+ types.NewField(base.Pos, nil, types.ErrorType),
+ }, []*types.Field{
+ types.NewField(base.Pos, nil, types.Types[types.TSTRING]),
+ })))
}
type typeAndStr struct {
@@ -1509,8 +1492,8 @@ func (a typesByString) Less(i, j int) bool {
// will be equal for the above checks, but different in DWARF output.
// Sort by source position to ensure deterministic order.
// See issues 27013 and 30202.
- if a[i].t.Kind() == types.TINTER && a[i].t.AllMethods().Len() > 0 {
- return a[i].t.AllMethods().Index(0).Pos.Before(a[j].t.AllMethods().Index(0).Pos)
+ if a[i].t.Kind() == types.TINTER && len(a[i].t.AllMethods()) > 0 {
+ return a[i].t.AllMethods()[0].Pos.Before(a[j].t.AllMethods()[0].Pos)
}
return false
}
@@ -1734,7 +1717,7 @@ func (p *gcProg) emit(t *types.Type, offset int64) {
p.w.Repeat(elem.Size()/int64(types.PtrSize), count-1)
case types.TSTRUCT:
- for _, t1 := range t.Fields().Slice() {
+ for _, t1 := range t.Fields() {
p.emit(t1.Type, offset+t1.Offset)
}
}
@@ -1754,30 +1737,6 @@ func ZeroAddr(size int64) ir.Node {
return typecheck.Expr(typecheck.NodAddr(x))
}
-func CollectPTabs() {
- if !base.Ctxt.Flag_dynlink || types.LocalPkg.Name != "main" {
- return
- }
- for _, exportn := range typecheck.Target.Exports {
- s := exportn.Sym()
- nn := ir.AsNode(s.Def)
- if nn == nil {
- continue
- }
- if nn.Op() != ir.ONAME {
- continue
- }
- n := nn.(*ir.Name)
- if !types.IsExported(s.Name) {
- continue
- }
- if s.Pkg.Name != "main" {
- continue
- }
- ptabs = append(ptabs, n)
- }
-}
-
// NeedEmit reports whether typ is a type that we need to emit code
// for (e.g., runtime type descriptors, method wrappers).
func NeedEmit(typ *types.Type) bool {
@@ -1893,7 +1852,7 @@ func MarkUsedIfaceMethod(n *ir.CallExpr) {
if ir.CurFunc.LSym == nil {
return
}
- dot := n.X.(*ir.SelectorExpr)
+ dot := n.Fun.(*ir.SelectorExpr)
ityp := dot.X.Type()
if ityp.HasShape() {
// Here we're calling a method on a generic interface. Something like:
@@ -1916,17 +1875,8 @@ func MarkUsedIfaceMethod(n *ir.CallExpr) {
// some sort of fuzzy shape matching. For now, only use the name
// of the method for matching.
r := obj.Addrel(ir.CurFunc.LSym)
- // We use a separate symbol just to tell the linker the method name.
- // (The symbol itself is not needed in the final binary. Do not use
- // staticdata.StringSym, which creates a content addessable symbol,
- // which may have trailing zero bytes. This symbol doesn't need to
- // be deduplicated anyway.)
- name := dot.Sel.Name
- var nameSym obj.LSym
- nameSym.WriteString(base.Ctxt, 0, len(name), name)
- objw.Global(&nameSym, int32(len(name)), obj.RODATA)
- r.Sym = &nameSym
- r.Type = objabi.R_USEGENERICIFACEMETHOD
+ r.Sym = staticdata.StringSymNoCommon(dot.Sel.Name)
+ r.Type = objabi.R_USENAMEDMETHOD
return
}
diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go
index 2eb1e7ffa0..22338188e5 100644
--- a/src/cmd/compile/internal/riscv64/ssa.go
+++ b/src/cmd/compile/internal/riscv64/ssa.go
@@ -193,7 +193,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// input args need no code
case ssa.OpPhi:
ssagen.CheckLoweredPhi(v)
- case ssa.OpCopy, ssa.OpRISCV64MOVconvert, ssa.OpRISCV64MOVDreg:
+ case ssa.OpCopy, ssa.OpRISCV64MOVDreg:
if v.Type.IsMemory() {
return
}
@@ -278,7 +278,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = rd
case ssa.OpRISCV64ADD, ssa.OpRISCV64SUB, ssa.OpRISCV64SUBW, ssa.OpRISCV64XOR, ssa.OpRISCV64OR, ssa.OpRISCV64AND,
- ssa.OpRISCV64SLL, ssa.OpRISCV64SRA, ssa.OpRISCV64SRL,
+ ssa.OpRISCV64SLL, ssa.OpRISCV64SRA, ssa.OpRISCV64SRAW, ssa.OpRISCV64SRL, ssa.OpRISCV64SRLW,
ssa.OpRISCV64SLT, ssa.OpRISCV64SLTU, ssa.OpRISCV64MUL, ssa.OpRISCV64MULW, ssa.OpRISCV64MULH,
ssa.OpRISCV64MULHU, ssa.OpRISCV64DIV, ssa.OpRISCV64DIVU, ssa.OpRISCV64DIVW,
ssa.OpRISCV64DIVUW, ssa.OpRISCV64REM, ssa.OpRISCV64REMU, ssa.OpRISCV64REMW,
@@ -332,7 +332,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p2.From.Reg = v.Reg1()
p2.To.Type = obj.TYPE_REG
p2.To.Reg = v.Reg1()
- case ssa.OpRISCV64FMADDD, ssa.OpRISCV64FMSUBD, ssa.OpRISCV64FNMADDD, ssa.OpRISCV64FNMSUBD:
+ case ssa.OpRISCV64FMADDD, ssa.OpRISCV64FMSUBD, ssa.OpRISCV64FNMADDD, ssa.OpRISCV64FNMSUBD,
+ ssa.OpRISCV64FMADDS, ssa.OpRISCV64FMSUBS, ssa.OpRISCV64FNMADDS, ssa.OpRISCV64FNMSUBS:
r := v.Reg()
r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg()
@@ -355,7 +356,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpRISCV64ADDI, ssa.OpRISCV64ADDIW, ssa.OpRISCV64XORI, ssa.OpRISCV64ORI, ssa.OpRISCV64ANDI,
- ssa.OpRISCV64SLLI, ssa.OpRISCV64SRAI, ssa.OpRISCV64SRLI, ssa.OpRISCV64SLTI,
+ ssa.OpRISCV64SLLI, ssa.OpRISCV64SRAI, ssa.OpRISCV64SRAIW, ssa.OpRISCV64SRLI, ssa.OpRISCV64SRLIW, ssa.OpRISCV64SLTI,
ssa.OpRISCV64SLTIU:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
@@ -694,6 +695,13 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Sym = ir.Syms.Duffcopy
p.To.Offset = v.AuxInt
+ case ssa.OpRISCV64LoweredPubBarrier:
+ // FENCE
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpRISCV64LoweredRound32F, ssa.OpRISCV64LoweredRound64F:
+ // input is already rounded
+
case ssa.OpClobber, ssa.OpClobberReg:
// TODO: implement for clobberdead experiment. Nop is ok for now.
diff --git a/src/cmd/compile/internal/rttype/rttype.go b/src/cmd/compile/internal/rttype/rttype.go
new file mode 100644
index 0000000000..cdc399d9cf
--- /dev/null
+++ b/src/cmd/compile/internal/rttype/rttype.go
@@ -0,0 +1,283 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package rttype allows the compiler to share type information with
+// the runtime. The shared type information is stored in
+// internal/abi. This package translates those types from the host
+// machine on which the compiler runs to the target machine on which
+// the compiled program will run. In particular, this package handles
+// layout differences between e.g. a 64 bit compiler and 32 bit
+// target.
+package rttype
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "internal/abi"
+ "reflect"
+)
+
+// The type structures shared with the runtime.
+var Type *types.Type
+
+var ArrayType *types.Type
+var ChanType *types.Type
+var FuncType *types.Type
+var InterfaceType *types.Type
+var MapType *types.Type
+var PtrType *types.Type
+var SliceType *types.Type
+var StructType *types.Type
+
+// Types that are parts of the types above.
+var IMethod *types.Type
+var Method *types.Type
+var StructField *types.Type
+var UncommonType *types.Type
+
+// Type switches and asserts
+var InterfaceSwitch *types.Type
+var TypeAssert *types.Type
+
+func Init() {
+ // Note: this has to be called explicitly instead of being
+ // an init function so it runs after the types package has
+ // been properly initialized.
+ Type = fromReflect(reflect.TypeOf(abi.Type{}))
+ ArrayType = fromReflect(reflect.TypeOf(abi.ArrayType{}))
+ ChanType = fromReflect(reflect.TypeOf(abi.ChanType{}))
+ FuncType = fromReflect(reflect.TypeOf(abi.FuncType{}))
+ InterfaceType = fromReflect(reflect.TypeOf(abi.InterfaceType{}))
+ MapType = fromReflect(reflect.TypeOf(abi.MapType{}))
+ PtrType = fromReflect(reflect.TypeOf(abi.PtrType{}))
+ SliceType = fromReflect(reflect.TypeOf(abi.SliceType{}))
+ StructType = fromReflect(reflect.TypeOf(abi.StructType{}))
+
+ IMethod = fromReflect(reflect.TypeOf(abi.Imethod{}))
+ Method = fromReflect(reflect.TypeOf(abi.Method{}))
+ StructField = fromReflect(reflect.TypeOf(abi.StructField{}))
+ UncommonType = fromReflect(reflect.TypeOf(abi.UncommonType{}))
+
+ InterfaceSwitch = fromReflect(reflect.TypeOf(abi.InterfaceSwitch{}))
+ TypeAssert = fromReflect(reflect.TypeOf(abi.TypeAssert{}))
+
+ // Make sure abi functions are correct. These functions are used
+ // by the linker which doesn't have the ability to do type layout,
+ // so we check the functions it uses here.
+ ptrSize := types.PtrSize
+ if got, want := int64(abi.CommonSize(ptrSize)), Type.Size(); got != want {
+ base.Fatalf("abi.CommonSize() == %d, want %d", got, want)
+ }
+ if got, want := int64(abi.StructFieldSize(ptrSize)), StructField.Size(); got != want {
+ base.Fatalf("abi.StructFieldSize() == %d, want %d", got, want)
+ }
+ if got, want := int64(abi.UncommonSize()), UncommonType.Size(); got != want {
+ base.Fatalf("abi.UncommonSize() == %d, want %d", got, want)
+ }
+ if got, want := int64(abi.TFlagOff(ptrSize)), Type.OffsetOf("TFlag"); got != want {
+ base.Fatalf("abi.TFlagOff() == %d, want %d", got, want)
+ }
+}
+
+// fromReflect translates from a host type to the equivalent target type.
+func fromReflect(rt reflect.Type) *types.Type {
+ t := reflectToType(rt)
+ types.CalcSize(t)
+ return t
+}
+
+// reflectToType converts from a reflect.Type (which is a compiler
+// host type) to a *types.Type, which is a target type. The result
+// must be CalcSize'd before using.
+func reflectToType(rt reflect.Type) *types.Type {
+ switch rt.Kind() {
+ case reflect.Bool:
+ return types.Types[types.TBOOL]
+ case reflect.Int:
+ return types.Types[types.TINT]
+ case reflect.Int32:
+ return types.Types[types.TINT32]
+ case reflect.Uint8:
+ return types.Types[types.TUINT8]
+ case reflect.Uint16:
+ return types.Types[types.TUINT16]
+ case reflect.Uint32:
+ return types.Types[types.TUINT32]
+ case reflect.Uintptr:
+ return types.Types[types.TUINTPTR]
+ case reflect.Ptr, reflect.Func, reflect.UnsafePointer:
+ // TODO: there's no mechanism to distinguish different pointer types,
+ // so we treat them all as unsafe.Pointer.
+ return types.Types[types.TUNSAFEPTR]
+ case reflect.Slice:
+ return types.NewSlice(reflectToType(rt.Elem()))
+ case reflect.Array:
+ return types.NewArray(reflectToType(rt.Elem()), int64(rt.Len()))
+ case reflect.Struct:
+ fields := make([]*types.Field, rt.NumField())
+ for i := 0; i < rt.NumField(); i++ {
+ f := rt.Field(i)
+ ft := reflectToType(f.Type)
+ fields[i] = &types.Field{Sym: &types.Sym{Name: f.Name}, Type: ft}
+ }
+ return types.NewStruct(fields)
+ default:
+ base.Fatalf("unhandled kind %s", rt.Kind())
+ return nil
+ }
+}
+
+// A Cursor represents a typed location inside a static variable where we
+// are going to write.
+type Cursor struct {
+ lsym *obj.LSym
+ offset int64
+ typ *types.Type
+}
+
+// NewCursor returns a cursor starting at lsym+off and having type t.
+func NewCursor(lsym *obj.LSym, off int64, t *types.Type) Cursor {
+ return Cursor{lsym: lsym, offset: off, typ: t}
+}
+
+// WritePtr writes a pointer "target" to the component at the location specified by c.
+func (c Cursor) WritePtr(target *obj.LSym) {
+ if c.typ.Kind() != types.TUNSAFEPTR {
+ base.Fatalf("can't write ptr, it has kind %s", c.typ.Kind())
+ }
+ if target == nil {
+ objw.Uintptr(c.lsym, int(c.offset), 0)
+ } else {
+ objw.SymPtr(c.lsym, int(c.offset), target, 0)
+ }
+}
+func (c Cursor) WriteUintptr(val uint64) {
+ if c.typ.Kind() != types.TUINTPTR {
+ base.Fatalf("can't write uintptr, it has kind %s", c.typ.Kind())
+ }
+ objw.Uintptr(c.lsym, int(c.offset), val)
+}
+func (c Cursor) WriteUint32(val uint32) {
+ if c.typ.Kind() != types.TUINT32 {
+ base.Fatalf("can't write uint32, it has kind %s", c.typ.Kind())
+ }
+ objw.Uint32(c.lsym, int(c.offset), val)
+}
+func (c Cursor) WriteUint16(val uint16) {
+ if c.typ.Kind() != types.TUINT16 {
+ base.Fatalf("can't write uint16, it has kind %s", c.typ.Kind())
+ }
+ objw.Uint16(c.lsym, int(c.offset), val)
+}
+func (c Cursor) WriteUint8(val uint8) {
+ if c.typ.Kind() != types.TUINT8 {
+ base.Fatalf("can't write uint8, it has kind %s", c.typ.Kind())
+ }
+ objw.Uint8(c.lsym, int(c.offset), val)
+}
+func (c Cursor) WriteInt(val int64) {
+ if c.typ.Kind() != types.TINT {
+ base.Fatalf("can't write int, it has kind %s", c.typ.Kind())
+ }
+ objw.Uintptr(c.lsym, int(c.offset), uint64(val))
+}
+func (c Cursor) WriteInt32(val int32) {
+ if c.typ.Kind() != types.TINT32 {
+ base.Fatalf("can't write int32, it has kind %s", c.typ.Kind())
+ }
+ objw.Uint32(c.lsym, int(c.offset), uint32(val))
+}
+func (c Cursor) WriteBool(val bool) {
+ if c.typ.Kind() != types.TBOOL {
+ base.Fatalf("can't write bool, it has kind %s", c.typ.Kind())
+ }
+ objw.Bool(c.lsym, int(c.offset), val)
+}
+
+// WriteSymPtrOff writes a "pointer" to the given symbol. The symbol
+// is encoded as a uint32 offset from the start of the section.
+func (c Cursor) WriteSymPtrOff(target *obj.LSym, weak bool) {
+ if c.typ.Kind() != types.TINT32 && c.typ.Kind() != types.TUINT32 {
+ base.Fatalf("can't write SymPtr, it has kind %s", c.typ.Kind())
+ }
+ if target == nil {
+ objw.Uint32(c.lsym, int(c.offset), 0)
+ } else if weak {
+ objw.SymPtrWeakOff(c.lsym, int(c.offset), target)
+ } else {
+ objw.SymPtrOff(c.lsym, int(c.offset), target)
+ }
+}
+
+// WriteSlice writes a slice header to c. The pointer is target+off, the len and cap fields are given.
+func (c Cursor) WriteSlice(target *obj.LSym, off, len, cap int64) {
+ if c.typ.Kind() != types.TSLICE {
+ base.Fatalf("can't write slice, it has kind %s", c.typ.Kind())
+ }
+ objw.SymPtr(c.lsym, int(c.offset), target, int(off))
+ objw.Uintptr(c.lsym, int(c.offset)+types.PtrSize, uint64(len))
+ objw.Uintptr(c.lsym, int(c.offset)+2*types.PtrSize, uint64(cap))
+ // TODO: ability to switch len&cap. Maybe not needed here, as every caller
+ // passes the same thing for both?
+ if len != cap {
+ base.Fatalf("len != cap (%d != %d)", len, cap)
+ }
+}
+
+// Reloc adds a relocation from the current cursor position.
+// Reloc fills in Off and Siz fields. Caller should fill in the rest (Type, others).
+func (c Cursor) Reloc() *obj.Reloc {
+ r := obj.Addrel(c.lsym)
+ r.Off = int32(c.offset)
+ r.Siz = uint8(c.typ.Size())
+ return r
+}
+
+// Field selects the field with the given name from the struct pointed to by c.
+func (c Cursor) Field(name string) Cursor {
+ if c.typ.Kind() != types.TSTRUCT {
+ base.Fatalf("can't call Field on non-struct %v", c.typ)
+ }
+ for _, f := range c.typ.Fields() {
+ if f.Sym.Name == name {
+ return Cursor{lsym: c.lsym, offset: c.offset + f.Offset, typ: f.Type}
+ }
+ }
+ base.Fatalf("couldn't find field %s in %v", name, c.typ)
+ return Cursor{}
+}
+
+type ArrayCursor struct {
+ c Cursor // cursor pointing at first element
+ n int // number of elements
+}
+
+// NewArrayCursor returns a cursor starting at lsym+off and having n copies of type t.
+func NewArrayCursor(lsym *obj.LSym, off int64, t *types.Type, n int) ArrayCursor {
+ return ArrayCursor{
+ c: NewCursor(lsym, off, t),
+ n: n,
+ }
+}
+
+// Elem selects element i of the array pointed to by c.
+func (a ArrayCursor) Elem(i int) Cursor {
+ if i < 0 || i >= a.n {
+ base.Fatalf("element index %d out of range [0:%d]", i, a.n)
+ }
+ return Cursor{lsym: a.c.lsym, offset: a.c.offset + int64(i)*a.c.typ.Size(), typ: a.c.typ}
+}
+
+// ModifyArray converts a cursor pointing at a type [k]T to a cursor pointing
+// at a type [n]T.
+// Also returns the size delta, aka (n-k)*sizeof(T).
+func (c Cursor) ModifyArray(n int) (ArrayCursor, int64) {
+ if c.typ.Kind() != types.TARRAY {
+ base.Fatalf("can't call ModifyArray on non-array %v", c.typ)
+ }
+ k := c.typ.NumElem()
+ return ArrayCursor{c: Cursor{lsym: c.lsym, offset: c.offset, typ: c.typ.Elem()}, n: n}, (int64(n) - k) * c.typ.Elem().Size()
+}
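For orientation, a minimal sketch of how the new Cursor API is driven from reflectdata (it mirrors the dextratype call site earlier in this change; lsym, off, dataAdd, methods, xcount, and nameSym are placeholder names, not identifiers from the patch):

    // Lay out an abi.UncommonType header at byte offset off of lsym;
    // each Write* call checks that the targeted field has the expected kind.
    c := rttype.NewCursor(lsym, off, rttype.UncommonType)
    c.Field("Mcount").WriteUint16(uint16(len(methods)))
    c.Field("Xcount").WriteUint16(uint16(xcount))
    c.Field("Moff").WriteUint32(uint32(dataAdd))

    // The method table that follows is addressed element by element.
    arr := rttype.NewArrayCursor(lsym, off+int64(dataAdd), rttype.Method, len(methods))
    e := arr.Elem(0)
    e.Field("Name").WriteSymPtrOff(nameSym, false)

Because the field offsets come from types.CalcSize on the target's layout rather than from host-side unsafe.Offsetof, the same writer code works when a 64-bit compiler targets a 32-bit platform.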
diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
index 5f9b85fc41..aac6873d28 100644
--- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
@@ -82,8 +82,8 @@
(Ctz32 x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
(Ctz64 <t> x) && buildcfg.GOAMD64 < 3 => (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
(Ctz32 x) && buildcfg.GOAMD64 < 3 => (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
-(Ctz16 x) => (BSFL (BTSLconst <typ.UInt32> [16] x))
-(Ctz8 x) => (BSFL (BTSLconst <typ.UInt32> [ 8] x))
+(Ctz16 x) => (BSFL (ORLconst <typ.UInt32> [1<<16] x))
+(Ctz8 x) => (BSFL (ORLconst <typ.UInt32> [1<<8 ] x))
(Ctz64NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTQ x)
(Ctz32NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
@@ -172,6 +172,20 @@
(Round(32|64)F ...) => (Copy ...)
+// Floating-point min is tricky, as the hardware op isn't right for various special
+// cases (-0 and NaN). We use two hardware ops organized just right to make the
+// result come out how we want it. See https://github.com/golang/go/issues/59488#issuecomment-1553493207
+// (although that comment isn't exactly right, as the value overwritten is not simulated correctly).
+// t1 = MINSD x, y => incorrect if x==NaN or x==-0,y==+0
+// t2 = MINSD t1, x => fixes x==NaN case
+// res = POR t1, t2 => fixes x==-0,y==+0 case
+// Note that this trick depends on the special property that (NaN OR x) produces a NaN (although
+// it might not produce the same NaN as the input).
+(Min(64|32)F <t> x y) => (POR (MINS(D|S) <t> (MINS(D|S) <t> x y) x) (MINS(D|S) <t> x y))
+// Floating-point max is even trickier. Punt to using min instead.
+// max(x,y) == -min(-x,-y)
+(Max(64|32)F <t> x y) => (Neg(64|32)F <t> (Min(64|32)F <t> (Neg(64|32)F <t> x) (Neg(64|32)F <t> y)))
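Plugging numbers into the identity behind that last rule (illustrative values only):

    max(3, -5)  = -min(-3, 5) = -(-3) = 3
    max(-2, -7) = -min(2, 7)  = -(2)  = -2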
+
(CvtBoolToUint8 ...) => (Copy ...)
// Lowering shifts
@@ -289,10 +303,13 @@
(Move [10] dst src mem) =>
(MOVWstore [8] dst (MOVWload [8] src mem)
(MOVQstore dst (MOVQload src mem) mem))
+(Move [11] dst src mem) =>
+ (MOVLstore [7] dst (MOVLload [7] src mem)
+ (MOVQstore dst (MOVQload src mem) mem))
(Move [12] dst src mem) =>
(MOVLstore [8] dst (MOVLload [8] src mem)
(MOVQstore dst (MOVQload src mem) mem))
-(Move [s] dst src mem) && s == 11 || s >= 13 && s <= 15 =>
+(Move [s] dst src mem) && s >= 13 && s <= 15 =>
(MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem)
(MOVQstore dst (MOVQload src mem) mem))
@@ -366,7 +383,23 @@
(MOVQstoreconst [makeValAndOff(0,8)] destptr
(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))))
-(Zero [s] destptr mem) && s > 8 && s < 16 && config.useSSE =>
+(Zero [9] destptr mem) && config.useSSE =>
+ (MOVBstoreconst [makeValAndOff(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+
+(Zero [10] destptr mem) && config.useSSE =>
+ (MOVWstoreconst [makeValAndOff(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+
+(Zero [11] destptr mem) && config.useSSE =>
+ (MOVLstoreconst [makeValAndOff(0,7)] destptr
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+
+(Zero [12] destptr mem) && config.useSSE =>
+ (MOVLstoreconst [makeValAndOff(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+
+(Zero [s] destptr mem) && s > 12 && s < 16 && config.useSSE =>
(MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr
(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
@@ -645,29 +678,16 @@
// Recognize bit setting (a |= 1<<b) and toggling (a ^= 1<<b)
(OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTS(Q|L) x y)
(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y)
-
-// Convert ORconst into BTS, if the code gets smaller, with boundary being
-// (ORL $40,AX is 3 bytes, ORL $80,AX is 6 bytes).
-((ORQ|XORQ)const [c] x) && isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
- => (BT(S|C)Qconst [int8(log32(c))] x)
-((ORL|XORL)const [c] x) && isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
- => (BT(S|C)Lconst [int8(log32(c))] x)
-((ORQ|XORQ) (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 128
- => (BT(S|C)Qconst [int8(log64(c))] x)
-((ORL|XORL) (MOVLconst [c]) x) && isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
- => (BT(S|C)Lconst [int8(log32(c))] x)
+// Note: only convert OR/XOR to BTS/BTC if the constant wouldn't fit in
+// the constant field of the OR/XOR instruction. See issue 61694.
+((OR|XOR)Q (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 1<<31 => (BT(S|C)Qconst [int8(log64(c))] x)
// Recognize bit clearing: a &^= 1<<b
(AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) => (BTR(Q|L) x y)
(ANDN(Q|L) x (SHL(Q|L) (MOV(Q|L)const [1]) y)) => (BTR(Q|L) x y)
-(ANDQconst [c] x) && isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128
- => (BTRQconst [int8(log32(^c))] x)
-(ANDLconst [c] x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
- => (BTRLconst [int8(log32(^c))] x)
-(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128
- => (BTRQconst [int8(log64(^c))] x)
-(ANDL (MOVLconst [c]) x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
- => (BTRLconst [int8(log32(^c))] x)
+// Note: only convert AND to BTR if the constant wouldn't fit in
+// the constant field of the AND instruction. See issue 61694.
+(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31 => (BTRQconst [int8(log64(^c))] x)
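For reference, hedged examples of the Go-level shapes these rules still target; per the issue 61694 notes, a constant mask with bit index below 31 fits the OR/XOR/AND immediate field anyway, so only indexes 31-63 are turned into BT*Qconst (the function names are illustrative):

package main

import "fmt"

// Variable bit index: still recognized via the (SHL (MOVQconst [1]) y)
// patterns above and lowered to BTSQ/BTCQ/BTRQ.
func setBit(a uint64, b uint) uint64    { return a | 1<<b }
func toggleBit(a uint64, b uint) uint64 { return a ^ 1<<b }
func clearBit(a uint64, b uint) uint64  { return a &^ (1 << b) }

// Constant bit index: only masks of at least 1<<31 become BT*Qconst now,
// because anything smaller fits the OR/XOR/AND immediate field anyway.
func setHigh(a uint64) uint64 { return a | 1<<40 } // candidate for BTSQconst [40]
func setLow(a uint64) uint64  { return a | 1<<10 } // stays an ordinary OR immediate

func main() {
	fmt.Printf("%#x %#x\n", setHigh(0), setLow(0)) // 0x10000000000 0x400
}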
// Special-case bit patterns on first/last bit.
// generic.rules changes ANDs of high-part/low-part masks into a couple of shifts,
@@ -681,9 +701,9 @@
// Special case resetting first/last bit
(SHL(L|Q)const [1] (SHR(L|Q)const [1] x))
- => (BTR(L|Q)const [0] x)
+ => (AND(L|Q)const [-2] x)
(SHRLconst [1] (SHLLconst [1] x))
- => (BTRLconst [31] x)
+ => (ANDLconst [0x7fffffff] x)
(SHRQconst [1] (SHLQconst [1] x))
=> (BTRQconst [63] x)
@@ -717,10 +737,10 @@
=> (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)
// Fold combinations of bit ops on same bit. An example is math.Copysign(c,-1)
-(BTS(Q|L)const [c] (BTR(Q|L)const [c] x)) => (BTS(Q|L)const [c] x)
-(BTS(Q|L)const [c] (BTC(Q|L)const [c] x)) => (BTS(Q|L)const [c] x)
-(BTR(Q|L)const [c] (BTS(Q|L)const [c] x)) => (BTR(Q|L)const [c] x)
-(BTR(Q|L)const [c] (BTC(Q|L)const [c] x)) => (BTR(Q|L)const [c] x)
+(BTSQconst [c] (BTRQconst [c] x)) => (BTSQconst [c] x)
+(BTSQconst [c] (BTCQconst [c] x)) => (BTSQconst [c] x)
+(BTRQconst [c] (BTSQconst [c] x)) => (BTRQconst [c] x)
+(BTRQconst [c] (BTCQconst [c] x)) => (BTRQconst [c] x)
// Fold boolean negation into SETcc.
(XORLconst [1] (SETNE x)) => (SETEQ x)
@@ -764,31 +784,6 @@
(XOR(L|Q)const [c] (XOR(L|Q)const [d] x)) => (XOR(L|Q)const [c ^ d] x)
(OR(L|Q)const [c] (OR(L|Q)const [d] x)) => (OR(L|Q)const [c | d] x)
-(BTRLconst [c] (ANDLconst [d] x)) => (ANDLconst [d &^ (1<<uint32(c))] x)
-(ANDLconst [c] (BTRLconst [d] x)) => (ANDLconst [c &^ (1<<uint32(d))] x)
-(BTRLconst [c] (BTRLconst [d] x)) => (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
-
-(BTCLconst [c] (XORLconst [d] x)) => (XORLconst [d ^ 1<<uint32(c)] x)
-(XORLconst [c] (BTCLconst [d] x)) => (XORLconst [c ^ 1<<uint32(d)] x)
-(BTCLconst [c] (BTCLconst [d] x)) => (XORLconst [1<<uint32(c) | 1<<uint32(d)] x)
-
-(BTSLconst [c] (ORLconst [d] x)) => (ORLconst [d | 1<<uint32(c)] x)
-(ORLconst [c] (BTSLconst [d] x)) => (ORLconst [c | 1<<uint32(d)] x)
-(BTSLconst [c] (BTSLconst [d] x)) => (ORLconst [1<<uint32(c) | 1<<uint32(d)] x)
-
-(BTRQconst [c] (ANDQconst [d] x)) && is32Bit(int64(d) &^ (1<<uint32(c))) => (ANDQconst [d &^ (1<<uint32(c))] x)
-(ANDQconst [c] (BTRQconst [d] x)) && is32Bit(int64(c) &^ (1<<uint32(d))) => (ANDQconst [c &^ (1<<uint32(d))] x)
-(BTRQconst [c] (BTRQconst [d] x)) && is32Bit(^(1<<uint32(c) | 1<<uint32(d))) => (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
-
-(BTCQconst [c] (XORQconst [d] x)) && is32Bit(int64(d) ^ 1<<uint32(c)) => (XORQconst [d ^ 1<<uint32(c)] x)
-(XORQconst [c] (BTCQconst [d] x)) && is32Bit(int64(c) ^ 1<<uint32(d)) => (XORQconst [c ^ 1<<uint32(d)] x)
-(BTCQconst [c] (BTCQconst [d] x)) && is32Bit(1<<uint32(c) ^ 1<<uint32(d)) => (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
-
-(BTSQconst [c] (ORQconst [d] x)) && is32Bit(int64(d) | 1<<uint32(c)) => (ORQconst [d | 1<<uint32(c)] x)
-(ORQconst [c] (BTSQconst [d] x)) && is32Bit(int64(c) | 1<<uint32(d)) => (ORQconst [c | 1<<uint32(d)] x)
-(BTSQconst [c] (BTSQconst [d] x)) && is32Bit(1<<uint32(c) | 1<<uint32(d)) => (ORQconst [1<<uint32(c) | 1<<uint32(d)] x)
-
-
(MULLconst [c] (MULLconst [d] x)) => (MULLconst [c * d] x)
(MULQconst [c] (MULQconst [d] x)) && is32Bit(int64(c)*int64(d)) => (MULQconst [c * d] x)
@@ -1408,11 +1403,8 @@
(NOTQ (MOVQconst [c])) => (MOVQconst [^c])
(NOTL (MOVLconst [c])) => (MOVLconst [^c])
(BTSQconst [c] (MOVQconst [d])) => (MOVQconst [d|(1<<uint32(c))])
-(BTSLconst [c] (MOVLconst [d])) => (MOVLconst [d|(1<<uint32(c))])
(BTRQconst [c] (MOVQconst [d])) => (MOVQconst [d&^(1<<uint32(c))])
-(BTRLconst [c] (MOVLconst [d])) => (MOVLconst [d&^(1<<uint32(c))])
(BTCQconst [c] (MOVQconst [d])) => (MOVQconst [d^(1<<uint32(c))])
-(BTCLconst [c] (MOVLconst [d])) => (MOVLconst [d^(1<<uint32(c))])
// If c or d doesn't fit into 32 bits, then we can't construct ORQconst,
// but we can still constant-fold.
@@ -1476,6 +1468,7 @@
&& sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off()))
&& a.Val() == 0
&& c.Val() == 0
+ && setPos(v, x.Pos)
&& clobber(x)
=> (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
(MOVQstoreconst [a] {s} p0 x:(MOVQstoreconst [c] {s} p1 mem))
@@ -1484,39 +1477,10 @@
&& sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off()))
&& a.Val() == 0
&& c.Val() == 0
+ && setPos(v, x.Pos)
&& clobber(x)
=> (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
-(MOVBstore [i] {s} p
- x1:(MOVBload [j] {s2} p2 mem)
- mem2:(MOVBstore [i-1] {s} p
- x2:(MOVBload [j-1] {s2} p2 mem) mem))
- && x1.Uses == 1
- && x2.Uses == 1
- && mem2.Uses == 1
- && clobber(x1, x2, mem2)
- => (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
-
-(MOVWstore [i] {s} p
- x1:(MOVWload [j] {s2} p2 mem)
- mem2:(MOVWstore [i-2] {s} p
- x2:(MOVWload [j-2] {s2} p2 mem) mem))
- && x1.Uses == 1
- && x2.Uses == 1
- && mem2.Uses == 1
- && clobber(x1, x2, mem2)
- => (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
-
-(MOVLstore [i] {s} p
- x1:(MOVLload [j] {s2} p2 mem)
- mem2:(MOVLstore [i-4] {s} p
- x2:(MOVLload [j-4] {s2} p2 mem) mem))
- && x1.Uses == 1
- && x2.Uses == 1
- && mem2.Uses == 1
- && clobber(x1, x2, mem2)
- => (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
-
// Merge load and op
// TODO: add indexed variants?
((ADD|SUB|AND|OR|XOR)Q x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Qload x [off] {sym} ptr mem)
@@ -1529,6 +1493,8 @@
(MOVQstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Qload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
((ADD|SUB|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
+(MOVQstore {sym} [off] ptr x:(BT(S|R|C)Qconst [c] l:(MOVQload {sym} [off] ptr mem)) mem) && x.Uses == 1 && l.Uses == 1 && clobber(x, l) =>
+ (BT(S|R|C)Qconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
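This store-merge rule corresponds to a read-modify-write of a uint64 in memory with a constant bit index in the 31..63 range. A hypothetical example of the kind of Go source it targets (type and method names are made up):

package main

import "fmt"

type flags struct{ bits uint64 }

// Read-modify-write of a constant high bit: the load/BT*Qconst/store chain
// such code produces is the kind the rule above can collapse into a single
// BT*Qconstmodify on the memory operand.
func (f *flags) markBusy()   { f.bits |= 1 << 40 }
func (f *flags) clearBusy()  { f.bits &^= 1 << 40 }
func (f *flags) toggleBusy() { f.bits ^= 1 << 40 }

func main() {
	var f flags
	f.markBusy()
	fmt.Printf("%#x\n", f.bits) // 0x10000000000
	f.clearBusy()
	fmt.Printf("%#x\n", f.bits) // 0x0
}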
// Merge ADDQconst and LEAQ into atomic loads.
(MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go
index d8d0225fc3..606171947b 100644
--- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go
@@ -399,12 +399,27 @@ func init() {
{name: "BTSQ", argLength: 2, reg: gp21, asm: "BTSQ", resultInArg0: true, clobberFlags: true}, // set bit arg1%64 in arg0
{name: "BTLconst", argLength: 1, reg: gp1flags, asm: "BTL", typ: "Flags", aux: "Int8"}, // test whether bit auxint in arg0 is set, 0 <= auxint < 32
{name: "BTQconst", argLength: 1, reg: gp1flags, asm: "BTQ", typ: "Flags", aux: "Int8"}, // test whether bit auxint in arg0 is set, 0 <= auxint < 64
- {name: "BTCLconst", argLength: 1, reg: gp11, asm: "BTCL", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // complement bit auxint in arg0, 0 <= auxint < 32
- {name: "BTCQconst", argLength: 1, reg: gp11, asm: "BTCQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // complement bit auxint in arg0, 0 <= auxint < 64
- {name: "BTRLconst", argLength: 1, reg: gp11, asm: "BTRL", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // reset bit auxint in arg0, 0 <= auxint < 32
- {name: "BTRQconst", argLength: 1, reg: gp11, asm: "BTRQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // reset bit auxint in arg0, 0 <= auxint < 64
- {name: "BTSLconst", argLength: 1, reg: gp11, asm: "BTSL", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // set bit auxint in arg0, 0 <= auxint < 32
- {name: "BTSQconst", argLength: 1, reg: gp11, asm: "BTSQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // set bit auxint in arg0, 0 <= auxint < 64
+ {name: "BTCQconst", argLength: 1, reg: gp11, asm: "BTCQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // complement bit auxint in arg0, 31 <= auxint < 64
+ {name: "BTRQconst", argLength: 1, reg: gp11, asm: "BTRQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // reset bit auxint in arg0, 31 <= auxint < 64
+ {name: "BTSQconst", argLength: 1, reg: gp11, asm: "BTSQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // set bit auxint in arg0, 31 <= auxint < 64
+
+ // BT[SRC]Qconstmodify
+ //
+ // S: set bit
+ // R: reset (clear) bit
+ // C: complement bit
+ //
+ // Apply operation to bit ValAndOff(AuxInt).Val() in the 64 bits at
+ // memory address arg0+ValAndOff(AuxInt).Off()+aux
+ // Bit index must be in range (31-63).
+ // (We use OR/AND/XOR for thinner targets and lower bit indexes.)
+ // arg1=mem, returns mem
+ //
+ // Note that there aren't non-const versions of these instructions.
+ // Well, there are such instructions, but they are slow and weird so we don't use them.
+ {name: "BTSQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTSQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
+ {name: "BTRQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTRQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
+ {name: "BTCQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTCQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
// TESTx: compare (arg0 & arg1) to 0
{name: "TESTQ", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTQ", typ: "Flags"},
@@ -681,6 +696,12 @@ func init() {
// Any use must be preceded by a successful check of runtime.support_fma.
{name: "VFMADD231SD", argLength: 3, reg: fp31, resultInArg0: true, asm: "VFMADD231SD"},
+ // Note that these operations don't exactly match the semantics of Go's
+ // builtin min. In particular, these aren't commutative, because in various
+ // special cases the 2nd argument is preferred.
+ {name: "MINSD", argLength: 2, reg: fp21, resultInArg0: true, asm: "MINSD"}, // min(arg0,arg1)
+ {name: "MINSS", argLength: 2, reg: fp21, resultInArg0: true, asm: "MINSS"}, // min(arg0,arg1)
+
{name: "SBBQcarrymask", argLength: 1, reg: flagsgp, asm: "SBBQ"}, // (int64)(-1) if carry is set, 0 if carry is clear.
{name: "SBBLcarrymask", argLength: 1, reg: flagsgp, asm: "SBBL"}, // (int32)(-1) if carry is set, 0 if carry is clear.
// Note: SBBW and SBBB are subsumed by SBBL
@@ -697,16 +718,27 @@ func init() {
{name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0
{name: "SETO", argLength: 1, reg: readflags, asm: "SETOS"}, // extract if overflow flag is set from arg0
// Variants that store result to memory
- {name: "SETEQstore", argLength: 3, reg: gpstoreconst, asm: "SETEQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract == condition from arg1 to arg0+auxint+aux, arg2=mem
- {name: "SETNEstore", argLength: 3, reg: gpstoreconst, asm: "SETNE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract != condition from arg1 to arg0+auxint+aux, arg2=mem
- {name: "SETLstore", argLength: 3, reg: gpstoreconst, asm: "SETLT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed < condition from arg1 to arg0+auxint+aux, arg2=mem
- {name: "SETLEstore", argLength: 3, reg: gpstoreconst, asm: "SETLE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed <= condition from arg1 to arg0+auxint+aux, arg2=mem
- {name: "SETGstore", argLength: 3, reg: gpstoreconst, asm: "SETGT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed > condition from arg1 to arg0+auxint+aux, arg2=mem
- {name: "SETGEstore", argLength: 3, reg: gpstoreconst, asm: "SETGE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed >= condition from arg1 to arg0+auxint+aux, arg2=mem
- {name: "SETBstore", argLength: 3, reg: gpstoreconst, asm: "SETCS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned < condition from arg1 to arg0+auxint+aux, arg2=mem
- {name: "SETBEstore", argLength: 3, reg: gpstoreconst, asm: "SETLS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned <= condition from arg1 to arg0+auxint+aux, arg2=mem
- {name: "SETAstore", argLength: 3, reg: gpstoreconst, asm: "SETHI", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned > condition from arg1 to arg0+auxint+aux, arg2=mem
- {name: "SETAEstore", argLength: 3, reg: gpstoreconst, asm: "SETCC", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned >= condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETEQstore", argLength: 3, reg: gpstoreconst, asm: "SETEQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract == condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETNEstore", argLength: 3, reg: gpstoreconst, asm: "SETNE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract != condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETLstore", argLength: 3, reg: gpstoreconst, asm: "SETLT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed < condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETLEstore", argLength: 3, reg: gpstoreconst, asm: "SETLE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed <= condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETGstore", argLength: 3, reg: gpstoreconst, asm: "SETGT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed > condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETGEstore", argLength: 3, reg: gpstoreconst, asm: "SETGE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed >= condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETBstore", argLength: 3, reg: gpstoreconst, asm: "SETCS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned < condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETBEstore", argLength: 3, reg: gpstoreconst, asm: "SETLS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned <= condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETAstore", argLength: 3, reg: gpstoreconst, asm: "SETHI", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned > condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETAEstore", argLength: 3, reg: gpstoreconst, asm: "SETCC", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned >= condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETEQstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETEQ", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract == condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+ {name: "SETNEstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETNE", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract != condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+ {name: "SETLstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETLT", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract signed < condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+ {name: "SETLEstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETLE", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract signed <= condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+ {name: "SETGstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETGT", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract signed > condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+ {name: "SETGEstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETGE", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract signed >= condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+ {name: "SETBstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETCS", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract unsigned < condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+ {name: "SETBEstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETLS", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract unsigned <= condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+ {name: "SETAstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETHI", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract unsigned > condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+ {name: "SETAEstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETCC", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract unsigned >= condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+
// Need different opcodes for floating point conditions because
// any comparison involving a NaN is always FALSE and thus
// the patterns for inverting conditions cannot be used.
@@ -746,7 +778,8 @@ func init() {
{name: "MOVLi2f", argLength: 1, reg: gpfp, typ: "Float32"}, // move 32 bits from int to float reg
{name: "MOVLf2i", argLength: 1, reg: fpgp, typ: "UInt32"}, // move 32 bits from float to int reg, zero extend
- {name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs for float negation.
+ {name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs (for float negation).
+ {name: "POR", argLength: 2, reg: fp21, asm: "POR", commutative: true, resultInArg0: true}, // inclusive or, applied to X regs (for float min/max).
{name: "LEAQ", argLength: 1, reg: gp11sb, asm: "LEAQ", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
{name: "LEAL", argLength: 1, reg: gp11sb, asm: "LEAL", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM.rules b/src/cmd/compile/internal/ssa/_gen/ARM.rules
index a60afb000a..ed0ed80afa 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/_gen/ARM.rules
@@ -66,17 +66,17 @@
// count trailing zero for ARMv5 and ARMv6
// 32 - CLZ(x&-x - 1)
-(Ctz32 <t> x) && buildcfg.GOARM<=6 =>
+(Ctz32 <t> x) && buildcfg.GOARM.Version<=6 =>
(RSBconst [32] (CLZ <t> (SUBconst <t> (AND <t> x (RSBconst <t> [0] x)) [1])))
-(Ctz16 <t> x) && buildcfg.GOARM<=6 =>
+(Ctz16 <t> x) && buildcfg.GOARM.Version<=6 =>
(RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x10000] x))) [1])))
-(Ctz8 <t> x) && buildcfg.GOARM<=6 =>
+(Ctz8 <t> x) && buildcfg.GOARM.Version<=6 =>
(RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x100] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x100] x))) [1])))
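The ARMv5/ARMv6 formula quoted above, 32 - CLZ(x&-x - 1), can be checked in plain Go against math/bits; ctzViaClz is an illustrative name, not compiler code.

package main

import (
	"fmt"
	"math/bits"
)

// ctzViaClz spells out the formula: x&-x isolates the lowest set bit,
// subtracting 1 turns it into a mask covering exactly the trailing zeros,
// and 32-CLZ counts the bits in that mask. For x == 0 the mask is all ones
// and the result is 32, as required.
func ctzViaClz(x uint32) int {
	return 32 - bits.LeadingZeros32((x&-x)-1)
}

func main() {
	for _, x := range []uint32{0, 1, 0b101000, 1 << 31} {
		fmt.Println(x, ctzViaClz(x), bits.TrailingZeros32(x)) // last two columns agree
	}
}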
// count trailing zero for ARMv7
-(Ctz32 <t> x) && buildcfg.GOARM==7 => (CLZ <t> (RBIT <t> x))
-(Ctz16 <t> x) && buildcfg.GOARM==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
-(Ctz8 <t> x) && buildcfg.GOARM==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
+(Ctz32 <t> x) && buildcfg.GOARM.Version==7 => (CLZ <t> (RBIT <t> x))
+(Ctz16 <t> x) && buildcfg.GOARM.Version==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
+(Ctz8 <t> x) && buildcfg.GOARM.Version==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
// bit length
(BitLen32 <t> x) => (RSBconst [32] (CLZ <t> x))
@@ -90,13 +90,13 @@
// t5 = x right rotate 8 bits -- (d, a, b, c )
// result = t4 ^ t5 -- (d, c, b, a )
// using shifted ops this can be done in 4 instructions.
-(Bswap32 <t> x) && buildcfg.GOARM==5 =>
+(Bswap32 <t> x) && buildcfg.GOARM.Version==5 =>
(XOR <t>
(SRLconst <t> (BICconst <t> (XOR <t> x (SRRconst <t> [16] x)) [0xff0000]) [8])
(SRRconst <t> x [8]))
// byte swap for ARMv6 and above
-(Bswap32 x) && buildcfg.GOARM>=6 => (REV x)
+(Bswap32 x) && buildcfg.GOARM.Version>=6 => (REV x)
// boolean ops -- booleans are represented with 0=false, 1=true
(AndB ...) => (AND ...)
@@ -741,10 +741,10 @@
(SUBconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) => (ADDconst [-c] x)
(ANDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (BICconst [int32(^uint32(c))] x)
(BICconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (ANDconst [int32(^uint32(c))] x)
-(ADDconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (SUBconst [-c] x)
-(SUBconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (ADDconst [-c] x)
-(ANDconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (BICconst [int32(^uint32(c))] x)
-(BICconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (ANDconst [int32(^uint32(c))] x)
+(ADDconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (SUBconst [-c] x)
+(SUBconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (ADDconst [-c] x)
+(ANDconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (BICconst [int32(^uint32(c))] x)
+(BICconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (ANDconst [int32(^uint32(c))] x)
(ADDconst [c] (MOVWconst [d])) => (MOVWconst [c+d])
(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
@@ -1139,7 +1139,7 @@
// UBFX instruction is supported by ARMv6T2, ARMv7 and above versions, REV16 is supported by
// ARMv6 and above versions. So for ARMv6, we need to match SLLconst, SRLconst and ORshiftLL.
((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x) => (REV16 x)
-((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x) && buildcfg.GOARM>=6 => (REV16 x)
+((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x) && buildcfg.GOARM.Version>=6 => (REV16 x)
// use indexed loads and stores
(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVWloadidx ptr idx mem)
@@ -1209,25 +1209,25 @@
(BIC x x) => (MOVWconst [0])
(ADD (MUL x y) a) => (MULA x y a)
-(SUB a (MUL x y)) && buildcfg.GOARM == 7 => (MULS x y a)
-(RSB (MUL x y) a) && buildcfg.GOARM == 7 => (MULS x y a)
+(SUB a (MUL x y)) && buildcfg.GOARM.Version == 7 => (MULS x y a)
+(RSB (MUL x y) a) && buildcfg.GOARM.Version == 7 => (MULS x y a)
-(NEGF (MULF x y)) && buildcfg.GOARM >= 6 => (NMULF x y)
-(NEGD (MULD x y)) && buildcfg.GOARM >= 6 => (NMULD x y)
-(MULF (NEGF x) y) && buildcfg.GOARM >= 6 => (NMULF x y)
-(MULD (NEGD x) y) && buildcfg.GOARM >= 6 => (NMULD x y)
+(NEGF (MULF x y)) && buildcfg.GOARM.Version >= 6 => (NMULF x y)
+(NEGD (MULD x y)) && buildcfg.GOARM.Version >= 6 => (NMULD x y)
+(MULF (NEGF x) y) && buildcfg.GOARM.Version >= 6 => (NMULF x y)
+(MULD (NEGD x) y) && buildcfg.GOARM.Version >= 6 => (NMULD x y)
(NMULF (NEGF x) y) => (MULF x y)
(NMULD (NEGD x) y) => (MULD x y)
// the result will overwrite the addend, since they are in the same register
-(ADDF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAF a x y)
-(ADDF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSF a x y)
-(ADDD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAD a x y)
-(ADDD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSD a x y)
-(SUBF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSF a x y)
-(SUBF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAF a x y)
-(SUBD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSD a x y)
-(SUBD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAD a x y)
+(ADDF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAF a x y)
+(ADDF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSF a x y)
+(ADDD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAD a x y)
+(ADDD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSD a x y)
+(SUBF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSF a x y)
+(SUBF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAF a x y)
+(SUBD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSD a x y)
+(SUBD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAD a x y)
(AND x (MVN y)) => (BIC x y)
@@ -1259,8 +1259,8 @@
(CMPD x (MOVDconst [0])) => (CMPD0 x)
// bit extraction
-(SRAconst (SLLconst x [c]) [d]) && buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFX [(d-c)|(32-d)<<8] x)
-(SRLconst (SLLconst x [c]) [d]) && buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFXU [(d-c)|(32-d)<<8] x)
+(SRAconst (SLLconst x [c]) [d]) && buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFX [(d-c)|(32-d)<<8] x)
+(SRLconst (SLLconst x [c]) [d]) && buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFXU [(d-c)|(32-d)<<8] x)
// comparison simplification
((EQ|NE) (CMP x (RSBconst [0] y))) => ((EQ|NE) (CMN x y)) // sense of carry bit not preserved; see also #50854
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64.rules b/src/cmd/compile/internal/ssa/_gen/ARM64.rules
index 8cf6f6740e..c5ee0285d9 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/ARM64.rules
@@ -61,6 +61,9 @@
(Sqrt32 ...) => (FSQRTS ...)
+(Min(64|32)F ...) => (FMIN(D|S) ...)
+(Max(64|32)F ...) => (FMAX(D|S) ...)
+
// lowering rotates
// we do rotate detection in generic rules, if the following rules need to be changed, check generic rules first.
(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
@@ -482,8 +485,8 @@
(STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem))
(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))))
-(MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i+8] ptr mem)) && x.Uses == 1 && clobber(x) => (MOVQstorezero {s} [i] ptr mem)
-(MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i-8] ptr mem)) && x.Uses == 1 && clobber(x) => (MOVQstorezero {s} [i-8] ptr mem)
+(MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i+8] ptr mem)) && x.Uses == 1 && setPos(v, x.Pos) && clobber(x) => (MOVQstorezero {s} [i] ptr mem)
+(MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i-8] ptr mem)) && x.Uses == 1 && setPos(v, x.Pos) && clobber(x) => (MOVQstorezero {s} [i-8] ptr mem)
// strip off fractional word move
(Move [s] dst src mem) && s%16 != 0 && s%16 <= 8 && s > 16 =>
@@ -1184,7 +1187,7 @@
// mul-neg => mneg
(NEG (MUL x y)) => (MNEG x y)
-(NEG (MULW x y)) => (MNEGW x y)
+(NEG (MULW x y)) && v.Type.Size() <= 4 => (MNEGW x y)
(MUL (NEG x) y) => (MNEG x y)
(MULW (NEG x) y) => (MNEGW x y)
@@ -1194,10 +1197,10 @@
(ADD a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MSUB a x y)
(SUB a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MADD a x y)
-(ADD a l:(MULW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MADDW a x y)
-(SUB a l:(MULW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MSUBW a x y)
-(ADD a l:(MNEGW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MSUBW a x y)
-(SUB a l:(MNEGW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MADDW a x y)
+(ADD a l:(MULW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MADDW a x y)
+(SUB a l:(MULW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MSUBW a x y)
+(ADD a l:(MNEGW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MSUBW a x y)
+(SUB a l:(MNEGW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MADDW a x y)
// optimize ADCSflags, SBCSflags and friends
(ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (ADCzerocarry <typ.UInt64> c)))) => (ADCSflags x y c)
@@ -1217,16 +1220,16 @@
(MUL x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
(MUL x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
-(MULW x (MOVDconst [c])) && int32(c)==-1 => (NEG x)
+(MULW x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg (NEG <x.Type> x))
(MULW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
-(MULW x (MOVDconst [c])) && int32(c)==1 => x
-(MULW x (MOVDconst [c])) && isPowerOfTwo64(c) => (SLLconst [log64(c)] x)
-(MULW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (ADDshiftLL x x [log64(c-1)])
-(MULW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log64(c+1)])
-(MULW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SLLconst [log64(c/3)] (ADDshiftLL <x.Type> x x [1]))
-(MULW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SLLconst [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))
-(MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
-(MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
+(MULW x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg x)
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c) => (MOVWUreg (SLLconst <x.Type> [log64(c)] x))
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (MOVWUreg (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (MOVWUreg (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
+(MULW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/3)] (ADDshiftLL <x.Type> x x [1])))
+(MULW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
+(MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3])))
+(MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
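The MOVWUreg wrappers added here keep the strength-reduced forms consistent with the invariant noted in ARM64Ops.go further down (32-bit ops zero the upper 32 bits of the destination register). A small Go sketch of what that means for an input with high bits set; mulwModel and mulw4 are illustrative names.

package main

import "fmt"

// mulwModel is what a 32-bit multiply leaves in a 64-bit register under that
// convention: compute in 32 bits, then zero-extend.
func mulwModel(x, y uint64) uint64 {
	return uint64(uint32(x) * uint32(y))
}

// A shift-based strength reduction such as x*4 -> x<<2 only matches the
// model after the same zero-extension, which is the role the MOVWUreg
// wrapper plays in the rules above.
func mulw4(x uint64) uint64 {
	return uint64(uint32(x << 2))
}

func main() {
	x := uint64(0x1_2345_6789) // bits above 32 are set
	fmt.Printf("%#x %#x\n", mulwModel(x, 4), mulw4(x)) // both 0x8d159e24
}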
// mneg by constant
(MNEG x (MOVDconst [-1])) => x
@@ -1241,16 +1244,16 @@
(MNEG x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
-(MNEGW x (MOVDconst [c])) && int32(c)==-1 => x
+(MNEGW x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg x)
(MNEGW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
-(MNEGW x (MOVDconst [c])) && int32(c)==1 => (NEG x)
+(MNEGW x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (NEG <x.Type> x))
(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
-(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
-(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
-(MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
-(MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
-(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
-(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
+(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> x x [log64(c-1)])))
+(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)])))
+(MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2])))
+(MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))))
+(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3])))
+(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))))
(MADD a x (MOVDconst [-1])) => (SUB a x)
@@ -1275,27 +1278,27 @@
(MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
-(MADDW a x (MOVDconst [c])) && int32(c)==-1 => (SUB a x)
-(MADDW a _ (MOVDconst [c])) && int32(c)==0 => a
-(MADDW a x (MOVDconst [c])) && int32(c)==1 => (ADD a x)
-(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
-(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
-(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
-(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
-(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
-(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
-(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
-
-(MADDW a (MOVDconst [c]) x) && int32(c)==-1 => (SUB a x)
-(MADDW a (MOVDconst [c]) _) && int32(c)==0 => a
-(MADDW a (MOVDconst [c]) x) && int32(c)==1 => (ADD a x)
-(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
-(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
-(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
-(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
-(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
-(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
-(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+(MADDW a x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg (SUB <a.Type> a x))
+(MADDW a _ (MOVDconst [c])) && int32(c)==0 => (MOVWUreg a)
+(MADDW a x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (ADD <a.Type> a x))
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)]))
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
+(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
+(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
+(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
+(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
+
+(MADDW a (MOVDconst [c]) x) && int32(c)==-1 => (MOVWUreg (SUB <a.Type> a x))
+(MADDW a (MOVDconst [c]) _) && int32(c)==0 => (MOVWUreg a)
+(MADDW a (MOVDconst [c]) x) && int32(c)==1 => (MOVWUreg (ADD <a.Type> a x))
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)]))
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
+(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
+(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
+(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
+(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
(MSUB a x (MOVDconst [-1])) => (ADD a x)
(MSUB a _ (MOVDconst [0])) => a
@@ -1319,33 +1322,33 @@
(MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
-(MSUBW a x (MOVDconst [c])) && int32(c)==-1 => (ADD a x)
-(MSUBW a _ (MOVDconst [c])) && int32(c)==0 => a
-(MSUBW a x (MOVDconst [c])) && int32(c)==1 => (SUB a x)
-(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
-(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
-(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
-(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
-(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
-(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
-(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
-
-(MSUBW a (MOVDconst [c]) x) && int32(c)==-1 => (ADD a x)
-(MSUBW a (MOVDconst [c]) _) && int32(c)==0 => a
-(MSUBW a (MOVDconst [c]) x) && int32(c)==1 => (SUB a x)
-(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
-(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
-(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
-(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
-(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
-(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
-(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+(MSUBW a x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg (ADD <a.Type> a x))
+(MSUBW a _ (MOVDconst [c])) && int32(c)==0 => (MOVWUreg a)
+(MSUBW a x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (SUB <a.Type> a x))
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
+(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
+(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
+(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
+(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
+
+(MSUBW a (MOVDconst [c]) x) && int32(c)==-1 => (MOVWUreg (ADD <a.Type> a x))
+(MSUBW a (MOVDconst [c]) _) && int32(c)==0 => (MOVWUreg a)
+(MSUBW a (MOVDconst [c]) x) && int32(c)==1 => (MOVWUreg (SUB <a.Type> a x))
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
+(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
+(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
+(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
+(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
// div by constant
(UDIV x (MOVDconst [1])) => x
(UDIV x (MOVDconst [c])) && isPowerOfTwo64(c) => (SRLconst [log64(c)] x)
-(UDIVW x (MOVDconst [c])) && uint32(c)==1 => x
-(UDIVW x (MOVDconst [c])) && isPowerOfTwo64(c) && is32Bit(c) => (SRLconst [log64(c)] x)
+(UDIVW x (MOVDconst [c])) && uint32(c)==1 => (MOVWUreg x)
+(UDIVW x (MOVDconst [c])) && isPowerOfTwo64(c) && is32Bit(c) => (SRLconst [log64(c)] (MOVWUreg <v.Type> x))
(UMOD _ (MOVDconst [1])) => (MOVDconst [0])
(UMOD x (MOVDconst [c])) && isPowerOfTwo64(c) => (ANDconst [c-1] x)
(UMODW _ (MOVDconst [c])) && uint32(c)==1 => (MOVDconst [0])
@@ -1401,24 +1404,24 @@
(SRLconst [c] (MOVDconst [d])) => (MOVDconst [int64(uint64(d)>>uint64(c))])
(SRAconst [c] (MOVDconst [d])) => (MOVDconst [d>>uint64(c)])
(MUL (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c*d])
-(MULW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(int32(c)*int32(d))])
(MNEG (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [-c*d])
-(MNEGW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [-int64(int32(c)*int32(d))])
-(MADD (MOVDconst [c]) x y) => (ADDconst [c] (MUL <x.Type> x y))
-(MADDW (MOVDconst [c]) x y) => (ADDconst [c] (MULW <x.Type> x y))
-(MSUB (MOVDconst [c]) x y) => (ADDconst [c] (MNEG <x.Type> x y))
-(MSUBW (MOVDconst [c]) x y) => (ADDconst [c] (MNEGW <x.Type> x y))
+(MULW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(uint32(c*d))])
+(MNEGW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(uint32(-c*d))])
+(MADD (MOVDconst [c]) x y) => (ADDconst [c] (MUL <x.Type> x y))
+(MSUB (MOVDconst [c]) x y) => (ADDconst [c] (MNEG <x.Type> x y))
(MADD a (MOVDconst [c]) (MOVDconst [d])) => (ADDconst [c*d] a)
-(MADDW a (MOVDconst [c]) (MOVDconst [d])) => (ADDconst [int64(int32(c)*int32(d))] a)
(MSUB a (MOVDconst [c]) (MOVDconst [d])) => (SUBconst [c*d] a)
-(MSUBW a (MOVDconst [c]) (MOVDconst [d])) => (SUBconst [int64(int32(c)*int32(d))] a)
+(MADDW (MOVDconst [c]) x y) => (MOVWUreg (ADDconst <x.Type> [c] (MULW <x.Type> x y)))
+(MSUBW (MOVDconst [c]) x y) => (MOVWUreg (ADDconst <x.Type> [c] (MNEGW <x.Type> x y)))
+(MADDW a (MOVDconst [c]) (MOVDconst [d])) => (MOVWUreg (ADDconst <a.Type> [c*d] a))
+(MSUBW a (MOVDconst [c]) (MOVDconst [d])) => (MOVWUreg (SUBconst <a.Type> [c*d] a))
(DIV (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c/d])
(UDIV (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)/uint64(d))])
-(DIVW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(int32(c)/int32(d))])
+(DIVW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(int32(c)/int32(d)))])
(UDIVW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)/uint32(d))])
(MOD (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c%d])
(UMOD (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)%uint64(d))])
-(MODW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(int32(c)%int32(d))])
+(MODW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(int32(c)%int32(d)))])
(UMODW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)%uint32(d))])
(ANDconst [c] (MOVDconst [d])) => (MOVDconst [c&d])
(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
@@ -1566,13 +1569,26 @@
(LessEqualF (InvertFlags x)) => (GreaterEqualF x)
(GreaterThanF (InvertFlags x)) => (LessThanF x)
(GreaterEqualF (InvertFlags x)) => (LessEqualF x)
-(LessThanNoov (InvertFlags x)) => (BIC (GreaterEqualNoov <typ.Bool> x) (Equal <typ.Bool> x))
-(GreaterEqualNoov (InvertFlags x)) => (OR (LessThanNoov <typ.Bool> x) (Equal <typ.Bool> x))
+(LessThanNoov (InvertFlags x)) => (CSEL0 [OpARM64NotEqual] (GreaterEqualNoov <typ.Bool> x) x)
+(GreaterEqualNoov (InvertFlags x)) => (CSINC [OpARM64NotEqual] (LessThanNoov <typ.Bool> x) (MOVDconst [0]) x)
// Boolean-generating instructions (NOTE: NOT all boolean Values) always
// zero upper bit of the register; no need to zero-extend
(MOVBUreg x:((Equal|NotEqual|LessThan|LessThanU|LessThanF|LessEqual|LessEqualU|LessEqualF|GreaterThan|GreaterThanU|GreaterThanF|GreaterEqual|GreaterEqualU|GreaterEqualF) _)) => (MOVDreg x)
+// Don't bother extending if we're not using the higher bits.
+(MOV(B|BU)reg x) && v.Type.Size() <= 1 => x
+(MOV(H|HU)reg x) && v.Type.Size() <= 2 => x
+(MOV(W|WU)reg x) && v.Type.Size() <= 4 => x
+
+// omit unsigned (zero) extension
+(MOVWUreg x) && zeroUpper32Bits(x, 3) => x
+
+// omit sign extension
+(MOVWreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffff80000000) == 0 => (ANDconst <t> x [c])
+(MOVHreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffff8000) == 0 => (ANDconst <t> x [c])
+(MOVBreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffffff80) == 0 => (ANDconst <t> x [c])
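These rewrites drop the sign extension when the AND mask already guarantees the narrow sign bit is clear: with bits 31 and up masked off, x&c is a non-negative value that sign-extends to itself (the MOVHreg/MOVBreg variants are the same story with bits 15 and 7). A quick Go check of that reasoning, with signExtend32 as an illustrative stand-in for MOVWreg:

package main

import "fmt"

// signExtend32 is the effect of MOVWreg: reinterpret the low 32 bits as a
// signed value and sign-extend to 64 bits.
func signExtend32(v uint64) uint64 { return uint64(int64(int32(uint32(v)))) }

func main() {
	// A mask accepted by the MOVWreg rule clears bit 31 and every bit above
	// it, so x&c is a non-negative 31-bit value and the extension is a
	// no-op; the MOVWreg can be dropped and the ANDconst kept.
	const c = 0x7fff0f0f // c & 0xffffffff80000000 == 0
	x := uint64(0xdeadbeefdeadbeef)
	masked := x & c
	fmt.Println(signExtend32(masked) == masked) // true
}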
+
// absorb flag constants into conditional instructions
(CSEL [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
(CSEL [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => y
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
index 2853e62540..5a98aa0c54 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
@@ -13,6 +13,7 @@ import "strings"
// - *const instructions may use a constant larger than the instruction can encode.
// In this case the assembler expands to multiple instructions and uses tmp
// register (R27).
+// - All 32-bit Ops will zero the upper 32 bits of the destination register.
// Suffixes encode the bit width of various instructions.
// D (double word) = 64 bit
@@ -195,9 +196,9 @@ func init() {
{name: "MULL", argLength: 2, reg: gp21, asm: "SMULL", commutative: true}, // arg0 * arg1, signed, 32-bit mult results in 64-bit
{name: "UMULL", argLength: 2, reg: gp21, asm: "UMULL", commutative: true}, // arg0 * arg1, unsigned, 32-bit mult results in 64-bit
{name: "DIV", argLength: 2, reg: gp21, asm: "SDIV"}, // arg0 / arg1, signed
- {name: "UDIV", argLength: 2, reg: gp21, asm: "UDIV"}, // arg0 / arg1, unsighed
+ {name: "UDIV", argLength: 2, reg: gp21, asm: "UDIV"}, // arg0 / arg1, unsigned
{name: "DIVW", argLength: 2, reg: gp21, asm: "SDIVW"}, // arg0 / arg1, signed, 32 bit
- {name: "UDIVW", argLength: 2, reg: gp21, asm: "UDIVW"}, // arg0 / arg1, unsighed, 32 bit
+ {name: "UDIVW", argLength: 2, reg: gp21, asm: "UDIVW"}, // arg0 / arg1, unsigned, 32 bit
{name: "MOD", argLength: 2, reg: gp21, asm: "REM"}, // arg0 % arg1, signed
{name: "UMOD", argLength: 2, reg: gp21, asm: "UREM"}, // arg0 % arg1, unsigned
{name: "MODW", argLength: 2, reg: gp21, asm: "REMW"}, // arg0 % arg1, signed, 32 bit
@@ -234,6 +235,10 @@ func init() {
{name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD"}, // -arg0, float64
{name: "FSQRTD", argLength: 1, reg: fp11, asm: "FSQRTD"}, // sqrt(arg0), float64
{name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"}, // sqrt(arg0), float32
+ {name: "FMIND", argLength: 2, reg: fp21, asm: "FMIND"}, // min(arg0, arg1)
+ {name: "FMINS", argLength: 2, reg: fp21, asm: "FMINS"}, // min(arg0, arg1)
+ {name: "FMAXD", argLength: 2, reg: fp21, asm: "FMAXD"}, // max(arg0, arg1)
+ {name: "FMAXS", argLength: 2, reg: fp21, asm: "FMAXS"}, // max(arg0, arg1)
{name: "REV", argLength: 1, reg: gp11, asm: "REV"}, // byte reverse, 64-bit
{name: "REVW", argLength: 1, reg: gp11, asm: "REVW"}, // byte reverse, 32-bit
{name: "REV16", argLength: 1, reg: gp11, asm: "REV16"}, // byte reverse in each 16-bit halfword, 64-bit
diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
index 4a47c4cd47..2af9519113 100644
--- a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
@@ -416,7 +416,7 @@
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)
-(If cond yes no) => (NE cond yes no)
+(If cond yes no) => (NE (MOVBUreg <typ.UInt64> cond) yes no)
// Write barrier.
(WB ...) => (LoweredWB ...)
@@ -450,71 +450,37 @@
(EQ (SGTconst [0] x) yes no) => (GEZ x yes no)
(NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no)
(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no)
+(MOVBUreg x:((SGT|SGTU) _ _)) => x
// fold offset into address
(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr)
// fold address into load/store
-(MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBload [off1+int32(off2)] {sym} ptr mem)
-(MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBUload [off1+int32(off2)] {sym} ptr mem)
-(MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHload [off1+int32(off2)] {sym} ptr mem)
-(MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHUload [off1+int32(off2)] {sym} ptr mem)
-(MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWload [off1+int32(off2)] {sym} ptr mem)
-(MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWUload [off1+int32(off2)] {sym} ptr mem)
-(MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVload [off1+int32(off2)] {sym} ptr mem)
-(MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVFload [off1+int32(off2)] {sym} ptr mem)
-(MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDload [off1+int32(off2)] {sym} ptr mem)
-
-(MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
-(MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
-(MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
-(MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVVstore [off1+int32(off2)] {sym} ptr val mem)
-(MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVFstore [off1+int32(off2)] {sym} ptr val mem)
-(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
-(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
-(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
-(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
-(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVstorezero [off1+int32(off2)] {sym} ptr mem)
-
-(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
- (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
- (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
- (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
- (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
- (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
- (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
- (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
- (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
- (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-
-(MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
- (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
- (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
- (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
- (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
- (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
- (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
- (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
- (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
- (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
- (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+// Do not fold global variable access in -dynlink mode, where it will be rewritten
+// to use the GOT via REGTMP, which currently cannot handle large offsets.
+(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOV(B|BU|H|HU|W|WU|V|F|D)load [off1+int32(off2)] {sym} ptr mem)
+
+(MOV(B|H|W|V|F|D)store [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOV(B|H|W|V|F|D)store [off1+int32(off2)] {sym} ptr val mem)
+
+(MOV(B|H|W|V)storezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOV(B|H|W|V)storezero [off1+int32(off2)] {sym} ptr mem)
+
+(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOV(B|BU|H|HU|W|WU|V|F|D)load [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOV(B|H|W|V|F|D)store [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+ && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOV(B|H|W|V|F|D)store [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+
+(MOV(B|H|W|V)storezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOV(B|H|W|V)storezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(LoweredAtomicStore(32|64) ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero(32|64) ptr mem)
(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst32 [int32(c)] ptr mem)
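
The merged forms above keep the same guards as the rules they replace: the summed offset must still fit in a signed 32-bit immediate, and SB-relative addresses are skipped under -dynlink because they are later rewritten to go through the GOT via REGTMP. A minimal sketch of the range check, assuming the usual shape of the SSA rewrite helper (not part of this patch):

package demo

// is32Bit reports whether n fits in a signed 32-bit immediate; the folds
// above require it for the combined offset off1+off2.
func is32Bit(n int64) bool {
	return n == int64(int32(n))
}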
diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
index 23f20fddeb..3fbf5be499 100644
--- a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
@@ -123,17 +123,17 @@ func init() {
// Common individual register masks
var (
- gp = buildReg("R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31") // R1 is LR, R2 is thread pointer, R3 is stack pointer, R21-unused, R22 is g, R30 is REGTMP
+ gp = buildReg("R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31") // R1 is LR, R2 is thread pointer, R3 is stack pointer, R22 is g, R30 is REGTMP
gpg = gp | buildReg("g")
gpsp = gp | buildReg("SP")
gpspg = gpg | buildReg("SP")
gpspsbg = gpspg | buildReg("SB")
fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31")
callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g
- r1 = buildReg("R19")
- r2 = buildReg("R18")
- r3 = buildReg("R17")
- r4 = buildReg("R4")
+ r1 = buildReg("R20")
+ r2 = buildReg("R21")
+ r3 = buildReg("R23")
+ r4 = buildReg("R24")
)
// Common regInfo
var (
@@ -273,31 +273,32 @@ func init() {
{name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32
// function calls
- {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
- {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
- {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R29"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
- {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+ {name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("R29"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem
// duffzero
// arg0 = address of memory to zero
// arg1 = mem
// auxint = offset into duffzero code to start executing
// returns mem
- // R19 aka loong64.REGRT1 changed as side effect
+ // R20 aka loong64.REGRT1 changed as side effect
{
name: "DUFFZERO",
aux: "Int64",
argLength: 2,
reg: regInfo{
- inputs: []regMask{gp},
- clobbers: buildReg("R19 R1"),
+ inputs: []regMask{buildReg("R20")},
+ clobbers: buildReg("R20 R1"),
},
+ typ: "Mem",
faultOnNilArg0: true,
},
// duffcopy
- // arg0 = address of dst memory (in R20, changed as side effect) REGRT2
- // arg1 = address of src memory (in R19, changed as side effect) REGRT1
+ // arg0 = address of dst memory (in R21, changed as side effect)
+ // arg1 = address of src memory (in R20, changed as side effect)
// arg2 = mem
// auxint = offset into duffcopy code to start executing
// returns mem
@@ -306,57 +307,56 @@ func init() {
aux: "Int64",
argLength: 3,
reg: regInfo{
- inputs: []regMask{buildReg("R20"), buildReg("R19")},
- clobbers: buildReg("R19 R20 R1"),
+ inputs: []regMask{buildReg("R21"), buildReg("R20")},
+ clobbers: buildReg("R20 R21 R1"),
},
+ typ: "Mem",
faultOnNilArg0: true,
faultOnNilArg1: true,
},
// large or unaligned zeroing
- // arg0 = address of memory to zero (in R19, changed as side effect)
+ // arg0 = address of memory to zero (in R20, changed as side effect)
// arg1 = address of the last element to zero
// arg2 = mem
// auxint = alignment
// returns mem
- // SUBV $8, R19
- // MOVV R0, 8(R19)
- // ADDV $8, R19
- // BNE Rarg1, R19, -2(PC)
+ // MOVx R0, (R20)
+ // ADDV $sz, R20
+ // BGEU Rarg1, R20, -2(PC)
{
name: "LoweredZero",
aux: "Int64",
argLength: 3,
reg: regInfo{
- inputs: []regMask{buildReg("R19"), gp},
- clobbers: buildReg("R19"),
+ inputs: []regMask{buildReg("R20"), gp},
+ clobbers: buildReg("R20"),
},
- clobberFlags: true,
+ typ: "Mem",
faultOnNilArg0: true,
},
// large or unaligned move
- // arg0 = address of dst memory (in R4, changed as side effect)
- // arg1 = address of src memory (in R19, changed as side effect)
+ // arg0 = address of dst memory (in R21, changed as side effect)
+ // arg1 = address of src memory (in R20, changed as side effect)
// arg2 = address of the last element of src
// arg3 = mem
// auxint = alignment
// returns mem
- // SUBV $8, R19
- // MOVV 8(R19), Rtmp
- // MOVV Rtmp, (R4)
- // ADDV $8, R19
- // ADDV $8, R4
- // BNE Rarg2, R19, -4(PC)
+ // MOVx (R20), Rtmp
+ // MOVx Rtmp, (R21)
+ // ADDV $sz, R20
+ // ADDV $sz, R21
+ // BGEU Rarg2, R20, -4(PC)
{
name: "LoweredMove",
aux: "Int64",
argLength: 4,
reg: regInfo{
- inputs: []regMask{buildReg("R4"), buildReg("R19"), gp},
- clobbers: buildReg("R19 R4"),
+ inputs: []regMask{buildReg("R21"), buildReg("R20"), gp},
+ clobbers: buildReg("R20 R21"),
},
- clobberFlags: true,
+ typ: "Mem",
faultOnNilArg0: true,
faultOnNilArg1: true,
},
@@ -476,8 +476,8 @@ func init() {
blocks: blocks,
regnames: regNamesLOONG64,
// TODO: support register ABI on loong64
- ParamIntRegNames: "R4 R5 R6 R7 R8 R9 R10 R11",
- ParamFloatRegNames: "F0 F1 F2 F3 F4 F5 F6 F7",
+ ParamIntRegNames: "R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19",
+ ParamFloatRegNames: "F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15",
gpregmask: gp,
fpregmask: fp,
framepointerreg: -1, // not used
diff --git a/src/cmd/compile/internal/ssa/_gen/MIPS64.rules b/src/cmd/compile/internal/ssa/_gen/MIPS64.rules
index 4628e2a024..cabc7c652d 100644
--- a/src/cmd/compile/internal/ssa/_gen/MIPS64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/MIPS64.rules
@@ -38,6 +38,14 @@
(Mod8 x y) => (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
(Mod8u x y) => (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Select0 <t> (Add64carry x y c)) => (ADDV (ADDV <t> x y) c)
+(Select1 <t> (Add64carry x y c)) =>
+ (OR (SGTU <t> x s:(ADDV <t> x y)) (SGTU <t> s (ADDV <t> s c)))
+
+(Select0 <t> (Sub64borrow x y c)) => (SUBV (SUBV <t> x y) c)
+(Select1 <t> (Sub64borrow x y c)) =>
+ (OR (SGTU <t> s:(SUBV <t> x y) x) (SGTU <t> (SUBV <t> s c) s))
+
// math package intrinsics
(Abs ...) => (ABSD ...)
@@ -798,6 +806,10 @@
(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no)
(GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes)
+// SGT/SGTU with known outcomes.
+(SGT x x) => (MOVVconst [0])
+(SGTU x x) => (MOVVconst [0])
+
// fold readonly sym load
(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read8(sym, int64(off)))])
(MOVHload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
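
The new Add64carry/Sub64borrow lowerings above compute the carry without a flags register: the sum is built with two ADDVs and the carry-out is the OR of two unsigned comparisons. A hedged Go sketch of the same computation (plain Go names, not compiler internals):

package demo

// add64carry mirrors the lowering above: s = x+y, sum = s+c, and the carry
// out is 1 if either addition wrapped (the two SGTU comparisons, ORed).
func add64carry(x, y, c uint64) (sum, carry uint64) {
	s := x + y
	sum = s + c
	if s < x { // SGTU x s: x+y overflowed
		carry = 1
	}
	if sum < s { // SGTU s (ADDV s c): s+c overflowed
		carry |= 1
	}
	return sum, carry
}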
diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64.rules b/src/cmd/compile/internal/ssa/_gen/PPC64.rules
index 97e592fd7e..c9cd34b9a6 100644
--- a/src/cmd/compile/internal/ssa/_gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64.rules
@@ -238,6 +238,8 @@
(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVDaddr {sym} (SPanchored base mem))
(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVDaddr {sym} base)
(OffPtr [off] ptr) => (ADD (MOVDconst <typ.Int64> [off]) ptr)
+(MOVDaddr {sym} [n] p:(ADD x y)) && sym == nil && n == 0 => p
+(MOVDaddr {sym} [n] ptr) && sym == nil && n == 0 && (ptr.Op == OpArgIntReg || ptr.Op == OpPhi) => ptr
// TODO: optimize these cases?
(Ctz32NonZero ...) => (Ctz32 ...)
@@ -321,10 +323,6 @@
(NE (CMPWconst [0] (Select0 (ANDCCconst [1] ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) cc)))) yes no) => ((EQ|NE|LT|LE|GT|GE) cc yes no)
(NE (CMPWconst [0] (Select0 (ANDCCconst [1] ((FLessThan|FLessEqual|FGreaterThan|FGreaterEqual) cc)))) yes no) => ((FLT|FLE|FGT|FGE) cc yes no)
-// Elide compares of bit tests
-((EQ|NE) (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no) => ((EQ|NE) (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
-((EQ|NE) (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no) => ((EQ|NE) (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
-
// absorb flag constants into branches
(EQ (FlagEQ) yes no) => (First yes no)
(EQ (FlagLT) yes no) => (First no yes)
@@ -405,8 +403,8 @@
// Elide compares of bit tests
-((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
-((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> z) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> z) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ORCC x y)) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (XORCC x y)) yes no)
@@ -559,6 +557,7 @@
(NOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [^(c|d)])
// Discover consts
+(AND x (MOVDconst [-1])) => x
(AND x (MOVDconst [c])) && isU16Bit(c) => (Select0 (ANDCCconst [c] x))
(XOR x (MOVDconst [c])) && isU32Bit(c) => (XORconst [c] x)
(OR x (MOVDconst [c])) && isU32Bit(c) => (ORconst [c] x)
@@ -801,6 +800,7 @@
(AtomicOr(8|32) ...) => (LoweredAtomicOr(8|32) ...)
(Slicemask <t> x) => (SRADconst (NEG <t> x) [63])
+(Select0 (ANDCCconst [1] z:(SRADconst [63] x))) && z.Uses == 1 => (SRDconst [63] x)
// Note that MOV??reg returns a 64-bit int, x is not necessarily that wide
// This may interact with other patterns in the future. (Compare with arm64)
@@ -1000,7 +1000,7 @@
// Fold bit reversal into loads.
(BR(W|H) x:(MOV(W|H)Zload [off] {sym} ptr mem)) && x.Uses == 1 => @x.Block (MOV(W|H)BRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
-(BR(W|H) x:(MOV(W|H)Zloadidx ptr idx mem)) && x.Uses == 1 => @x.Block (MOV(W|H)Zreg (MOV(W|H)BRloadidx ptr idx mem))
+(BR(W|H) x:(MOV(W|H)Zloadidx ptr idx mem)) && x.Uses == 1 => @x.Block (MOV(W|H)BRloadidx ptr idx mem)
(BRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 => @x.Block (MOVDBRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
(BRD x:(MOVDloadidx ptr idx mem)) && x.Uses == 1 => @x.Block (MOVDBRloadidx ptr idx mem)
@@ -1011,7 +1011,7 @@
// GOPPC64<10 rules.
// These Bswap operations should only be introduced by the memcombine pass in places where they can be folded into loads or stores.
(Bswap(32|16) x:(MOV(W|H)Zload [off] {sym} ptr mem)) => @x.Block (MOV(W|H)BRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
-(Bswap(32|16) x:(MOV(W|H)Zloadidx ptr idx mem)) => @x.Block (MOV(W|H)Zreg (MOV(W|H)BRloadidx ptr idx mem))
+(Bswap(32|16) x:(MOV(W|H)Zloadidx ptr idx mem)) => @x.Block (MOV(W|H)BRloadidx ptr idx mem)
(Bswap64 x:(MOVDload [off] {sym} ptr mem)) => @x.Block (MOVDBRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
(Bswap64 x:(MOVDloadidx ptr idx mem)) => @x.Block (MOVDBRloadidx ptr idx mem)
(MOV(D|W|H)store [off] {sym} ptr (Bswap(64|32|16) val) mem) => (MOV(D|W|H)BRstore (MOVDaddr <ptr.Type> [off] {sym} ptr) val mem)
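
The two rule changes above fold a byte reversal directly into an indexed byte-reversed load, dropping the extra zero-extension step. A hedged Go-level example of the source pattern they target (the function name is illustrative, not from the patch):

package demo

import "math/bits"

// loadBE32 reverses the bytes of a value loaded through p; with the rules
// above this lowers to a single byte-reversed load rather than a load + BRW.
func loadBE32(p *uint32) uint32 {
	return bits.ReverseBytes32(*p)
}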
diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
index 4be362373c..7aa2e6c351 100644
--- a/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
@@ -176,14 +176,17 @@ func init() {
r6 = buildReg("R6")
)
ops := []opData{
- {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
- {name: "ADDconst", argLength: 1, reg: gp11, asm: "ADD", aux: "Int64"}, // arg0 + auxInt
- {name: "FADD", argLength: 2, reg: fp21, asm: "FADD", commutative: true}, // arg0+arg1
- {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0+arg1
- {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0-arg1
- {name: "SUBFCconst", argLength: 1, reg: gp11cxer, asm: "SUBC", aux: "Int64"}, // auxInt - arg0 (carry is ignored)
- {name: "FSUB", argLength: 2, reg: fp21, asm: "FSUB"}, // arg0-arg1
- {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0-arg1
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
+ {name: "ADDCC", argLength: 2, reg: gp21, asm: "ADDCC", commutative: true, typ: "(Int,Flags)"}, // arg0 + arg1
+ {name: "ADDconst", argLength: 1, reg: gp11, asm: "ADD", aux: "Int64"}, // arg0 + auxInt
+ {name: "ADDCCconst", argLength: 1, reg: gp11cxer, asm: "ADDCCC", aux: "Int64", typ: "(Int,Flags)"}, // arg0 + auxInt sets CC, clobbers XER
+ {name: "FADD", argLength: 2, reg: fp21, asm: "FADD", commutative: true}, // arg0+arg1
+ {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0+arg1
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0-arg1
+ {name: "SUBCC", argLength: 2, reg: gp21, asm: "SUBCC", typ: "(Int,Flags)"}, // arg0-arg1 sets CC
+ {name: "SUBFCconst", argLength: 1, reg: gp11cxer, asm: "SUBC", aux: "Int64"}, // auxInt - arg0 (carry is ignored)
+ {name: "FSUB", argLength: 2, reg: fp21, asm: "FSUB"}, // arg0-arg1
+ {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0-arg1
{name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", typ: "Int64", commutative: true}, // arg0*arg1 (signed 64-bit)
{name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", typ: "Int32", commutative: true}, // arg0*arg1 (signed 32-bit)
@@ -215,7 +218,6 @@ func init() {
{name: "ROTLW", argLength: 2, reg: gp21, asm: "ROTLW"}, // uint32(arg0) rotate left by arg1 mod 32
// The following are ops to implement the extended mnemonics for shifts as described in section C.8 of the ISA.
// The constant shift values are packed into the aux int32.
- {name: "RLDICL", argLength: 1, reg: gp11, asm: "RLDICL", aux: "Int32"}, // arg0 extract bits identified by shift params"
{name: "CLRLSLWI", argLength: 1, reg: gp11, asm: "CLRLSLWI", aux: "Int32"}, //
{name: "CLRLSLDI", argLength: 1, reg: gp11, asm: "CLRLSLDI", aux: "Int32"}, //
@@ -243,9 +245,12 @@ func init() {
{name: "RLWINM", argLength: 1, reg: gp11, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by immediate "rlwinm". encodePPC64RotateMask describes aux
{name: "RLWNM", argLength: 2, reg: gp21, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by "rlwnm". encodePPC64RotateMask describes aux
{name: "RLWMI", argLength: 2, reg: gp21a0, asm: "RLWMI", aux: "Int64", resultInArg0: true}, // "rlwimi" similar aux encoding as above
+ {name: "RLDICL", argLength: 1, reg: gp11, asm: "RLDICL", aux: "Int64"}, // Auxint is encoded similarly to RLWINM, but only MB and SH are valid. ME is always 63.
+ {name: "RLDICR", argLength: 1, reg: gp11, asm: "RLDICR", aux: "Int64"}, // Likewise, but only ME and SH are valid. MB is always 0.
- {name: "CNTLZD", argLength: 1, reg: gp11, asm: "CNTLZD", clobberFlags: true}, // count leading zeros
- {name: "CNTLZW", argLength: 1, reg: gp11, asm: "CNTLZW", clobberFlags: true}, // count leading zeros (32 bit)
+ {name: "CNTLZD", argLength: 1, reg: gp11, asm: "CNTLZD"}, // count leading zeros
+ {name: "CNTLZDCC", argLength: 1, reg: gp11, asm: "CNTLZDCC", typ: "(Int, Flags)"}, // count leading zeros, sets CC
+ {name: "CNTLZW", argLength: 1, reg: gp11, asm: "CNTLZW"}, // count leading zeros (32 bit)
{name: "CNTTZD", argLength: 1, reg: gp11, asm: "CNTTZD"}, // count trailing zeros
{name: "CNTTZW", argLength: 1, reg: gp11, asm: "CNTTZW"}, // count trailing zeros (32 bit)
@@ -284,34 +289,37 @@ func init() {
{name: "MFVSRD", argLength: 1, reg: fpgp, asm: "MFVSRD", typ: "Int64"}, // move 64 bits of F register into G register
{name: "MTVSRD", argLength: 1, reg: gpfp, asm: "MTVSRD", typ: "Float64"}, // move 64 bits of G register into F register
- {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0&arg1
- {name: "ANDN", argLength: 2, reg: gp21, asm: "ANDN"}, // arg0&^arg1
- {name: "ANDCC", argLength: 2, reg: gp21, asm: "ANDCC", commutative: true, clobberFlags: true, typ: "(Int64,Flags)"}, // arg0&arg1 sets CC
- {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0|arg1
- {name: "ORN", argLength: 2, reg: gp21, asm: "ORN"}, // arg0|^arg1
- {name: "ORCC", argLength: 2, reg: gp21, asm: "ORCC", commutative: true, clobberFlags: true, typ: "(Int,Flags)"}, // arg0|arg1 sets CC
- {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0|arg1)
- {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", typ: "Int64", commutative: true}, // arg0^arg1
- {name: "XORCC", argLength: 2, reg: gp21, asm: "XORCC", commutative: true, clobberFlags: true, typ: "(Int,Flags)"}, // arg0^arg1 sets CC
- {name: "EQV", argLength: 2, reg: gp21, asm: "EQV", typ: "Int64", commutative: true}, // arg0^^arg1
- {name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0 (integer)
- {name: "BRD", argLength: 1, reg: gp11, asm: "BRD"}, // reversebytes64(arg0)
- {name: "BRW", argLength: 1, reg: gp11, asm: "BRW"}, // reversebytes32(arg0)
- {name: "BRH", argLength: 1, reg: gp11, asm: "BRH"}, // reversebytes16(arg0)
- {name: "FNEG", argLength: 1, reg: fp11, asm: "FNEG"}, // -arg0 (floating point)
- {name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"}, // sqrt(arg0) (floating point)
- {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"}, // sqrt(arg0) (floating point, single precision)
- {name: "FFLOOR", argLength: 1, reg: fp11, asm: "FRIM"}, // floor(arg0), float64
- {name: "FCEIL", argLength: 1, reg: fp11, asm: "FRIP"}, // ceil(arg0), float64
- {name: "FTRUNC", argLength: 1, reg: fp11, asm: "FRIZ"}, // trunc(arg0), float64
- {name: "FROUND", argLength: 1, reg: fp11, asm: "FRIN"}, // round(arg0), float64
- {name: "FABS", argLength: 1, reg: fp11, asm: "FABS"}, // abs(arg0), float64
- {name: "FNABS", argLength: 1, reg: fp11, asm: "FNABS"}, // -abs(arg0), float64
- {name: "FCPSGN", argLength: 2, reg: fp21, asm: "FCPSGN"}, // copysign arg0 -> arg1, float64
-
- {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0|aux
- {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux
- {name: "ANDCCconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", clobberFlags: true, typ: "(Int,Flags)"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always.
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0&arg1
+ {name: "ANDN", argLength: 2, reg: gp21, asm: "ANDN"}, // arg0&^arg1
+ {name: "ANDNCC", argLength: 2, reg: gp21, asm: "ANDNCC", typ: "(Int64,Flags)"}, // arg0&^arg1 sets CC
+ {name: "ANDCC", argLength: 2, reg: gp21, asm: "ANDCC", commutative: true, typ: "(Int64,Flags)"}, // arg0&arg1 sets CC
+ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0|arg1
+ {name: "ORN", argLength: 2, reg: gp21, asm: "ORN"}, // arg0|^arg1
+ {name: "ORCC", argLength: 2, reg: gp21, asm: "ORCC", commutative: true, typ: "(Int,Flags)"}, // arg0|arg1 sets CC
+ {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0|arg1)
+ {name: "NORCC", argLength: 2, reg: gp21, asm: "NORCC", commutative: true, typ: "(Int,Flags)"}, // ^(arg0|arg1) sets CC
+ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", typ: "Int64", commutative: true}, // arg0^arg1
+ {name: "XORCC", argLength: 2, reg: gp21, asm: "XORCC", commutative: true, typ: "(Int,Flags)"}, // arg0^arg1 sets CC
+ {name: "EQV", argLength: 2, reg: gp21, asm: "EQV", typ: "Int64", commutative: true}, // arg0^^arg1
+ {name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0 (integer)
+ {name: "NEGCC", argLength: 1, reg: gp11, asm: "NEGCC", typ: "(Int,Flags)"}, // -arg0 (integer) sets CC
+ {name: "BRD", argLength: 1, reg: gp11, asm: "BRD"}, // reversebytes64(arg0)
+ {name: "BRW", argLength: 1, reg: gp11, asm: "BRW"}, // reversebytes32(arg0)
+ {name: "BRH", argLength: 1, reg: gp11, asm: "BRH"}, // reversebytes16(arg0)
+ {name: "FNEG", argLength: 1, reg: fp11, asm: "FNEG"}, // -arg0 (floating point)
+ {name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"}, // sqrt(arg0) (floating point)
+ {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"}, // sqrt(arg0) (floating point, single precision)
+ {name: "FFLOOR", argLength: 1, reg: fp11, asm: "FRIM"}, // floor(arg0), float64
+ {name: "FCEIL", argLength: 1, reg: fp11, asm: "FRIP"}, // ceil(arg0), float64
+ {name: "FTRUNC", argLength: 1, reg: fp11, asm: "FRIZ"}, // trunc(arg0), float64
+ {name: "FROUND", argLength: 1, reg: fp11, asm: "FRIN"}, // round(arg0), float64
+ {name: "FABS", argLength: 1, reg: fp11, asm: "FABS"}, // abs(arg0), float64
+ {name: "FNABS", argLength: 1, reg: fp11, asm: "FNABS"}, // -abs(arg0), float64
+ {name: "FCPSGN", argLength: 2, reg: fp21, asm: "FCPSGN"}, // copysign arg0 -> arg1, float64
+
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0|aux
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux
+ {name: "ANDCCconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", typ: "(Int,Flags)"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always.
{name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB", typ: "Int64"}, // sign extend int8 to int64
{name: "MOVBZreg", argLength: 1, reg: gp11, asm: "MOVBZ", typ: "Int64"}, // zero extend uint8 to uint64
@@ -469,7 +477,7 @@ func init() {
// MOVD $16,R31
// loop:
// STXVD2X VS32,(R0)(R3)
- // STXVD2X VS32,(R31),R3)
+ // STXVD2X VS32,(R31)(R3)
// ADD R3,32
// BC loop
diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules b/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules
index 00d898f783..2eecf94300 100644
--- a/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules
@@ -17,3 +17,39 @@
(SETBCR [0] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [4] (MOVDconst [1]) cmp)
(SETBC [1] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [1] (MOVDconst [1]) cmp)
(SETBCR [1] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [5] (MOVDconst [1]) cmp)
+
+// Avoid using ANDCCconst if the value for CR0 is not needed, since ANDCCconst
+// always sets it.
+(Select0 z:(ANDCCconst [m] x)) && z.Uses == 1 && isPPC64ValidShiftMask(m) => (RLDICL [encodePPC64RotateMask(0,m,64)] x)
+// The upper bits of values smaller than a register are undefined. Take advantage of that.
+(AND <t> x:(MOVDconst [m]) n) && t.Size() <= 2 => (Select0 (ANDCCconst [int64(int16(m))] n))
+
+// Convert simple bit masks to an equivalent rldic[lr] if possible.
+(AND x:(MOVDconst [m]) n) && isPPC64ValidShiftMask(m) => (RLDICL [encodePPC64RotateMask(0,m,64)] n)
+(AND x:(MOVDconst [m]) n) && m != 0 && isPPC64ValidShiftMask(^m) => (RLDICR [encodePPC64RotateMask(0,m,64)] n)
+
+// If the RLDICL does not rotate its value, a shifted value can be merged.
+(RLDICL [em] x:(SRDconst [s] a)) && (em&0xFF0000) == 0 => (RLDICL [mergePPC64RLDICLandSRDconst(em, s)] a)
+
+// Convert rotated 32 bit masks on 32 bit values into rlwinm. In general, this leaves the upper 32 bits in an undefined state.
+(AND <t> x:(MOVDconst [m]) n) && t.Size() == 4 && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(0,m,32)] n)
+
+// When PCRel is supported, paddi can add a 34b signed constant in one instruction.
+(ADD (MOVDconst [m]) x) && supportsPPC64PCRel() && (m<<30)>>30 == m => (ADDconst [m] x)
+
+
+// Where possible and practical, generate CC opcodes. Due to the structure of the rules, there are limits to how
+// a Value can be rewritten which make it impossible to correctly rewrite sibling Value users. To work around this
+// case, candidates for CC opcodes are converted in two steps:
+// 1. Convert all (x (Op ...) ...) into (x (Select0 (OpCC ...)) ...). See convertPPC64OpToOpCC for more
+// detail on how and why this is done there.
+// 2. Rewrite (CMPconst [0] (Select0 (OpCC ...))) into (Select1 (OpCC...))
+// Note: to minimize potentially expensive regeneration of CC opcodes during the flagalloc pass, only rewrite if
+// both ops are in the same block.
+(CMPconst [0] z:((ADD|AND|ANDN|OR|SUB|NOR|XOR) x y)) && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z))
+(CMPconst [0] z:((NEG|CNTLZD) x)) && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z))
+// Note: ADDCCconst only assembles to 1 instruction for int16 constants.
+(CMPconst [0] z:(ADDconst [c] x)) && int64(int16(c)) == c && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z))
+// And finally, fixup the flag user.
+(CMPconst <t> [0] (Select0 z:((ADD|AND|ANDN|OR|SUB|NOR|XOR)CC x y))) => (Select1 <t> z)
+(CMPconst <t> [0] (Select0 z:((ADDCCconst|NEGCC|CNTLZDCC) y))) => (Select1 <t> z)
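
A hedged source-level illustration of what the two-step CC conversion buys (the function and variable names are illustrative, not from the patch): when the result of an integer op only feeds a compare against zero, the compare can reuse the condition register set by the CC form of the op instead of a separate CMP.

package demo

// clearAndTest computes a &^ b and tests it against zero; with the rules
// above the zero-compare folds into the CC-setting ANDNCC form.
func clearAndTest(a, b uint64) bool {
	return a&^b == 0
}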
diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
index 9a6fcebdc5..fc206c42d3 100644
--- a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
@@ -3,21 +3,11 @@
// license that can be found in the LICENSE file.
// Lowering arithmetic
-(Add64 ...) => (ADD ...)
-(AddPtr ...) => (ADD ...)
-(Add32 ...) => (ADD ...)
-(Add16 ...) => (ADD ...)
-(Add8 ...) => (ADD ...)
-(Add32F ...) => (FADDS ...)
-(Add64F ...) => (FADDD ...)
-
-(Sub64 ...) => (SUB ...)
-(SubPtr ...) => (SUB ...)
-(Sub32 ...) => (SUB ...)
-(Sub16 ...) => (SUB ...)
-(Sub8 ...) => (SUB ...)
-(Sub32F ...) => (FSUBS ...)
-(Sub64F ...) => (FSUBD ...)
+(Add(Ptr|64|32|16|8) ...) => (ADD ...)
+(Add(64|32)F ...) => (FADD(D|S) ...)
+
+(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
+(Sub(64|32)F ...) => (FSUB(D|S) ...)
(Mul64 ...) => (MUL ...)
(Mul64uhilo ...) => (LoweredMuluhilo ...)
@@ -25,11 +15,9 @@
(Mul32 ...) => (MULW ...)
(Mul16 x y) => (MULW (SignExt16to32 x) (SignExt16to32 y))
(Mul8 x y) => (MULW (SignExt8to32 x) (SignExt8to32 y))
-(Mul32F ...) => (FMULS ...)
-(Mul64F ...) => (FMULD ...)
+(Mul(64|32)F ...) => (FMUL(D|S) ...)
-(Div32F ...) => (FDIVS ...)
-(Div64F ...) => (FDIVD ...)
+(Div(64|32)F ...) => (FDIV(D|S) ...)
(Div64 x y [false]) => (DIV x y)
(Div64u ...) => (DIVU ...)
@@ -65,32 +53,15 @@
(Mod8 x y) => (REMW (SignExt8to32 x) (SignExt8to32 y))
(Mod8u x y) => (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y))
-(And64 ...) => (AND ...)
-(And32 ...) => (AND ...)
-(And16 ...) => (AND ...)
-(And8 ...) => (AND ...)
-
-(Or64 ...) => (OR ...)
-(Or32 ...) => (OR ...)
-(Or16 ...) => (OR ...)
-(Or8 ...) => (OR ...)
-
-(Xor64 ...) => (XOR ...)
-(Xor32 ...) => (XOR ...)
-(Xor16 ...) => (XOR ...)
-(Xor8 ...) => (XOR ...)
-
-(Neg64 ...) => (NEG ...)
-(Neg32 ...) => (NEG ...)
-(Neg16 ...) => (NEG ...)
-(Neg8 ...) => (NEG ...)
-(Neg32F ...) => (FNEGS ...)
-(Neg64F ...) => (FNEGD ...)
-
-(Com64 ...) => (NOT ...)
-(Com32 ...) => (NOT ...)
-(Com16 ...) => (NOT ...)
-(Com8 ...) => (NOT ...)
+(And(64|32|16|8) ...) => (AND ...)
+(Or(64|32|16|8) ...) => (OR ...)
+(Xor(64|32|16|8) ...) => (XOR ...)
+
+(Neg(64|32|16|8) ...) => (NEG ...)
+(Neg(64|32)F ...) => (FNEG(D|S) ...)
+
+(Com(64|32|16|8) ...) => (NOT ...)
+
(Sqrt ...) => (FSQRTD ...)
(Sqrt32 ...) => (FSQRTS ...)
@@ -132,8 +103,7 @@
(CvtBoolToUint8 ...) => (Copy ...)
-(Round32F ...) => (Copy ...)
-(Round64F ...) => (Copy ...)
+(Round(32|64)F ...) => (LoweredRound(32|64)F ...)
(Slicemask <t> x) => (SRAI [63] (NEG <t> x))
@@ -180,61 +150,65 @@
(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
-// SRL only considers the bottom 6 bits of y. If y > 64, the result should
-// always be 0. See Lsh above for a detailed description.
-(Rsh8Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Rsh8Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh8Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh8Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
-(Rsh16Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Rsh16Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh16Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
-(Rsh32Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Rsh32Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh32Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] y)))
-(Rsh64Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Rsh64Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh64Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
-
-(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt8to64 x) y)
-(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt16to64 x) y)
-(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt32to64 x) y)
-(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL x y)
-
-// SRA only considers the bottom 6 bits of y. If y > 64, the result should
-// be either 0 or -1 based on the sign bit.
+// SRL only considers the bottom 6 bits of y, similarly SRLW only considers the
+// bottom 5 bits of y. Ensure that the result is always zero if the shift exceeds
+// the maximum value. See Lsh above for a detailed description.
+(Rsh8Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh8Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh8Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh8Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
+(Rsh16Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh16Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh16Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
+(Rsh32Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt8to64 y))))
+(Rsh32Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt16to64 y))))
+(Rsh32Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt32to64 y))))
+(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] y)))
+(Rsh64Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh64Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh64Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+
+(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt8to64 x) y)
+(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt16to64 x) y)
+(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLW x y)
+(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL x y)
+
+// SRA only considers the bottom 6 bits of y, similarly SRAW only considers the
+// bottom 5 bits. If y is greater than the maximum value (either 63 or 31
+// depending on the instruction), the result of the shift should be either 0
+// or -1 based on the sign bit of x.
//
-// We implement this by performing the max shift (-1) if y >= 64.
+// We implement this by performing the max shift (-1) if y > the maximum value.
//
// We OR (uint64(y < 64) - 1) into y before passing it to SRA. This leaves
-// us with -1 (0xffff...) if y >= 64.
+// us with -1 (0xffff...) if y >= 64. Similarly, we OR (uint64(y < 32) - 1) into y
+// before passing it to SRAW.
//
// We don't need to sign-extend the OR result, as it will be at minimum 8 bits,
-// more than the 6 bits SRA cares about.
-(Rsh8x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
-(Rsh8x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh8x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh8x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
-(Rsh16x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
-(Rsh16x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh16x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh16x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
-(Rsh32x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
-(Rsh32x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh32x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh32x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
-(Rsh64x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
-(Rsh64x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh64x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh64x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
-
-(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt8to64 x) y)
-(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt16to64 x) y)
-(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt32to64 x) y)
-(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA x y)
+// more than the 5 or 6 bits SRAW and SRA care about.
+(Rsh8x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh8x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh8x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh8x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh16x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh16x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh16x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh16x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh32x8 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt8to64 y)))))
+(Rsh32x16 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt16to64 y)))))
+(Rsh32x32 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt32to64 y)))))
+(Rsh32x64 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] y))))
+(Rsh64x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh64x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh64x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh64x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+
+(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt8to64 x) y)
+(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt16to64 x) y)
+(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW x y)
+(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA x y)
// Rotates.
(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
@@ -250,36 +224,27 @@
(Less32U x y) => (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Less16U x y) => (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Less8U x y) => (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y))
-(Less64F ...) => (FLTD ...)
-(Less32F ...) => (FLTS ...)
+(Less(64|32)F ...) => (FLT(D|S) ...)
// Convert x <= y to !(y > x).
-(Leq64 x y) => (Not (Less64 y x))
-(Leq32 x y) => (Not (Less32 y x))
-(Leq16 x y) => (Not (Less16 y x))
-(Leq8 x y) => (Not (Less8 y x))
-(Leq64U x y) => (Not (Less64U y x))
-(Leq32U x y) => (Not (Less32U y x))
-(Leq16U x y) => (Not (Less16U y x))
-(Leq8U x y) => (Not (Less8U y x))
-(Leq64F ...) => (FLED ...)
-(Leq32F ...) => (FLES ...)
+(Leq(64|32|16|8) x y) => (Not (Less(64|32|16|8) y x))
+(Leq(64|32|16|8)U x y) => (Not (Less(64|32|16|8)U y x))
+(Leq(64|32)F ...) => (FLE(D|S) ...)
(EqPtr x y) => (SEQZ (SUB <typ.Uintptr> x y))
(Eq64 x y) => (SEQZ (SUB <x.Type> x y))
-(Eq32 x y) => (SEQZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Eq32 x y) && x.Type.IsSigned() => (SEQZ (SUB <x.Type> (SignExt32to64 x) (SignExt32to64 y)))
+(Eq32 x y) && !x.Type.IsSigned() => (SEQZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Eq16 x y) => (SEQZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Eq8 x y) => (SEQZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
-(Eq64F ...) => (FEQD ...)
-(Eq32F ...) => (FEQS ...)
+(Eq(64|32)F ...) => (FEQ(D|S) ...)
-(NeqPtr x y) => (SNEZ (SUB <typ.Uintptr> x y))
-(Neq64 x y) => (SNEZ (SUB <x.Type> x y))
-(Neq32 x y) => (SNEZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
-(Neq16 x y) => (SNEZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
-(Neq8 x y) => (SNEZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
-(Neq64F ...) => (FNED ...)
-(Neq32F ...) => (FNES ...)
+(NeqPtr x y) => (Not (EqPtr x y))
+(Neq64 x y) => (Not (Eq64 x y))
+(Neq32 x y) => (Not (Eq32 x y))
+(Neq16 x y) => (Not (Eq16 x y))
+(Neq8 x y) => (Not (Eq8 x y))
+(Neq(64|32)F ...) => (FNE(D|S) ...)
// Loads
(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
@@ -435,8 +400,6 @@
(ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)]))
mem)
-(Convert ...) => (MOVconvert ...)
-
// Checks
(IsNonNil ...) => (SNEZ ...)
(IsInBounds ...) => (Less64U ...)
@@ -451,6 +414,9 @@
// Write barrier.
(WB ...) => (LoweredWB ...)
+// Publication barrier as intrinsic
+(PubBarrier ...) => (LoweredPubBarrier ...)
+
(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
@@ -537,10 +503,7 @@
(OffPtr [off] ptr) && is32Bit(off) => (ADDI [off] ptr)
(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)
-(Const8 [val]) => (MOVDconst [int64(val)])
-(Const16 [val]) => (MOVDconst [int64(val)])
-(Const32 [val]) => (MOVDconst [int64(val)])
-(Const64 [val]) => (MOVDconst [int64(val)])
+(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
(Const32F [val]) => (FMVSX (MOVDconst [int64(math.Float32bits(val))]))
(Const64F [val]) => (FMVDX (MOVDconst [int64(math.Float64bits(val))]))
(ConstNil) => (MOVDconst [0])
@@ -557,18 +520,9 @@
(TailCall ...) => (CALLtail ...)
// Atomic Intrinsics
-(AtomicLoad8 ...) => (LoweredAtomicLoad8 ...)
-(AtomicLoad32 ...) => (LoweredAtomicLoad32 ...)
-(AtomicLoad64 ...) => (LoweredAtomicLoad64 ...)
-(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...)
-
-(AtomicStore8 ...) => (LoweredAtomicStore8 ...)
-(AtomicStore32 ...) => (LoweredAtomicStore32 ...)
-(AtomicStore64 ...) => (LoweredAtomicStore64 ...)
-(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...)
-
-(AtomicAdd32 ...) => (LoweredAtomicAdd32 ...)
-(AtomicAdd64 ...) => (LoweredAtomicAdd64 ...)
+(AtomicLoad(Ptr|64|32|8) ...) => (LoweredAtomicLoad(64|64|32|8) ...)
+(AtomicStore(PtrNoWB|64|32|8) ...) => (LoweredAtomicStore(64|64|32|8) ...)
+(AtomicAdd(64|32) ...) => (LoweredAtomicAdd(64|32) ...)
// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, ^((uint8(val) ^ 0xff) << ((ptr & 3) * 8)))
(AtomicAnd8 ptr val mem) =>
@@ -581,8 +535,7 @@
(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
-(AtomicExchange32 ...) => (LoweredAtomicExchange32 ...)
-(AtomicExchange64 ...) => (LoweredAtomicExchange64 ...)
+(AtomicExchange(64|32) ...) => (LoweredAtomicExchange(64|32) ...)
// AtomicOr8(ptr,val) => LoweredAtomicOr32(ptr&^3, uint32(val)<<((ptr&3)*8))
(AtomicOr8 ptr val mem) =>
@@ -756,6 +709,20 @@
// But for now, this is enough to get rid of lots of them.
(MOVDnop (MOVDconst [c])) => (MOVDconst [c])
+// Avoid unnecessary zero and sign extension when right shifting.
+(SRAI <t> [x] (MOVWreg y)) && x >= 0 && x <= 31 => (SRAIW <t> [int64(x)] y)
+(SRLI <t> [x] (MOVWUreg y)) && x >= 0 && x <= 31 => (SRLIW <t> [int64(x)] y)
+
+// Replace right shifts that exceed size of signed type.
+(SRAI <t> [x] (MOVBreg y)) && x >= 8 => (SRAI [63] (SLLI <t> [56] y))
+(SRAI <t> [x] (MOVHreg y)) && x >= 16 => (SRAI [63] (SLLI <t> [48] y))
+(SRAI <t> [x] (MOVWreg y)) && x >= 32 => (SRAIW [31] y)
+
+// Eliminate right shifts that exceed size of unsigned type.
+(SRLI <t> [x] (MOVBUreg y)) && x >= 8 => (MOVDconst <t> [0])
+(SRLI <t> [x] (MOVHUreg y)) && x >= 16 => (MOVDconst <t> [0])
+(SRLI <t> [x] (MOVWUreg y)) && x >= 32 => (MOVDconst <t> [0])
+
// Fold constant into immediate instructions where possible.
(ADD (MOVDconst <t> [val]) x) && is32Bit(val) && !t.IsPtr() => (ADDI [val] x)
(AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x)
@@ -763,7 +730,9 @@
(XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x)
(SLL x (MOVDconst [val])) => (SLLI [int64(val&63)] x)
(SRL x (MOVDconst [val])) => (SRLI [int64(val&63)] x)
+(SRLW x (MOVDconst [val])) => (SRLIW [int64(val&31)] x)
(SRA x (MOVDconst [val])) => (SRAI [int64(val&63)] x)
+(SRAW x (MOVDconst [val])) => (SRAIW [int64(val&31)] x)
(SLT x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTI [val] x)
(SLTU x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTIU [val] x)
@@ -832,6 +801,10 @@
(Select0 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MULHU x y)
(Select1 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MUL x y)
+(FADD(S|D) a (FMUL(S|D) x y)) && a.Block.Func.useFMA(v) => (FMADD(S|D) x y a)
+(FSUB(S|D) a (FMUL(S|D) x y)) && a.Block.Func.useFMA(v) => (FNMSUB(S|D) x y a)
+(FSUB(S|D) (FMUL(S|D) x y) a) && a.Block.Func.useFMA(v) => (FMSUB(S|D) x y a)
+
// Merge negation into fused multiply-add and multiply-subtract.
//
// Key:
@@ -842,5 +815,7 @@
// D B
//
// Note: multiplication commutativity handled by rule generator.
+(F(MADD|NMADD|MSUB|NMSUB)S neg:(FNEGS x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)S x y z)
+(F(MADD|NMADD|MSUB|NMSUB)S x y neg:(FNEGS z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)S x y z)
(F(MADD|NMADD|MSUB|NMSUB)D neg:(FNEGD x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)D x y z)
(F(MADD|NMADD|MSUB|NMSUB)D x y neg:(FNEGD z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)D x y z)
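
A hedged example of the source patterns the new FMA rules fuse, assuming useFMA permits fusion: math.FMA computes x*y + z in one rounding, and a negated multiplicand corresponds to the FNM* forms the negation-merging rules introduce.

package demo

import "math"

// fused add of a product: a + x*y -> FMADDD
func fmadd(x, y, a float64) float64 { return math.FMA(x, y, a) }

// negated multiplicand: a - x*y -> FNMSUBD-style form
func fnmsub(x, y, a float64) float64 { return math.FMA(-x, y, a) }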
diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go
index 52e87cbe72..93f20f8a99 100644
--- a/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go
@@ -207,12 +207,16 @@ func init() {
{name: "MOVDnop", argLength: 1, reg: regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}, resultInArg0: true}, // nop, return arg0 in same register
// Shift ops
- {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << (aux1 & 63)
- {name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> (aux1 & 63), signed
- {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> (aux1 & 63), unsigned
- {name: "SLLI", argLength: 1, reg: gp11, asm: "SLLI", aux: "Int64"}, // arg0 << auxint, shift amount 0-63
- {name: "SRAI", argLength: 1, reg: gp11, asm: "SRAI", aux: "Int64"}, // arg0 >> auxint, signed, shift amount 0-63
- {name: "SRLI", argLength: 1, reg: gp11, asm: "SRLI", aux: "Int64"}, // arg0 >> auxint, unsigned, shift amount 0-63
+ {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << (aux1 & 63)
+ {name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> (aux1 & 63), signed
+ {name: "SRAW", argLength: 2, reg: gp21, asm: "SRAW"}, // arg0 >> (aux1 & 31), signed
+ {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> (aux1 & 63), unsigned
+ {name: "SRLW", argLength: 2, reg: gp21, asm: "SRLW"}, // arg0 >> (aux1 & 31), unsigned
+ {name: "SLLI", argLength: 1, reg: gp11, asm: "SLLI", aux: "Int64"}, // arg0 << auxint, shift amount 0-63
+ {name: "SRAI", argLength: 1, reg: gp11, asm: "SRAI", aux: "Int64"}, // arg0 >> auxint, signed, shift amount 0-63
+ {name: "SRAIW", argLength: 1, reg: gp11, asm: "SRAIW", aux: "Int64"}, // arg0 >> auxint, signed, shift amount 0-31
+ {name: "SRLI", argLength: 1, reg: gp11, asm: "SRLI", aux: "Int64"}, // arg0 >> auxint, unsigned, shift amount 0-63
+ {name: "SRLIW", argLength: 1, reg: gp11, asm: "SRLIW", aux: "Int64"}, // arg0 >> auxint, unsigned, shift amount 0-31
// Bitwise ops
{name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true}, // arg0 ^ arg1
@@ -231,11 +235,9 @@ func init() {
{name: "SLTU", argLength: 2, reg: gp21, asm: "SLTU"}, // arg0 < arg1, unsigned, result is 0 or 1
{name: "SLTIU", argLength: 1, reg: gp11, asm: "SLTIU", aux: "Int64"}, // arg0 < auxint, unsigned, result is 0 or 1
- // MOVconvert converts between pointers and integers.
- // We have a special op for this so as to not confuse GC
- // (particularly stack maps). It takes a memory arg so it
- // gets correctly ordered with respect to GC safepoints.
- {name: "MOVconvert", argLength: 2, reg: gp11, asm: "MOV"}, // arg0, but converted to int/ptr as appropriate; arg1=mem
+ // Round ops to block fused-multiply-add extraction.
+ {name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true},
+ {name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true},
// Calls
{name: "CALLstatic", argLength: -1, reg: call, aux: "CallOff", call: true}, // call static function aux.(*gc.Sym). last arg=mem, auxint=argsize, returns mem
@@ -395,6 +397,9 @@ func init() {
// Returns a pointer to a write barrier buffer in X24.
{name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ (gpMask | regNamed["g"])) | regNamed["X1"], outputs: []regMask{regNamed["X24"]}}, clobberFlags: true, aux: "Int64"},
+	// Do data barrier. arg0=memory
+ {name: "LoweredPubBarrier", argLength: 1, asm: "FENCE", hasSideEffects: true},
+
// There are three of these functions so that they can have three different register inputs.
// When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
// default registers to match so we don't need to copy registers around unnecessarily.
@@ -407,6 +412,10 @@ func init() {
{name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS", commutative: false, typ: "Float32"}, // arg0 - arg1
{name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true, typ: "Float32"}, // arg0 * arg1
{name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS", commutative: false, typ: "Float32"}, // arg0 / arg1
+ {name: "FMADDS", argLength: 3, reg: fp31, asm: "FMADDS", commutative: true, typ: "Float32"}, // (arg0 * arg1) + arg2
+ {name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS", commutative: true, typ: "Float32"}, // (arg0 * arg1) - arg2
+ {name: "FNMADDS", argLength: 3, reg: fp31, asm: "FNMADDS", commutative: true, typ: "Float32"}, // -(arg0 * arg1) + arg2
+ {name: "FNMSUBS", argLength: 3, reg: fp31, asm: "FNMSUBS", commutative: true, typ: "Float32"}, // -(arg0 * arg1) - arg2
{name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS", typ: "Float32"}, // sqrt(arg0)
{name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS", typ: "Float32"}, // -arg0
{name: "FMVSX", argLength: 1, reg: gpfp, asm: "FMVSX", typ: "Float32"}, // reinterpret arg0 as float
diff --git a/src/cmd/compile/internal/ssa/_gen/S390X.rules b/src/cmd/compile/internal/ssa/_gen/S390X.rules
index a9d62c79ce..2a6d7e737c 100644
--- a/src/cmd/compile/internal/ssa/_gen/S390X.rules
+++ b/src/cmd/compile/internal/ssa/_gen/S390X.rules
@@ -1300,21 +1300,25 @@
&& p.Op != OpSB
&& x.Uses == 1
&& is20Bit(int64(i)-4)
+ && setPos(v, x.Pos)
&& clobber(x)
=> (STM2 [i-4] {s} p w0 w1 mem)
(MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem))
&& x.Uses == 1
&& is20Bit(int64(i)-8)
+ && setPos(v, x.Pos)
&& clobber(x)
=> (STM3 [i-8] {s} p w0 w1 w2 mem)
(MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem))
&& x.Uses == 1
&& is20Bit(int64(i)-12)
+ && setPos(v, x.Pos)
&& clobber(x)
=> (STM4 [i-12] {s} p w0 w1 w2 w3 mem)
(STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem))
&& x.Uses == 1
&& is20Bit(int64(i)-8)
+ && setPos(v, x.Pos)
&& clobber(x)
=> (STM4 [i-8] {s} p w0 w1 w2 w3 mem)
// 64-bit
@@ -1322,21 +1326,25 @@
&& p.Op != OpSB
&& x.Uses == 1
&& is20Bit(int64(i)-8)
+ && setPos(v, x.Pos)
&& clobber(x)
=> (STMG2 [i-8] {s} p w0 w1 mem)
(MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem))
&& x.Uses == 1
&& is20Bit(int64(i)-16)
+ && setPos(v, x.Pos)
&& clobber(x)
=> (STMG3 [i-16] {s} p w0 w1 w2 mem)
(MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem))
&& x.Uses == 1
&& is20Bit(int64(i)-24)
+ && setPos(v, x.Pos)
&& clobber(x)
=> (STMG4 [i-24] {s} p w0 w1 w2 w3 mem)
(STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem))
&& x.Uses == 1
&& is20Bit(int64(i)-16)
+ && setPos(v, x.Pos)
&& clobber(x)
=> (STMG4 [i-16] {s} p w0 w1 w2 w3 mem)
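
The setPos additions above only adjust debug positions when adjacent stores are merged into store-multiple instructions. A hedged example of the kind of adjacent word stores that this merging targets (illustrative, not taken from the patch):

package demo

// storePair writes two consecutive 32-bit words; on s390x the second store
// is a candidate to merge with the first into a single STM2.
func storePair(p *[2]uint32, a, b uint32) {
	p[0] = a
	p[1] = b
}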
diff --git a/src/cmd/compile/internal/ssa/_gen/allocators.go b/src/cmd/compile/internal/ssa/_gen/allocators.go
index 5c72fe8be1..5869a61e82 100644
--- a/src/cmd/compile/internal/ssa/_gen/allocators.go
+++ b/src/cmd/compile/internal/ssa/_gen/allocators.go
@@ -1,3 +1,7 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package main
// TODO: should we share backing storage for similarly-shaped types?
diff --git a/src/cmd/compile/internal/ssa/_gen/cover.bash b/src/cmd/compile/internal/ssa/_gen/cover.bash
index 7311cfb5f3..733f9db2c2 100755
--- a/src/cmd/compile/internal/ssa/_gen/cover.bash
+++ b/src/cmd/compile/internal/ssa/_gen/cover.bash
@@ -12,7 +12,7 @@
# regular 'go run .' usage to run the generator.
cat >main_test.go <<-EOF
- // +build ignore
+ //go:build ignore
package main
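For reference, the generated test stub now carries the //go:build form (Go 1.17 and later) instead of the legacy "// +build" comment. A minimal sketch of a file using the modern constraint:

//go:build ignore

// The ignore tag keeps this helper out of ordinary builds; gofmt understands
// and maintains //go:build lines, unlike the old comment form.
package main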
diff --git a/src/cmd/compile/internal/ssa/_gen/dec.rules b/src/cmd/compile/internal/ssa/_gen/dec.rules
index b19489870d..7944947e06 100644
--- a/src/cmd/compile/internal/ssa/_gen/dec.rules
+++ b/src/cmd/compile/internal/ssa/_gen/dec.rules
@@ -7,6 +7,8 @@
// types. These rules work together with the decomposeBuiltIn
// pass which handles phis of these types.
+(Store {t} _ _ mem) && t.Size() == 0 => mem
+
// complex ops
(ComplexReal (ComplexMake real _ )) => real
(ComplexImag (ComplexMake _ imag )) => imag
@@ -91,3 +93,109 @@
(OffPtr <typ.BytePtrPtr> [config.PtrSize] dst)
data
(Store {typ.Uintptr} dst itab mem))
+
+// Helpers for expand calls
+// Some of these are copied from generic.rules
+
+(IMake _typ (StructMake1 val)) => (IMake _typ val)
+(StructSelect [0] (IData x)) => (IData x)
+
+(StructSelect (StructMake1 x)) => x
+(StructSelect [0] (StructMake2 x _)) => x
+(StructSelect [1] (StructMake2 _ x)) => x
+(StructSelect [0] (StructMake3 x _ _)) => x
+(StructSelect [1] (StructMake3 _ x _)) => x
+(StructSelect [2] (StructMake3 _ _ x)) => x
+(StructSelect [0] (StructMake4 x _ _ _)) => x
+(StructSelect [1] (StructMake4 _ x _ _)) => x
+(StructSelect [2] (StructMake4 _ _ x _)) => x
+(StructSelect [3] (StructMake4 _ _ _ x)) => x
+
+// Special case coming from immediate interface rewriting
+// Typical case: (StructSelect [0] (IData (IMake typ dat))) rewrites to (StructSelect [0] dat)
+// but because the interface is immediate, the type of "IData" is a one-element struct containing
+// a pointer that is not the pointer type of dat (can be a *uint8).
+// More annoying case: (ArraySelect[0] (StructSelect[0] isAPtr))
+// There, the result of the StructSelect is an Array (not a pointer) and
+// the pre-rewrite input to the ArraySelect is a struct, not a pointer.
+(StructSelect [0] x) && x.Type.IsPtrShaped() => x
+(ArraySelect [0] x) && x.Type.IsPtrShaped() => x
+
+// These, too. Bits is bits.
+(ArrayMake1 x) && x.Type.IsPtrShaped() => x
+(StructMake1 x) && x.Type.IsPtrShaped() => x
+
+(Store dst (StructMake1 <t> f0) mem) =>
+ (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem)
+(Store dst (StructMake2 <t> f0 f1) mem) =>
+ (Store {t.FieldType(1)}
+ (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
+ f1
+ (Store {t.FieldType(0)}
+ (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
+ f0 mem))
+(Store dst (StructMake3 <t> f0 f1 f2) mem) =>
+ (Store {t.FieldType(2)}
+ (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst)
+ f2
+ (Store {t.FieldType(1)}
+ (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
+ f1
+ (Store {t.FieldType(0)}
+ (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
+ f0 mem)))
+(Store dst (StructMake4 <t> f0 f1 f2 f3) mem) =>
+ (Store {t.FieldType(3)}
+ (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] dst)
+ f3
+ (Store {t.FieldType(2)}
+ (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst)
+ f2
+ (Store {t.FieldType(1)}
+ (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
+ f1
+ (Store {t.FieldType(0)}
+ (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
+ f0 mem))))
+
+(ArraySelect (ArrayMake1 x)) => x
+(ArraySelect [0] (IData x)) => (IData x)
+
+(Store dst (ArrayMake1 e) mem) => (Store {e.Type} dst e mem)
+
+// NOTE removed must-not-be-SSA condition.
+(ArraySelect [i] x:(Load <t> ptr mem)) =>
+ @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.Elem().Size()*i] ptr) mem)
+
+(StringPtr x:(Load <t> ptr mem)) && t.IsString() => @x.Block (Load <typ.BytePtr> ptr mem)
+(StringLen x:(Load <t> ptr mem)) && t.IsString() => @x.Block (Load <typ.Int>
+ (OffPtr <typ.IntPtr> [config.PtrSize] ptr)
+ mem)
+
+// NOTE removed must-not-be-SSA condition.
+(StructSelect [i] x:(Load <t> ptr mem)) =>
+ @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
+
+(ITab x:(Load <t> ptr mem)) && t.IsInterface() => @x.Block (Load <typ.Uintptr> ptr mem)
+
+(IData x:(Load <t> ptr mem)) && t.IsInterface() => @x.Block (Load <typ.BytePtr>
+ (OffPtr <typ.BytePtrPtr> [config.PtrSize] ptr)
+ mem)
+
+(SlicePtr x:(Load <t> ptr mem)) && t.IsSlice() => @x.Block (Load <t.Elem().PtrTo()> ptr mem)
+(SliceLen x:(Load <t> ptr mem)) && t.IsSlice() => @x.Block (Load <typ.Int>
+ (OffPtr <typ.IntPtr> [config.PtrSize] ptr)
+ mem)
+(SliceCap x:(Load <t> ptr mem)) && t.IsSlice() => @x.Block (Load <typ.Int>
+ (OffPtr <typ.IntPtr> [2*config.PtrSize] ptr)
+ mem)
+
+(ComplexReal x:(Load <t> ptr mem)) && t.IsComplex() && t.Size() == 8 => @x.Block (Load <typ.Float32> ptr mem)
+(ComplexImag x:(Load <t> ptr mem)) && t.IsComplex() && t.Size() == 8 => @x.Block (Load <typ.Float32>
+ (OffPtr <typ.Float32Ptr> [4] ptr)
+ mem)
+
+(ComplexReal x:(Load <t> ptr mem)) && t.IsComplex() && t.Size() == 16 => @x.Block (Load <typ.Float64> ptr mem)
+(ComplexImag x:(Load <t> ptr mem)) && t.IsComplex() && t.Size() == 16 => @x.Block (Load <typ.Float64>
+ (OffPtr <typ.Float64Ptr> [8] ptr)
+ mem)
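The store-decomposition rules above turn a store of a small struct into one store per field at that field's offset, threaded through memory with the innermost store happening first. An equivalent picture in ordinary Go, using a hypothetical two-field struct and unsafe offsets purely for illustration:

package main

import (
	"fmt"
	"unsafe"
)

type pair struct {
	a int64 // FieldOff(0) == 0
	b int64 // FieldOff(1) == 8 on 64-bit targets
}

// storeWhole is the source-level store the rules start from.
func storeWhole(dst *pair, v pair) { *dst = v }

// storeFields mirrors the decomposed form: field 0 is stored first, then
// field 1 through an offset pointer, matching the nested Store chain above.
func storeFields(dst *pair, v pair) {
	p := unsafe.Pointer(dst)
	*(*int64)(p) = v.a
	*(*int64)(unsafe.Add(p, unsafe.Offsetof(dst.b))) = v.b
}

func main() {
	var x, y pair
	storeWhole(&x, pair{1, 2})
	storeFields(&y, pair{1, 2})
	fmt.Println(x == y) // true
}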
diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules
index cdb346321e..aeda62591a 100644
--- a/src/cmd/compile/internal/ssa/_gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/_gen/generic.rules
@@ -704,7 +704,7 @@
(Store {t2} p2 _
mem:(Zero [n] p3 _)))
&& o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3)
- && fe.CanSSA(t1)
+ && CanSSA(t1)
&& disjoint(op, t1.Size(), p2, t2.Size())
=> @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p3) mem)
(Load <t1> op:(OffPtr [o1] p1)
@@ -712,7 +712,7 @@
(Store {t3} p3 _
mem:(Zero [n] p4 _))))
&& o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4)
- && fe.CanSSA(t1)
+ && CanSSA(t1)
&& disjoint(op, t1.Size(), p2, t2.Size())
&& disjoint(op, t1.Size(), p3, t3.Size())
=> @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p4) mem)
@@ -722,7 +722,7 @@
(Store {t4} p4 _
mem:(Zero [n] p5 _)))))
&& o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5)
- && fe.CanSSA(t1)
+ && CanSSA(t1)
&& disjoint(op, t1.Size(), p2, t2.Size())
&& disjoint(op, t1.Size(), p3, t3.Size())
&& disjoint(op, t1.Size(), p4, t4.Size())
@@ -734,7 +734,7 @@
(Store {t5} p5 _
mem:(Zero [n] p6 _))))))
&& o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6)
- && fe.CanSSA(t1)
+ && CanSSA(t1)
&& disjoint(op, t1.Size(), p2, t2.Size())
&& disjoint(op, t1.Size(), p3, t3.Size())
&& disjoint(op, t1.Size(), p4, t4.Size())
@@ -848,28 +848,28 @@
(StructSelect [2] (StructMake4 _ _ x _)) => x
(StructSelect [3] (StructMake4 _ _ _ x)) => x
-(Load <t> _ _) && t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) =>
+(Load <t> _ _) && t.IsStruct() && t.NumFields() == 0 && CanSSA(t) =>
(StructMake0)
-(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t) =>
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 1 && CanSSA(t) =>
(StructMake1
(Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem))
-(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t) =>
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 2 && CanSSA(t) =>
(StructMake2
(Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
(Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem))
-(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t) =>
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 3 && CanSSA(t) =>
(StructMake3
(Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
(Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem)
(Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem))
-(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t) =>
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 4 && CanSSA(t) =>
(StructMake4
(Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
(Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem)
(Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem)
(Load <t.FieldType(3)> (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] ptr) mem))
-(StructSelect [i] x:(Load <t> ptr mem)) && !fe.CanSSA(t) =>
+(StructSelect [i] x:(Load <t> ptr mem)) && !CanSSA(t) =>
@x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
(Store _ (StructMake0) mem) => mem
@@ -911,9 +911,9 @@
(StructSelect [0] (IData x)) => (IData x)
// un-SSAable values use mem->mem copies
-(Store {t} dst (Load src mem) mem) && !fe.CanSSA(t) =>
+(Store {t} dst (Load src mem) mem) && !CanSSA(t) =>
(Move {t} [t.Size()] dst src mem)
-(Store {t} dst (Load src mem) (VarDef {x} mem)) && !fe.CanSSA(t) =>
+(Store {t} dst (Load src mem) (VarDef {x} mem)) && !CanSSA(t) =>
(Move {t} [t.Size()] dst src (VarDef {x} mem))
// array ops
@@ -922,7 +922,7 @@
(Load <t> _ _) && t.IsArray() && t.NumElem() == 0 =>
(ArrayMake0)
-(Load <t> ptr mem) && t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t) =>
+(Load <t> ptr mem) && t.IsArray() && t.NumElem() == 1 && CanSSA(t) =>
(ArrayMake1 (Load <t.Elem()> ptr mem))
(Store _ (ArrayMake0) mem) => mem
@@ -981,7 +981,7 @@
(ConstNil <typ.Uintptr>)
(ConstNil <typ.BytePtr>))
-(NilCheck (GetG mem) mem) => mem
+(NilCheck ptr:(GetG mem) mem) => ptr
(If (Not cond) yes no) => (If cond no yes)
(If (ConstBool [c]) yes no) && c => (First yes no)
@@ -2055,19 +2055,19 @@
&& isSameCall(call.Aux, "runtime.newobject")
=> mem
-(NilCheck (SelectN [0] call:(StaticLECall _ _)) _)
+(NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _)
&& isSameCall(call.Aux, "runtime.newobject")
&& warnRule(fe.Debug_checknil(), v, "removed nil check")
- => (Invalid)
+ => ptr
-(NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
+(NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
&& isSameCall(call.Aux, "runtime.newobject")
&& warnRule(fe.Debug_checknil(), v, "removed nil check")
- => (Invalid)
+ => ptr
// Addresses of globals are always non-nil.
-(NilCheck (Addr {_} (SB)) _) => (Invalid)
-(NilCheck (Convert (Addr {_} (SB)) _) _) => (Invalid)
+(NilCheck ptr:(Addr {_} (SB)) _) => ptr
+(NilCheck ptr:(Convert (Addr {_} (SB)) _) _) => ptr
// for late-expanded calls, recognize memequal applied to a single constant byte
// Support is limited by 1, 2, 4, 8 byte sizes
@@ -2121,6 +2121,11 @@
&& isSameCall(callAux, "runtime.memequal")
=> (MakeResult (ConstBool <typ.Bool> [true]) mem)
+(Static(Call|LECall) {callAux} p q _ mem)
+ && isSameCall(callAux, "runtime.memequal")
+ && isSamePtr(p, q)
+ => (MakeResult (ConstBool <typ.Bool> [true]) mem)
+
// Turn known-size calls to memclrNoHeapPointers into a Zero.
// Note that we are using types.Types[types.TUINT8] instead of sptr.Type.Elem() - see issue 55122 and CL 431496 for more details.
(SelectN [0] call:(StaticCall {sym} sptr (Const(64|32) [c]) mem))
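The new memequal rule above recognizes a call whose two pointer arguments are provably the same and folds it to a constant true. Roughly the kind of source it targets, a sketch only, since whether runtime.memequal is emitted depends on the type and size:

package main

import "fmt"

type blob struct {
	data [64]byte
}

// sameAsSelf compares a value with itself. For a type this large the
// equality typically lowers to a runtime.memequal call; when both pointer
// arguments are the same SSA value, the rule folds the call to true.
func sameAsSelf(b *blob) bool {
	return *b == *b
}

func main() {
	fmt.Println(sameAsSelf(&blob{}))
}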
diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go
index 53ff57f6b1..69eb48ce44 100644
--- a/src/cmd/compile/internal/ssa/_gen/genericOps.go
+++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go
@@ -285,6 +285,12 @@ var genericOps = []opData{
{name: "Abs", argLength: 1}, // absolute value arg0
{name: "Copysign", argLength: 2}, // copy sign from arg0 to arg1
+ // Float min/max implementation, if hardware is available.
+ {name: "Min64F", argLength: 2}, // min(arg0,arg1)
+ {name: "Min32F", argLength: 2}, // min(arg0,arg1)
+ {name: "Max64F", argLength: 2}, // max(arg0,arg1)
+ {name: "Max32F", argLength: 2}, // max(arg0,arg1)
+
// 3-input opcode.
// Fused-multiply-add, float64 only.
// When a*b+c is exactly zero (before rounding), then the result is +0 or -0.
@@ -471,7 +477,7 @@ var genericOps = []opData{
{name: "IsNonNil", argLength: 1, typ: "Bool"}, // arg0 != nil
{name: "IsInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 < arg1. arg1 is guaranteed >= 0.
{name: "IsSliceInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 <= arg1. arg1 is guaranteed >= 0.
- {name: "NilCheck", argLength: 2, typ: "Void"}, // arg0=ptr, arg1=mem. Panics if arg0 is nil. Returns void.
+ {name: "NilCheck", argLength: 2, nilCheck: true}, // arg0=ptr, arg1=mem. Panics if arg0 is nil. Returns the ptr unmodified.
// Pseudo-ops
{name: "GetG", argLength: 1, zeroWidth: true}, // runtime.getg() (read g pointer). arg0=mem
@@ -643,6 +649,8 @@ var genericOps = []opData{
// Plain [] [next]
// If [boolean Value] [then, else]
// First [] [always, never]
+// Defer [mem] [nopanic, panic] (control opcode should be OpStaticCall to runtime.deferproc)
+// JumpTable [integer Value] [succ1, succ2, ...]
var genericBlocks = []blockData{
{name: "Plain"}, // a single successor
diff --git a/src/cmd/compile/internal/ssa/_gen/rulegen.go b/src/cmd/compile/internal/ssa/_gen/rulegen.go
index 15be9a1c50..072df298f3 100644
--- a/src/cmd/compile/internal/ssa/_gen/rulegen.go
+++ b/src/cmd/compile/internal/ssa/_gen/rulegen.go
@@ -1400,7 +1400,7 @@ func parseValue(val string, arch arch, loc string) (op opData, oparch, typ, auxi
if op.name == "" {
// Failed to find the op.
// Run through everything again with strict=false
- // to generate useful diagnosic messages before failing.
+ // to generate useful diagnostic messages before failing.
for _, x := range genericOps {
match(x, false, "generic")
}
diff --git a/src/cmd/compile/internal/ssa/addressingmodes.go b/src/cmd/compile/internal/ssa/addressingmodes.go
index 699f6e45ae..4e3209e396 100644
--- a/src/cmd/compile/internal/ssa/addressingmodes.go
+++ b/src/cmd/compile/internal/ssa/addressingmodes.go
@@ -195,6 +195,17 @@ var combine = map[[2]Op]Op{
[2]Op{OpAMD64MOVQstoreconst, OpAMD64LEAQ1}: OpAMD64MOVQstoreconstidx1,
[2]Op{OpAMD64MOVQstoreconst, OpAMD64LEAQ8}: OpAMD64MOVQstoreconstidx8,
+ [2]Op{OpAMD64SETEQstore, OpAMD64LEAQ1}: OpAMD64SETEQstoreidx1,
+ [2]Op{OpAMD64SETNEstore, OpAMD64LEAQ1}: OpAMD64SETNEstoreidx1,
+ [2]Op{OpAMD64SETLstore, OpAMD64LEAQ1}: OpAMD64SETLstoreidx1,
+ [2]Op{OpAMD64SETLEstore, OpAMD64LEAQ1}: OpAMD64SETLEstoreidx1,
+ [2]Op{OpAMD64SETGstore, OpAMD64LEAQ1}: OpAMD64SETGstoreidx1,
+ [2]Op{OpAMD64SETGEstore, OpAMD64LEAQ1}: OpAMD64SETGEstoreidx1,
+ [2]Op{OpAMD64SETBstore, OpAMD64LEAQ1}: OpAMD64SETBstoreidx1,
+ [2]Op{OpAMD64SETBEstore, OpAMD64LEAQ1}: OpAMD64SETBEstoreidx1,
+ [2]Op{OpAMD64SETAstore, OpAMD64LEAQ1}: OpAMD64SETAstoreidx1,
+ [2]Op{OpAMD64SETAEstore, OpAMD64LEAQ1}: OpAMD64SETAEstoreidx1,
+
// These instructions are re-split differently for performance, see needSplit above.
// TODO if 386 versions are created, also update needSplit and _gen/386splitload.rules
[2]Op{OpAMD64CMPBload, OpAMD64ADDQ}: OpAMD64CMPBloadidx1,
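The new SETcc-store entries let a flag-materializing store absorb an indexed address (LEAQ1), saving the separate address computation. A sketch of Go code where this shape commonly appears; whether the fused form is actually selected depends on the surrounding code:

package main

import "fmt"

// storeLess writes a comparison result straight into a slice element.
// On amd64 this can become a SETL-to-memory whose address is base+index,
// the pattern the new combine entries fuse.
func storeLess(dst []bool, i, a, b int) {
	dst[i] = a < b
}

func main() {
	dst := make([]bool, 4)
	storeLess(dst, 2, 1, 5)
	fmt.Println(dst) // [false false true false]
}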
diff --git a/src/cmd/compile/internal/ssa/bench_test.go b/src/cmd/compile/internal/ssa/bench_test.go
index 0971667507..1dc733bf55 100644
--- a/src/cmd/compile/internal/ssa/bench_test.go
+++ b/src/cmd/compile/internal/ssa/bench_test.go
@@ -30,3 +30,21 @@ func BenchmarkPhioptPass(b *testing.B) {
}
}
}
+
+type Point struct {
+ X, Y int
+}
+
+//go:noinline
+func sign(p1, p2, p3 Point) bool {
+ return (p1.X-p3.X)*(p2.Y-p3.Y)-(p2.X-p3.X)*(p1.Y-p3.Y) < 0
+}
+
+func BenchmarkInvertLessThanNoov(b *testing.B) {
+ p1 := Point{1, 2}
+ p2 := Point{2, 3}
+ p3 := Point{3, 4}
+ for i := 0; i < b.N; i++ {
+ sign(p1, p2, p3)
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go
index e7776b2316..26af10b59c 100644
--- a/src/cmd/compile/internal/ssa/block.go
+++ b/src/cmd/compile/internal/ssa/block.go
@@ -112,13 +112,6 @@ func (e Edge) String() string {
}
// BlockKind is the kind of SSA block.
-//
-// kind controls successors
-// ------------------------------------------
-// Exit [return mem] []
-// Plain [] [next]
-// If [boolean Value] [then, else]
-// Defer [mem] [nopanic, panic] (control opcode should be OpStaticCall to runtime.deferproc)
type BlockKind int16
// short form print
@@ -275,8 +268,7 @@ func (b *Block) truncateValues(i int) {
b.Values = b.Values[:i]
}
-// AddEdgeTo adds an edge from block b to block c. Used during building of the
-// SSA graph; do not use on an already-completed SSA graph.
+// AddEdgeTo adds an edge from block b to block c.
func (b *Block) AddEdgeTo(c *Block) {
i := len(b.Succs)
j := len(c.Preds)
@@ -305,6 +297,8 @@ func (b *Block) removePred(i int) {
// removeSucc removes the ith output edge from b.
// It is the responsibility of the caller to remove
// the corresponding predecessor edge.
+// Note that this potentially reorders successors of b, so it
+// must be used very carefully.
func (b *Block) removeSucc(i int) {
n := len(b.Succs) - 1
if i != n {
@@ -331,6 +325,19 @@ func (b *Block) swapSuccessors() {
b.Likely *= -1
}
+// swapSuccessorsByIdx swaps b.Succs[x] and b.Succs[y].
+func (b *Block) swapSuccessorsByIdx(x, y int) {
+ if x == y {
+ return
+ }
+ ex := b.Succs[x]
+ ey := b.Succs[y]
+ b.Succs[x] = ey
+ b.Succs[y] = ex
+ ex.b.Preds[ex.i].i = y
+ ey.b.Preds[ey.i].i = x
+}
+
// removePhiArg removes the ith arg from phi.
// It must be called after calling b.removePred(i) to
// adjust the corresponding phi value of the block:
@@ -347,7 +354,7 @@ func (b *Block) swapSuccessors() {
func (b *Block) removePhiArg(phi *Value, i int) {
n := len(b.Preds)
if numPhiArgs := len(phi.Args); numPhiArgs-1 != n {
- b.Fatalf("inconsistent state, num predecessors: %d, num phi args: %d", n, numPhiArgs)
+ b.Fatalf("inconsistent state for %v, num predecessors: %d, num phi args: %d", phi, n, numPhiArgs)
}
phi.Args[i].Uses--
phi.Args[i] = phi.Args[n]
@@ -385,10 +392,10 @@ func (b *Block) AuxIntString() string {
return fmt.Sprintf("%v", int8(b.AuxInt))
case "uint8":
return fmt.Sprintf("%v", uint8(b.AuxInt))
- default: // type specified but not implemented - print as int64
- return fmt.Sprintf("%v", b.AuxInt)
case "": // no aux int type
return ""
+ default: // type specified but not implemented - print as int64
+ return fmt.Sprintf("%v", b.AuxInt)
}
}
diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go
index f34b907419..bbfdaceaad 100644
--- a/src/cmd/compile/internal/ssa/check.go
+++ b/src/cmd/compile/internal/ssa/check.go
@@ -317,7 +317,28 @@ func checkFunc(f *Func) {
if !v.Aux.(*ir.Name).Type().HasPointers() {
f.Fatalf("vardef must have pointer type %s", v.Aux.(*ir.Name).Type().String())
}
-
+ case OpNilCheck:
+ // nil checks have pointer type before scheduling, and
+ // void type after scheduling.
+ if f.scheduled {
+ if v.Uses != 0 {
+ f.Fatalf("nilcheck must have 0 uses %s", v.Uses)
+ }
+ if !v.Type.IsVoid() {
+ f.Fatalf("nilcheck must have void type %s", v.Type.String())
+ }
+ } else {
+ if !v.Type.IsPtrShaped() && !v.Type.IsUintptr() {
+ f.Fatalf("nilcheck must have pointer type %s", v.Type.String())
+ }
+ }
+ if !v.Args[0].Type.IsPtrShaped() && !v.Args[0].Type.IsUintptr() {
+ f.Fatalf("nilcheck must have argument of pointer type %s", v.Args[0].Type.String())
+ }
+ if !v.Args[1].Type.IsMemory() {
+ f.Fatalf("bad arg 1 type to %s: want mem, have %s",
+ v.Op, v.Args[1].Type.String())
+ }
}
// TODO: check for cycles in values
diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go
index 8618cf34cd..d125891f88 100644
--- a/src/cmd/compile/internal/ssa/compile.go
+++ b/src/cmd/compile/internal/ssa/compile.go
@@ -472,11 +472,12 @@ var passes = [...]pass{
{name: "nilcheckelim", fn: nilcheckelim},
{name: "prove", fn: prove},
{name: "early fuse", fn: fuseEarly},
- {name: "decompose builtin", fn: decomposeBuiltIn, required: true},
{name: "expand calls", fn: expandCalls, required: true},
+ {name: "decompose builtin", fn: postExpandCallsDecompose, required: true},
{name: "softfloat", fn: softfloat, required: true},
{name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules
{name: "dead auto elim", fn: elimDeadAutosGeneric},
+ {name: "sccp", fn: sccp},
{name: "generic deadcode", fn: deadcode, required: true}, // remove dead stores, which otherwise mess up store chain
{name: "check bce", fn: checkbce},
{name: "branchelim", fn: branchelim},
@@ -508,7 +509,6 @@ var passes = [...]pass{
{name: "flagalloc", fn: flagalloc, required: true}, // allocate flags register
{name: "regalloc", fn: regalloc, required: true}, // allocate int & float registers + stack slots
{name: "loop rotate", fn: loopRotate},
- {name: "stackframe", fn: stackframe, required: true},
{name: "trim", fn: trim}, // remove empty blocks
}
@@ -547,6 +547,8 @@ var passOrder = [...]constraint{
{"generic cse", "tighten"},
// checkbce needs the values removed
{"generic deadcode", "check bce"},
+ // decompose builtin now also cleans up after expand calls
+ {"expand calls", "decompose builtin"},
// don't run optimization pass until we've decomposed builtin objects
{"decompose builtin", "late opt"},
// decompose builtin is the last pass that may introduce new float ops, so run softfloat after it
@@ -577,8 +579,6 @@ var passOrder = [...]constraint{
{"flagalloc", "regalloc"},
// loopRotate will confuse regalloc.
{"regalloc", "loop rotate"},
- // stackframe needs to know about spilled registers.
- {"regalloc", "stackframe"},
// trim needs regalloc to be done first.
{"regalloc", "trim"},
// memcombine works better if fuse happens first, to help merge stores.
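Each constraint pair means the first-named pass must run before the second; the new {"expand calls", "decompose builtin"} entry pins the reordered passes. A self-contained sketch, not the compiler's actual code, of how such a table can be validated against a pass list:

package main

import "fmt"

// checkOrder reports an error if any (before, after) constraint is violated
// by the positions of the named passes. Hypothetical helper for illustration.
func checkOrder(passes []string, constraints [][2]string) error {
	pos := make(map[string]int, len(passes))
	for i, name := range passes {
		pos[name] = i
	}
	for _, c := range constraints {
		if pos[c[0]] >= pos[c[1]] {
			return fmt.Errorf("pass %q must run before %q", c[0], c[1])
		}
	}
	return nil
}

func main() {
	passes := []string{"expand calls", "decompose builtin", "softfloat", "late opt"}
	constraints := [][2]string{
		{"expand calls", "decompose builtin"}, // the constraint added above
		{"decompose builtin", "late opt"},
	}
	fmt.Println(checkOrder(passes, constraints)) // <nil>
}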
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index 43f9f0affc..debcf1a0f4 100644
--- a/src/cmd/compile/internal/ssa/config.go
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -143,23 +143,13 @@ type Logger interface {
type Frontend interface {
Logger
- // CanSSA reports whether variables of type t are SSA-able.
- CanSSA(t *types.Type) bool
-
// StringData returns a symbol pointing to the given string's contents.
StringData(string) *obj.LSym
- // Auto returns a Node for an auto variable of the given type.
- // The SSA compiler uses this function to allocate space for spills.
- Auto(src.XPos, *types.Type) *ir.Name
-
// Given the name for a compound type, returns the name we should use
// for the parts of that compound type.
SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot
- // AllocFrame assigns frame offsets to all live auto variables.
- AllocFrame(f *Func)
-
// Syslook returns a symbol of the runtime function/variable with the
// given name.
Syslook(string) *obj.LSym
@@ -167,9 +157,6 @@ type Frontend interface {
// UseWriteBarrier reports whether write barrier is enabled
UseWriteBarrier() bool
- // MyImportPath provides the import name (roughly, the package) for the function being compiled.
- MyImportPath() string
-
// Func returns the ir.Func of the function being compiled.
Func() *ir.Func
}
@@ -296,6 +283,8 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat boo
c.registers = registersLOONG64[:]
c.gpRegMask = gpRegMaskLOONG64
c.fpRegMask = fpRegMaskLOONG64
+ c.intParamRegs = paramIntRegLOONG64
+ c.floatParamRegs = paramFloatRegLOONG64
c.FPReg = framepointerRegLOONG64
c.LinkReg = linkRegLOONG64
c.hasGReg = true
@@ -374,8 +363,8 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat boo
c.floatParamRegs = nil // no FP registers in softfloat mode
}
- c.ABI0 = abi.NewABIConfig(0, 0, ctxt.Arch.FixedFrameSize)
- c.ABI1 = abi.NewABIConfig(len(c.intParamRegs), len(c.floatParamRegs), ctxt.Arch.FixedFrameSize)
+ c.ABI0 = abi.NewABIConfig(0, 0, ctxt.Arch.FixedFrameSize, 0)
+ c.ABI1 = abi.NewABIConfig(len(c.intParamRegs), len(c.floatParamRegs), ctxt.Arch.FixedFrameSize, 1)
// On Plan 9, floating point operations are not allowed in note handler.
if buildcfg.GOOS == "plan9" {
diff --git a/src/cmd/compile/internal/ssa/cse_test.go b/src/cmd/compile/internal/ssa/cse_test.go
index 813ebe43a1..7d3e44fbe0 100644
--- a/src/cmd/compile/internal/ssa/cse_test.go
+++ b/src/cmd/compile/internal/ssa/cse_test.go
@@ -6,7 +6,6 @@ package ssa
import (
"cmd/compile/internal/types"
- "cmd/internal/src"
"testing"
)
@@ -22,7 +21,7 @@ func TestCSEAuxPartitionBug(t *testing.T) {
arg1Aux := &tstAux{"arg1-aux"}
arg2Aux := &tstAux{"arg2-aux"}
arg3Aux := &tstAux{"arg3-aux"}
- a := c.Frontend().Auto(src.NoXPos, c.config.Types.Int8.PtrTo())
+ a := c.Temp(c.config.Types.Int8.PtrTo())
// construct lots of values with args that have aux values and place
// them in an order that triggers the bug
@@ -93,7 +92,7 @@ func TestCSEAuxPartitionBug(t *testing.T) {
// TestZCSE tests the zero arg cse.
func TestZCSE(t *testing.T) {
c := testConfig(t)
- a := c.Frontend().Auto(src.NoXPos, c.config.Types.Int8.PtrTo())
+ a := c.Temp(c.config.Types.Int8.PtrTo())
fun := c.Fun("entry",
Bloc("entry",
diff --git a/src/cmd/compile/internal/ssa/deadcode.go b/src/cmd/compile/internal/ssa/deadcode.go
index 52cc7f2ca7..3bd1737bab 100644
--- a/src/cmd/compile/internal/ssa/deadcode.go
+++ b/src/cmd/compile/internal/ssa/deadcode.go
@@ -110,16 +110,15 @@ func liveValues(f *Func, reachable []bool) (live []bool, liveOrderStmts []*Value
}
}
for _, v := range b.Values {
- if (opcodeTable[v.Op].call || opcodeTable[v.Op].hasSideEffects) && !live[v.ID] {
+ if (opcodeTable[v.Op].call || opcodeTable[v.Op].hasSideEffects || opcodeTable[v.Op].nilCheck) && !live[v.ID] {
live[v.ID] = true
q = append(q, v)
if v.Pos.IsStmt() != src.PosNotStmt {
liveOrderStmts = append(liveOrderStmts, v)
}
}
- if v.Type.IsVoid() && !live[v.ID] {
- // The only Void ops are nil checks and inline marks. We must keep these.
- if v.Op == OpInlMark && !liveInlIdx[int(v.AuxInt)] {
+ if v.Op == OpInlMark {
+ if !liveInlIdx[int(v.AuxInt)] {
// We don't need marks for bodies that
// have been completely optimized away.
// TODO: save marks only for bodies which
@@ -313,6 +312,8 @@ func deadcode(f *Func) {
// removeEdge removes the i'th outgoing edge from b (and
// the corresponding incoming edge from b.Succs[i].b).
+// Note that this potentially reorders successors of b, so it
+// must be used very carefully.
func (b *Block) removeEdge(i int) {
e := b.Succs[i]
c := e.b
diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go
index 648b68af78..cb3427103c 100644
--- a/src/cmd/compile/internal/ssa/deadstore.go
+++ b/src/cmd/compile/internal/ssa/deadstore.go
@@ -73,9 +73,9 @@ func dse(f *Func) {
}
// Walk backwards looking for dead stores. Keep track of shadowed addresses.
- // A "shadowed address" is a pointer and a size describing a memory region that
- // is known to be written. We keep track of shadowed addresses in the shadowed
- // map, mapping the ID of the address to the size of the shadowed region.
+ // A "shadowed address" is a pointer, offset, and size describing a memory region that
+ // is known to be written. We keep track of shadowed addresses in the shadowed map,
+ // mapping the ID of the address to a shadowRange where future writes will happen.
// Since we're walking backwards, writes to a shadowed region are useless,
// as they will be immediately overwritten.
shadowed.clear()
@@ -88,13 +88,20 @@ func dse(f *Func) {
shadowed.clear()
}
if v.Op == OpStore || v.Op == OpZero {
+ ptr := v.Args[0]
+ var off int64
+ for ptr.Op == OpOffPtr { // Walk to base pointer
+ off += ptr.AuxInt
+ ptr = ptr.Args[0]
+ }
var sz int64
if v.Op == OpStore {
sz = v.Aux.(*types.Type).Size()
} else { // OpZero
sz = v.AuxInt
}
- if shadowedSize := int64(shadowed.get(v.Args[0].ID)); shadowedSize != -1 && shadowedSize >= sz {
+ sr := shadowRange(shadowed.get(ptr.ID))
+ if sr.contains(off, off+sz) {
// Modify the store/zero into a copy of the memory state,
// effectively eliding the store operation.
if v.Op == OpStore {
@@ -108,10 +115,8 @@ func dse(f *Func) {
v.AuxInt = 0
v.Op = OpCopy
} else {
- if sz > 0x7fffffff { // work around sparseMap's int32 value type
- sz = 0x7fffffff
- }
- shadowed.set(v.Args[0].ID, int32(sz))
+ // Extend shadowed region.
+ shadowed.set(ptr.ID, int32(sr.merge(off, off+sz)))
}
}
// walk to previous store
@@ -131,6 +136,49 @@ func dse(f *Func) {
}
}
+// A shadowRange encodes a set of byte offsets [lo():hi()] from
+// a given pointer that will be written to later in the block.
+// A zero shadowRange encodes an empty shadowed range (and so
+// does a -1 shadowRange, which is what sparsemap.get returns
+// on a failed lookup).
+type shadowRange int32
+
+func (sr shadowRange) lo() int64 {
+ return int64(sr & 0xffff)
+}
+func (sr shadowRange) hi() int64 {
+ return int64((sr >> 16) & 0xffff)
+}
+
+// contains reports whether [lo:hi] is completely within sr.
+func (sr shadowRange) contains(lo, hi int64) bool {
+ return lo >= sr.lo() && hi <= sr.hi()
+}
+
+// merge returns the union of sr and [lo:hi].
+// merge is allowed to return something smaller than the union.
+func (sr shadowRange) merge(lo, hi int64) shadowRange {
+ if lo < 0 || hi > 0xffff {
+ // Ignore offsets that are too large or small.
+ return sr
+ }
+ if sr.lo() == sr.hi() {
+ // Old range is empty - use new one.
+ return shadowRange(lo + hi<<16)
+ }
+ if hi < sr.lo() || lo > sr.hi() {
+ // The two regions don't overlap or abut, so we would
+ // have to keep track of multiple disjoint ranges.
+ // Because we can only keep one, keep the larger one.
+ if sr.hi()-sr.lo() >= hi-lo {
+ return sr
+ }
+ return shadowRange(lo + hi<<16)
+ }
+ // Regions overlap or abut - compute the union.
+ return shadowRange(min(lo, sr.lo()) + max(hi, sr.hi())<<16)
+}
+
// elimDeadAutosGeneric deletes autos that are never accessed. To achieve this
// we track the operations that the address of each auto reaches and if it only
// reaches stores then we delete all the stores. The other operations will then
@@ -201,7 +249,7 @@ func elimDeadAutosGeneric(f *Func) {
}
if v.Uses == 0 && v.Op != OpNilCheck && !v.Op.IsCall() && !v.Op.HasSideEffects() || len(args) == 0 {
- // Nil check has no use, but we need to keep it.
+ // We need to keep nil checks even if they have no use.
// Also keep calls and values that have side effects.
return
}
diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go
index 63934662a5..05a72787f3 100644
--- a/src/cmd/compile/internal/ssa/debug.go
+++ b/src/cmd/compile/internal/ssa/debug.go
@@ -42,7 +42,10 @@ type FuncDebug struct {
OptDcl []*ir.Name
// Filled in by the user. Translates Block and Value ID to PC.
- GetPC func(ID, ID) int64
+ //
+ // NOTE: block is only used if value is BlockStart.ID or BlockEnd.ID.
+ // Otherwise, it is ignored.
+ GetPC func(block, value ID) int64
}
type BlockDebug struct {
@@ -70,8 +73,8 @@ func (ls *liveSlot) String() string {
return fmt.Sprintf("0x%x.%d.%d", ls.Registers, ls.stackOffsetValue(), int32(ls.StackOffset)&1)
}
-func (loc liveSlot) absent() bool {
- return loc.Registers == 0 && !loc.onStack()
+func (ls liveSlot) absent() bool {
+ return ls.Registers == 0 && !ls.onStack()
}
// StackOffset encodes whether a value is on the stack and if so, where.
@@ -433,7 +436,7 @@ func (sc *slotCanonicalizer) canonSlot(idx SlKeyIdx) LocalSlot {
// synthesizes new (dead) values for the non-live params or the
// non-live pieces of partially live params.
func PopulateABIInRegArgOps(f *Func) {
- pri := f.ABISelf.ABIAnalyzeFuncType(f.Type.FuncType())
+ pri := f.ABISelf.ABIAnalyzeFuncType(f.Type)
// When manufacturing new slots that correspond to splits of
// composite parameters, we want to avoid creating a new sub-slot
@@ -519,7 +522,7 @@ func PopulateABIInRegArgOps(f *Func) {
if !isNamedRegParam(inp) {
continue
}
- n := inp.Name.(*ir.Name)
+ n := inp.Name
// Param is spread across one or more registers. Walk through
// each piece to see whether we've seen an arg reg op for it.
@@ -1368,7 +1371,7 @@ func (state *debugState) buildLocationLists(blockLocs []*BlockDebug) {
// Flush any leftover entries live at the end of the last block.
for varID := range state.lists {
- state.writePendingEntry(VarID(varID), state.f.Blocks[len(state.f.Blocks)-1].ID, FuncEnd.ID)
+ state.writePendingEntry(VarID(varID), -1, FuncEnd.ID)
list := state.lists[varID]
if state.loggingLevel > 0 {
if len(list) == 0 {
@@ -1734,7 +1737,7 @@ func isNamedRegParam(p abi.ABIParamAssignment) bool {
if p.Name == nil {
return false
}
- n := p.Name.(*ir.Name)
+ n := p.Name
if n.Sym() == nil || n.Sym().IsBlank() {
return false
}
@@ -1754,7 +1757,7 @@ func isNamedRegParam(p abi.ABIParamAssignment) bool {
// each input param reg will be spilled in the prolog).
func BuildFuncDebugNoOptimized(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset func(LocalSlot) int32, rval *FuncDebug) {
- pri := f.ABISelf.ABIAnalyzeFuncType(f.Type.FuncType())
+ pri := f.ABISelf.ABIAnalyzeFuncType(f.Type)
// Look to see if we have any named register-promoted parameters.
// If there are none, bail early and let the caller sort things
@@ -1790,7 +1793,7 @@ func BuildFuncDebugNoOptimized(ctxt *obj.Link, f *Func, loggingEnabled bool, sta
continue
}
- n := inp.Name.(*ir.Name)
+ n := inp.Name
sl := LocalSlot{N: n, Type: inp.Type, Off: 0}
rval.Vars = append(rval.Vars, n)
rval.Slots = append(rval.Slots, sl)
diff --git a/src/cmd/compile/internal/ssa/debug_lines_test.go b/src/cmd/compile/internal/ssa/debug_lines_test.go
index cf115107a1..af9e2a34cf 100644
--- a/src/cmd/compile/internal/ssa/debug_lines_test.go
+++ b/src/cmd/compile/internal/ssa/debug_lines_test.go
@@ -44,7 +44,7 @@ func testGoArch() string {
func hasRegisterABI() bool {
switch testGoArch() {
- case "amd64", "arm64", "ppc64", "ppc64le", "riscv":
+ case "amd64", "arm64", "loong64", "ppc64", "ppc64le", "riscv":
return true
}
return false
diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go
index 3afd73eb6a..b0788f1db4 100644
--- a/src/cmd/compile/internal/ssa/expand_calls.go
+++ b/src/cmd/compile/internal/ssa/expand_calls.go
@@ -11,1790 +11,1025 @@ import (
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
- "sort"
)
-type selKey struct {
- from *Value // what is selected from
- offsetOrIndex int64 // whatever is appropriate for the selector
- size int64
- typ *types.Type
+func postExpandCallsDecompose(f *Func) {
+ decomposeUser(f) // redo user decomposition to clean up after expand calls
+ decomposeBuiltIn(f) // handles both regular decomposition and cleanup.
}
-type Abi1RO uint8 // An offset within a parameter's slice of register indices, for abi1.
-
-func isBlockMultiValueExit(b *Block) bool {
- return (b.Kind == BlockRet || b.Kind == BlockRetJmp) && b.Controls[0] != nil && b.Controls[0].Op == OpMakeResult
-}
+func expandCalls(f *Func) {
+ // Convert each aggregate arg to a call into "dismantle aggregate, store/pass parts"
+ // Convert each aggregate result from a call into "assemble aggregate from parts"
+ // Convert each multivalue exit into "dismantle aggregate, store/return parts"
+ // Convert incoming aggregate arg into assembly of parts.
+ // Feed modified AST to decompose.
-func badVal(s string, v *Value) error {
- return fmt.Errorf("%s %s", s, v.LongString())
-}
+ sp, _ := f.spSb()
-// removeTrivialWrapperTypes unwraps layers of
-// struct { singleField SomeType } and [1]SomeType
-// until a non-wrapper type is reached. This is useful
-// for working with assignments to/from interface data
-// fields (either second operand to OpIMake or OpIData)
-// where the wrapping or type conversion can be elided
-// because of type conversions/assertions in source code
-// that do not appear in SSA.
-func removeTrivialWrapperTypes(t *types.Type) *types.Type {
- for {
- if t.IsStruct() && t.NumFields() == 1 {
- t = t.Field(0).Type
- continue
- }
- if t.IsArray() && t.NumElem() == 1 {
- t = t.Elem()
- continue
- }
- break
+ x := &expandState{
+ f: f,
+ debug: f.pass.debug,
+ regSize: f.Config.RegSize,
+ sp: sp,
+ typs: &f.Config.Types,
+ wideSelects: make(map[*Value]*Value),
+ commonArgs: make(map[selKey]*Value),
+ commonSelectors: make(map[selKey]*Value),
+ memForCall: make(map[ID]*Value),
}
- return t
-}
-// A registerCursor tracks which register is used for an Arg or regValues, or a piece of such.
-type registerCursor struct {
- // TODO(register args) convert this to a generalized target cursor.
- storeDest *Value // if there are no register targets, then this is the base of the store.
- regsLen int // the number of registers available for this Arg/result (which is all in registers or not at all)
- nextSlice Abi1RO // the next register/register-slice offset
- config *abi.ABIConfig
- regValues *[]*Value // values assigned to registers accumulate here
-}
-
-func (rc *registerCursor) String() string {
- dest := "<none>"
- if rc.storeDest != nil {
- dest = rc.storeDest.String()
- }
- regs := "<none>"
- if rc.regValues != nil {
- regs = ""
- for i, x := range *rc.regValues {
- if i > 0 {
- regs = regs + "; "
- }
- regs = regs + x.LongString()
- }
+ // For 32-bit, need to deal with decomposition of 64-bit integers, which depends on endianness.
+ if f.Config.BigEndian {
+ x.firstOp = OpInt64Hi
+ x.secondOp = OpInt64Lo
+ x.firstType = x.typs.Int32
+ x.secondType = x.typs.UInt32
+ } else {
+ x.firstOp = OpInt64Lo
+ x.secondOp = OpInt64Hi
+ x.firstType = x.typs.UInt32
+ x.secondType = x.typs.Int32
}
- // not printing the config because that has not been useful
- return fmt.Sprintf("RCSR{storeDest=%v, regsLen=%d, nextSlice=%d, regValues=[%s]}", dest, rc.regsLen, rc.nextSlice, regs)
-}
-// next effectively post-increments the register cursor; the receiver is advanced,
-// the old value is returned.
-func (c *registerCursor) next(t *types.Type) registerCursor {
- rc := *c
- if int(c.nextSlice) < c.regsLen {
- w := c.config.NumParamRegs(t)
- c.nextSlice += Abi1RO(w)
- }
- return rc
-}
+ // Defer select processing until after all calls and selects are seen.
+ var selects []*Value
+ var calls []*Value
+ var args []*Value
+ var exitBlocks []*Block
-// plus returns a register cursor offset from the original, without modifying the original.
-func (c *registerCursor) plus(regWidth Abi1RO) registerCursor {
- rc := *c
- rc.nextSlice += regWidth
- return rc
-}
+ var m0 *Value
-const (
- // Register offsets for fields of built-in aggregate types; the ones not listed are zero.
- RO_complex_imag = 1
- RO_string_len = 1
- RO_slice_len = 1
- RO_slice_cap = 2
- RO_iface_data = 1
-)
-
-func (x *expandState) regWidth(t *types.Type) Abi1RO {
- return Abi1RO(x.abi1.NumParamRegs(t))
-}
+ // Accumulate lists of calls, args, selects, and exit blocks to process,
+ // note "wide" selects consumed by stores,
+ // rewrite mem for each call,
+ // rewrite each OpSelectNAddr.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpInitMem:
+ m0 = v
-// regOffset returns the register offset of the i'th element of type t
-func (x *expandState) regOffset(t *types.Type, i int) Abi1RO {
- // TODO maybe cache this in a map if profiling recommends.
- if i == 0 {
- return 0
- }
- if t.IsArray() {
- return Abi1RO(i) * x.regWidth(t.Elem())
- }
- if t.IsStruct() {
- k := Abi1RO(0)
- for j := 0; j < i; j++ {
- k += x.regWidth(t.FieldType(j))
- }
- return k
- }
- panic("Haven't implemented this case yet, do I need to?")
-}
+ case OpClosureLECall, OpInterLECall, OpStaticLECall, OpTailLECall:
+ calls = append(calls, v)
-// at returns the register cursor for component i of t, where the first
-// component is numbered 0.
-func (c *registerCursor) at(t *types.Type, i int) registerCursor {
- rc := *c
- if i == 0 || c.regsLen == 0 {
- return rc
- }
- if t.IsArray() {
- w := c.config.NumParamRegs(t.Elem())
- rc.nextSlice += Abi1RO(i * w)
- return rc
- }
- if t.IsStruct() {
- for j := 0; j < i; j++ {
- rc.next(t.FieldType(j))
- }
- return rc
- }
- panic("Haven't implemented this case yet, do I need to?")
-}
-
-func (c *registerCursor) init(regs []abi.RegIndex, info *abi.ABIParamResultInfo, result *[]*Value, storeDest *Value) {
- c.regsLen = len(regs)
- c.nextSlice = 0
- if len(regs) == 0 {
- c.storeDest = storeDest // only save this if there are no registers, will explode if misused.
- return
- }
- c.config = info.Config()
- c.regValues = result
-}
+ case OpArg:
+ args = append(args, v)
-func (c *registerCursor) addArg(v *Value) {
- *c.regValues = append(*c.regValues, v)
-}
+ case OpStore:
+ if a := v.Args[1]; a.Op == OpSelectN && !CanSSA(a.Type) {
+ if a.Uses > 1 {
+ panic(fmt.Errorf("Saw double use of wide SelectN %s operand of Store %s",
+ a.LongString(), v.LongString()))
+ }
+ x.wideSelects[a] = v
+ }
-func (c *registerCursor) hasRegs() bool {
- return c.regsLen > 0
-}
+ case OpSelectN:
+ if v.Type == types.TypeMem {
+ // rewrite the mem selector in place
+ call := v.Args[0]
+ aux := call.Aux.(*AuxCall)
+ mem := x.memForCall[call.ID]
+ if mem == nil {
+ v.AuxInt = int64(aux.abiInfo.OutRegistersUsed())
+ x.memForCall[call.ID] = v
+ } else {
+ panic(fmt.Errorf("Saw two memories for call %v, %v and %v", call, mem, v))
+ }
+ } else {
+ selects = append(selects, v)
+ }
-type expandState struct {
- f *Func
- abi1 *abi.ABIConfig
- debug int // odd values log lost statement markers, so likely settings are 1 (stmts), 2 (expansion), and 3 (both)
- canSSAType func(*types.Type) bool
- regSize int64
- sp *Value
- typs *Types
- ptrSize int64
- hiOffset int64
- lowOffset int64
- hiRo Abi1RO
- loRo Abi1RO
- namedSelects map[*Value][]namedVal
- sdom SparseTree
- commonSelectors map[selKey]*Value // used to de-dupe selectors
- commonArgs map[selKey]*Value // used to de-dupe OpArg/OpArgIntReg/OpArgFloatReg
- memForCall map[ID]*Value // For a call, need to know the unique selector that gets the mem.
- transformedSelects map[ID]bool // OpSelectN after rewriting, either created or renumbered.
- indentLevel int // Indentation for debugging recursion
-}
+ case OpSelectNAddr:
+ call := v.Args[0]
+ which := v.AuxInt
+ aux := call.Aux.(*AuxCall)
+ pt := v.Type
+ off := x.offsetFrom(x.f.Entry, x.sp, aux.OffsetOfResult(which), pt)
+ v.copyOf(off)
+ }
+ }
-// intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target
-// that has no 64-bit integer registers.
-func (x *expandState) intPairTypes(et types.Kind) (tHi, tLo *types.Type) {
- tHi = x.typs.UInt32
- if et == types.TINT64 {
- tHi = x.typs.Int32
+ // rewrite function results from an exit block
+ // values returned by function need to be split out into registers.
+ if isBlockMultiValueExit(b) {
+ exitBlocks = append(exitBlocks, b)
+ }
}
- tLo = x.typs.UInt32
- return
-}
-// isAlreadyExpandedAggregateType returns whether a type is an SSA-able "aggregate" (multiple register) type
-// that was expanded in an earlier phase (currently, expand_calls is intended to run after decomposeBuiltin,
-// so this is all aggregate types -- small struct and array, complex, interface, string, slice, and 64-bit
-// integer on 32-bit).
-func (x *expandState) isAlreadyExpandedAggregateType(t *types.Type) bool {
- if !x.canSSAType(t) {
- return false
+ // Convert each aggregate arg into Make of its parts (and so on, to primitive types)
+ for _, v := range args {
+ var rc registerCursor
+ a := x.prAssignForArg(v)
+ aux := x.f.OwnAux
+ regs := a.Registers
+ var offset int64
+ if len(regs) == 0 {
+ offset = a.FrameOffset(aux.abiInfo)
+ }
+ auxBase := x.offsetFrom(x.f.Entry, x.sp, offset, types.NewPtr(v.Type))
+ rc.init(regs, aux.abiInfo, nil, auxBase, 0)
+ x.rewriteSelectOrArg(f.Entry.Pos, f.Entry, v, v, m0, v.Type, rc)
}
- return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice() ||
- (t.Size() > x.regSize && (t.IsInteger() || (x.f.Config.SoftFloat && t.IsFloat())))
-}
-// offsetFrom creates an offset from a pointer, simplifying chained offsets and offsets from SP
-// TODO should also optimize offsets from SB?
-func (x *expandState) offsetFrom(b *Block, from *Value, offset int64, pt *types.Type) *Value {
- ft := from.Type
- if offset == 0 {
- if ft == pt {
- return from
+ // Rewrite selects of results (which may be aggregates) into make-aggregates of register/memory-targeted selects
+ for _, v := range selects {
+ if v.Op == OpInvalid {
+ continue
}
- // This captures common, (apparently) safe cases. The unsafe cases involve ft == uintptr
- if (ft.IsPtr() || ft.IsUnsafePtr()) && pt.IsPtr() {
- return from
+
+ call := v.Args[0]
+ aux := call.Aux.(*AuxCall)
+ mem := x.memForCall[call.ID]
+ if mem == nil {
+ mem = call.Block.NewValue1I(call.Pos, OpSelectN, types.TypeMem, int64(aux.abiInfo.OutRegistersUsed()), call)
+ x.memForCall[call.ID] = mem
+ }
+
+ i := v.AuxInt
+ regs := aux.RegsOfResult(i)
+
+ // If this select cannot fit into SSA and is stored, either disaggregate to register stores, or mem-mem move.
+ if store := x.wideSelects[v]; store != nil {
+ // Use the mem that comes from the store operation.
+ storeAddr := store.Args[0]
+ mem := store.Args[2]
+ if len(regs) > 0 {
+ // Cannot do a rewrite that builds up a result from pieces; instead, copy pieces to the store operation.
+ var rc registerCursor
+ rc.init(regs, aux.abiInfo, nil, storeAddr, 0)
+ mem = x.rewriteWideSelectToStores(call.Pos, call.Block, v, mem, v.Type, rc)
+ store.copyOf(mem)
+ } else {
+ // Move directly from AuxBase to store target; rewrite the store instruction.
+ offset := aux.OffsetOfResult(i)
+ auxBase := x.offsetFrom(x.f.Entry, x.sp, offset, types.NewPtr(v.Type))
+ // was Store dst, v, mem
+ // now Move dst, auxBase, mem
+ move := store.Block.NewValue3A(store.Pos, OpMove, types.TypeMem, v.Type, storeAddr, auxBase, mem)
+ move.AuxInt = v.Type.Size()
+ store.copyOf(move)
+ }
+ continue
}
- }
- // Simplify, canonicalize
- for from.Op == OpOffPtr {
- offset += from.AuxInt
- from = from.Args[0]
- }
- if from == x.sp {
- return x.f.ConstOffPtrSP(pt, offset, x.sp)
- }
- return b.NewValue1I(from.Pos.WithNotStmt(), OpOffPtr, pt, offset, from)
-}
-// splitSlots splits one "field" (specified by sfx, offset, and ty) out of the LocalSlots in ls and returns the new LocalSlots this generates.
-func (x *expandState) splitSlots(ls []*LocalSlot, sfx string, offset int64, ty *types.Type) []*LocalSlot {
- var locs []*LocalSlot
- for i := range ls {
- locs = append(locs, x.f.SplitSlot(ls[i], sfx, offset, ty))
+ var auxBase *Value
+ if len(regs) == 0 {
+ offset := aux.OffsetOfResult(i)
+ auxBase = x.offsetFrom(x.f.Entry, x.sp, offset, types.NewPtr(v.Type))
+ }
+ var rc registerCursor
+ rc.init(regs, aux.abiInfo, nil, auxBase, 0)
+ x.rewriteSelectOrArg(call.Pos, call.Block, v, v, mem, v.Type, rc)
}
- return locs
-}
-// prAssignForArg returns the ABIParamAssignment for v, assumed to be an OpArg.
-func (x *expandState) prAssignForArg(v *Value) *abi.ABIParamAssignment {
- if v.Op != OpArg {
- panic(badVal("Wanted OpArg, instead saw", v))
+ rewriteCall := func(v *Value, newOp Op, argStart int) {
+ // Break aggregate args passed to call into smaller pieces.
+ x.rewriteCallArgs(v, argStart)
+ v.Op = newOp
+ rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
+ v.Type = types.NewResults(append(rts, types.TypeMem))
}
- return ParamAssignmentForArgName(x.f, v.Aux.(*ir.Name))
-}
-// ParamAssignmentForArgName returns the ABIParamAssignment for f's arg with matching name.
-func ParamAssignmentForArgName(f *Func, name *ir.Name) *abi.ABIParamAssignment {
- abiInfo := f.OwnAux.abiInfo
- ip := abiInfo.InParams()
- for i, a := range ip {
- if a.Name == name {
- return &ip[i]
+ // Rewrite calls
+ for _, v := range calls {
+ switch v.Op {
+ case OpStaticLECall:
+ rewriteCall(v, OpStaticCall, 0)
+ case OpTailLECall:
+ rewriteCall(v, OpTailCall, 0)
+ case OpClosureLECall:
+ rewriteCall(v, OpClosureCall, 2)
+ case OpInterLECall:
+ rewriteCall(v, OpInterCall, 1)
}
}
- panic(fmt.Errorf("Did not match param %v in prInfo %+v", name, abiInfo.InParams()))
-}
-// indent increments (or decrements) the indentation.
-func (x *expandState) indent(n int) {
- x.indentLevel += n
-}
-
-// Printf does an indented fmt.Printf on the format and args.
-func (x *expandState) Printf(format string, a ...interface{}) (n int, err error) {
- if x.indentLevel > 0 {
- fmt.Printf("%[1]*s", x.indentLevel, "")
+ // Rewrite results from exit blocks
+ for _, b := range exitBlocks {
+ v := b.Controls[0]
+ x.rewriteFuncResults(v, b, f.OwnAux)
+ b.SetControl(v)
}
- return fmt.Printf(format, a...)
+
}
-// Calls that need lowering have some number of inputs, including a memory input,
-// and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able.
+func (x *expandState) rewriteFuncResults(v *Value, b *Block, aux *AuxCall) {
+ // This is very similar to rewriteCallArgs
+ // differences:
+ // firstArg + preArgs
+ // sp vs auxBase
-// With the current ABI those inputs need to be converted into stores to memory,
-// rethreading the call's memory input to the first, and the new call now receiving the last.
+ m0 := v.MemoryArg()
+ mem := m0
-// With the current ABI, the outputs need to be converted to loads, which will all use the call's
-// memory output as their input.
+ allResults := []*Value{}
+ var oldArgs []*Value
+ argsWithoutMem := v.Args[:len(v.Args)-1]
-// rewriteSelect recursively walks from leaf selector to a root (OpSelectN, OpLoad, OpArg)
-// through a chain of Struct/Array/builtin Select operations. If the chain of selectors does not
-// end in an expected root, it does nothing (this can happen depending on compiler phase ordering).
-// The "leaf" provides the type, the root supplies the container, and the leaf-to-root path
-// accumulates the offset.
-// It emits the code necessary to implement the leaf select operation that leads to the root.
-//
-// TODO when registers really arrive, must also decompose anything split across two registers or registers and memory.
-func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64, regOffset Abi1RO) []*LocalSlot {
- if x.debug > 1 {
- x.indent(3)
- defer x.indent(-3)
- x.Printf("rewriteSelect(%s; %s; memOff=%d; regOff=%d)\n", leaf.LongString(), selector.LongString(), offset, regOffset)
- }
- var locs []*LocalSlot
- leafType := leaf.Type
- if len(selector.Args) > 0 {
- w := selector.Args[0]
- if w.Op == OpCopy {
- for w.Op == OpCopy {
- w = w.Args[0]
+ for j, a := range argsWithoutMem {
+ oldArgs = append(oldArgs, a)
+ i := int64(j)
+ auxType := aux.TypeOfResult(i)
+ auxBase := b.NewValue2A(v.Pos, OpLocalAddr, types.NewPtr(auxType), aux.NameOfResult(i), x.sp, mem)
+ auxOffset := int64(0)
+ aRegs := aux.RegsOfResult(int64(j))
+ if a.Op == OpDereference {
+ a.Op = OpLoad
+ }
+ var rc registerCursor
+ var result *[]*Value
+ if len(aRegs) > 0 {
+ result = &allResults
+ } else {
+ if a.Op == OpLoad && a.Args[0].Op == OpLocalAddr {
+ addr := a.Args[0]
+ if addr.MemoryArg() == a.MemoryArg() && addr.Aux == aux.NameOfResult(i) {
+ continue // Self move to output parameter
+ }
}
- selector.SetArg(0, w)
}
+ rc.init(aRegs, aux.abiInfo, result, auxBase, auxOffset)
+ mem = x.decomposeAsNecessary(v.Pos, b, a, mem, rc)
}
- switch selector.Op {
- case OpArgIntReg, OpArgFloatReg:
- if leafType == selector.Type { // OpIData leads us here, sometimes.
- leaf.copyOf(selector)
- } else {
- x.f.Fatalf("Unexpected %s type, selector=%s, leaf=%s\n", selector.Op.String(), selector.LongString(), leaf.LongString())
- }
- if x.debug > 1 {
- x.Printf("---%s, break\n", selector.Op.String())
- }
- case OpArg:
- if !x.isAlreadyExpandedAggregateType(selector.Type) {
- if leafType == selector.Type { // OpIData leads us here, sometimes.
- x.newArgToMemOrRegs(selector, leaf, offset, regOffset, leafType, leaf.Pos)
- } else {
- x.f.Fatalf("Unexpected OpArg type, selector=%s, leaf=%s\n", selector.LongString(), leaf.LongString())
- }
+ v.resetArgs()
+ v.AddArgs(allResults...)
+ v.AddArg(mem)
+ for _, a := range oldArgs {
+ if a.Uses == 0 {
if x.debug > 1 {
- x.Printf("---OpArg, break\n")
+ x.Printf("...marking %v unused\n", a.LongString())
}
- break
- }
- switch leaf.Op {
- case OpIData, OpStructSelect, OpArraySelect:
- leafType = removeTrivialWrapperTypes(leaf.Type)
+ x.invalidateRecursively(a)
}
- x.newArgToMemOrRegs(selector, leaf, offset, regOffset, leafType, leaf.Pos)
+ }
+ v.Type = types.NewResults(append(abi.RegisterTypes(aux.abiInfo.OutParams()), types.TypeMem))
+ return
+}
- for _, s := range x.namedSelects[selector] {
- locs = append(locs, x.f.Names[s.locIndex])
- }
+func (x *expandState) rewriteCallArgs(v *Value, firstArg int) {
+ if x.debug > 1 {
+ x.indent(3)
+ defer x.indent(-3)
+ x.Printf("rewriteCallArgs(%s; %d)\n", v.LongString(), firstArg)
+ }
+ // Thread the stores on the memory arg
+ aux := v.Aux.(*AuxCall)
+ m0 := v.MemoryArg()
+ mem := m0
+ allResults := []*Value{}
+ oldArgs := []*Value{}
+ argsWithoutMem := v.Args[firstArg : len(v.Args)-1] // Also strip closure/interface Op-specific args
- case OpLoad: // We end up here because of IData of immediate structures.
- // Failure case:
- // (note the failure case is very rare; w/o this case, make.bash and run.bash both pass, as well as
- // the hard cases of building {syscall,math,math/cmplx,math/bits,go/constant} on ppc64le and mips-softfloat).
- //
- // GOSSAFUNC='(*dumper).dump' go build -gcflags=-l -tags=math_big_pure_go cmd/compile/internal/gc
- // cmd/compile/internal/gc/dump.go:136:14: internal compiler error: '(*dumper).dump': not lowered: v827, StructSelect PTR PTR
- // b2: ← b1
- // v20 (+142) = StaticLECall <interface {},mem> {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v8 v1
- // v21 (142) = SelectN <mem> [1] v20
- // v22 (142) = SelectN <interface {}> [0] v20
- // b15: ← b8
- // v71 (+143) = IData <Nodes> v22 (v[Nodes])
- // v73 (+146) = StaticLECall <[]*Node,mem> {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v71 v21
- //
- // translates (w/o the "case OpLoad:" above) to:
- //
- // b2: ← b1
- // v20 (+142) = StaticCall <mem> {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v715
- // v23 (142) = Load <*uintptr> v19 v20
- // v823 (142) = IsNonNil <bool> v23
- // v67 (+143) = Load <*[]*Node> v880 v20
- // b15: ← b8
- // v827 (146) = StructSelect <*[]*Node> [0] v67
- // v846 (146) = Store <mem> {*[]*Node} v769 v827 v20
- // v73 (+146) = StaticCall <mem> {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v846
- // i.e., the struct select is generated and remains in because it is not applied to an actual structure.
- // The OpLoad was created to load the single field of the IData
- // This case removes that StructSelect.
- if leafType != selector.Type {
- if x.f.Config.SoftFloat && selector.Type.IsFloat() {
- if x.debug > 1 {
- x.Printf("---OpLoad, break\n")
- }
- break // softfloat pass will take care of that
- }
- x.f.Fatalf("Unexpected Load as selector, leaf=%s, selector=%s\n", leaf.LongString(), selector.LongString())
- }
- leaf.copyOf(selector)
- for _, s := range x.namedSelects[selector] {
- locs = append(locs, x.f.Names[s.locIndex])
- }
+ sp := x.sp
+ if v.Op == OpTailLECall {
+ // For tail call, we unwind the frame before the call so we'll use the caller's
+ // SP.
+ sp = x.f.Entry.NewValue1(src.NoXPos, OpGetCallerSP, x.typs.Uintptr, mem)
+ }
- case OpSelectN:
- // TODO(register args) result case
- // if applied to Op-mumble-call, the Aux tells us which result, regOffset specifies offset within result. If a register, should rewrite to OpSelectN for new call.
- // TODO these may be duplicated. Should memoize. Intermediate selectors will go dead, no worries there.
- call := selector.Args[0]
- call0 := call
- aux := call.Aux.(*AuxCall)
- which := selector.AuxInt
- if x.transformedSelects[selector.ID] {
- // This is a minor hack. Either this select has had its operand adjusted (mem) or
- // it is some other intermediate node that was rewritten to reference a register (not a generic arg).
- // This can occur with chains of selection/indexing from single field/element aggregates.
- leaf.copyOf(selector)
- break
- }
- if which == aux.NResults() { // mem is after the results.
- // rewrite v as a Copy of call -- the replacement call will produce a mem.
- if leaf != selector {
- panic(fmt.Errorf("Unexpected selector of memory, selector=%s, call=%s, leaf=%s", selector.LongString(), call.LongString(), leaf.LongString()))
- }
- if aux.abiInfo == nil {
- panic(badVal("aux.abiInfo nil for call", call))
- }
- if existing := x.memForCall[call.ID]; existing == nil {
- selector.AuxInt = int64(aux.abiInfo.OutRegistersUsed())
- x.memForCall[call.ID] = selector
- x.transformedSelects[selector.ID] = true // operand adjusted
- } else {
- selector.copyOf(existing)
- }
+ for i, a := range argsWithoutMem { // skip leading non-parameter SSA Args and trailing mem SSA Arg.
+ oldArgs = append(oldArgs, a)
+ auxI := int64(i)
+ aRegs := aux.RegsOfArg(auxI)
+ aType := aux.TypeOfArg(auxI)
- } else {
- leafType := removeTrivialWrapperTypes(leaf.Type)
- if x.canSSAType(leafType) {
- pt := types.NewPtr(leafType)
- // Any selection right out of the arg area/registers has to be same Block as call, use call as mem input.
- // Create a "mem" for any loads that need to occur.
- if mem := x.memForCall[call.ID]; mem != nil {
- if mem.Block != call.Block {
- panic(fmt.Errorf("selector and call need to be in same block, selector=%s; call=%s", selector.LongString(), call.LongString()))
- }
- call = mem
- } else {
- mem = call.Block.NewValue1I(call.Pos.WithNotStmt(), OpSelectN, types.TypeMem, int64(aux.abiInfo.OutRegistersUsed()), call)
- x.transformedSelects[mem.ID] = true // select uses post-expansion indexing
- x.memForCall[call.ID] = mem
- call = mem
- }
- outParam := aux.abiInfo.OutParam(int(which))
- if len(outParam.Registers) > 0 {
- firstReg := uint32(0)
- for i := 0; i < int(which); i++ {
- firstReg += uint32(len(aux.abiInfo.OutParam(i).Registers))
- }
- reg := int64(regOffset + Abi1RO(firstReg))
- if leaf.Block == call.Block {
- leaf.reset(OpSelectN)
- leaf.SetArgs1(call0)
- leaf.Type = leafType
- leaf.AuxInt = reg
- x.transformedSelects[leaf.ID] = true // leaf, rewritten to use post-expansion indexing.
- } else {
- w := call.Block.NewValue1I(leaf.Pos, OpSelectN, leafType, reg, call0)
- x.transformedSelects[w.ID] = true // select, using post-expansion indexing.
- leaf.copyOf(w)
- }
- } else {
- off := x.offsetFrom(x.f.Entry, x.sp, offset+aux.OffsetOfResult(which), pt)
- if leaf.Block == call.Block {
- leaf.reset(OpLoad)
- leaf.SetArgs2(off, call)
- leaf.Type = leafType
- } else {
- w := call.Block.NewValue2(leaf.Pos, OpLoad, leafType, off, call)
- leaf.copyOf(w)
- if x.debug > 1 {
- x.Printf("---new %s\n", w.LongString())
- }
- }
- }
- for _, s := range x.namedSelects[selector] {
- locs = append(locs, x.f.Names[s.locIndex])
- }
- } else {
- x.f.Fatalf("Should not have non-SSA-able OpSelectN, selector=%s", selector.LongString())
- }
+ if a.Op == OpDereference {
+ a.Op = OpLoad
}
-
- case OpStructSelect:
- w := selector.Args[0]
- var ls []*LocalSlot
- if w.Type.Kind() != types.TSTRUCT { // IData artifact
- ls = x.rewriteSelect(leaf, w, offset, regOffset)
+ var rc registerCursor
+ var result *[]*Value
+ var aOffset int64
+ if len(aRegs) > 0 {
+ result = &allResults
} else {
- fldi := int(selector.AuxInt)
- ls = x.rewriteSelect(leaf, w, offset+w.Type.FieldOff(fldi), regOffset+x.regOffset(w.Type, fldi))
- if w.Op != OpIData {
- for _, l := range ls {
- locs = append(locs, x.f.SplitStruct(l, int(selector.AuxInt)))
- }
+ aOffset = aux.OffsetOfArg(auxI)
+ }
+ if v.Op == OpTailLECall && a.Op == OpArg && a.AuxInt == 0 {
+ // It's common for a tail call to pass along the enclosing function's own arguments (e.g. a method wrapper),
+ // in which case this would be a self copy. Detect that and optimize it out.
+ n := a.Aux.(*ir.Name)
+ if n.Class == ir.PPARAM && n.FrameOffset()+x.f.Config.ctxt.Arch.FixedFrameSize == aOffset {
+ continue
}
}
-
- case OpArraySelect:
- w := selector.Args[0]
- index := selector.AuxInt
- x.rewriteSelect(leaf, w, offset+selector.Type.Size()*index, regOffset+x.regOffset(w.Type, int(index)))
-
- case OpInt64Hi:
- w := selector.Args[0]
- ls := x.rewriteSelect(leaf, w, offset+x.hiOffset, regOffset+x.hiRo)
- locs = x.splitSlots(ls, ".hi", x.hiOffset, leafType)
-
- case OpInt64Lo:
- w := selector.Args[0]
- ls := x.rewriteSelect(leaf, w, offset+x.lowOffset, regOffset+x.loRo)
- locs = x.splitSlots(ls, ".lo", x.lowOffset, leafType)
-
- case OpStringPtr:
- ls := x.rewriteSelect(leaf, selector.Args[0], offset, regOffset)
- locs = x.splitSlots(ls, ".ptr", 0, x.typs.BytePtr)
-
- case OpSlicePtr, OpSlicePtrUnchecked:
- w := selector.Args[0]
- ls := x.rewriteSelect(leaf, w, offset, regOffset)
- locs = x.splitSlots(ls, ".ptr", 0, types.NewPtr(w.Type.Elem()))
-
- case OpITab:
- w := selector.Args[0]
- ls := x.rewriteSelect(leaf, w, offset, regOffset)
- sfx := ".itab"
- if w.Type.IsEmptyInterface() {
- sfx = ".type"
+ if x.debug > 1 {
+ x.Printf("...storeArg %s, %v, %d\n", a.LongString(), aType, aOffset)
}
- locs = x.splitSlots(ls, sfx, 0, x.typs.Uintptr)
-
- case OpComplexReal:
- ls := x.rewriteSelect(leaf, selector.Args[0], offset, regOffset)
- locs = x.splitSlots(ls, ".real", 0, selector.Type)
-
- case OpComplexImag:
- ls := x.rewriteSelect(leaf, selector.Args[0], offset+selector.Type.Size(), regOffset+RO_complex_imag) // result is FloatNN, width of result is offset of imaginary part.
- locs = x.splitSlots(ls, ".imag", selector.Type.Size(), selector.Type)
-
- case OpStringLen, OpSliceLen:
- ls := x.rewriteSelect(leaf, selector.Args[0], offset+x.ptrSize, regOffset+RO_slice_len)
- locs = x.splitSlots(ls, ".len", x.ptrSize, leafType)
-
- case OpIData:
- ls := x.rewriteSelect(leaf, selector.Args[0], offset+x.ptrSize, regOffset+RO_iface_data)
- locs = x.splitSlots(ls, ".data", x.ptrSize, leafType)
- case OpSliceCap:
- ls := x.rewriteSelect(leaf, selector.Args[0], offset+2*x.ptrSize, regOffset+RO_slice_cap)
- locs = x.splitSlots(ls, ".cap", 2*x.ptrSize, leafType)
-
- case OpCopy: // If it's an intermediate result, recurse
- locs = x.rewriteSelect(leaf, selector.Args[0], offset, regOffset)
- for _, s := range x.namedSelects[selector] {
- // this copy may have had its own name, preserve that, too.
- locs = append(locs, x.f.Names[s.locIndex])
+ rc.init(aRegs, aux.abiInfo, result, sp, aOffset)
+ mem = x.decomposeAsNecessary(v.Pos, v.Block, a, mem, rc)
+ }
+ var preArgStore [2]*Value
+ preArgs := append(preArgStore[:0], v.Args[0:firstArg]...)
+ v.resetArgs()
+ v.AddArgs(preArgs...)
+ v.AddArgs(allResults...)
+ v.AddArg(mem)
+ for _, a := range oldArgs {
+ if a.Uses == 0 {
+ x.invalidateRecursively(a)
}
-
- default:
- // Ignore dead ends. These can occur if this phase is run before decompose builtin (which is not intended, but allowed).
}
- return locs
+ return
}
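// A minimal standalone sketch of the rewrite above, using stand-in types
// (value, store) rather than the compiler's ssa values: each argument is
// lowered against the current memory token, register-assigned arguments are
// collected separately, and stack-passed arguments become stores that chain
// the token forward before the call is rebuilt.
package main

import "fmt"

type value struct{ name string }

// store consumes the old memory token and produces a new one, the way an
// OpStore threads its mem argument.
func store(what string, mem *value) *value {
	return &value{name: "store(" + what + ", " + mem.name + ")"}
}

func main() {
	mem := &value{name: "m0"}
	var regVals []*value
	for _, arg := range []string{"a", "b", "c"} {
		if arg == "b" { // pretend "b" was assigned to a register
			regVals = append(regVals, &value{name: arg})
			continue
		}
		mem = store(arg, mem) // stack-passed args become stores threaded on mem
	}
	fmt.Println(len(regVals), mem.name) // 1 store(c, store(a, m0))
}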
-func (x *expandState) rewriteDereference(b *Block, base, a, mem *Value, offset, size int64, typ *types.Type, pos src.XPos) *Value {
- source := a.Args[0]
- dst := x.offsetFrom(b, base, offset, source.Type)
- if a.Uses == 1 && a.Block == b {
- a.reset(OpMove)
- a.Pos = pos
- a.Type = types.TypeMem
- a.Aux = typ
- a.AuxInt = size
- a.SetArgs3(dst, source, mem)
- mem = a
- } else {
- mem = b.NewValue3A(pos, OpMove, types.TypeMem, typ, dst, source, mem)
- mem.AuxInt = size
- }
+func (x *expandState) decomposePair(pos src.XPos, b *Block, a, mem *Value, t0, t1 *types.Type, o0, o1 Op, rc *registerCursor) *Value {
+ e := b.NewValue1(pos, o0, t0, a)
+ pos = pos.WithNotStmt()
+ mem = x.decomposeAsNecessary(pos, b, e, mem, rc.next(t0))
+ e = b.NewValue1(pos, o1, t1, a)
+ mem = x.decomposeAsNecessary(pos, b, e, mem, rc.next(t1))
return mem
}
-var indexNames [1]string = [1]string{"[0]"}
+func (x *expandState) decomposeOne(pos src.XPos, b *Block, a, mem *Value, t0 *types.Type, o0 Op, rc *registerCursor) *Value {
+ e := b.NewValue1(pos, o0, t0, a)
+ pos = pos.WithNotStmt()
+ mem = x.decomposeAsNecessary(pos, b, e, mem, rc.next(t0))
+ return mem
+}
-// pathTo returns the selection path to the leaf type at offset within container.
-// e.g. len(thing.field[0]) => ".field[0].len"
-// this is for purposes of generating names ultimately fed to a debugger.
-func (x *expandState) pathTo(container, leaf *types.Type, offset int64) string {
- if container == leaf || offset == 0 && container.Size() == leaf.Size() {
- return ""
+// decomposeAsNecessary converts a value (perhaps an aggregate) passed to a call or returned by a function,
+// into the appropriate sequence of stores and register assignments to transmit that value in a given ABI, and
+// returns the current memory after this convert/rewrite (which may be the input memory if no stores were needed).
+// 'pos' is the source position all this is tied to
+// 'b' is the enclosing block
+// 'a' is the value to decompose
+// 'm0' is the input memory arg used for the first store (or returned if there are no stores)
+// 'rc' is a registerCursor which identifies the register/memory destination for the value
+func (x *expandState) decomposeAsNecessary(pos src.XPos, b *Block, a, m0 *Value, rc registerCursor) *Value {
+ if x.debug > 1 {
+ x.indent(3)
+ defer x.indent(-3)
}
- path := ""
-outer:
- for {
- switch container.Kind() {
- case types.TARRAY:
- container = container.Elem()
- if container.Size() == 0 {
- return path
- }
- i := offset / container.Size()
- offset = offset % container.Size()
- // If a future compiler/ABI supports larger SSA/Arg-able arrays, expand indexNames.
- path = path + indexNames[i]
- continue
- case types.TSTRUCT:
- for i := 0; i < container.NumFields(); i++ {
- fld := container.Field(i)
- if fld.Offset+fld.Type.Size() > offset {
- offset -= fld.Offset
- path += "." + fld.Sym.Name
- container = fld.Type
- continue outer
- }
- }
- return path
- case types.TINT64, types.TUINT64:
- if container.Size() == x.regSize {
- return path
- }
- if offset == x.hiOffset {
- return path + ".hi"
- }
- return path + ".lo"
- case types.TINTER:
- if offset != 0 {
- return path + ".data"
- }
- if container.IsEmptyInterface() {
- return path + ".type"
- }
- return path + ".itab"
-
- case types.TSLICE:
- if offset == 2*x.regSize {
- return path + ".cap"
- }
- fallthrough
- case types.TSTRING:
- if offset == 0 {
- return path + ".ptr"
- }
- return path + ".len"
- case types.TCOMPLEX64, types.TCOMPLEX128:
- if offset == 0 {
- return path + ".real"
- }
- return path + ".imag"
- }
- return path
+ at := a.Type
+ if at.Size() == 0 {
+ return m0
}
-}
-
-// decomposeArg is a helper for storeArgOrLoad.
-// It decomposes a Load or an Arg into smaller parts and returns the new mem.
-// If the type does not match one of the expected aggregate types, it returns nil instead.
-// Parameters:
-//
-// pos -- the location of any generated code.
-// b -- the block into which any generated code should normally be placed
-// source -- the value, possibly an aggregate, to be stored.
-// mem -- the mem flowing into this decomposition (loads depend on it, stores updated it)
-// t -- the type of the value to be stored
-// storeOffset -- if the value is stored in memory, it is stored at base (see storeRc) + storeOffset
-// loadRegOffset -- regarding source as a value in registers, the register offset in ABI1. Meaningful only if source is OpArg.
-// storeRc -- storeRC; if the value is stored in registers, this specifies the registers.
-// StoreRc also identifies whether the target is registers or memory, and has the base for the store operation.
-func (x *expandState) decomposeArg(pos src.XPos, b *Block, source, mem *Value, t *types.Type, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
-
- pa := x.prAssignForArg(source)
- var locs []*LocalSlot
- for _, s := range x.namedSelects[source] {
- locs = append(locs, x.f.Names[s.locIndex])
+ if a.Op == OpDereference {
+ a.Op = OpLoad // For purposes of parameter passing expansion, a Dereference is a Load.
}
- if len(pa.Registers) > 0 {
- // Handle the in-registers case directly
- rts, offs := pa.RegisterTypesAndOffsets()
- last := loadRegOffset + x.regWidth(t)
- if offs[loadRegOffset] != 0 {
- // Document the problem before panicking.
- for i := 0; i < len(rts); i++ {
- rt := rts[i]
- off := offs[i]
- fmt.Printf("rt=%s, off=%d, rt.Width=%d, rt.Align=%d\n", rt.String(), off, rt.Size(), uint8(rt.Alignment()))
- }
- panic(fmt.Errorf("offset %d of requested register %d should be zero, source=%s", offs[loadRegOffset], loadRegOffset, source.LongString()))
- }
-
+ if !rc.hasRegs() && !CanSSA(at) {
+ dst := x.offsetFrom(b, rc.storeDest, rc.storeOffset, types.NewPtr(at))
if x.debug > 1 {
- x.Printf("decompose arg %s has %d locs\n", source.LongString(), len(locs))
+ x.Printf("...recur store %s at %s\n", a.LongString(), dst.LongString())
}
-
- for i := loadRegOffset; i < last; i++ {
- rt := rts[i]
- off := offs[i]
- w := x.commonArgs[selKey{source, off, rt.Size(), rt}]
- if w == nil {
- w = x.newArgToMemOrRegs(source, w, off, i, rt, pos)
- suffix := x.pathTo(source.Type, rt, off)
- if suffix != "" {
- x.splitSlotsIntoNames(locs, suffix, off, rt, w)
- }
- }
- if t.IsPtrShaped() {
- // Preserve the original store type. This ensures pointer type
- // properties aren't discarded (e.g, notinheap).
- if rt.Size() != t.Size() || len(pa.Registers) != 1 || i != loadRegOffset {
- b.Func.Fatalf("incompatible store type %v and %v, i=%d", t, rt, i)
- }
- rt = t
- }
- mem = x.storeArgOrLoad(pos, b, w, mem, rt, storeOffset+off, i, storeRc.next(rt))
+ if a.Op == OpLoad {
+ m0 = b.NewValue3A(pos, OpMove, types.TypeMem, at, dst, a.Args[0], m0)
+ m0.AuxInt = at.Size()
+ return m0
+ } else {
+ panic(fmt.Errorf("Store of not a load"))
}
- return mem
}
- u := source.Type
- switch u.Kind() {
+ mem := m0
+ switch at.Kind() {
case types.TARRAY:
- elem := u.Elem()
- elemRO := x.regWidth(elem)
- for i := int64(0); i < u.NumElem(); i++ {
- elemOff := i * elem.Size()
- mem = storeOneArg(x, pos, b, locs, indexNames[i], source, mem, elem, elemOff, storeOffset+elemOff, loadRegOffset, storeRc.next(elem))
- loadRegOffset += elemRO
+ et := at.Elem()
+ for i := int64(0); i < at.NumElem(); i++ {
+ e := b.NewValue1I(pos, OpArraySelect, et, i, a)
pos = pos.WithNotStmt()
+ mem = x.decomposeAsNecessary(pos, b, e, mem, rc.next(et))
}
return mem
+
case types.TSTRUCT:
- for i := 0; i < u.NumFields(); i++ {
- fld := u.Field(i)
- mem = storeOneArg(x, pos, b, locs, "."+fld.Sym.Name, source, mem, fld.Type, fld.Offset, storeOffset+fld.Offset, loadRegOffset, storeRc.next(fld.Type))
- loadRegOffset += x.regWidth(fld.Type)
+ for i := 0; i < at.NumFields(); i++ {
+ et := at.Field(i).Type // might need to read offsets from the fields
+ e := b.NewValue1I(pos, OpStructSelect, et, int64(i), a)
pos = pos.WithNotStmt()
+ if x.debug > 1 {
+ x.Printf("...recur decompose %s, %v\n", e.LongString(), et)
+ }
+ mem = x.decomposeAsNecessary(pos, b, e, mem, rc.next(et))
}
return mem
- case types.TINT64, types.TUINT64:
- if t.Size() == x.regSize {
- break
- }
- tHi, tLo := x.intPairTypes(t.Kind())
- mem = storeOneArg(x, pos, b, locs, ".hi", source, mem, tHi, x.hiOffset, storeOffset+x.hiOffset, loadRegOffset+x.hiRo, storeRc.plus(x.hiRo))
+
+ case types.TSLICE:
+ mem = x.decomposeOne(pos, b, a, mem, at.Elem().PtrTo(), OpSlicePtr, &rc)
pos = pos.WithNotStmt()
- return storeOneArg(x, pos, b, locs, ".lo", source, mem, tLo, x.lowOffset, storeOffset+x.lowOffset, loadRegOffset+x.loRo, storeRc.plus(x.loRo))
+ mem = x.decomposeOne(pos, b, a, mem, x.typs.Int, OpSliceLen, &rc)
+ return x.decomposeOne(pos, b, a, mem, x.typs.Int, OpSliceCap, &rc)
+
+ case types.TSTRING:
+ return x.decomposePair(pos, b, a, mem, x.typs.BytePtr, x.typs.Int, OpStringPtr, OpStringLen, &rc)
+
case types.TINTER:
- sfx := ".itab"
- if u.IsEmptyInterface() {
- sfx = ".type"
+ mem = x.decomposeOne(pos, b, a, mem, x.typs.Uintptr, OpITab, &rc)
+ pos = pos.WithNotStmt()
+ // Immediate interfaces cause so many headaches.
+ if a.Op == OpIMake {
+ data := a.Args[1]
+ for data.Op == OpStructMake1 || data.Op == OpArrayMake1 {
+ data = data.Args[0]
+ }
+ return x.decomposeAsNecessary(pos, b, data, mem, rc.next(data.Type))
}
- return storeTwoArg(x, pos, b, locs, sfx, ".idata", source, mem, x.typs.Uintptr, x.typs.BytePtr, 0, storeOffset, loadRegOffset, storeRc)
- case types.TSTRING:
- return storeTwoArg(x, pos, b, locs, ".ptr", ".len", source, mem, x.typs.BytePtr, x.typs.Int, 0, storeOffset, loadRegOffset, storeRc)
+ return x.decomposeOne(pos, b, a, mem, x.typs.BytePtr, OpIData, &rc)
+
case types.TCOMPLEX64:
- return storeTwoArg(x, pos, b, locs, ".real", ".imag", source, mem, x.typs.Float32, x.typs.Float32, 0, storeOffset, loadRegOffset, storeRc)
- case types.TCOMPLEX128:
- return storeTwoArg(x, pos, b, locs, ".real", ".imag", source, mem, x.typs.Float64, x.typs.Float64, 0, storeOffset, loadRegOffset, storeRc)
- case types.TSLICE:
- mem = storeOneArg(x, pos, b, locs, ".ptr", source, mem, x.typs.BytePtr, 0, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr))
- return storeTwoArg(x, pos, b, locs, ".len", ".cap", source, mem, x.typs.Int, x.typs.Int, x.ptrSize, storeOffset+x.ptrSize, loadRegOffset+RO_slice_len, storeRc)
- }
- return nil
-}
+ return x.decomposePair(pos, b, a, mem, x.typs.Float32, x.typs.Float32, OpComplexReal, OpComplexImag, &rc)
-func (x *expandState) splitSlotsIntoNames(locs []*LocalSlot, suffix string, off int64, rt *types.Type, w *Value) {
- wlocs := x.splitSlots(locs, suffix, off, rt)
- for _, l := range wlocs {
- old, ok := x.f.NamedValues[*l]
- x.f.NamedValues[*l] = append(old, w)
- if !ok {
- x.f.Names = append(x.f.Names, l)
- }
- }
-}
+ case types.TCOMPLEX128:
+ return x.decomposePair(pos, b, a, mem, x.typs.Float64, x.typs.Float64, OpComplexReal, OpComplexImag, &rc)
-// decomposeLoad is a helper for storeArgOrLoad.
-// It decomposes a Load into smaller parts and returns the new mem.
-// If the type does not match one of the expected aggregate types, it returns nil instead.
-// Parameters:
-//
-// pos -- the location of any generated code.
-// b -- the block into which any generated code should normally be placed
-// source -- the value, possibly an aggregate, to be stored.
-// mem -- the mem flowing into this decomposition (loads depend on it, stores updated it)
-// t -- the type of the value to be stored
-// storeOffset -- if the value is stored in memory, it is stored at base (see storeRc) + offset
-// loadRegOffset -- regarding source as a value in registers, the register offset in ABI1. Meaningful only if source is OpArg.
-// storeRc -- storeRC; if the value is stored in registers, this specifies the registers.
-// StoreRc also identifies whether the target is registers or memory, and has the base for the store operation.
-//
-// TODO -- this needs cleanup; it just works for SSA-able aggregates, and won't fully generalize to register-args aggregates.
-func (x *expandState) decomposeLoad(pos src.XPos, b *Block, source, mem *Value, t *types.Type, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
- u := source.Type
- switch u.Kind() {
- case types.TARRAY:
- elem := u.Elem()
- elemRO := x.regWidth(elem)
- for i := int64(0); i < u.NumElem(); i++ {
- elemOff := i * elem.Size()
- mem = storeOneLoad(x, pos, b, source, mem, elem, elemOff, storeOffset+elemOff, loadRegOffset, storeRc.next(elem))
- loadRegOffset += elemRO
- pos = pos.WithNotStmt()
- }
- return mem
- case types.TSTRUCT:
- for i := 0; i < u.NumFields(); i++ {
- fld := u.Field(i)
- mem = storeOneLoad(x, pos, b, source, mem, fld.Type, fld.Offset, storeOffset+fld.Offset, loadRegOffset, storeRc.next(fld.Type))
- loadRegOffset += x.regWidth(fld.Type)
- pos = pos.WithNotStmt()
+ case types.TINT64:
+ if at.Size() > x.regSize {
+ return x.decomposePair(pos, b, a, mem, x.firstType, x.secondType, x.firstOp, x.secondOp, &rc)
}
- return mem
- case types.TINT64, types.TUINT64:
- if t.Size() == x.regSize {
- break
+ case types.TUINT64:
+ if at.Size() > x.regSize {
+ return x.decomposePair(pos, b, a, mem, x.typs.UInt32, x.typs.UInt32, x.firstOp, x.secondOp, &rc)
}
- tHi, tLo := x.intPairTypes(t.Kind())
- mem = storeOneLoad(x, pos, b, source, mem, tHi, x.hiOffset, storeOffset+x.hiOffset, loadRegOffset+x.hiRo, storeRc.plus(x.hiRo))
- pos = pos.WithNotStmt()
- return storeOneLoad(x, pos, b, source, mem, tLo, x.lowOffset, storeOffset+x.lowOffset, loadRegOffset+x.loRo, storeRc.plus(x.loRo))
- case types.TINTER:
- return storeTwoLoad(x, pos, b, source, mem, x.typs.Uintptr, x.typs.BytePtr, 0, storeOffset, loadRegOffset, storeRc)
- case types.TSTRING:
- return storeTwoLoad(x, pos, b, source, mem, x.typs.BytePtr, x.typs.Int, 0, storeOffset, loadRegOffset, storeRc)
- case types.TCOMPLEX64:
- return storeTwoLoad(x, pos, b, source, mem, x.typs.Float32, x.typs.Float32, 0, storeOffset, loadRegOffset, storeRc)
- case types.TCOMPLEX128:
- return storeTwoLoad(x, pos, b, source, mem, x.typs.Float64, x.typs.Float64, 0, storeOffset, loadRegOffset, storeRc)
- case types.TSLICE:
- mem = storeOneLoad(x, pos, b, source, mem, x.typs.BytePtr, 0, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr))
- return storeTwoLoad(x, pos, b, source, mem, x.typs.Int, x.typs.Int, x.ptrSize, storeOffset+x.ptrSize, loadRegOffset+RO_slice_len, storeRc)
}
- return nil
-}
-// storeOneArg creates a decomposed (one step) arg that is then stored.
-// pos and b locate the store instruction, source is the "base" of the value input,
-// mem is the input mem, t is the type in question, and offArg and offStore are the offsets from the respective bases.
-func storeOneArg(x *expandState, pos src.XPos, b *Block, locs []*LocalSlot, suffix string, source, mem *Value, t *types.Type, argOffset, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
- if x.debug > 1 {
- x.indent(3)
- defer x.indent(-3)
- x.Printf("storeOneArg(%s; %s; %s; aO=%d; sO=%d; lrO=%d; %s)\n", source.LongString(), mem.String(), t.String(), argOffset, storeOffset, loadRegOffset, storeRc.String())
- }
+ // An atomic type: either record the register or store it and update the memory.
- w := x.commonArgs[selKey{source, argOffset, t.Size(), t}]
- if w == nil {
- w = x.newArgToMemOrRegs(source, w, argOffset, loadRegOffset, t, pos)
- x.splitSlotsIntoNames(locs, suffix, argOffset, t, w)
+ if rc.hasRegs() {
+ if x.debug > 1 {
+ x.Printf("...recur addArg %s\n", a.LongString())
+ }
+ rc.addArg(a)
+ } else {
+ dst := x.offsetFrom(b, rc.storeDest, rc.storeOffset, types.NewPtr(at))
+ if x.debug > 1 {
+ x.Printf("...recur store %s at %s\n", a.LongString(), dst.LongString())
+ }
+ mem = b.NewValue3A(pos, OpStore, types.TypeMem, at, dst, a, mem)
}
- return x.storeArgOrLoad(pos, b, w, mem, t, storeOffset, loadRegOffset, storeRc)
-}
-
-// storeOneLoad creates a decomposed (one step) load that is then stored.
-func storeOneLoad(x *expandState, pos src.XPos, b *Block, source, mem *Value, t *types.Type, offArg, offStore int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
- from := x.offsetFrom(source.Block, source.Args[0], offArg, types.NewPtr(t))
- w := b.NewValue2(source.Pos, OpLoad, t, from, mem)
- return x.storeArgOrLoad(pos, b, w, mem, t, offStore, loadRegOffset, storeRc)
-}
-func storeTwoArg(x *expandState, pos src.XPos, b *Block, locs []*LocalSlot, suffix1 string, suffix2 string, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
- mem = storeOneArg(x, pos, b, locs, suffix1, source, mem, t1, offArg, offStore, loadRegOffset, storeRc.next(t1))
- pos = pos.WithNotStmt()
- t1Size := t1.Size()
- return storeOneArg(x, pos, b, locs, suffix2, source, mem, t2, offArg+t1Size, offStore+t1Size, loadRegOffset+1, storeRc)
-}
-
-// storeTwoLoad creates a pair of decomposed (one step) loads that are then stored.
-// the elements of the pair must not require any additional alignment.
-func storeTwoLoad(x *expandState, pos src.XPos, b *Block, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
- mem = storeOneLoad(x, pos, b, source, mem, t1, offArg, offStore, loadRegOffset, storeRc.next(t1))
- pos = pos.WithNotStmt()
- t1Size := t1.Size()
- return storeOneLoad(x, pos, b, source, mem, t2, offArg+t1Size, offStore+t1Size, loadRegOffset+1, storeRc)
+ return mem
}
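// A minimal standalone sketch of the leaf case above, with stand-in types
// rather than registerCursor and *Value: a scalar piece either joins the
// register-value list (when the cursor still has registers) or becomes a
// store at storeDest+storeOffset.
package main

import "fmt"

type cursor struct {
	regs        []int // register indices available for this value, if any
	storeOffset int64 // where in memory the next piece would be stored
	regValues   *[]string
}

func (c *cursor) hasRegs() bool { return len(c.regs) > 0 }

// placeLeaf mirrors the register-vs-memory decision for an atomic value.
func placeLeaf(c *cursor, leaf string) string {
	if c.hasRegs() {
		*c.regValues = append(*c.regValues, leaf) // recorded; passed in a register
		return "reg"
	}
	return fmt.Sprintf("store %s at +%d", leaf, c.storeOffset) // spilled to the arg area
}

func main() {
	var regs []string
	fmt.Println(placeLeaf(&cursor{regs: []int{0}, regValues: &regs}, "x")) // reg
	fmt.Println(placeLeaf(&cursor{storeOffset: 8, regValues: &regs}, "y")) // store y at +8
}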
-// storeArgOrLoad converts stores of SSA-able potentially aggregatable arguments (passed to a call) into a series of primitive-typed
-// stores of non-aggregate types. It recursively walks up a chain of selectors until it reaches a Load or an Arg.
-// If it does not reach a Load or an Arg, nothing happens; this allows a little freedom in phase ordering.
-func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value, t *types.Type, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
- if x.debug > 1 {
- x.indent(3)
- defer x.indent(-3)
- x.Printf("storeArgOrLoad(%s; %s; %s; %d; %s)\n", source.LongString(), mem.String(), t.String(), storeOffset, storeRc.String())
+// Convert scalar OpArg into the proper OpWhateverArg instruction
+// Convert scalar OpSelectN into perhaps-differently-indexed OpSelectN
+// Convert aggregate OpArg into Make of its parts (which are eventually scalars)
+// Convert aggregate OpSelectN into Make of its parts (which are eventually scalars)
+// Returns the converted value.
+//
+// - "pos" the position for any generated instructions
+// - "b" the block for any generated instructions
+// - "container" the outermost OpArg/OpSelectN
+// - "a" the instruction to overwrite, if any (only the outermost caller)
+// - "m0" the memory arg for any loads that are necessary
+// - "at" the type of the Arg/part
+// - "rc" the register/memory cursor locating the various parts of the Arg.
+func (x *expandState) rewriteSelectOrArg(pos src.XPos, b *Block, container, a, m0 *Value, at *types.Type, rc registerCursor) *Value {
+
+ if at == types.TypeMem {
+ a.copyOf(m0)
+ return a
+ }
+
+ makeOf := func(a *Value, op Op, args []*Value) *Value {
+ if a == nil {
+ a = b.NewValue0(pos, op, at)
+ a.AddArgs(args...)
+ } else {
+ a.resetArgs()
+ a.Aux, a.AuxInt = nil, 0
+ a.Pos, a.Op, a.Type = pos, op, at
+ a.AddArgs(args...)
+ }
+ return a
}
- // Start with Opcodes that can be disassembled
- switch source.Op {
- case OpCopy:
- return x.storeArgOrLoad(pos, b, source.Args[0], mem, t, storeOffset, loadRegOffset, storeRc)
-
- case OpLoad, OpDereference:
- ret := x.decomposeLoad(pos, b, source, mem, t, storeOffset, loadRegOffset, storeRc)
- if ret != nil {
- return ret
+ if at.Size() == 0 {
+ // For consistency, create these values even though they'll ultimately be unused
+ if at.IsArray() {
+ return makeOf(a, OpArrayMake0, nil)
}
-
- case OpArg:
- ret := x.decomposeArg(pos, b, source, mem, t, storeOffset, loadRegOffset, storeRc)
- if ret != nil {
- return ret
+ if at.IsStruct() {
+ return makeOf(a, OpStructMake0, nil)
}
+ return a
+ }
- case OpArrayMake0, OpStructMake0:
- // TODO(register args) is this correct for registers?
- return mem
-
- case OpStructMake1, OpStructMake2, OpStructMake3, OpStructMake4:
- for i := 0; i < t.NumFields(); i++ {
- fld := t.Field(i)
- mem = x.storeArgOrLoad(pos, b, source.Args[i], mem, fld.Type, storeOffset+fld.Offset, 0, storeRc.next(fld.Type))
- pos = pos.WithNotStmt()
+ sk := selKey{from: container, size: 0, offsetOrIndex: rc.storeOffset, typ: at}
+ dupe := x.commonSelectors[sk]
+ if dupe != nil {
+ if a == nil {
+ return dupe
}
- return mem
-
- case OpArrayMake1:
- return x.storeArgOrLoad(pos, b, source.Args[0], mem, t.Elem(), storeOffset, 0, storeRc.at(t, 0))
+ a.copyOf(dupe)
+ return a
+ }
- case OpInt64Make:
- tHi, tLo := x.intPairTypes(t.Kind())
- mem = x.storeArgOrLoad(pos, b, source.Args[0], mem, tHi, storeOffset+x.hiOffset, 0, storeRc.next(tHi))
- pos = pos.WithNotStmt()
- return x.storeArgOrLoad(pos, b, source.Args[1], mem, tLo, storeOffset+x.lowOffset, 0, storeRc)
+ var argStore [10]*Value
+ args := argStore[:0]
- case OpComplexMake:
- tPart := x.typs.Float32
- wPart := t.Size() / 2
- if wPart == 8 {
- tPart = x.typs.Float64
+ addArg := func(a0 *Value) {
+ if a0 == nil {
+ as := "<nil>"
+ if a != nil {
+ as = a.LongString()
+ }
+ panic(fmt.Errorf("a0 should not be nil, a=%v, container=%v, at=%v", as, container.LongString(), at))
}
- mem = x.storeArgOrLoad(pos, b, source.Args[0], mem, tPart, storeOffset, 0, storeRc.next(tPart))
- pos = pos.WithNotStmt()
- return x.storeArgOrLoad(pos, b, source.Args[1], mem, tPart, storeOffset+wPart, 0, storeRc)
-
- case OpIMake:
- mem = x.storeArgOrLoad(pos, b, source.Args[0], mem, x.typs.Uintptr, storeOffset, 0, storeRc.next(x.typs.Uintptr))
- pos = pos.WithNotStmt()
- return x.storeArgOrLoad(pos, b, source.Args[1], mem, x.typs.BytePtr, storeOffset+x.ptrSize, 0, storeRc)
-
- case OpStringMake:
- mem = x.storeArgOrLoad(pos, b, source.Args[0], mem, x.typs.BytePtr, storeOffset, 0, storeRc.next(x.typs.BytePtr))
- pos = pos.WithNotStmt()
- return x.storeArgOrLoad(pos, b, source.Args[1], mem, x.typs.Int, storeOffset+x.ptrSize, 0, storeRc)
-
- case OpSliceMake:
- mem = x.storeArgOrLoad(pos, b, source.Args[0], mem, x.typs.BytePtr, storeOffset, 0, storeRc.next(x.typs.BytePtr))
- pos = pos.WithNotStmt()
- mem = x.storeArgOrLoad(pos, b, source.Args[1], mem, x.typs.Int, storeOffset+x.ptrSize, 0, storeRc.next(x.typs.Int))
- return x.storeArgOrLoad(pos, b, source.Args[2], mem, x.typs.Int, storeOffset+2*x.ptrSize, 0, storeRc)
+ args = append(args, a0)
}
- // For nodes that cannot be taken apart -- OpSelectN, other structure selectors.
- switch t.Kind() {
+ switch at.Kind() {
case types.TARRAY:
- elt := t.Elem()
- if source.Type != t && t.NumElem() == 1 && elt.Size() == t.Size() && t.Size() == x.regSize {
- t = removeTrivialWrapperTypes(t)
- // it could be a leaf type, but the "leaf" could be complex64 (for example)
- return x.storeArgOrLoad(pos, b, source, mem, t, storeOffset, loadRegOffset, storeRc)
+ et := at.Elem()
+ for i := int64(0); i < at.NumElem(); i++ {
+ e := x.rewriteSelectOrArg(pos, b, container, nil, m0, et, rc.next(et))
+ addArg(e)
}
- eltRO := x.regWidth(elt)
- source.Type = t
- for i := int64(0); i < t.NumElem(); i++ {
- sel := b.NewValue1I(pos, OpArraySelect, elt, i, source)
- mem = x.storeArgOrLoad(pos, b, sel, mem, elt, storeOffset+i*elt.Size(), loadRegOffset, storeRc.at(t, 0))
- loadRegOffset += eltRO
- pos = pos.WithNotStmt()
- }
- return mem
+ a = makeOf(a, OpArrayMake1, args)
+ x.commonSelectors[sk] = a
+ return a
case types.TSTRUCT:
- if source.Type != t && t.NumFields() == 1 && t.Field(0).Type.Size() == t.Size() && t.Size() == x.regSize {
- // This peculiar test deals with accesses to immediate interface data.
- // It works okay because everything is the same size.
- // Example code that triggers this can be found in go/constant/value.go, function ToComplex
- // v119 (+881) = IData <intVal> v6
- // v121 (+882) = StaticLECall <floatVal,mem> {AuxCall{"".itof([intVal,0])[floatVal,8]}} [16] v119 v1
- // This corresponds to the generic rewrite rule "(StructSelect [0] (IData x)) => (IData x)"
- // Guard against "struct{struct{*foo}}"
- // Other rewriting phases create minor glitches when they transform IData, for instance the
- // interface-typed Arg "x" of ToFloat in go/constant/value.go
- // v6 (858) = Arg <Value> {x} (x[Value], x[Value])
- // is rewritten by decomposeArgs into
- // v141 (858) = Arg <uintptr> {x}
- // v139 (858) = Arg <*uint8> {x} [8]
- // because of a type case clause on line 862 of go/constant/value.go
- // case intVal:
- // return itof(x)
- // v139 is later stored as an intVal == struct{val *big.Int} which naively requires the fields of
- // of a *uint8, which does not succeed.
- t = removeTrivialWrapperTypes(t)
- // it could be a leaf type, but the "leaf" could be complex64 (for example)
- return x.storeArgOrLoad(pos, b, source, mem, t, storeOffset, loadRegOffset, storeRc)
- }
-
- source.Type = t
- for i := 0; i < t.NumFields(); i++ {
- fld := t.Field(i)
- sel := b.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source)
- mem = x.storeArgOrLoad(pos, b, sel, mem, fld.Type, storeOffset+fld.Offset, loadRegOffset, storeRc.next(fld.Type))
- loadRegOffset += x.regWidth(fld.Type)
+ // Assume ssagen/ssa.go (in buildssa) spills large aggregates so they won't appear here.
+ for i := 0; i < at.NumFields(); i++ {
+ et := at.Field(i).Type
+ e := x.rewriteSelectOrArg(pos, b, container, nil, m0, et, rc.next(et))
+ if e == nil {
+ panic(fmt.Errorf("nil e, et=%v, et.Size()=%d, i=%d", et, et.Size(), i))
+ }
+ addArg(e)
pos = pos.WithNotStmt()
}
- return mem
-
- case types.TINT64, types.TUINT64:
- if t.Size() == x.regSize {
- break
+ if at.NumFields() > 4 {
+ panic(fmt.Errorf("Too many fields (%d, %d bytes), container=%s", at.NumFields(), at.Size(), container.LongString()))
}
- tHi, tLo := x.intPairTypes(t.Kind())
- sel := b.NewValue1(pos, OpInt64Hi, tHi, source)
- mem = x.storeArgOrLoad(pos, b, sel, mem, tHi, storeOffset+x.hiOffset, loadRegOffset+x.hiRo, storeRc.plus(x.hiRo))
- pos = pos.WithNotStmt()
- sel = b.NewValue1(pos, OpInt64Lo, tLo, source)
- return x.storeArgOrLoad(pos, b, sel, mem, tLo, storeOffset+x.lowOffset, loadRegOffset+x.loRo, storeRc.plus(x.hiRo))
+ a = makeOf(a, StructMakeOp(at.NumFields()), args)
+ x.commonSelectors[sk] = a
+ return a
- case types.TINTER:
- sel := b.NewValue1(pos, OpITab, x.typs.BytePtr, source)
- mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr))
+ case types.TSLICE:
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, at.Elem().PtrTo(), rc.next(x.typs.BytePtr)))
pos = pos.WithNotStmt()
- sel = b.NewValue1(pos, OpIData, x.typs.BytePtr, source)
- return x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset+x.ptrSize, loadRegOffset+RO_iface_data, storeRc)
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Int, rc.next(x.typs.Int)))
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Int, rc.next(x.typs.Int)))
+ a = makeOf(a, OpSliceMake, args)
+ x.commonSelectors[sk] = a
+ return a
case types.TSTRING:
- sel := b.NewValue1(pos, OpStringPtr, x.typs.BytePtr, source)
- mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr))
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.BytePtr, rc.next(x.typs.BytePtr)))
pos = pos.WithNotStmt()
- sel = b.NewValue1(pos, OpStringLen, x.typs.Int, source)
- return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+x.ptrSize, loadRegOffset+RO_string_len, storeRc)
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Int, rc.next(x.typs.Int)))
+ a = makeOf(a, OpStringMake, args)
+ x.commonSelectors[sk] = a
+ return a
- case types.TSLICE:
- et := types.NewPtr(t.Elem())
- sel := b.NewValue1(pos, OpSlicePtr, et, source)
- mem = x.storeArgOrLoad(pos, b, sel, mem, et, storeOffset, loadRegOffset, storeRc.next(et))
+ case types.TINTER:
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Uintptr, rc.next(x.typs.Uintptr)))
pos = pos.WithNotStmt()
- sel = b.NewValue1(pos, OpSliceLen, x.typs.Int, source)
- mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+x.ptrSize, loadRegOffset+RO_slice_len, storeRc.next(x.typs.Int))
- sel = b.NewValue1(pos, OpSliceCap, x.typs.Int, source)
- return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+2*x.ptrSize, loadRegOffset+RO_slice_cap, storeRc)
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.BytePtr, rc.next(x.typs.BytePtr)))
+ a = makeOf(a, OpIMake, args)
+ x.commonSelectors[sk] = a
+ return a
case types.TCOMPLEX64:
- sel := b.NewValue1(pos, OpComplexReal, x.typs.Float32, source)
- mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float32, storeOffset, loadRegOffset, storeRc.next(x.typs.Float32))
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Float32, rc.next(x.typs.Float32)))
pos = pos.WithNotStmt()
- sel = b.NewValue1(pos, OpComplexImag, x.typs.Float32, source)
- return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float32, storeOffset+4, loadRegOffset+RO_complex_imag, storeRc)
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Float32, rc.next(x.typs.Float32)))
+ a = makeOf(a, OpComplexMake, args)
+ x.commonSelectors[sk] = a
+ return a
case types.TCOMPLEX128:
- sel := b.NewValue1(pos, OpComplexReal, x.typs.Float64, source)
- mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float64, storeOffset, loadRegOffset, storeRc.next(x.typs.Float64))
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Float64, rc.next(x.typs.Float64)))
pos = pos.WithNotStmt()
- sel = b.NewValue1(pos, OpComplexImag, x.typs.Float64, source)
- return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float64, storeOffset+8, loadRegOffset+RO_complex_imag, storeRc)
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Float64, rc.next(x.typs.Float64)))
+ a = makeOf(a, OpComplexMake, args)
+ x.commonSelectors[sk] = a
+ return a
+
+ case types.TINT64:
+ if at.Size() > x.regSize {
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.firstType, rc.next(x.firstType)))
+ pos = pos.WithNotStmt()
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.secondType, rc.next(x.secondType)))
+ if !x.f.Config.BigEndian {
+ // Int64Make args are big, little
+ args[0], args[1] = args[1], args[0]
+ }
+ a = makeOf(a, OpInt64Make, args)
+ x.commonSelectors[sk] = a
+ return a
+ }
+ case types.TUINT64:
+ if at.Size() > x.regSize {
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.UInt32, rc.next(x.typs.UInt32)))
+ pos = pos.WithNotStmt()
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.UInt32, rc.next(x.typs.UInt32)))
+ if !x.f.Config.BigEndian {
+ // Int64Make args are big, little
+ args[0], args[1] = args[1], args[0]
+ }
+ a = makeOf(a, OpInt64Make, args)
+ x.commonSelectors[sk] = a
+ return a
+ }
}
- s := mem
- if source.Op == OpDereference {
- source.Op = OpLoad // For purposes of parameter passing expansion, a Dereference is a Load.
- }
- if storeRc.hasRegs() {
- storeRc.addArg(source)
- } else {
- dst := x.offsetFrom(b, storeRc.storeDest, storeOffset, types.NewPtr(t))
- s = b.NewValue3A(pos, OpStore, types.TypeMem, t, dst, source, mem)
- }
- if x.debug > 1 {
- x.Printf("-->storeArg returns %s, storeRc=%s\n", s.LongString(), storeRc.String())
- }
- return s
-}
+ // An atomic type: either record the register or store it and update the memory.
-// rewriteArgs replaces all the call-parameter Args to a call with their register translation (if any).
-// Preceding parameters (code pointers, closure pointer) are preserved, and the memory input is modified
-// to account for any parameter stores required.
-// Any of the old Args that have their use count fall to zero are marked OpInvalid.
-func (x *expandState) rewriteArgs(v *Value, firstArg int) {
- if x.debug > 1 {
- x.indent(3)
- defer x.indent(-3)
- x.Printf("rewriteArgs(%s; %d)\n", v.LongString(), firstArg)
- }
- // Thread the stores on the memory arg
- aux := v.Aux.(*AuxCall)
- m0 := v.MemoryArg()
- mem := m0
- newArgs := []*Value{}
- oldArgs := []*Value{}
- sp := x.sp
- if v.Op == OpTailLECall {
- // For tail call, we unwind the frame before the call so we'll use the caller's
- // SP.
- sp = x.f.Entry.NewValue1(src.NoXPos, OpGetCallerSP, x.typs.Uintptr, mem)
- }
- for i, a := range v.Args[firstArg : len(v.Args)-1] { // skip leading non-parameter SSA Args and trailing mem SSA Arg.
- oldArgs = append(oldArgs, a)
- auxI := int64(i)
- aRegs := aux.RegsOfArg(auxI)
- aType := aux.TypeOfArg(auxI)
- if len(aRegs) == 0 && a.Op == OpDereference {
- aOffset := aux.OffsetOfArg(auxI)
- if a.MemoryArg() != m0 {
- x.f.Fatalf("Op...LECall and OpDereference have mismatched mem, %s and %s", v.LongString(), a.LongString())
- }
- if v.Op == OpTailLECall {
- // It's common for a tail call passing the same arguments (e.g. method wrapper),
- // so this would be a self copy. Detect this and optimize it out.
- a0 := a.Args[0]
- if a0.Op == OpLocalAddr {
- n := a0.Aux.(*ir.Name)
- if n.Class == ir.PPARAM && n.FrameOffset()+x.f.Config.ctxt.Arch.FixedFrameSize == aOffset {
- continue
- }
- }
- }
- if x.debug > 1 {
- x.Printf("...storeArg %s, %v, %d\n", a.LongString(), aType, aOffset)
- }
- // "Dereference" of addressed (probably not-SSA-eligible) value becomes Move
- // TODO(register args) this will be more complicated with registers in the picture.
- mem = x.rewriteDereference(v.Block, sp, a, mem, aOffset, aux.SizeOfArg(auxI), aType, v.Pos)
+ // Depending on the container Op, the leaves are either OpSelectN or OpArg{Int,Float}Reg
+
+ if container.Op == OpArg {
+ if rc.hasRegs() {
+ op, i := rc.ArgOpAndRegisterFor()
+ name := container.Aux.(*ir.Name)
+ a = makeOf(a, op, nil)
+ a.AuxInt = i
+ a.Aux = &AuxNameOffset{name, rc.storeOffset}
} else {
- var rc registerCursor
- var result *[]*Value
- var aOffset int64
- if len(aRegs) > 0 {
- result = &newArgs
+ key := selKey{container, rc.storeOffset, at.Size(), at}
+ w := x.commonArgs[key]
+ if w != nil && w.Uses != 0 {
+ if a == nil {
+ a = w
+ } else {
+ a.copyOf(w)
+ }
} else {
- aOffset = aux.OffsetOfArg(auxI)
- }
- if v.Op == OpTailLECall && a.Op == OpArg && a.AuxInt == 0 {
- // It's common for a tail call passing the same arguments (e.g. method wrapper),
- // so this would be a self copy. Detect this and optimize it out.
- n := a.Aux.(*ir.Name)
- if n.Class == ir.PPARAM && n.FrameOffset()+x.f.Config.ctxt.Arch.FixedFrameSize == aOffset {
- continue
+ if a == nil {
+ aux := container.Aux
+ auxInt := container.AuxInt + rc.storeOffset
+ a = container.Block.NewValue0IA(container.Pos, OpArg, at, auxInt, aux)
+ } else {
+ // do nothing, the original should be okay.
}
+ x.commonArgs[key] = a
}
- if x.debug > 1 {
- x.Printf("...storeArg %s, %v, %d\n", a.LongString(), aType, aOffset)
- }
- rc.init(aRegs, aux.abiInfo, result, sp)
- mem = x.storeArgOrLoad(v.Pos, v.Block, a, mem, aType, aOffset, 0, rc)
}
- }
- var preArgStore [2]*Value
- preArgs := append(preArgStore[:0], v.Args[0:firstArg]...)
- v.resetArgs()
- v.AddArgs(preArgs...)
- v.AddArgs(newArgs...)
- v.AddArg(mem)
- for _, a := range oldArgs {
- if a.Uses == 0 {
- x.invalidateRecursively(a)
+ } else if container.Op == OpSelectN {
+ call := container.Args[0]
+ aux := call.Aux.(*AuxCall)
+ which := container.AuxInt
+
+ if at == types.TypeMem {
+ if a != m0 || a != x.memForCall[call.ID] {
+ panic(fmt.Errorf("Memories %s, %s, and %s should all be equal after %s", a.LongString(), m0.LongString(), x.memForCall[call.ID], call.LongString()))
+ }
+ } else if rc.hasRegs() {
+ firstReg := uint32(0)
+ for i := 0; i < int(which); i++ {
+ firstReg += uint32(len(aux.abiInfo.OutParam(i).Registers))
+ }
+ reg := int64(rc.nextSlice + Abi1RO(firstReg))
+ a = makeOf(a, OpSelectN, []*Value{call})
+ a.AuxInt = reg
+ } else {
+ off := x.offsetFrom(x.f.Entry, x.sp, rc.storeOffset+aux.OffsetOfResult(which), types.NewPtr(at))
+ a = makeOf(a, OpLoad, []*Value{off, m0})
}
+
+ } else {
+ panic(fmt.Errorf("Expected container OpArg or OpSelectN, saw %v instead", container.LongString()))
}
- return
+ x.commonSelectors[sk] = a
+ return a
}
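// A minimal standalone sketch of the memoization used above, with stand-in
// types rather than selKey/commonSelectors: pieces of a container are keyed
// by (container, offset, type) so that repeated selections of the same part
// reuse a single value instead of creating duplicates.
package main

import "fmt"

type pieceKey struct {
	container string
	offset    int64
	typ       string
}

var common = map[pieceKey]string{}

// pieceOf returns the cached piece for the key if one exists, and otherwise
// builds and caches a new one.
func pieceOf(container string, offset int64, typ string) string {
	k := pieceKey{container, offset, typ}
	if v, ok := common[k]; ok {
		return v
	}
	v := fmt.Sprintf("%s[%d:%s]", container, offset, typ)
	common[k] = v
	return v
}

func main() {
	a := pieceOf("arg0", 0, "uintptr")
	b := pieceOf("arg0", 0, "uintptr")
	fmt.Println(a == b) // true: the second lookup reused the first piece
}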
-func (x *expandState) invalidateRecursively(a *Value) {
- var s string
- if x.debug > 0 {
- plus := " "
- if a.Pos.IsStmt() == src.PosIsStmt {
- plus = " +"
- }
- s = a.String() + plus + a.Pos.LineNumber() + " " + a.LongString()
- if x.debug > 1 {
- x.Printf("...marking %v unused\n", s)
- }
- }
- lost := a.invalidateRecursively()
- if x.debug&1 != 0 && lost { // For odd values of x.debug, do this.
- x.Printf("Lost statement marker in %s on former %s\n", base.Ctxt.Pkgpath+"."+x.f.Name, s)
+// rewriteWideSelectToStores handles the case of a SelectN'd result from a function call that is too large for SSA,
+// but is transferred in registers. In this case the register cursor tracks both operands: the register sources and
+// the memory destinations.
+// This returns the memory flowing out of the last store.
+func (x *expandState) rewriteWideSelectToStores(pos src.XPos, b *Block, container, m0 *Value, at *types.Type, rc registerCursor) *Value {
+
+ if at.Size() == 0 {
+ return m0
}
-}
-// expandCalls converts LE (Late Expansion) calls that act like they receive value args into a lower-level form
-// that is more oriented to a platform's ABI. The SelectN operations that extract results are rewritten into
-// more appropriate forms, and any StructMake or ArrayMake inputs are decomposed until non-struct values are
-// reached. On the callee side, OpArg nodes are not decomposed until this phase is run.
-// TODO results should not be lowered until this phase.
-func expandCalls(f *Func) {
- // Calls that need lowering have some number of inputs, including a memory input,
- // and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able.
+ switch at.Kind() {
+ case types.TARRAY:
+ et := at.Elem()
+ for i := int64(0); i < at.NumElem(); i++ {
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, et, rc.next(et))
+ }
+ return m0
- // With the current ABI those inputs need to be converted into stores to memory,
- // rethreading the call's memory input to the first, and the new call now receiving the last.
+ case types.TSTRUCT:
+ // Assume ssagen/ssa.go (in buildssa) spills large aggregates so they won't appear here.
+ for i := 0; i < at.NumFields(); i++ {
+ et := at.Field(i).Type
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, et, rc.next(et))
+ pos = pos.WithNotStmt()
+ }
+ return m0
- // With the current ABI, the outputs need to be converted to loads, which will all use the call's
- // memory output as their input.
- sp, _ := f.spSb()
- x := &expandState{
- f: f,
- abi1: f.ABI1,
- debug: f.pass.debug,
- canSSAType: f.fe.CanSSA,
- regSize: f.Config.RegSize,
- sp: sp,
- typs: &f.Config.Types,
- ptrSize: f.Config.PtrSize,
- namedSelects: make(map[*Value][]namedVal),
- sdom: f.Sdom(),
- commonArgs: make(map[selKey]*Value),
- memForCall: make(map[ID]*Value),
- transformedSelects: make(map[ID]bool),
- }
+ case types.TSLICE:
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, at.Elem().PtrTo(), rc.next(x.typs.BytePtr))
+ pos = pos.WithNotStmt()
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Int, rc.next(x.typs.Int))
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Int, rc.next(x.typs.Int))
+ return m0
- // For 32-bit, need to deal with decomposition of 64-bit integers, which depends on endianness.
- if f.Config.BigEndian {
- x.lowOffset, x.hiOffset = 4, 0
- x.loRo, x.hiRo = 1, 0
- } else {
- x.lowOffset, x.hiOffset = 0, 4
- x.loRo, x.hiRo = 0, 1
- }
+ case types.TSTRING:
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.BytePtr, rc.next(x.typs.BytePtr))
+ pos = pos.WithNotStmt()
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Int, rc.next(x.typs.Int))
+ return m0
- if x.debug > 1 {
- x.Printf("\nexpandsCalls(%s)\n", f.Name)
- }
+ case types.TINTER:
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Uintptr, rc.next(x.typs.Uintptr))
+ pos = pos.WithNotStmt()
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.BytePtr, rc.next(x.typs.BytePtr))
+ return m0
- for i, name := range f.Names {
- t := name.Type
- if x.isAlreadyExpandedAggregateType(t) {
- for j, v := range f.NamedValues[*name] {
- if v.Op == OpSelectN || v.Op == OpArg && x.isAlreadyExpandedAggregateType(v.Type) {
- ns := x.namedSelects[v]
- x.namedSelects[v] = append(ns, namedVal{locIndex: i, valIndex: j})
- }
- }
- }
- }
+ case types.TCOMPLEX64:
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Float32, rc.next(x.typs.Float32))
+ pos = pos.WithNotStmt()
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Float32, rc.next(x.typs.Float32))
+ return m0
- // TODO if too slow, whole program iteration can be replaced w/ slices of appropriate values, accumulated in first loop here.
+ case types.TCOMPLEX128:
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Float64, rc.next(x.typs.Float64))
+ pos = pos.WithNotStmt()
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Float64, rc.next(x.typs.Float64))
+ return m0
- // Step 0: rewrite the calls to convert args to calls into stores/register movement.
- for _, b := range f.Blocks {
- for _, v := range b.Values {
- firstArg := 0
- switch v.Op {
- case OpStaticLECall, OpTailLECall:
- case OpInterLECall:
- firstArg = 1
- case OpClosureLECall:
- firstArg = 2
- default:
- continue
- }
- x.rewriteArgs(v, firstArg)
+ case types.TINT64:
+ if at.Size() > x.regSize {
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.firstType, rc.next(x.firstType))
+ pos = pos.WithNotStmt()
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.secondType, rc.next(x.secondType))
+ return m0
}
- if isBlockMultiValueExit(b) {
- x.indent(3)
- // Very similar to code in rewriteArgs, but results instead of args.
- v := b.Controls[0]
- m0 := v.MemoryArg()
- mem := m0
- aux := f.OwnAux
- allResults := []*Value{}
- if x.debug > 1 {
- x.Printf("multiValueExit rewriting %s\n", v.LongString())
- }
- var oldArgs []*Value
- for j, a := range v.Args[:len(v.Args)-1] {
- oldArgs = append(oldArgs, a)
- i := int64(j)
- auxType := aux.TypeOfResult(i)
- auxBase := b.NewValue2A(v.Pos, OpLocalAddr, types.NewPtr(auxType), aux.NameOfResult(i), x.sp, mem)
- auxOffset := int64(0)
- auxSize := aux.SizeOfResult(i)
- aRegs := aux.RegsOfResult(int64(j))
- if len(aRegs) == 0 && a.Op == OpDereference {
- // Avoid a self-move, and if one is detected try to remove the already-inserted VarDef for the assignment that won't happen.
- if dAddr, dMem := a.Args[0], a.Args[1]; dAddr.Op == OpLocalAddr && dAddr.Args[0].Op == OpSP &&
- dAddr.Args[1] == dMem && dAddr.Aux == aux.NameOfResult(i) {
- if dMem.Op == OpVarDef && dMem.Aux == dAddr.Aux {
- dMem.copyOf(dMem.MemoryArg()) // elide the VarDef
- }
- continue
- }
- mem = x.rewriteDereference(v.Block, auxBase, a, mem, auxOffset, auxSize, auxType, a.Pos)
- } else {
- if a.Op == OpLoad && a.Args[0].Op == OpLocalAddr {
- addr := a.Args[0] // This is a self-move. // TODO(register args) do what here for registers?
- if addr.MemoryArg() == a.MemoryArg() && addr.Aux == aux.NameOfResult(i) {
- continue
- }
- }
- var rc registerCursor
- var result *[]*Value
- if len(aRegs) > 0 {
- result = &allResults
- }
- rc.init(aRegs, aux.abiInfo, result, auxBase)
- mem = x.storeArgOrLoad(v.Pos, b, a, mem, aux.TypeOfResult(i), auxOffset, 0, rc)
- }
- }
- v.resetArgs()
- v.AddArgs(allResults...)
- v.AddArg(mem)
- v.Type = types.NewResults(append(abi.RegisterTypes(aux.abiInfo.OutParams()), types.TypeMem))
- b.SetControl(v)
- for _, a := range oldArgs {
- if a.Uses == 0 {
- if x.debug > 1 {
- x.Printf("...marking %v unused\n", a.LongString())
- }
- x.invalidateRecursively(a)
- }
- }
- if x.debug > 1 {
- x.Printf("...multiValueExit new result %s\n", v.LongString())
- }
- x.indent(-3)
+ case types.TUINT64:
+ if at.Size() > x.regSize {
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.UInt32, rc.next(x.typs.UInt32))
+ pos = pos.WithNotStmt()
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.UInt32, rc.next(x.typs.UInt32))
+ return m0
}
}
- // Step 1: any stores of aggregates remaining are believed to be sourced from call results or args.
- // Decompose those stores into a series of smaller stores, adding selection ops as necessary.
- for _, b := range f.Blocks {
- for _, v := range b.Values {
- if v.Op == OpStore {
- t := v.Aux.(*types.Type)
- source := v.Args[1]
- tSrc := source.Type
- iAEATt := x.isAlreadyExpandedAggregateType(t)
-
- if !iAEATt {
- // guarding against store immediate struct into interface data field -- store type is *uint8
- // TODO can this happen recursively?
- iAEATt = x.isAlreadyExpandedAggregateType(tSrc)
- if iAEATt {
- t = tSrc
- }
- }
- dst, mem := v.Args[0], v.Args[2]
- mem = x.storeArgOrLoad(v.Pos, b, source, mem, t, 0, 0, registerCursor{storeDest: dst})
- v.copyOf(mem)
+ // TODO could change treatment of too-large OpArg, would deal with it here.
+ if container.Op == OpSelectN {
+ call := container.Args[0]
+ aux := call.Aux.(*AuxCall)
+ which := container.AuxInt
+
+ if rc.hasRegs() {
+ firstReg := uint32(0)
+ for i := 0; i < int(which); i++ {
+ firstReg += uint32(len(aux.abiInfo.OutParam(i).Registers))
}
+ reg := int64(rc.nextSlice + Abi1RO(firstReg))
+ a := b.NewValue1I(pos, OpSelectN, at, reg, call)
+ dst := x.offsetFrom(b, rc.storeDest, rc.storeOffset, types.NewPtr(at))
+ m0 = b.NewValue3A(pos, OpStore, types.TypeMem, at, dst, a, m0)
+ } else {
+ panic(fmt.Errorf("Expected rc to have registers"))
}
+ } else {
+ panic(fmt.Errorf("Expected container OpSelectN, saw %v instead", container.LongString()))
}
+ return m0
+}
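// A minimal standalone sketch of the register-to-memory case above, using
// plain strings instead of SSA values: each register piece of a wide result
// is selected by register index and stored at its offset, and the memory
// token chains through the stores.
package main

import "fmt"

func main() {
	mem := "m0"
	offsets := []int64{0, 8, 16} // stand-in layout of a three-word result
	for reg, off := range offsets {
		piece := fmt.Sprintf("SelectN[reg %d](call)", reg)
		mem = fmt.Sprintf("Store(+%d, %s, %s)", off, piece, mem)
	}
	fmt.Println(mem) // prints the fully nested chain of three stores
}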
- val2Preds := make(map[*Value]int32) // Used to accumulate dependency graph of selection operations for topological ordering.
-
- // Step 2: transform or accumulate selection operations for rewrite in topological order.
- //
- // Aggregate types that have already (in earlier phases) been transformed must be lowered comprehensively to finish
- // the transformation (user-defined structs and arrays, slices, strings, interfaces, complex, 64-bit on 32-bit architectures),
- //
- // Any select-for-addressing applied to call results can be transformed directly.
- for _, b := range f.Blocks {
- for _, v := range b.Values {
- // Accumulate chains of selectors for processing in topological order
- switch v.Op {
- case OpStructSelect, OpArraySelect,
- OpIData, OpITab,
- OpStringPtr, OpStringLen,
- OpSlicePtr, OpSliceLen, OpSliceCap, OpSlicePtrUnchecked,
- OpComplexReal, OpComplexImag,
- OpInt64Hi, OpInt64Lo:
- w := v.Args[0]
- switch w.Op {
- case OpStructSelect, OpArraySelect, OpSelectN, OpArg:
- val2Preds[w] += 1
- if x.debug > 1 {
- x.Printf("v2p[%s] = %d\n", w.LongString(), val2Preds[w])
- }
- }
- fallthrough
+func isBlockMultiValueExit(b *Block) bool {
+ return (b.Kind == BlockRet || b.Kind == BlockRetJmp) && b.Controls[0] != nil && b.Controls[0].Op == OpMakeResult
+}
- case OpSelectN:
- if _, ok := val2Preds[v]; !ok {
- val2Preds[v] = 0
- if x.debug > 1 {
- x.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v])
- }
- }
+type Abi1RO uint8 // An offset within a parameter's slice of register indices, for abi1.
- case OpArg:
- if !x.isAlreadyExpandedAggregateType(v.Type) {
- continue
- }
- if _, ok := val2Preds[v]; !ok {
- val2Preds[v] = 0
- if x.debug > 1 {
- x.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v])
- }
- }
+// A registerCursor tracks which register is used for an Arg or regValues, or a piece of such.
+type registerCursor struct {
+ storeDest *Value // if there are no register targets, then this is the base of the store.
+ storeOffset int64
+ regs []abi.RegIndex // the registers available for this Arg/result (which is all in registers or not at all)
+ nextSlice Abi1RO // the next register/register-slice offset
+ config *abi.ABIConfig
+ regValues *[]*Value // values assigned to registers accumulate here
+}
- case OpSelectNAddr:
- // Do these directly, there are no chains of selectors.
- call := v.Args[0]
- which := v.AuxInt
- aux := call.Aux.(*AuxCall)
- pt := v.Type
- off := x.offsetFrom(x.f.Entry, x.sp, aux.OffsetOfResult(which), pt)
- v.copyOf(off)
+func (c *registerCursor) String() string {
+ dest := "<none>"
+ if c.storeDest != nil {
+ dest = fmt.Sprintf("%s+%d", c.storeDest.String(), c.storeOffset)
+ }
+ regs := "<none>"
+ if c.regValues != nil {
+ regs = ""
+ for i, x := range *c.regValues {
+ if i > 0 {
+ regs = regs + "; "
}
+ regs = regs + x.LongString()
}
}
- // Step 3: Compute topological order of selectors,
- // then process it in reverse to eliminate duplicates,
- // then forwards to rewrite selectors.
- //
- // All chains of selectors end up in same block as the call.
-
- // Compilation must be deterministic, so sort after extracting first zeroes from map.
- // Sorting allows dominators-last order within each batch,
- // so that the backwards scan for duplicates will most often find copies from dominating blocks (it is best-effort).
- var toProcess []*Value
- less := func(i, j int) bool {
- vi, vj := toProcess[i], toProcess[j]
- bi, bj := vi.Block, vj.Block
- if bi == bj {
- return vi.ID < vj.ID
- }
- return x.sdom.domorder(bi) > x.sdom.domorder(bj) // reverse the order to put dominators last.
+ // not printing the config because that has not been useful
+ return fmt.Sprintf("RCSR{storeDest=%v, regsLen=%d, nextSlice=%d, regValues=[%s]}", dest, len(c.regs), c.nextSlice, regs)
+}
+
+// next effectively post-increments the register cursor; the receiver is advanced,
+// the (aligned) old value is returned.
+func (c *registerCursor) next(t *types.Type) registerCursor {
+ c.storeOffset = types.RoundUp(c.storeOffset, t.Alignment())
+ rc := *c
+ c.storeOffset = types.RoundUp(c.storeOffset+t.Size(), t.Alignment())
+ if int(c.nextSlice) < len(c.regs) {
+ w := c.config.NumParamRegs(t)
+ c.nextSlice += Abi1RO(w)
}
+ return rc
+}
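// A minimal standalone sketch of the post-increment behavior of next, with a
// simplified cursor that tracks only the store offset: the returned position
// is the old offset rounded up to the field's alignment, and the cursor then
// advances past the field.
package main

import "fmt"

type miniCursor struct{ off int64 }

func roundUp(o, a int64) int64 { return (o + a - 1) / a * a }

// next returns the aligned position for a field of the given size and
// alignment and advances the cursor past it.
func (c *miniCursor) next(size, align int64) int64 {
	c.off = roundUp(c.off, align)
	at := c.off
	c.off += size
	return at
}

func main() {
	var c miniCursor
	fmt.Println(c.next(1, 1)) // 0: a leading byte field
	fmt.Println(c.next(8, 8)) // 8: the following int64 is aligned up past the byte
}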
- // Accumulate order in allOrdered
- var allOrdered []*Value
- for v, n := range val2Preds {
- if n == 0 {
- allOrdered = append(allOrdered, v)
- }
+// plus returns a register cursor offset from the original, without modifying the original.
+func (c *registerCursor) plus(regWidth Abi1RO) registerCursor {
+ rc := *c
+ rc.nextSlice += regWidth
+ return rc
+}
+
+// at returns the register cursor for component i of t, where the first
+// component is numbered 0.
+func (c *registerCursor) at(t *types.Type, i int) registerCursor {
+ rc := *c
+ if i == 0 || len(c.regs) == 0 {
+ return rc
}
- last := 0 // allOrdered[0:last] has been top-sorted and processed
- for len(val2Preds) > 0 {
- toProcess = allOrdered[last:]
- last = len(allOrdered)
- sort.SliceStable(toProcess, less)
- for _, v := range toProcess {
- delete(val2Preds, v)
- if v.Op == OpArg {
- continue // no Args[0], hence done.
- }
- w := v.Args[0]
- n, ok := val2Preds[w]
- if !ok {
- continue
- }
- if n == 1 {
- allOrdered = append(allOrdered, w)
- delete(val2Preds, w)
- continue
- }
- val2Preds[w] = n - 1
- }
+ if t.IsArray() {
+ w := c.config.NumParamRegs(t.Elem())
+ rc.nextSlice += Abi1RO(i * w)
+ return rc
}
-
- x.commonSelectors = make(map[selKey]*Value)
- // Rewrite duplicate selectors as copies where possible.
- for i := len(allOrdered) - 1; i >= 0; i-- {
- v := allOrdered[i]
- if v.Op == OpArg {
- continue
- }
- w := v.Args[0]
- if w.Op == OpCopy {
- for w.Op == OpCopy {
- w = w.Args[0]
- }
- v.SetArg(0, w)
- }
- typ := v.Type
- if typ.IsMemory() {
- continue // handled elsewhere, not an indexable result
- }
- size := typ.Size()
- offset := int64(0)
- switch v.Op {
- case OpStructSelect:
- if w.Type.Kind() == types.TSTRUCT {
- offset = w.Type.FieldOff(int(v.AuxInt))
- } else { // Immediate interface data artifact, offset is zero.
- f.Fatalf("Expand calls interface data problem, func %s, v=%s, w=%s\n", f.Name, v.LongString(), w.LongString())
- }
- case OpArraySelect:
- offset = size * v.AuxInt
- case OpSelectN:
- offset = v.AuxInt // offset is just a key, really.
- case OpInt64Hi:
- offset = x.hiOffset
- case OpInt64Lo:
- offset = x.lowOffset
- case OpStringLen, OpSliceLen, OpIData:
- offset = x.ptrSize
- case OpSliceCap:
- offset = 2 * x.ptrSize
- case OpComplexImag:
- offset = size
- }
- sk := selKey{from: w, size: size, offsetOrIndex: offset, typ: typ}
- dupe := x.commonSelectors[sk]
- if dupe == nil {
- x.commonSelectors[sk] = v
- } else if x.sdom.IsAncestorEq(dupe.Block, v.Block) {
- if x.debug > 1 {
- x.Printf("Duplicate, make %s copy of %s\n", v, dupe)
- }
- v.copyOf(dupe)
- } else {
- // Because values are processed in dominator order, the old common[s] will never dominate after a miss is seen.
- // Installing the new value might match some future values.
- x.commonSelectors[sk] = v
+ if t.IsStruct() {
+ for j := 0; j < i; j++ {
+ rc.next(t.FieldType(j))
}
+ return rc
}
+ panic("Haven't implemented this case yet, do I need to?")
+}
- // Indices of entries in f.Names that need to be deleted.
- var toDelete []namedVal
+func (c *registerCursor) init(regs []abi.RegIndex, info *abi.ABIParamResultInfo, result *[]*Value, storeDest *Value, storeOffset int64) {
+ c.regs = regs
+ c.nextSlice = 0
+ c.storeOffset = storeOffset
+ c.storeDest = storeDest
+ c.config = info.Config()
+ c.regValues = result
+}
- // Rewrite selectors.
- for i, v := range allOrdered {
- if x.debug > 1 {
- b := v.Block
- x.Printf("allOrdered[%d] = b%d, %s, uses=%d\n", i, b.ID, v.LongString(), v.Uses)
- }
- if v.Uses == 0 {
- x.invalidateRecursively(v)
- continue
- }
- if v.Op == OpCopy {
- continue
- }
- locs := x.rewriteSelect(v, v, 0, 0)
- // Install new names.
- if v.Type.IsMemory() {
- continue
- }
- // Leaf types may have debug locations
- if !x.isAlreadyExpandedAggregateType(v.Type) {
- for _, l := range locs {
- if _, ok := f.NamedValues[*l]; !ok {
- f.Names = append(f.Names, l)
- }
- f.NamedValues[*l] = append(f.NamedValues[*l], v)
- }
- continue
- }
- if ns, ok := x.namedSelects[v]; ok {
- // Not-leaf types that had debug locations need to lose them.
+func (c *registerCursor) addArg(v *Value) {
+ *c.regValues = append(*c.regValues, v)
+}
- toDelete = append(toDelete, ns...)
- }
- }
+func (c *registerCursor) hasRegs() bool {
+ return len(c.regs) > 0
+}
- deleteNamedVals(f, toDelete)
+func (c *registerCursor) ArgOpAndRegisterFor() (Op, int64) {
+ r := c.regs[c.nextSlice]
+ return ArgOpAndRegisterFor(r, c.config)
+}
- // Step 4: rewrite the calls themselves, correcting the type.
- for _, b := range f.Blocks {
- for _, v := range b.Values {
- switch v.Op {
- case OpArg:
- x.rewriteArgToMemOrRegs(v)
- case OpStaticLECall:
- v.Op = OpStaticCall
- rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
- v.Type = types.NewResults(append(rts, types.TypeMem))
- case OpTailLECall:
- v.Op = OpTailCall
- rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
- v.Type = types.NewResults(append(rts, types.TypeMem))
- case OpClosureLECall:
- v.Op = OpClosureCall
- rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
- v.Type = types.NewResults(append(rts, types.TypeMem))
- case OpInterLECall:
- v.Op = OpInterCall
- rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
- v.Type = types.NewResults(append(rts, types.TypeMem))
- }
- }
+// ArgOpAndRegisterFor converts an abi register index into an ssa Op and corresponding
+// arg register index.
+func ArgOpAndRegisterFor(r abi.RegIndex, abiConfig *abi.ABIConfig) (Op, int64) {
+ i := abiConfig.FloatIndexFor(r)
+ if i >= 0 { // float PR
+ return OpArgFloatReg, i
}
+ return OpArgIntReg, int64(r)
+}
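// Illustrative sketch, not part of this patch: ABI register indices number the
// integer registers first and the float registers after them, which is the split
// ArgOpAndRegisterFor performs. numIntRegs below is a made-up stand-in, not a
// real target's register count.
package main

import "fmt"

const numIntRegs = 9 // assumption for illustration only

func opAndRegisterFor(r int) (string, int) {
	if f := r - numIntRegs; f >= 0 {
		return "ArgFloatReg", f // float registers follow the integer registers
	}
	return "ArgIntReg", r
}

func main() {
	fmt.Println(opAndRegisterFor(3))  // ArgIntReg 3
	fmt.Println(opAndRegisterFor(11)) // ArgFloatReg 2
}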
- // Step 5: dedup OpArgXXXReg values. Mostly it is already dedup'd by commonArgs,
- // but there are cases that we have same OpArgXXXReg values with different types.
- // E.g. string is sometimes decomposed as { *int8, int }, sometimes as { unsafe.Pointer, uintptr }.
- // (Can we avoid that?)
- var IArg, FArg [32]*Value
- for _, v := range f.Entry.Values {
- switch v.Op {
- case OpArgIntReg:
- i := v.AuxInt
- if w := IArg[i]; w != nil {
- if w.Type.Size() != v.Type.Size() {
- f.Fatalf("incompatible OpArgIntReg [%d]: %s and %s", i, v.LongString(), w.LongString())
- }
- if w.Type.IsUnsafePtr() && !v.Type.IsUnsafePtr() {
- // Update unsafe.Pointer type if we know the actual pointer type.
- w.Type = v.Type
- }
- // TODO: don't dedup pointer and scalar? Rewrite to OpConvert? Can it happen?
- v.copyOf(w)
- } else {
- IArg[i] = v
- }
- case OpArgFloatReg:
- i := v.AuxInt
- if w := FArg[i]; w != nil {
- if w.Type.Size() != v.Type.Size() {
- f.Fatalf("incompatible OpArgFloatReg [%d]: %v and %v", i, v, w)
- }
- v.copyOf(w)
- } else {
- FArg[i] = v
- }
- }
+type selKey struct {
+ from *Value // what is selected from
+ offsetOrIndex int64 // whatever is appropriate for the selector
+ size int64
+ typ *types.Type
+}
+
+type expandState struct {
+ f *Func
+ debug int // odd values log lost statement markers, so likely settings are 1 (stmts), 2 (expansion), and 3 (both)
+ regSize int64
+ sp *Value
+ typs *Types
+
+ firstOp Op // for 64-bit integers on 32-bit machines, first word in memory
+ secondOp Op // for 64-bit integers on 32-bit machines, second word in memory
+ firstType *types.Type // first half type, for Int64
+ secondType *types.Type // second half type, for Int64
+
+ wideSelects map[*Value]*Value // Selects that are not SSA-able, mapped to consuming stores.
+ commonSelectors map[selKey]*Value // used to de-dupe selectors
+ commonArgs map[selKey]*Value // used to de-dupe OpArg/OpArgIntReg/OpArgFloatReg
+ memForCall map[ID]*Value // For a call, need to know the unique selector that gets the mem.
+ indentLevel int // Indentation for debugging recursion
+}
+
+// intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target
+// that has no 64-bit integer registers.
+func (x *expandState) intPairTypes(et types.Kind) (tHi, tLo *types.Type) {
+ tHi = x.typs.UInt32
+ if et == types.TINT64 {
+ tHi = x.typs.Int32
}
+ tLo = x.typs.UInt32
+ return
+}
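// Illustrative sketch, not part of this patch: on a 32-bit target a 64-bit
// integer becomes two words, and only the high word keeps the sign, which is why
// intPairTypes pairs Int32/UInt32 for TINT64 but UInt32/UInt32 for TUINT64.
package main

import "fmt"

// splitInt64 returns the two 32-bit halves of v; only the high half is signed.
func splitInt64(v int64) (hi int32, lo uint32) {
	return int32(v >> 32), uint32(v)
}

func main() {
	hi, lo := splitInt64(-5)
	fmt.Println(hi, lo)                    // -1 4294967291
	fmt.Println(int64(hi)<<32 | int64(lo)) // -5, reassembled from the halves
}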
- // Step 6: elide any copies introduced.
- // Update named values.
- for _, name := range f.Names {
- values := f.NamedValues[*name]
- for i, v := range values {
- if v.Op == OpCopy {
- a := v.Args[0]
- for a.Op == OpCopy {
- a = a.Args[0]
- }
- values[i] = a
- }
+// offsetFrom creates an offset from a pointer, simplifying chained offsets and offsets from SP
+func (x *expandState) offsetFrom(b *Block, from *Value, offset int64, pt *types.Type) *Value {
+ ft := from.Type
+ if offset == 0 {
+ if ft == pt {
+ return from
}
- }
- for _, b := range f.Blocks {
- for _, v := range b.Values {
- for i, a := range v.Args {
- if a.Op != OpCopy {
- continue
- }
- aa := copySource(a)
- v.SetArg(i, aa)
- for a.Uses == 0 {
- b := a.Args[0]
- x.invalidateRecursively(a)
- a = b
- }
- }
+ // This captures common, (apparently) safe cases. The unsafe cases involve ft == uintptr
+ if (ft.IsPtr() || ft.IsUnsafePtr()) && pt.IsPtr() {
+ return from
}
}
-
- // Rewriting can attach lines to values that are unlikely to survive code generation, so move them to a use.
- for _, b := range f.Blocks {
- for _, v := range b.Values {
- for _, a := range v.Args {
- if a.Pos.IsStmt() != src.PosIsStmt {
- continue
- }
- if a.Type.IsMemory() {
- continue
- }
- if a.Pos.Line() != v.Pos.Line() {
- continue
- }
- if !a.Pos.SameFile(v.Pos) {
- continue
- }
- switch a.Op {
- case OpArgIntReg, OpArgFloatReg, OpSelectN:
- v.Pos = v.Pos.WithIsStmt()
- a.Pos = a.Pos.WithDefaultStmt()
- }
- }
- }
+ // Simplify, canonicalize
+ for from.Op == OpOffPtr {
+ offset += from.AuxInt
+ from = from.Args[0]
+ }
+ if from == x.sp {
+ return x.f.ConstOffPtrSP(pt, offset, x.sp)
}
+ return b.NewValue1I(from.Pos.WithNotStmt(), OpOffPtr, pt, offset, from)
}
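// Illustrative sketch, not part of this patch: offsetFrom folds a chain of
// OffPtr values into one offset from the underlying base, the same accumulation
// this toy collapser performs (ptrExpr is a stand-in for *Value).
package main

import "fmt"

type ptrExpr struct {
	base   string   // stand-in for a non-OffPtr base value, e.g. SP
	offset int64
	inner  *ptrExpr // non-nil if this expression is an OffPtr of another
}

// offsetFrom returns a single (base, offset) pair for expr plus an extra offset.
func offsetFrom(expr *ptrExpr, extra int64) (string, int64) {
	off := extra
	for expr.inner != nil { // walk the OffPtr chain, accumulating offsets
		off += expr.offset
		expr = expr.inner
	}
	return expr.base, off
}

func main() {
	sp := &ptrExpr{base: "SP"}
	p := &ptrExpr{offset: 8, inner: &ptrExpr{offset: 16, inner: sp}}
	fmt.Println(offsetFrom(p, 4)) // SP 28
}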
-// rewriteArgToMemOrRegs converts OpArg v in-place into the register version of v,
-// if that is appropriate.
-func (x *expandState) rewriteArgToMemOrRegs(v *Value) *Value {
- if x.debug > 1 {
- x.indent(3)
- defer x.indent(-3)
- x.Printf("rewriteArgToMemOrRegs(%s)\n", v.LongString())
+func (x *expandState) regWidth(t *types.Type) Abi1RO {
+ return Abi1RO(x.f.ABI1.NumParamRegs(t))
+}
+
+// regOffset returns the register offset of the i'th element of type t
+func (x *expandState) regOffset(t *types.Type, i int) Abi1RO {
+ // TODO maybe cache this in a map if profiling recommends.
+ if i == 0 {
+ return 0
}
- pa := x.prAssignForArg(v)
- switch len(pa.Registers) {
- case 0:
- frameOff := v.Aux.(*ir.Name).FrameOffset()
- if pa.Offset() != int32(frameOff+x.f.ABISelf.LocalsOffset()) {
- panic(fmt.Errorf("Parameter assignment %d and OpArg.Aux frameOffset %d disagree, op=%s",
- pa.Offset(), frameOff, v.LongString()))
- }
- case 1:
- t := v.Type
- key := selKey{v, 0, t.Size(), t}
- w := x.commonArgs[key]
- if w != nil && w.Uses != 0 { // do not reuse dead value
- v.copyOf(w)
- break
- }
- r := pa.Registers[0]
- var i int64
- v.Op, i = ArgOpAndRegisterFor(r, x.f.ABISelf)
- v.Aux = &AuxNameOffset{v.Aux.(*ir.Name), 0}
- v.AuxInt = i
- x.commonArgs[key] = v
-
- default:
- panic(badVal("Saw unexpanded OpArg", v))
+ if t.IsArray() {
+ return Abi1RO(i) * x.regWidth(t.Elem())
}
- if x.debug > 1 {
- x.Printf("-->%s\n", v.LongString())
+ if t.IsStruct() {
+ k := Abi1RO(0)
+ for j := 0; j < i; j++ {
+ k += x.regWidth(t.FieldType(j))
+ }
+ return k
}
- return v
+ panic("Haven't implemented this case yet, do I need to?")
}
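// Illustrative sketch, not part of this patch: a struct field's register offset
// is the sum of the register widths of the fields before it; the widths below
// are stand-ins for what ABI1.NumParamRegs would report.
package main

import "fmt"

// regOffset returns how many argument registers precede field i.
func regOffset(fieldRegWidths []int, i int) int {
	off := 0
	for j := 0; j < i; j++ {
		off += fieldRegWidths[j]
	}
	return off
}

func main() {
	widths := []int{1, 2, 1}          // e.g. struct { p *T; s string; x int }
	fmt.Println(regOffset(widths, 2)) // 3: x starts after the pointer and the two-register string
}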
-// newArgToMemOrRegs either rewrites toReplace into an OpArg referencing memory or into an OpArgXXXReg to a register,
-// or rewrites it into a copy of the appropriate OpArgXXX. The actual OpArgXXX is determined by combining baseArg (an OpArg)
-// with offset, regOffset, and t to determine which portion of it to reference (either all or a part, in memory or in registers).
-func (x *expandState) newArgToMemOrRegs(baseArg, toReplace *Value, offset int64, regOffset Abi1RO, t *types.Type, pos src.XPos) *Value {
- if x.debug > 1 {
- x.indent(3)
- defer x.indent(-3)
- x.Printf("newArgToMemOrRegs(base=%s; toReplace=%s; t=%s; memOff=%d; regOff=%d)\n", baseArg.String(), toReplace.LongString(), t.String(), offset, regOffset)
+// prAssignForArg returns the ABIParamAssignment for v, assumed to be an OpArg.
+func (x *expandState) prAssignForArg(v *Value) *abi.ABIParamAssignment {
+ if v.Op != OpArg {
+ panic(fmt.Errorf("Wanted OpArg, instead saw %s", v.LongString()))
}
- key := selKey{baseArg, offset, t.Size(), t}
- w := x.commonArgs[key]
- if w != nil && w.Uses != 0 { // do not reuse dead value
- if toReplace != nil {
- toReplace.copyOf(w)
- if x.debug > 1 {
- x.Printf("...replace %s\n", toReplace.LongString())
- }
- }
- if x.debug > 1 {
- x.Printf("-->%s\n", w.LongString())
+ return ParamAssignmentForArgName(x.f, v.Aux.(*ir.Name))
+}
+
+// ParamAssignmentForArgName returns the ABIParamAssignment for f's arg with matching name.
+func ParamAssignmentForArgName(f *Func, name *ir.Name) *abi.ABIParamAssignment {
+ abiInfo := f.OwnAux.abiInfo
+ ip := abiInfo.InParams()
+ for i, a := range ip {
+ if a.Name == name {
+ return &ip[i]
}
- return w
}
+ panic(fmt.Errorf("Did not match param %v in prInfo %+v", name, abiInfo.InParams()))
+}
- pa := x.prAssignForArg(baseArg)
- if len(pa.Registers) == 0 { // Arg is on stack
- frameOff := baseArg.Aux.(*ir.Name).FrameOffset()
- if pa.Offset() != int32(frameOff+x.f.ABISelf.LocalsOffset()) {
- panic(fmt.Errorf("Parameter assignment %d and OpArg.Aux frameOffset %d disagree, op=%s",
- pa.Offset(), frameOff, baseArg.LongString()))
- }
- aux := baseArg.Aux
- auxInt := baseArg.AuxInt + offset
- if toReplace != nil && toReplace.Block == baseArg.Block {
- toReplace.reset(OpArg)
- toReplace.Aux = aux
- toReplace.AuxInt = auxInt
- toReplace.Type = t
- w = toReplace
- } else {
- w = baseArg.Block.NewValue0IA(baseArg.Pos, OpArg, t, auxInt, aux)
- }
- x.commonArgs[key] = w
- if toReplace != nil {
- toReplace.copyOf(w)
+// indent increments (or decrements) the indentation.
+func (x *expandState) indent(n int) {
+ x.indentLevel += n
+}
+
+// Printf does an indented fmt.Printf on the format and args.
+func (x *expandState) Printf(format string, a ...interface{}) (n int, err error) {
+ if x.indentLevel > 0 {
+ fmt.Printf("%[1]*s", x.indentLevel, "")
+ }
+ return fmt.Printf(format, a...)
+}
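// Illustrative sketch, not part of this patch: the "%[1]*s" verb above takes its
// field width from argument 1 (the indent level) and formats argument 2 (an
// empty string) at that width, i.e. it prints indentLevel spaces of padding.
package main

import "fmt"

func main() {
	indent := 4
	fmt.Printf("%[1]*s%s\n", indent, "", "hello") // prints "    hello"
}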
+
+func (x *expandState) invalidateRecursively(a *Value) {
+ var s string
+ if x.debug > 0 {
+ plus := " "
+ if a.Pos.IsStmt() == src.PosIsStmt {
+ plus = " +"
}
+ s = a.String() + plus + a.Pos.LineNumber() + " " + a.LongString()
if x.debug > 1 {
- x.Printf("-->%s\n", w.LongString())
+ x.Printf("...marking %v unused\n", s)
}
- return w
- }
- // Arg is in registers
- r := pa.Registers[regOffset]
- op, auxInt := ArgOpAndRegisterFor(r, x.f.ABISelf)
- if op == OpArgIntReg && t.IsFloat() || op == OpArgFloatReg && t.IsInteger() {
- fmt.Printf("pa=%v\nx.f.OwnAux.abiInfo=%s\n",
- pa.ToString(x.f.ABISelf, true),
- x.f.OwnAux.abiInfo.String())
- panic(fmt.Errorf("Op/Type mismatch, op=%s, type=%s", op.String(), t.String()))
- }
- if baseArg.AuxInt != 0 {
- base.Fatalf("BaseArg %s bound to registers has non-zero AuxInt", baseArg.LongString())
}
- aux := &AuxNameOffset{baseArg.Aux.(*ir.Name), offset}
- if toReplace != nil && toReplace.Block == baseArg.Block {
- toReplace.reset(op)
- toReplace.Aux = aux
- toReplace.AuxInt = auxInt
- toReplace.Type = t
- w = toReplace
- } else {
- w = baseArg.Block.NewValue0IA(baseArg.Pos, op, t, auxInt, aux)
- }
- x.commonArgs[key] = w
- if toReplace != nil {
- toReplace.copyOf(w)
- }
- if x.debug > 1 {
- x.Printf("-->%s\n", w.LongString())
- }
- return w
-
-}
-
-// ArgOpAndRegisterFor converts an abi register index into an ssa Op and corresponding
-// arg register index.
-func ArgOpAndRegisterFor(r abi.RegIndex, abiConfig *abi.ABIConfig) (Op, int64) {
- i := abiConfig.FloatIndexFor(r)
- if i >= 0 { // float PR
- return OpArgFloatReg, i
+ lost := a.invalidateRecursively()
+ if x.debug&1 != 0 && lost { // For odd values of x.debug, do this.
+ x.Printf("Lost statement marker in %s on former %s\n", base.Ctxt.Pkgpath+"."+x.f.Name, s)
}
- return OpArgIntReg, int64(r)
}
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
index 14f2474a11..b2c4b1997f 100644
--- a/src/cmd/compile/internal/ssa/export_test.go
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -55,22 +55,26 @@ type Conf struct {
func (c *Conf) Frontend() Frontend {
if c.fe == nil {
- f := ir.NewFunc(src.NoXPos)
- f.Nname = ir.NewNameAt(f.Pos(), &types.Sym{
- Pkg: types.NewPkg("my/import/path", "path"),
- Name: "function",
- })
- f.LSym = &obj.LSym{Name: "my/import/path.function"}
+ pkg := types.NewPkg("my/import/path", "path")
+ fn := ir.NewFunc(src.NoXPos, src.NoXPos, pkg.Lookup("function"), types.NewSignature(nil, nil, nil))
+ fn.DeclareParams(true)
+ fn.LSym = &obj.LSym{Name: "my/import/path.function"}
c.fe = TestFrontend{
t: c.tb,
ctxt: c.config.ctxt,
- f: f,
+ f: fn,
}
}
return c.fe
}
+func (c *Conf) Temp(typ *types.Type) *ir.Name {
+ n := ir.NewNameAt(src.NoXPos, &types.Sym{Name: "aFakeAuto"}, typ)
+ n.Class = ir.PAUTO
+ return n
+}
+
// TestFrontend is a test-only frontend.
// It assumes 64 bit integers and pointers.
type TestFrontend struct {
@@ -82,17 +86,9 @@ type TestFrontend struct {
func (TestFrontend) StringData(s string) *obj.LSym {
return nil
}
-func (TestFrontend) Auto(pos src.XPos, t *types.Type) *ir.Name {
- n := ir.NewNameAt(pos, &types.Sym{Name: "aFakeAuto"})
- n.SetType(t)
- n.Class = ir.PAUTO
- return n
-}
func (d TestFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot {
return LocalSlot{N: parent.N, Type: t, Off: offset}
}
-func (TestFrontend) AllocFrame(f *Func) {
-}
func (d TestFrontend) Syslook(s string) *obj.LSym {
return d.ctxt.Lookup(s)
}
@@ -107,9 +103,6 @@ func (d TestFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t.
func (d TestFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t.Logf(msg, args...) }
func (d TestFrontend) Debug_checknil() bool { return false }
-func (d TestFrontend) MyImportPath() string {
- return d.f.Sym().Pkg.Path
-}
func (d TestFrontend) Func() *ir.Func {
return d.f
}
@@ -125,10 +118,3 @@ func init() {
typecheck.InitUniverse()
testTypes.SetTypPtrs()
}
-
-func (d TestFrontend) DerefItab(sym *obj.LSym, off int64) *obj.LSym { return nil }
-
-func (d TestFrontend) CanSSA(t *types.Type) bool {
- // There are no un-SSAable types in test land.
- return true
-}
diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go
index 2d203e583b..031d94f90c 100644
--- a/src/cmd/compile/internal/ssa/func.go
+++ b/src/cmd/compile/internal/ssa/func.go
@@ -7,7 +7,10 @@ package ssa
import (
"cmd/compile/internal/abi"
"cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
+ "cmd/internal/obj"
"cmd/internal/src"
"fmt"
"math"
@@ -61,7 +64,7 @@ type Func struct {
// RegArgs is a slice of register-memory pairs that must be spilled and unspilled in the uncommon path of function entry.
RegArgs []Spill
- // AuxCall describing parameters and results for this function.
+ // OwnAux describes parameters and results for this function.
OwnAux *AuxCall
freeValues *Value // free Values linked by argstorage[0]. All other fields except ID are 0/nil.
@@ -84,9 +87,17 @@ type LocalSlotSplitKey struct {
}
// NewFunc returns a new, empty function object.
-// Caller must set f.Config and f.Cache before using f.
-func NewFunc(fe Frontend) *Func {
- return &Func{fe: fe, NamedValues: make(map[LocalSlot][]*Value), CanonicalLocalSlots: make(map[LocalSlot]*LocalSlot), CanonicalLocalSplits: make(map[LocalSlotSplitKey]*LocalSlot)}
+// Caller must reset cache before calling NewFunc.
+func (c *Config) NewFunc(fe Frontend, cache *Cache) *Func {
+ return &Func{
+ fe: fe,
+ Config: c,
+ Cache: cache,
+
+ NamedValues: make(map[LocalSlot][]*Value),
+ CanonicalLocalSlots: make(map[LocalSlot]*LocalSlot),
+ CanonicalLocalSplits: make(map[LocalSlotSplitKey]*LocalSlot),
+ }
}
// NumBlocks returns an integer larger than the id of any Block in the Func.
@@ -99,6 +110,21 @@ func (f *Func) NumValues() int {
return f.vid.num()
}
+// NameABI returns the function name followed by comma and the ABI number.
+// This is intended for use with GOSSAFUNC and HTML dumps, and differs from
+// the linker's "<1>" convention because "<" and ">" require shell quoting
+// and are not legal file names (for use with GOSSADIR) on Windows.
+func (f *Func) NameABI() string {
+ return FuncNameABI(f.Name, f.ABISelf.Which())
+}
+
+// FuncNameABI returns n followed by a comma and the value of a.
+// This is a separate function to allow a single point encoding
+// of the format, which is used in places where there's not a Func yet.
+func FuncNameABI(n string, a obj.ABI) string {
+ return fmt.Sprintf("%s,%d", n, a)
+}
+
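// Illustrative sketch, not part of this patch: the GOSSAFUNC/GOSSADIR dump key
// produced by FuncNameABI is simply "name,abinumber", e.g. "main.main,0".
package main

import "fmt"

func funcNameABI(name string, abi int) string {
	return fmt.Sprintf("%s,%d", name, abi)
}

func main() {
	fmt.Println(funcNameABI("main.main", 0)) // main.main,0
}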
// newSparseSet returns a sparse set that can store at least up to n integers.
func (f *Func) newSparseSet(n int) *sparseSet {
return f.Cache.allocSparseSet(n)
@@ -695,7 +721,6 @@ func (f *Func) ConstOffPtrSP(t *types.Type, c int64, sp *Value) *Value {
v.AddArg(sp)
}
return v
-
}
func (f *Func) Frontend() Frontend { return f.fe }
@@ -773,7 +798,8 @@ func (f *Func) DebugHashMatch() bool {
if !base.HasDebugHash() {
return true
}
- return base.DebugHashMatchPkgFunc(f.fe.MyImportPath(), f.Name)
+ sym := f.fe.Func().Sym()
+ return base.DebugHashMatchPkgFunc(sym.Pkg.Path, sym.Name)
}
func (f *Func) spSb() (sp, sb *Value) {
@@ -809,3 +835,8 @@ func (f *Func) useFMA(v *Value) bool {
}
return base.FmaHash.MatchPos(v.Pos, nil)
}
+
+// NewLocal returns a new anonymous local variable of the given type.
+func (f *Func) NewLocal(pos src.XPos, typ *types.Type) *ir.Name {
+ return typecheck.TempAt(pos, f.fe.Func(), typ) // Note: adds new auto to fn.Dcl list
+}
diff --git a/src/cmd/compile/internal/ssa/func_test.go b/src/cmd/compile/internal/ssa/func_test.go
index bbb228d8a5..6923aaa58e 100644
--- a/src/cmd/compile/internal/ssa/func_test.go
+++ b/src/cmd/compile/internal/ssa/func_test.go
@@ -152,12 +152,10 @@ func AuxCallLSym(name string) *AuxCall {
// supplied to one of the Bloc functions. Each of the bloc names and
// valu names should be unique across the Fun.
func (c *Conf) Fun(entry string, blocs ...bloc) fun {
- f := NewFunc(c.Frontend())
- f.Config = c.config
// TODO: Either mark some SSA tests as t.Parallel,
// or set up a shared Cache and Reset it between tests.
// But not both.
- f.Cache = new(Cache)
+ f := c.config.NewFunc(c.Frontend(), new(Cache))
f.pass = &emptyPass
f.cachedLineStarts = newXposmap(map[int]lineRange{0: {0, 100}, 1: {0, 100}, 2: {0, 100}, 3: {0, 100}, 4: {0, 100}})
diff --git a/src/cmd/compile/internal/ssa/fuse.go b/src/cmd/compile/internal/ssa/fuse.go
index 6d3fb70780..68defde7b4 100644
--- a/src/cmd/compile/internal/ssa/fuse.go
+++ b/src/cmd/compile/internal/ssa/fuse.go
@@ -169,7 +169,7 @@ func fuseBlockIf(b *Block) bool {
// There may be false positives.
func isEmpty(b *Block) bool {
for _, v := range b.Values {
- if v.Uses > 0 || v.Op.IsCall() || v.Op.HasSideEffects() || v.Type.IsVoid() {
+ if v.Uses > 0 || v.Op.IsCall() || v.Op.HasSideEffects() || v.Type.IsVoid() || opcodeTable[v.Op].nilCheck {
return false
}
}
diff --git a/src/cmd/compile/internal/ssa/fuse_test.go b/src/cmd/compile/internal/ssa/fuse_test.go
index fa7921a18f..2f89938d1d 100644
--- a/src/cmd/compile/internal/ssa/fuse_test.go
+++ b/src/cmd/compile/internal/ssa/fuse_test.go
@@ -254,7 +254,7 @@ func TestFuseSideEffects(t *testing.T) {
Valu("p", OpArg, c.config.Types.IntPtr, 0, nil),
If("c1", "z0", "exit")),
Bloc("z0",
- Valu("nilcheck", OpNilCheck, types.TypeVoid, 0, nil, "p", "mem"),
+ Valu("nilcheck", OpNilCheck, c.config.Types.IntPtr, 0, nil, "p", "mem"),
Goto("exit")),
Bloc("exit",
Exit("mem"),
diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go
index 7e5a097d7d..ea170fbcdb 100644
--- a/src/cmd/compile/internal/ssa/html.go
+++ b/src/cmd/compile/internal/ssa/html.go
@@ -741,7 +741,7 @@ function toggleDarkMode() {
</head>`)
w.WriteString("<body>")
w.WriteString("<h1>")
- w.WriteString(html.EscapeString(w.Func.Name))
+ w.WriteString(html.EscapeString(w.Func.NameABI()))
w.WriteString("</h1>")
w.WriteString(`
<a href="#" onclick="toggle_visibility('help');return false;" id="helplink">help</a>
@@ -784,7 +784,7 @@ func (w *HTMLWriter) Close() {
io.WriteString(w.w, "</body>")
io.WriteString(w.w, "</html>")
w.w.Close()
- fmt.Printf("dumped SSA to %v\n", w.path)
+ fmt.Printf("dumped SSA for %s to %v\n", w.Func.NameABI(), w.path)
}
// WritePhase writes f in a column headed by title.
diff --git a/src/cmd/compile/internal/ssa/lca.go b/src/cmd/compile/internal/ssa/lca.go
index 90daebe44f..6e7ad96d29 100644
--- a/src/cmd/compile/internal/ssa/lca.go
+++ b/src/cmd/compile/internal/ssa/lca.go
@@ -106,7 +106,7 @@ func (lca *lcaRange) find(a, b *Block) *Block {
if a == b {
return a
}
- // Find the positions of a and bin the Euler tour.
+ // Find the positions of a and b in the Euler tour.
p1 := lca.blocks[a.ID].pos
p2 := lca.blocks[b.ID].pos
if p1 > p2 {
diff --git a/src/cmd/compile/internal/ssa/loopbce.go b/src/cmd/compile/internal/ssa/loopbce.go
index b7dfaa33e3..dd1f39dbef 100644
--- a/src/cmd/compile/internal/ssa/loopbce.go
+++ b/src/cmd/compile/internal/ssa/loopbce.go
@@ -13,12 +13,14 @@ import (
type indVarFlags uint8
const (
- indVarMinExc indVarFlags = 1 << iota // minimum value is exclusive (default: inclusive)
- indVarMaxInc // maximum value is inclusive (default: exclusive)
+ indVarMinExc indVarFlags = 1 << iota // minimum value is exclusive (default: inclusive)
+ indVarMaxInc // maximum value is inclusive (default: exclusive)
+	indVarCountDown // if set, the iteration starts at max and counts down towards min (default: min towards max)
)
type indVar struct {
ind *Value // induction variable
+ nxt *Value // the incremented variable
min *Value // minimum value, inclusive/exclusive depends on flags
max *Value // maximum value, inclusive/exclusive depends on flags
entry *Block // entry block in the loop.
@@ -127,6 +129,13 @@ func findIndVar(f *Func) []indVar {
less = false
}
+ if ind.Block != b {
+ // TODO: Could be extended to include disjointed loop headers.
+ // I don't think this is causing missed optimizations in real world code often.
+ // See https://go.dev/issue/63955
+ continue
+ }
+
// Expect the increment to be a nonzero constant.
if !inc.isGenericIntConst() {
continue
@@ -277,6 +286,7 @@ func findIndVar(f *Func) []indVar {
if !inclusive {
flags |= indVarMinExc
}
+ flags |= indVarCountDown
step = -step
}
if f.pass.debug >= 1 {
@@ -285,6 +295,7 @@ func findIndVar(f *Func) []indVar {
iv = append(iv, indVar{
ind: ind,
+ nxt: nxt,
min: min,
max: max,
entry: b.Succs[0].b,
diff --git a/src/cmd/compile/internal/ssa/loopreschedchecks.go b/src/cmd/compile/internal/ssa/loopreschedchecks.go
index 7c56523ad8..0ac473d229 100644
--- a/src/cmd/compile/internal/ssa/loopreschedchecks.go
+++ b/src/cmd/compile/internal/ssa/loopreschedchecks.go
@@ -247,7 +247,7 @@ func insertLoopReschedChecks(f *Func) {
// mem1 := call resched (mem0)
// goto header
resched := f.fe.Syslook("goschedguarded")
- call := sched.NewValue1A(bb.Pos, OpStaticCall, types.TypeResultMem, StaticAuxCall(resched, bb.Func.ABIDefault.ABIAnalyzeTypes(nil, nil, nil)), mem0)
+ call := sched.NewValue1A(bb.Pos, OpStaticCall, types.TypeResultMem, StaticAuxCall(resched, bb.Func.ABIDefault.ABIAnalyzeTypes(nil, nil)), mem0)
mem1 := sched.NewValue1I(bb.Pos, OpSelectN, types.TypeMem, 0, call)
sched.AddEdgeTo(h)
headerMemPhi.AddArg(mem1)
diff --git a/src/cmd/compile/internal/ssa/magic.go b/src/cmd/compile/internal/ssa/magic.go
index df4b568134..235b0e5e5c 100644
--- a/src/cmd/compile/internal/ssa/magic.go
+++ b/src/cmd/compile/internal/ssa/magic.go
@@ -170,7 +170,7 @@ func smagicOK(n uint, c int64) bool {
return c&(c-1) != 0
}
-// smagicOKn reports whether we should strength reduce an signed n-bit divide by c.
+// smagicOKn reports whether we should strength reduce a signed n-bit divide by c.
func smagicOK8(c int8) bool { return smagicOK(8, int64(c)) }
func smagicOK16(c int16) bool { return smagicOK(16, int64(c)) }
func smagicOK32(c int32) bool { return smagicOK(32, int64(c)) }
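// Illustrative sketch, not part of this patch: c&(c-1) clears the lowest set
// bit, so it is zero exactly when c is zero or a power of two; those divisors
// are reduced to shifts, and the magic-number path only wants the remaining c.
package main

import "fmt"

func isPow2OrZero(c int64) bool { return c&(c-1) == 0 }

func main() {
	fmt.Println(isPow2OrZero(8))  // true:  1000 & 0111 == 0
	fmt.Println(isPow2OrZero(12)) // false: 1100 & 1011 == 1000
}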
diff --git a/src/cmd/compile/internal/ssa/memcombine.go b/src/cmd/compile/internal/ssa/memcombine.go
index c1346434c9..b1a47510be 100644
--- a/src/cmd/compile/internal/ssa/memcombine.go
+++ b/src/cmd/compile/internal/ssa/memcombine.go
@@ -313,8 +313,8 @@ func combineLoads(root *Value, n int64) bool {
if isLittleEndian && shift0 != 0 {
v = leftShift(loadBlock, pos, v, shift0)
}
- if isBigEndian && shift0-(n-1)*8 != 0 {
- v = leftShift(loadBlock, pos, v, shift0-(n-1)*8)
+ if isBigEndian && shift0-(n-1)*size*8 != 0 {
+ v = leftShift(loadBlock, pos, v, shift0-(n-1)*size*8)
}
// Install with (Copy v).
@@ -500,6 +500,8 @@ func combineStores(root *Value, n int64) bool {
return false
}
if x.Aux.(*types.Type).Size() != size {
+ // TODO: the constant source and consecutive load source cases
+ // do not need all the stores to be the same size.
return false
}
base, off := splitPtr(x.Args[0])
@@ -510,6 +512,8 @@ func combineStores(root *Value, n int64) bool {
}
// Before we sort, grab the memory arg the result should have.
mem := a[n-1].store.Args[2]
+ // Also grab position of first store (last in array = first in memory order).
+ pos := a[n-1].store.Pos
// Sort stores in increasing address order.
sort.Slice(a, func(i, j int) bool {
@@ -562,6 +566,7 @@ func combineStores(root *Value, n int64) bool {
v := a[i].store
if v == root {
v.Aux = cv.Type // widen store type
+ v.Pos = pos
v.SetArg(0, ptr)
v.SetArg(1, cv)
v.SetArg(2, mem)
@@ -573,6 +578,75 @@ func combineStores(root *Value, n int64) bool {
return true
}
+ // Check for consecutive loads as the source of the stores.
+ var loadMem *Value
+ var loadBase BaseAddress
+ var loadIdx int64
+ for i := int64(0); i < n; i++ {
+ load := a[i].store.Args[1]
+ if load.Op != OpLoad {
+ loadMem = nil
+ break
+ }
+ if load.Uses != 1 {
+ loadMem = nil
+ break
+ }
+ if load.Type.IsPtr() {
+ // Don't combine stores containing a pointer, as we need
+ // a write barrier for those. This can't currently happen,
+ // but might in the future if we ever have another
+ // 8-byte-reg/4-byte-ptr architecture like amd64p32.
+ loadMem = nil
+ break
+ }
+ mem := load.Args[1]
+ base, idx := splitPtr(load.Args[0])
+ if loadMem == nil {
+ // First one we found
+ loadMem = mem
+ loadBase = base
+ loadIdx = idx
+ continue
+ }
+ if base != loadBase || mem != loadMem {
+ loadMem = nil
+ break
+ }
+ if idx != loadIdx+(a[i].offset-a[0].offset) {
+ loadMem = nil
+ break
+ }
+ }
+ if loadMem != nil {
+ // Modify the first load to do a larger load instead.
+ load := a[0].store.Args[1]
+ switch size * n {
+ case 2:
+ load.Type = types.Types[types.TUINT16]
+ case 4:
+ load.Type = types.Types[types.TUINT32]
+ case 8:
+ load.Type = types.Types[types.TUINT64]
+ }
+
+ // Modify root to do the store.
+ for i := int64(0); i < n; i++ {
+ v := a[i].store
+ if v == root {
+ v.Aux = load.Type // widen store type
+ v.Pos = pos
+ v.SetArg(0, ptr)
+ v.SetArg(1, load)
+ v.SetArg(2, mem)
+ } else {
+ clobber(v)
+ v.Type = types.Types[types.TBOOL] // erase memory type
+ }
+ }
+ return true
+ }
+
// Check that all the shift/trunc are of the same base value.
shiftBase := getShiftBase(a)
if shiftBase == nil {
@@ -588,14 +662,14 @@ func combineStores(root *Value, n int64) bool {
isLittleEndian := true
shift0 := shift(a[0].store, shiftBase)
for i := int64(1); i < n; i++ {
- if shift(a[i].store, shiftBase) != shift0+i*8 {
+ if shift(a[i].store, shiftBase) != shift0+i*size*8 {
isLittleEndian = false
break
}
}
isBigEndian := true
for i := int64(1); i < n; i++ {
- if shift(a[i].store, shiftBase) != shift0-i*8 {
+ if shift(a[i].store, shiftBase) != shift0-i*size*8 {
isBigEndian = false
break
}
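// Illustrative sketch, not part of this patch: why the stride is size*8 rather
// than 8. Four 2-byte stores of a little-endian uint64 use shifts 0, 16, 32, 48,
// i.e. shift0 + i*size*8 with size = 2; a stride of 8 only matches byte stores.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	w := uint64(0x1122334455667788)
	var buf [8]byte
	for i := 0; i < 4; i++ {
		binary.LittleEndian.PutUint16(buf[2*i:], uint16(w>>(i*16))) // shift grows by 16 = size*8
	}
	fmt.Printf("% x\n", buf[:]) // 88 77 66 55 44 33 22 11, same as one 8-byte store of w
}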
@@ -618,8 +692,8 @@ func combineStores(root *Value, n int64) bool {
if isLittleEndian && shift0 != 0 {
sv = rightShift(root.Block, root.Pos, sv, shift0)
}
- if isBigEndian && shift0-(n-1)*8 != 0 {
- sv = rightShift(root.Block, root.Pos, sv, shift0-(n-1)*8)
+ if isBigEndian && shift0-(n-1)*size*8 != 0 {
+ sv = rightShift(root.Block, root.Pos, sv, shift0-(n-1)*size*8)
}
if sv.Type.Size() > size*n {
sv = truncate(root.Block, root.Pos, sv, sv.Type.Size(), size*n)
@@ -633,6 +707,7 @@ func combineStores(root *Value, n int64) bool {
v := a[i].store
if v == root {
v.Aux = sv.Type // widen store type
+ v.Pos = pos
v.SetArg(0, ptr)
v.SetArg(1, sv)
v.SetArg(2, mem)
diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go
index 4f797a473f..c69cd8c32e 100644
--- a/src/cmd/compile/internal/ssa/nilcheck.go
+++ b/src/cmd/compile/internal/ssa/nilcheck.go
@@ -38,11 +38,14 @@ func nilcheckelim(f *Func) {
work := make([]bp, 0, 256)
work = append(work, bp{block: f.Entry})
- // map from value ID to bool indicating if value is known to be non-nil
- // in the current dominator path being walked. This slice is updated by
+ // map from value ID to known non-nil version of that value ID
+ // (in the current dominator path being walked). This slice is updated by
// walkStates to maintain the known non-nil values.
- nonNilValues := f.Cache.allocBoolSlice(f.NumValues())
- defer f.Cache.freeBoolSlice(nonNilValues)
+ // If there is extrinsic information about non-nil-ness, this map
+ // points a value to itself. If a value is known non-nil because we
+ // already did a nil check on it, it points to the nil check operation.
+ nonNilValues := f.Cache.allocValueSlice(f.NumValues())
+ defer f.Cache.freeValueSlice(nonNilValues)
// make an initial pass identifying any non-nil values
for _, b := range f.Blocks {
@@ -54,7 +57,7 @@ func nilcheckelim(f *Func) {
// We assume that SlicePtr is non-nil because we do a bounds check
// before the slice access (and all cap>0 slices have a non-nil ptr). See #30366.
if v.Op == OpAddr || v.Op == OpLocalAddr || v.Op == OpAddPtr || v.Op == OpOffPtr || v.Op == OpAdd32 || v.Op == OpAdd64 || v.Op == OpSub32 || v.Op == OpSub64 || v.Op == OpSlicePtr {
- nonNilValues[v.ID] = true
+ nonNilValues[v.ID] = v
}
}
}
@@ -68,16 +71,16 @@ func nilcheckelim(f *Func) {
if v.Op == OpPhi {
argsNonNil := true
for _, a := range v.Args {
- if !nonNilValues[a.ID] {
+ if nonNilValues[a.ID] == nil {
argsNonNil = false
break
}
}
if argsNonNil {
- if !nonNilValues[v.ID] {
+ if nonNilValues[v.ID] == nil {
changed = true
}
- nonNilValues[v.ID] = true
+ nonNilValues[v.ID] = v
}
}
}
@@ -103,8 +106,8 @@ func nilcheckelim(f *Func) {
if len(b.Preds) == 1 {
p := b.Preds[0].b
if p.Kind == BlockIf && p.Controls[0].Op == OpIsNonNil && p.Succs[0].b == b {
- if ptr := p.Controls[0].Args[0]; !nonNilValues[ptr.ID] {
- nonNilValues[ptr.ID] = true
+ if ptr := p.Controls[0].Args[0]; nonNilValues[ptr.ID] == nil {
+ nonNilValues[ptr.ID] = ptr
work = append(work, bp{op: ClearPtr, ptr: ptr})
}
}
@@ -117,14 +120,11 @@ func nilcheckelim(f *Func) {
pendingLines.clear()
// Next, process values in the block.
- i := 0
for _, v := range b.Values {
- b.Values[i] = v
- i++
switch v.Op {
case OpIsNonNil:
ptr := v.Args[0]
- if nonNilValues[ptr.ID] {
+ if nonNilValues[ptr.ID] != nil {
if v.Pos.IsStmt() == src.PosIsStmt { // Boolean true is a terrible statement boundary.
pendingLines.add(v.Pos)
v.Pos = v.Pos.WithNotStmt()
@@ -135,7 +135,7 @@ func nilcheckelim(f *Func) {
}
case OpNilCheck:
ptr := v.Args[0]
- if nonNilValues[ptr.ID] {
+ if nilCheck := nonNilValues[ptr.ID]; nilCheck != nil {
// This is a redundant implicit nil check.
// Logging in the style of the former compiler -- and omit line 1,
// which is usually in generated code.
@@ -145,14 +145,13 @@ func nilcheckelim(f *Func) {
if v.Pos.IsStmt() == src.PosIsStmt { // About to lose a statement boundary
pendingLines.add(v.Pos)
}
- v.reset(OpUnknown)
- f.freeValue(v)
- i--
+ v.Op = OpCopy
+ v.SetArgs1(nilCheck)
continue
}
// Record the fact that we know ptr is non nil, and remember to
// undo that information when this dominator subtree is done.
- nonNilValues[ptr.ID] = true
+ nonNilValues[ptr.ID] = v
work = append(work, bp{op: ClearPtr, ptr: ptr})
fallthrough // a non-eliminated nil check might be a good place for a statement boundary.
default:
@@ -163,7 +162,7 @@ func nilcheckelim(f *Func) {
}
}
// This reduces the lost statement count in "go" by 5 (out of 500 total).
- for j := 0; j < i; j++ { // is this an ordering problem?
+ for j := range b.Values { // is this an ordering problem?
v := b.Values[j]
if v.Pos.IsStmt() != src.PosNotStmt && !isPoorStatementOp(v.Op) && pendingLines.contains(v.Pos) {
v.Pos = v.Pos.WithIsStmt()
@@ -174,7 +173,6 @@ func nilcheckelim(f *Func) {
b.Pos = b.Pos.WithIsStmt()
pendingLines.remove(b.Pos)
}
- b.truncateValues(i)
// Add all dominated blocks to the work list.
for w := sdom[node.block.ID].child; w != nil; w = sdom[w.ID].sibling {
@@ -182,7 +180,7 @@ func nilcheckelim(f *Func) {
}
case ClearPtr:
- nonNilValues[node.ptr.ID] = false
+ nonNilValues[node.ptr.ID] = nil
continue
}
}
diff --git a/src/cmd/compile/internal/ssa/nilcheck_test.go b/src/cmd/compile/internal/ssa/nilcheck_test.go
index 2e32afe2a6..6c89b1e185 100644
--- a/src/cmd/compile/internal/ssa/nilcheck_test.go
+++ b/src/cmd/compile/internal/ssa/nilcheck_test.go
@@ -1,3 +1,7 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package ssa
import (
diff --git a/src/cmd/compile/internal/ssa/numberlines.go b/src/cmd/compile/internal/ssa/numberlines.go
index 4cbc4919f4..b4eca324d5 100644
--- a/src/cmd/compile/internal/ssa/numberlines.go
+++ b/src/cmd/compile/internal/ssa/numberlines.go
@@ -32,7 +32,7 @@ func nextGoodStatementIndex(v *Value, i int, b *Block) int {
// If the value is the last one in the block, too bad, it will have to do
// (this assumes that the value ordering vaguely corresponds to the source
// program execution order, which tends to be true directly after ssa is
- // first built.
+ // first built).
if i >= len(b.Values)-1 {
return i
}
diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go
index e2319d78d6..cb151b2f6c 100644
--- a/src/cmd/compile/internal/ssa/op.go
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -238,13 +238,9 @@ func (a *AuxCall) RegsOfArg(which int64) []abi.RegIndex {
return a.abiInfo.InParam(int(which)).Registers
}
-// NameOfResult returns the type of result which (indexed 0, 1, etc).
+// NameOfResult returns the ir.Name of result which (indexed 0, 1, etc).
func (a *AuxCall) NameOfResult(which int64) *ir.Name {
- name := a.abiInfo.OutParam(int(which)).Name
- if name == nil {
- return nil
- }
- return name.(*ir.Name)
+ return a.abiInfo.OutParam(int(which)).Name
}
// TypeOfResult returns the type of result which (indexed 0, 1, etc).
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 1480fcf45b..c552832520 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -716,12 +716,12 @@ const (
OpAMD64BTSQ
OpAMD64BTLconst
OpAMD64BTQconst
- OpAMD64BTCLconst
OpAMD64BTCQconst
- OpAMD64BTRLconst
OpAMD64BTRQconst
- OpAMD64BTSLconst
OpAMD64BTSQconst
+ OpAMD64BTSQconstmodify
+ OpAMD64BTRQconstmodify
+ OpAMD64BTCQconstmodify
OpAMD64TESTQ
OpAMD64TESTL
OpAMD64TESTW
@@ -912,6 +912,8 @@ const (
OpAMD64SQRTSS
OpAMD64ROUNDSD
OpAMD64VFMADD231SD
+ OpAMD64MINSD
+ OpAMD64MINSS
OpAMD64SBBQcarrymask
OpAMD64SBBLcarrymask
OpAMD64SETEQ
@@ -935,6 +937,16 @@ const (
OpAMD64SETBEstore
OpAMD64SETAstore
OpAMD64SETAEstore
+ OpAMD64SETEQstoreidx1
+ OpAMD64SETNEstoreidx1
+ OpAMD64SETLstoreidx1
+ OpAMD64SETLEstoreidx1
+ OpAMD64SETGstoreidx1
+ OpAMD64SETGEstoreidx1
+ OpAMD64SETBstoreidx1
+ OpAMD64SETBEstoreidx1
+ OpAMD64SETAstoreidx1
+ OpAMD64SETAEstoreidx1
OpAMD64SETEQF
OpAMD64SETNEF
OpAMD64SETORD
@@ -964,6 +976,7 @@ const (
OpAMD64MOVLi2f
OpAMD64MOVLf2i
OpAMD64PXOR
+ OpAMD64POR
OpAMD64LEAQ
OpAMD64LEAL
OpAMD64LEAW
@@ -1441,6 +1454,10 @@ const (
OpARM64FNEGD
OpARM64FSQRTD
OpARM64FSQRTS
+ OpARM64FMIND
+ OpARM64FMINS
+ OpARM64FMAXD
+ OpARM64FMAXS
OpARM64REV
OpARM64REVW
OpARM64REV16
@@ -2089,10 +2106,13 @@ const (
OpMIPS64LoweredPanicBoundsC
OpPPC64ADD
+ OpPPC64ADDCC
OpPPC64ADDconst
+ OpPPC64ADDCCconst
OpPPC64FADD
OpPPC64FADDS
OpPPC64SUB
+ OpPPC64SUBCC
OpPPC64SUBFCconst
OpPPC64FSUB
OpPPC64FSUBS
@@ -2119,7 +2139,6 @@ const (
OpPPC64SLW
OpPPC64ROTL
OpPPC64ROTLW
- OpPPC64RLDICL
OpPPC64CLRLSLWI
OpPPC64CLRLSLDI
OpPPC64ADDC
@@ -2142,7 +2161,10 @@ const (
OpPPC64RLWINM
OpPPC64RLWNM
OpPPC64RLWMI
+ OpPPC64RLDICL
+ OpPPC64RLDICR
OpPPC64CNTLZD
+ OpPPC64CNTLZDCC
OpPPC64CNTLZW
OpPPC64CNTTZD
OpPPC64CNTTZW
@@ -2168,15 +2190,18 @@ const (
OpPPC64MTVSRD
OpPPC64AND
OpPPC64ANDN
+ OpPPC64ANDNCC
OpPPC64ANDCC
OpPPC64OR
OpPPC64ORN
OpPPC64ORCC
OpPPC64NOR
+ OpPPC64NORCC
OpPPC64XOR
OpPPC64XORCC
OpPPC64EQV
OpPPC64NEG
+ OpPPC64NEGCC
OpPPC64BRD
OpPPC64BRW
OpPPC64BRH
@@ -2364,10 +2389,14 @@ const (
OpRISCV64MOVDnop
OpRISCV64SLL
OpRISCV64SRA
+ OpRISCV64SRAW
OpRISCV64SRL
+ OpRISCV64SRLW
OpRISCV64SLLI
OpRISCV64SRAI
+ OpRISCV64SRAIW
OpRISCV64SRLI
+ OpRISCV64SRLIW
OpRISCV64XOR
OpRISCV64XORI
OpRISCV64OR
@@ -2381,7 +2410,8 @@ const (
OpRISCV64SLTI
OpRISCV64SLTU
OpRISCV64SLTIU
- OpRISCV64MOVconvert
+ OpRISCV64LoweredRound32F
+ OpRISCV64LoweredRound64F
OpRISCV64CALLstatic
OpRISCV64CALLtail
OpRISCV64CALLclosure
@@ -2409,6 +2439,7 @@ const (
OpRISCV64LoweredGetCallerSP
OpRISCV64LoweredGetCallerPC
OpRISCV64LoweredWB
+ OpRISCV64LoweredPubBarrier
OpRISCV64LoweredPanicBoundsA
OpRISCV64LoweredPanicBoundsB
OpRISCV64LoweredPanicBoundsC
@@ -2416,6 +2447,10 @@ const (
OpRISCV64FSUBS
OpRISCV64FMULS
OpRISCV64FDIVS
+ OpRISCV64FMADDS
+ OpRISCV64FMSUBS
+ OpRISCV64FNMADDS
+ OpRISCV64FNMSUBS
OpRISCV64FSQRTS
OpRISCV64FNEGS
OpRISCV64FMVSX
@@ -3006,6 +3041,10 @@ const (
OpRoundToEven
OpAbs
OpCopysign
+ OpMin64F
+ OpMin32F
+ OpMax64F
+ OpMax32F
OpFMA
OpPhi
OpCopy
@@ -8758,12 +8797,12 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "BTCLconst",
+ name: "BTCQconst",
auxType: auxInt8,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
- asm: x86.ABTCL,
+ asm: x86.ABTCQ,
reg: regInfo{
inputs: []inputInfo{
{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
@@ -8774,12 +8813,12 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "BTCQconst",
+ name: "BTRQconst",
auxType: auxInt8,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
- asm: x86.ABTCQ,
+ asm: x86.ABTRQ,
reg: regInfo{
inputs: []inputInfo{
{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
@@ -8790,12 +8829,12 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "BTRLconst",
+ name: "BTSQconst",
auxType: auxInt8,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
- asm: x86.ABTRL,
+ asm: x86.ABTSQ,
reg: regInfo{
inputs: []inputInfo{
{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
@@ -8806,50 +8845,44 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "BTRQconst",
- auxType: auxInt8,
- argLen: 1,
- resultInArg0: true,
- clobberFlags: true,
- asm: x86.ABTRQ,
+ name: "BTSQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTSQ,
reg: regInfo{
inputs: []inputInfo{
- {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
- },
- outputs: []outputInfo{
- {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
},
},
},
{
- name: "BTSLconst",
- auxType: auxInt8,
- argLen: 1,
- resultInArg0: true,
- clobberFlags: true,
- asm: x86.ABTSL,
+ name: "BTRQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTRQ,
reg: regInfo{
inputs: []inputInfo{
- {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
- },
- outputs: []outputInfo{
- {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
},
},
},
{
- name: "BTSQconst",
- auxType: auxInt8,
- argLen: 1,
- resultInArg0: true,
- clobberFlags: true,
- asm: x86.ABTSQ,
+ name: "BTCQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTCQ,
reg: regInfo{
inputs: []inputInfo{
- {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
- },
- outputs: []outputInfo{
- {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
},
},
},
@@ -11891,6 +11924,36 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "MINSD",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.AMINSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MINSS",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.AMINSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
name: "SBBQcarrymask",
argLen: 1,
asm: x86.ASBBQ,
@@ -12151,6 +12214,156 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "SETEQstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.ASETEQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETNEstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.ASETNE,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETLstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.ASETLT,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETLEstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.ASETLE,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETGstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.ASETGT,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETGEstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.ASETGE,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETBstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.ASETCS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETBEstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.ASETLS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETAstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.ASETHI,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETAEstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.ASETCC,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
name: "SETEQF",
argLen: 1,
clobberFlags: true,
@@ -12511,6 +12724,22 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "POR",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.APOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
name: "LEAQ",
auxType: auxSymOff,
argLen: 1,
@@ -19278,6 +19507,62 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "FMIND",
+ argLen: 2,
+ asm: arm64.AFMIND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMINS",
+ argLen: 2,
+ asm: arm64.AFMINS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMAXD",
+ argLen: 2,
+ asm: arm64.AFMAXD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMAXS",
+ argLen: 2,
+ asm: arm64.AFMAXS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
name: "REV",
argLen: 1,
asm: arm64.AREV,
@@ -22908,11 +23193,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.AADDVU,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -22923,10 +23208,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.AADDVU,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693244}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -22936,11 +23221,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.ASUBVU,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -22951,10 +23236,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.ASUBVU,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -22965,11 +23250,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMULV,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -22980,11 +23265,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMULHV,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -22995,11 +23280,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMULHVU,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23009,11 +23294,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.ADIVV,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23023,11 +23308,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.ADIVVU,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23037,11 +23322,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.AREMV,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23051,11 +23336,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.AREMVU,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23182,11 +23467,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.AAND,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23197,10 +23482,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.AAND,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23211,11 +23496,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.AOR,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23226,10 +23511,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.AOR,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23240,11 +23525,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.AXOR,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23255,10 +23540,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.AXOR,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23269,11 +23554,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.ANOR,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23284,10 +23569,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.ANOR,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23296,10 +23581,10 @@ var opcodeTable = [...]opInfo{
argLen: 1,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23361,11 +23646,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMASKEQZ,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23375,11 +23660,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMASKNEZ,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23389,11 +23674,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.ASLLV,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23404,10 +23689,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.ASLLV,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23417,11 +23702,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.ASRLV,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23432,10 +23717,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.ASRLV,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23445,11 +23730,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.ASRAV,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23460,10 +23745,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.ASRAV,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23473,11 +23758,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.AROTR,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23487,11 +23772,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.AROTRV,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23502,10 +23787,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.AROTR,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23516,10 +23801,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.AROTRV,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23529,11 +23814,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.ASGT,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23544,10 +23829,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.ASGT,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23557,11 +23842,11 @@ var opcodeTable = [...]opInfo{
asm: loong64.ASGTU,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23572,10 +23857,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.ASGTU,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23653,7 +23938,7 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVV,
reg: regInfo{
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23693,7 +23978,7 @@ var opcodeTable = [...]opInfo{
{0, 4611686018427387908}, // SP SB
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23706,10 +23991,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVB,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23722,10 +24007,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVBU,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23738,10 +24023,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVH,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23754,10 +24039,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVHU,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23770,10 +24055,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVW,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23786,10 +24071,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVWU,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23802,10 +24087,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVV,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23818,7 +24103,7 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVF,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
outputs: []outputInfo{
{0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
@@ -23834,7 +24119,7 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVD,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
outputs: []outputInfo{
{0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
@@ -23850,8 +24135,8 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVB,
reg: regInfo{
inputs: []inputInfo{
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
},
},
@@ -23864,8 +24149,8 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVH,
reg: regInfo{
inputs: []inputInfo{
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
},
},
@@ -23878,8 +24163,8 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVW,
reg: regInfo{
inputs: []inputInfo{
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
},
},
@@ -23892,8 +24177,8 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVV,
reg: regInfo{
inputs: []inputInfo{
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
},
},
@@ -23906,7 +24191,7 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVF,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
{1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
},
@@ -23920,7 +24205,7 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVD,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
{1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
},
@@ -23934,7 +24219,7 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVB,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
},
},
@@ -23947,7 +24232,7 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVH,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
},
},
@@ -23960,7 +24245,7 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVW,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
},
},
@@ -23973,7 +24258,7 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVV,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
},
},
@@ -23983,10 +24268,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVB,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -23996,10 +24281,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVBU,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24009,10 +24294,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVH,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24022,10 +24307,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVHU,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24035,10 +24320,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVW,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24048,10 +24333,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVWU,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24061,10 +24346,10 @@ var opcodeTable = [...]opInfo{
asm: loong64.AMOVV,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24074,10 +24359,10 @@ var opcodeTable = [...]opInfo{
resultInArg0: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24214,49 +24499,49 @@ var opcodeTable = [...]opInfo{
{
name: "CALLstatic",
auxType: auxCallOff,
- argLen: 1,
+ argLen: -1,
clobberFlags: true,
call: true,
reg: regInfo{
- clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
},
{
name: "CALLtail",
auxType: auxCallOff,
- argLen: 1,
+ argLen: -1,
clobberFlags: true,
call: true,
tailCall: true,
reg: regInfo{
- clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
},
{
name: "CALLclosure",
auxType: auxCallOff,
- argLen: 3,
+ argLen: -1,
clobberFlags: true,
call: true,
reg: regInfo{
inputs: []inputInfo{
{1, 268435456}, // R29
- {0, 1070596092}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644668}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
- clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
},
{
name: "CALLinter",
auxType: auxCallOff,
- argLen: 2,
+ argLen: -1,
clobberFlags: true,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
- clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
},
{
@@ -24266,9 +24551,9 @@ var opcodeTable = [...]opInfo{
faultOnNilArg0: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 524288}, // R20
},
- clobbers: 262146, // R1 R19
+ clobbers: 524290, // R1 R20
},
},
{
@@ -24279,40 +24564,38 @@ var opcodeTable = [...]opInfo{
faultOnNilArg1: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 524288}, // R20
- {1, 262144}, // R19
+ {0, 1048576}, // R21
+ {1, 524288}, // R20
},
- clobbers: 786434, // R1 R19 R20
+ clobbers: 1572866, // R1 R20 R21
},
},
{
name: "LoweredZero",
auxType: auxInt64,
argLen: 3,
- clobberFlags: true,
faultOnNilArg0: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 262144}, // R19
- {1, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 524288}, // R20
+ {1, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
- clobbers: 262144, // R19
+ clobbers: 524288, // R20
},
},
{
name: "LoweredMove",
auxType: auxInt64,
argLen: 4,
- clobberFlags: true,
faultOnNilArg0: true,
faultOnNilArg1: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 8}, // R4
- {1, 262144}, // R19
- {2, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1048576}, // R21
+ {1, 524288}, // R20
+ {2, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
- clobbers: 262152, // R4 R19
+ clobbers: 1572864, // R20 R21
},
},
{
@@ -24321,10 +24604,10 @@ var opcodeTable = [...]opInfo{
faultOnNilArg0: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24334,10 +24617,10 @@ var opcodeTable = [...]opInfo{
faultOnNilArg0: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24347,10 +24630,10 @@ var opcodeTable = [...]opInfo{
faultOnNilArg0: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24361,8 +24644,8 @@ var opcodeTable = [...]opInfo{
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
},
},
@@ -24373,8 +24656,8 @@ var opcodeTable = [...]opInfo{
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
},
},
@@ -24385,8 +24668,8 @@ var opcodeTable = [...]opInfo{
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
},
},
@@ -24397,7 +24680,7 @@ var opcodeTable = [...]opInfo{
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
},
},
@@ -24408,7 +24691,7 @@ var opcodeTable = [...]opInfo{
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
},
},
@@ -24421,11 +24704,11 @@ var opcodeTable = [...]opInfo{
unsafePoint: true,
reg: regInfo{
inputs: []inputInfo{
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24438,11 +24721,11 @@ var opcodeTable = [...]opInfo{
unsafePoint: true,
reg: regInfo{
inputs: []inputInfo{
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24455,11 +24738,11 @@ var opcodeTable = [...]opInfo{
unsafePoint: true,
reg: regInfo{
inputs: []inputInfo{
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24472,11 +24755,11 @@ var opcodeTable = [...]opInfo{
unsafePoint: true,
reg: regInfo{
inputs: []inputInfo{
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24490,10 +24773,10 @@ var opcodeTable = [...]opInfo{
unsafePoint: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24507,10 +24790,10 @@ var opcodeTable = [...]opInfo{
unsafePoint: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24523,12 +24806,12 @@ var opcodeTable = [...]opInfo{
unsafePoint: true,
reg: regInfo{
inputs: []inputInfo{
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {2, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24541,12 +24824,12 @@ var opcodeTable = [...]opInfo{
unsafePoint: true,
reg: regInfo{
inputs: []inputInfo{
- {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {2, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
- {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24557,7 +24840,7 @@ var opcodeTable = [...]opInfo{
faultOnNilArg0: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24566,7 +24849,7 @@ var opcodeTable = [...]opInfo{
argLen: 1,
reg: regInfo{
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24575,7 +24858,7 @@ var opcodeTable = [...]opInfo{
argLen: 1,
reg: regInfo{
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24595,7 +24878,7 @@ var opcodeTable = [...]opInfo{
rematerializeable: true,
reg: regInfo{
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24605,7 +24888,7 @@ var opcodeTable = [...]opInfo{
rematerializeable: true,
reg: regInfo{
outputs: []outputInfo{
- {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
@@ -24628,8 +24911,8 @@ var opcodeTable = [...]opInfo{
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 65536}, // R17
- {1, 8}, // R4
+ {0, 4194304}, // R23
+ {1, 8388608}, // R24
},
},
},
@@ -24640,8 +24923,8 @@ var opcodeTable = [...]opInfo{
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 131072}, // R18
- {1, 65536}, // R17
+ {0, 1048576}, // R21
+ {1, 4194304}, // R23
},
},
},
@@ -24652,8 +24935,8 @@ var opcodeTable = [...]opInfo{
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 262144}, // R19
- {1, 131072}, // R18
+ {0, 524288}, // R20
+ {1, 1048576}, // R21
},
},
},
@@ -27957,6 +28240,21 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "ADDCC",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AADDCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
name: "ADDconst",
auxType: auxInt64,
argLen: 1,
@@ -27971,6 +28269,21 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "ADDCCconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AADDCCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ clobbers: 9223372036854775808, // XER
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
name: "FADD",
argLen: 2,
commutative: true,
@@ -28015,6 +28328,20 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "SUBCC",
+ argLen: 2,
+ asm: ppc64.ASUBCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
name: "SUBFCconst",
auxType: auxInt64,
argLen: 1,
@@ -28395,20 +28722,6 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "RLDICL",
- auxType: auxInt32,
- argLen: 1,
- asm: ppc64.ARLDICL,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
- },
- outputs: []outputInfo{
- {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
- },
- },
- },
- {
name: "CLRLSLWI",
auxType: auxInt32,
argLen: 1,
@@ -28736,10 +29049,10 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "CNTLZD",
- argLen: 1,
- clobberFlags: true,
- asm: ppc64.ACNTLZD,
+ name: "RLDICL",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ARLDICL,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
@@ -28750,10 +29063,49 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "CNTLZW",
- argLen: 1,
- clobberFlags: true,
- asm: ppc64.ACNTLZW,
+ name: "RLDICR",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ARLDICR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CNTLZD",
+ argLen: 1,
+ asm: ppc64.ACNTLZD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CNTLZDCC",
+ argLen: 1,
+ asm: ppc64.ACNTLZDCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CNTLZW",
+ argLen: 1,
+ asm: ppc64.ACNTLZW,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
@@ -29089,11 +29441,24 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "ANDCC",
- argLen: 2,
- commutative: true,
- clobberFlags: true,
- asm: ppc64.AANDCC,
+ name: "ANDNCC",
+ argLen: 2,
+ asm: ppc64.AANDNCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ANDCC",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AANDCC,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
@@ -29134,11 +29499,10 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "ORCC",
- argLen: 2,
- commutative: true,
- clobberFlags: true,
- asm: ppc64.AORCC,
+ name: "ORCC",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AORCC,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
@@ -29165,6 +29529,21 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "NORCC",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.ANORCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
name: "XOR",
argLen: 2,
commutative: true,
@@ -29180,11 +29559,10 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "XORCC",
- argLen: 2,
- commutative: true,
- clobberFlags: true,
- asm: ppc64.AXORCC,
+ name: "XORCC",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AXORCC,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
@@ -29224,6 +29602,19 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "NEGCC",
+ argLen: 1,
+ asm: ppc64.ANEGCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
name: "BRD",
argLen: 1,
asm: ppc64.ABRD,
@@ -29422,11 +29813,10 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "ANDCCconst",
- auxType: auxInt64,
- argLen: 1,
- clobberFlags: true,
- asm: ppc64.AANDCC,
+ name: "ANDCCconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AANDCC,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
@@ -31666,6 +32056,20 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "SRAW",
+ argLen: 2,
+ asm: riscv.ASRAW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
name: "SRL",
argLen: 2,
asm: riscv.ASRL,
@@ -31680,6 +32084,20 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "SRLW",
+ argLen: 2,
+ asm: riscv.ASRLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
name: "SLLI",
auxType: auxInt64,
argLen: 1,
@@ -31708,6 +32126,20 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "SRAIW",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASRAIW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
name: "SRLI",
auxType: auxInt64,
argLen: 1,
@@ -31722,6 +32154,20 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "SRLIW",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASRLIW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
name: "XOR",
argLen: 2,
commutative: true,
@@ -31904,15 +32350,28 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "MOVconvert",
- argLen: 2,
- asm: riscv.AMOV,
+ name: "LoweredRound32F",
+ argLen: 1,
+ resultInArg0: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
outputs: []outputInfo{
- {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "LoweredRound64F",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
},
},
@@ -32270,6 +32729,13 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "LoweredPubBarrier",
+ argLen: 1,
+ hasSideEffects: true,
+ asm: riscv.AFENCE,
+ reg: regInfo{},
+ },
+ {
name: "LoweredPanicBoundsA",
auxType: auxInt64,
argLen: 3,
@@ -32364,6 +32830,70 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "FMADDS",
+ argLen: 3,
+ commutative: true,
+ asm: riscv.AFMADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMSUBS",
+ argLen: 3,
+ commutative: true,
+ asm: riscv.AFMSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMADDS",
+ argLen: 3,
+ commutative: true,
+ asm: riscv.AFNMADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMSUBS",
+ argLen: 3,
+ commutative: true,
+ asm: riscv.AFNMSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
name: "FSQRTS",
argLen: 1,
asm: riscv.AFSQRTS,
@@ -38923,6 +39453,26 @@ var opcodeTable = [...]opInfo{
generic: true,
},
{
+ name: "Min64F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Min32F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Max64F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Max32F",
+ argLen: 2,
+ generic: true,
+ },
+ {
name: "FMA",
argLen: 3,
generic: true,
@@ -39373,9 +39923,10 @@ var opcodeTable = [...]opInfo{
generic: true,
},
{
- name: "NilCheck",
- argLen: 2,
- generic: true,
+ name: "NilCheck",
+ argLen: 2,
+ nilCheck: true,
+ generic: true,
},
{
name: "GetG",
@@ -40156,16 +40707,16 @@ var registersLOONG64 = [...]Register{
{17, loong64.REG_R18, 14, "R18"},
{18, loong64.REG_R19, 15, "R19"},
{19, loong64.REG_R20, 16, "R20"},
- {20, loong64.REG_R21, -1, "R21"},
+ {20, loong64.REG_R21, 17, "R21"},
{21, loong64.REGG, -1, "g"},
- {22, loong64.REG_R23, 17, "R23"},
- {23, loong64.REG_R24, 18, "R24"},
- {24, loong64.REG_R25, 19, "R25"},
- {25, loong64.REG_R26, 20, "R26"},
- {26, loong64.REG_R27, 21, "R27"},
- {27, loong64.REG_R28, 22, "R28"},
- {28, loong64.REG_R29, 23, "R29"},
- {29, loong64.REG_R31, 24, "R31"},
+ {22, loong64.REG_R23, 18, "R23"},
+ {23, loong64.REG_R24, 19, "R24"},
+ {24, loong64.REG_R25, 20, "R25"},
+ {25, loong64.REG_R26, 21, "R26"},
+ {26, loong64.REG_R27, 22, "R27"},
+ {27, loong64.REG_R28, 23, "R28"},
+ {28, loong64.REG_R29, 24, "R29"},
+ {29, loong64.REG_R31, 25, "R31"},
{30, loong64.REG_F0, -1, "F0"},
{31, loong64.REG_F1, -1, "F1"},
{32, loong64.REG_F2, -1, "F2"},
@@ -40200,9 +40751,9 @@ var registersLOONG64 = [...]Register{
{61, loong64.REG_F31, -1, "F31"},
{62, 0, -1, "SB"},
}
-var paramIntRegLOONG64 = []int8{3, 4, 5, 6, 7, 8, 9, 10}
-var paramFloatRegLOONG64 = []int8{30, 31, 32, 33, 34, 35, 36, 37}
-var gpRegMaskLOONG64 = regMask(1070596088)
+var paramIntRegLOONG64 = []int8{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}
+var paramFloatRegLOONG64 = []int8{30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45}
+var gpRegMaskLOONG64 = regMask(1071644664)
var fpRegMaskLOONG64 = regMask(4611686017353646080)
var specialRegMaskLOONG64 = regMask(0)
var framepointerRegLOONG64 = int8(-1)
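
The regMask constants throughout the generated tables above are bitmaps over the architecture's register table, and the trailing comments simply list the registers whose bits are set. A minimal, hypothetical decoder (the names slice below is made up and far shorter than the real registersLOONG64 table) sketches how such a comment line is derived from a mask:

package main

import "fmt"

// names stands in for a register table: bit i of a regMask selects names[i].
var names = []string{"R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7"}

func decode(mask uint64) []string {
	var rs []string
	for i, n := range names {
		if mask&(1<<uint(i)) != 0 {
			rs = append(rs, n)
		}
	}
	return rs
}

func main() {
	fmt.Println(decode(0b0011_0000)) // [R4 R5]: bits 4 and 5 are set
}
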
diff --git a/src/cmd/compile/internal/ssa/poset.go b/src/cmd/compile/internal/ssa/poset.go
index fd2887d020..7b64843fe9 100644
--- a/src/cmd/compile/internal/ssa/poset.go
+++ b/src/cmd/compile/internal/ssa/poset.go
@@ -122,7 +122,7 @@ type posetNode struct {
// poset is implemented as a forest of DAGs; in each DAG, if there is a path (directed)
// from node A to B, it means that A<B (or A<=B). Equality is represented by mapping
// two SSA values to the same DAG node; when a new equality relation is recorded
-// between two existing nodes,the nodes are merged, adjusting incoming and outgoing edges.
+// between two existing nodes, the nodes are merged, adjusting incoming and outgoing edges.
//
// Constants are specially treated. When a constant is added to the poset, it is
// immediately linked to other constants already present; so for instance if the
@@ -1065,7 +1065,7 @@ func (po *poset) setOrder(n1, n2 *Value, strict bool) bool {
return true
}
- // Case #1, #3 o #4: nothing to do
+ // Case #1, #3, or #4: nothing to do
return true
}
diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go
index 38758c3361..842719fb4c 100644
--- a/src/cmd/compile/internal/ssa/prove.go
+++ b/src/cmd/compile/internal/ssa/prove.go
@@ -100,10 +100,11 @@ func (d domain) String() string {
}
type pair struct {
- v, w *Value // a pair of values, ordered by ID.
+ // a pair of values, ordered by ID.
// v can be nil, to mean the zero value.
// for booleans the zero value (v == nil) is false.
- d domain
+ v, w *Value
+ d domain
}
// fact is a pair plus a relation for that pair.
@@ -165,7 +166,7 @@ type factsTable struct {
facts map[pair]relation // current known set of relation
stack []fact // previous sets of relations
- // order is a couple of partial order sets that record information
+ // order* is a couple of partial order sets that record information
// about relations between SSA values in the signed and unsigned
// domain.
orderS *poset
@@ -798,6 +799,166 @@ func (ft *factsTable) cleanup(f *Func) {
// its negation. If either leads to a contradiction, it can trim that
// successor.
func prove(f *Func) {
+ // Find induction variables. Currently, findIndVars
+ // is limited to one induction variable per block.
+ var indVars map[*Block]indVar
+ for _, v := range findIndVar(f) {
+ ind := v.ind
+ if len(ind.Args) != 2 {
+ // the rewrite code assumes there are only ever two parents to loops
+ panic("unexpected induction with too many parents")
+ }
+
+ nxt := v.nxt
+ if !(ind.Uses == 2 && // 2 used by comparison and next
+ nxt.Uses == 1) { // 1 used by induction
+ // ind or nxt is used inside the loop; add it to the facts table
+ if indVars == nil {
+ indVars = make(map[*Block]indVar)
+ }
+ indVars[v.entry] = v
+ continue
+ } else {
+ // Since this induction variable is not used for anything but counting the iterations,
+ // no point in putting it into the facts table.
+ }
+
+ // try to rewrite to a downward counting loop checking against start if the
+ // loop body does not depend on ind or nxt and end is known before the loop.
+ // This reduces pressure on the register allocator because we no longer need
+ // to use end on each iteration. We compare against the start constant instead.
+ // That means this code:
+ //
+ // loop:
+ // ind = (Phi (Const [x]) nxt),
+ // if ind < end
+ // then goto enter_loop
+ // else goto exit_loop
+ //
+ // enter_loop:
+ // do something without using ind nor nxt
+ // nxt = inc + ind
+ // goto loop
+ //
+ // exit_loop:
+ //
+ // is rewritten to:
+ //
+ // loop:
+ // ind = (Phi end nxt)
+ // if (Const [x]) < ind
+ // then goto enter_loop
+ // else goto exit_loop
+ //
+ // enter_loop:
+ // do something without using ind nor nxt
+ // nxt = ind - inc
+ // goto loop
+ //
+ // exit_loop:
+ //
+ // this is better because it only requires keeping ind then nxt alive while looping,
+ // while the original form keeps ind then nxt and end alive
+ start, end := v.min, v.max
+ if v.flags&indVarCountDown != 0 {
+ start, end = end, start
+ }
+
+ if !(start.Op == OpConst8 || start.Op == OpConst16 || start.Op == OpConst32 || start.Op == OpConst64) {
+ // if start is not a constant we would gain nothing from inverting the loop
+ continue
+ }
+ if end.Op == OpConst8 || end.Op == OpConst16 || end.Op == OpConst32 || end.Op == OpConst64 {
+ // TODO: if both start and end are constants we should rewrite such that the comparison
+ // is against zero and nxt is a ++ or -- operation
+ // That means:
+ // for i := 2; i < 11; i += 2 {
+ // should be rewritten to:
+ // for i := 5; 0 < i; i-- {
+ continue
+ }
+
+ header := ind.Block
+ check := header.Controls[0]
+ if check == nil {
+ // we don't know how to rewrite a loop whose check is not a simple comparison
+ continue
+ }
+ switch check.Op {
+ case OpLeq64, OpLeq32, OpLeq16, OpLeq8,
+ OpLess64, OpLess32, OpLess16, OpLess8:
+ default:
+ // we don't know how to rewrite a loop whose check is not a simple comparison
+ continue
+ }
+ if !((check.Args[0] == ind && check.Args[1] == end) ||
+ (check.Args[1] == ind && check.Args[0] == end)) {
+ // we don't know how to rewrite a loop whose check is not a simple comparison
+ continue
+ }
+ if end.Block == ind.Block {
+ // we can't rewrite loops where the condition depends on the loop body
+ // this simple check suffices because if the condition did depend on the body, a Phi in ind.Block must exist
+ continue
+ }
+
+ // invert the check
+ check.Args[0], check.Args[1] = check.Args[1], check.Args[0]
+
+ // invert start and end in the loop
+ for i, v := range check.Args {
+ if v != end {
+ continue
+ }
+
+ check.SetArg(i, start)
+ goto replacedEnd
+ }
+ panic(fmt.Sprintf("unreachable, ind: %v, start: %v, end: %v", ind, start, end))
+ replacedEnd:
+
+ for i, v := range ind.Args {
+ if v != start {
+ continue
+ }
+
+ ind.SetArg(i, end)
+ goto replacedStart
+ }
+ panic(fmt.Sprintf("unreachable, ind: %v, start: %v, end: %v", ind, start, end))
+ replacedStart:
+
+ if nxt.Args[0] != ind {
+ // unlike addition, subtraction is not commutative, so be sure we get it right
+ nxt.Args[0], nxt.Args[1] = nxt.Args[1], nxt.Args[0]
+ }
+
+ switch nxt.Op {
+ case OpAdd8:
+ nxt.Op = OpSub8
+ case OpAdd16:
+ nxt.Op = OpSub16
+ case OpAdd32:
+ nxt.Op = OpSub32
+ case OpAdd64:
+ nxt.Op = OpSub64
+ case OpSub8:
+ nxt.Op = OpAdd8
+ case OpSub16:
+ nxt.Op = OpAdd16
+ case OpSub32:
+ nxt.Op = OpAdd32
+ case OpSub64:
+ nxt.Op = OpAdd64
+ default:
+ panic("unreachable")
+ }
+
+ if f.pass.debug > 0 {
+ f.Warnl(ind.Pos, "Inverted loop iteration")
+ }
+ }
+
ft := newFactsTable(f)
ft.checkpoint()
@@ -933,15 +1094,6 @@ func prove(f *Func) {
}
}
}
- // Find induction variables. Currently, findIndVars
- // is limited to one induction variable per block.
- var indVars map[*Block]indVar
- for _, v := range findIndVar(f) {
- if indVars == nil {
- indVars = make(map[*Block]indVar)
- }
- indVars[v.entry] = v
- }
// current node state
type walkState int
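
For context on the comment block in the prove.go hunk above, here is a minimal, hypothetical Go-source view of the inversion; the pass operates on SSA values rather than source, and n, sum, and down are made-up names used only for illustration:

package main

import "fmt"

func main() {
	n := 5   // hypothetical non-constant bound (e.g. read from input in real code)
	sum := 0 // loop body uses neither i nor its successor value
	for i := 0; i < n; i++ { // start is the constant 0, end is n
		sum += 2
	}
	// After the rewrite, the loop behaves as if written to count down,
	// comparing against the constant start instead of keeping n live:
	down := 0
	for i := n; 0 < i; i-- {
		down += 2
	}
	fmt.Println(sum, down) // 10 10: both forms run the same number of iterations
}
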
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
index c4d6e48cad..2325b9ee45 100644
--- a/src/cmd/compile/internal/ssa/regalloc.go
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -672,6 +672,8 @@ func (s *regAllocState) init(f *Func) {
s.allocatable &^= 1 << 9 // R9
case "arm64":
// nothing to do
+ case "loong64": // R2 (aka TP) already reserved.
+ // nothing to do
case "ppc64le": // R2 already reserved.
// nothing to do
case "riscv64": // X3 (aka GP) and X4 (aka TP) already reserved.
@@ -2544,7 +2546,7 @@ func (e *edgeState) findRegFor(typ *types.Type) Location {
// Allocate a temp location to spill a register to.
// The type of the slot is immaterial - it will not be live across
// any safepoint. Just use a type big enough to hold any register.
- t := LocalSlot{N: e.s.f.fe.Auto(c.Pos, types.Int64), Type: types.Int64}
+ t := LocalSlot{N: e.s.f.NewLocal(c.Pos, types.Int64), Type: types.Int64}
// TODO: reuse these slots. They'll need to be erased first.
e.set(t, vid, x, false, c.Pos)
if e.s.f.pass.debug > regDebug {
diff --git a/src/cmd/compile/internal/ssa/regalloc_test.go b/src/cmd/compile/internal/ssa/regalloc_test.go
index d990cac47b..7d804a0d30 100644
--- a/src/cmd/compile/internal/ssa/regalloc_test.go
+++ b/src/cmd/compile/internal/ssa/regalloc_test.go
@@ -6,7 +6,6 @@ package ssa
import (
"cmd/compile/internal/types"
- "cmd/internal/src"
"testing"
)
@@ -53,7 +52,7 @@ func TestNoGetgLoadReg(t *testing.T) {
f := c.Fun("b1",
Bloc("b1",
Valu("v1", OpInitMem, types.TypeMem, 0, nil),
- Valu("v6", OpArg, c.config.Types.Int64, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
+ Valu("v6", OpArg, c.config.Types.Int64, 0, c.Temp(c.config.Types.Int64)),
Valu("v8", OpGetG, c.config.Types.Int64.PtrTo(), 0, nil, "v1"),
Valu("v11", OpARM64CMPconst, types.TypeFlags, 0, nil, "v6"),
Eq("v11", "b2", "b4"),
@@ -92,8 +91,8 @@ func TestSpillWithLoop(t *testing.T) {
f := c.Fun("entry",
Bloc("entry",
Valu("mem", OpInitMem, types.TypeMem, 0, nil),
- Valu("ptr", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
- Valu("cond", OpArg, c.config.Types.Bool, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Bool)),
+ Valu("ptr", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64)),
+ Valu("cond", OpArg, c.config.Types.Bool, 0, c.Temp(c.config.Types.Bool)),
Valu("ld", OpAMD64MOVQload, c.config.Types.Int64, 0, nil, "ptr", "mem"), // this value needs a spill
Goto("loop"),
),
@@ -125,8 +124,8 @@ func TestSpillMove1(t *testing.T) {
f := c.Fun("entry",
Bloc("entry",
Valu("mem", OpInitMem, types.TypeMem, 0, nil),
- Valu("x", OpArg, c.config.Types.Int64, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
- Valu("p", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64.PtrTo())),
+ Valu("x", OpArg, c.config.Types.Int64, 0, c.Temp(c.config.Types.Int64)),
+ Valu("p", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo())),
Valu("a", OpAMD64TESTQ, types.TypeFlags, 0, nil, "x", "x"),
Goto("loop1"),
),
@@ -174,8 +173,8 @@ func TestSpillMove2(t *testing.T) {
f := c.Fun("entry",
Bloc("entry",
Valu("mem", OpInitMem, types.TypeMem, 0, nil),
- Valu("x", OpArg, c.config.Types.Int64, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
- Valu("p", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64.PtrTo())),
+ Valu("x", OpArg, c.config.Types.Int64, 0, c.Temp(c.config.Types.Int64)),
+ Valu("p", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo())),
Valu("a", OpAMD64TESTQ, types.TypeFlags, 0, nil, "x", "x"),
Goto("loop1"),
),
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index 43843bda55..bb09c6cdda 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -859,6 +859,9 @@ func disjoint(p1 *Value, n1 int64, p2 *Value, n2 int64) bool {
offset += base.AuxInt
base = base.Args[0]
}
+ if opcodeTable[base.Op].nilCheck {
+ base = base.Args[0]
+ }
return base, offset
}
p1, off1 := baseAndOffset(p1)
@@ -1183,6 +1186,12 @@ func min(x, y int64) int64 {
}
return y
}
+func max(x, y int64) int64 {
+ if x > y {
+ return x
+ }
+ return y
+}
func isConstZero(v *Value) bool {
switch v.Op {
@@ -1281,6 +1290,10 @@ func zeroUpper32Bits(x *Value, depth int) bool {
OpAMD64SHRL, OpAMD64SHRLconst, OpAMD64SARL, OpAMD64SARLconst,
OpAMD64SHLL, OpAMD64SHLLconst:
return true
+ case OpARM64REV16W, OpARM64REVW, OpARM64RBITW, OpARM64CLZW, OpARM64EXTRWconst,
+ OpARM64MULW, OpARM64MNEGW, OpARM64UDIVW, OpARM64DIVW, OpARM64UMODW,
+ OpARM64MADDW, OpARM64MSUBW, OpARM64RORW, OpARM64RORWconst:
+ return true
case OpArg:
return x.Type.Size() == 4
case OpPhi, OpSelect0, OpSelect1:
@@ -1474,7 +1487,7 @@ func encodePPC64RotateMask(rotate, mask, nbits int64) int64 {
// Determine boundaries and then decode them
if mask == 0 || ^mask == 0 || rotate >= nbits {
- panic("Invalid PPC64 rotate mask")
+ panic(fmt.Sprintf("invalid PPC64 rotate mask: %x %d %d", uint64(mask), rotate, nbits))
} else if nbits == 32 {
mb = bits.LeadingZeros32(uint32(mask))
me = 32 - bits.TrailingZeros32(uint32(mask))
@@ -1495,6 +1508,25 @@ func encodePPC64RotateMask(rotate, mask, nbits int64) int64 {
return int64(me) | int64(mb<<8) | int64(rotate<<16) | int64(nbits<<24)
}
+// Merge (RLDICL [encoded] (SRDconst [s] x)) into (RLDICL [new_encoded] x)
+// SRDconst on PPC64 is an extended mnemonic of RLDICL. If the input to an
+// RLDICL is an SRDconst, and the RLDICL does not rotate its value, the two
+// operations can be combined. This function assumes the two opcodes can
+// be merged, and returns an encoded rotate+mask value of the combined RLDICL.
+func mergePPC64RLDICLandSRDconst(encoded, s int64) int64 {
+ mb := s
+ r := 64 - s
+ // A larger mb is a smaller mask.
+ if (encoded>>8)&0xFF < mb {
+ encoded = (encoded &^ 0xFF00) | mb<<8
+ }
+ // The rotate is expected to be 0.
+ if (encoded & 0xFF0000) != 0 {
+ panic("non-zero rotate")
+ }
+ return encoded | r<<16
+}
+
// DecodePPC64RotateMask is the inverse operation of encodePPC64RotateMask. The values returned as
// mb and me satisfy the POWER ISA definition of MASK(x,y) where MASK(mb,me) = mask.
func DecodePPC64RotateMask(sauxint int64) (rotate, mb, me int64, mask uint64) {
@@ -1598,6 +1630,52 @@ func mergePPC64SldiSrw(sld, srw int64) int64 {
return encodePPC64RotateMask((32-srw+sld)&31, int64(mask), 32)
}
+// Convert a PPC64 opcode from the Op to OpCC form. This converts (op x y)
+// to (Select0 (opCC x y)) without having to explicitly fixup every user
+// of op.
+//
+// E.g consider the case:
+// a = (ADD x y)
+// b = (CMPconst [0] a)
+// c = (OR a z)
+//
+// A rule like (CMPconst [0] (ADD x y)) => (CMPconst [0] (Select0 (ADDCC x y)))
+// would produce:
+// a = (ADD x y)
+// a' = (ADDCC x y)
+// a” = (Select0 a')
+// b = (CMPconst [0] a”)
+// c = (OR a z)
+//
+// which makes it impossible to rewrite the second user. Instead the result
+// of this conversion is:
+// a' = (ADDCC x y)
+// a = (Select0 a')
+// b = (CMPconst [0] a)
+// c = (OR a z)
+//
+// Which makes it trivial to rewrite b using a lowering rule.
+func convertPPC64OpToOpCC(op *Value) *Value {
+ ccOpMap := map[Op]Op{
+ OpPPC64ADD: OpPPC64ADDCC,
+ OpPPC64ADDconst: OpPPC64ADDCCconst,
+ OpPPC64AND: OpPPC64ANDCC,
+ OpPPC64ANDN: OpPPC64ANDNCC,
+ OpPPC64CNTLZD: OpPPC64CNTLZDCC,
+ OpPPC64OR: OpPPC64ORCC,
+ OpPPC64SUB: OpPPC64SUBCC,
+ OpPPC64NEG: OpPPC64NEGCC,
+ OpPPC64NOR: OpPPC64NORCC,
+ OpPPC64XOR: OpPPC64XORCC,
+ }
+ b := op.Block
+ opCC := b.NewValue0I(op.Pos, ccOpMap[op.Op], types.NewTuple(op.Type, types.TypeFlags), op.AuxInt)
+ opCC.AddArgs(op.Args...)
+ op.reset(OpSelect0)
+ op.AddArgs(opCC)
+ return op
+}
+
// Convenience function to rotate a 32 bit constant value by another constant.
func rotateLeft32(v, rotate int64) int64 {
return int64(bits.RotateLeft32(uint32(v), int(rotate)))
@@ -2053,8 +2131,8 @@ func logicFlags32(x int32) flagConstant {
func makeJumpTableSym(b *Block) *obj.LSym {
s := base.Ctxt.Lookup(fmt.Sprintf("%s.jump%d", b.Func.fe.Func().LSym.Name, b.ID))
- s.Set(obj.AttrDuplicateOK, true)
- s.Set(obj.AttrLocal, true)
+ // The jump table symbol is accessed only from the function symbol.
+ s.Set(obj.AttrStatic, true)
return s
}
@@ -2123,3 +2201,11 @@ func isARM64addcon(v int64) bool {
}
return v <= 0xFFF
}
+
+// setPos sets the position of v to pos, then returns true.
+// Useful for setting the result of a rewrite's position to
+// something other than the default.
+func setPos(v *Value, pos src.XPos) bool {
+ v.Pos = pos
+ return true
+}
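
A standalone sketch of the bit arithmetic in mergePPC64RLDICLandSRDconst above, assuming the me | mb<<8 | rotate<<16 | nbits<<24 auxint layout produced by encodePPC64RotateMask; the helper and example values below are illustrative only, not compiler code:

package main

import "fmt"

// merge mirrors the arithmetic of mergePPC64RLDICLandSRDconst for illustration.
func merge(encoded, s int64) int64 {
	mb := s     // the right shift clears the top s bits, so mask begin must be >= s
	r := 64 - s // SRDconst [s] is a left rotate by 64-s plus that mask
	if (encoded>>8)&0xFF < mb {
		encoded = (encoded &^ 0xFF00) | mb<<8 // keep the larger mb (the smaller mask)
	}
	if encoded&0xFF0000 != 0 {
		panic("non-zero rotate") // only rotate-free RLDICLs are merged
	}
	return encoded | r<<16
}

func main() {
	// An RLDICL keeping the low 16 bits: me=63, mb=48, rotate=0, nbits=64.
	enc := int64(63) | 48<<8 | 0<<16 | 64<<24
	m := merge(enc, 8) // fed by (SRDconst [8] x)
	// 56 48: rotate left by 56, keep bits 48..63 (PPC64 numbers bits from the MSB),
	// which is exactly (x>>8)&0xFFFF.
	fmt.Println((m>>16)&0xFF, (m>>8)&0xFF)
}
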
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 88bd48f331..5332512f2a 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -73,20 +73,14 @@ func rewriteValueAMD64(v *Value) bool {
return rewriteValueAMD64_OpAMD64BSWAPL(v)
case OpAMD64BSWAPQ:
return rewriteValueAMD64_OpAMD64BSWAPQ(v)
- case OpAMD64BTCLconst:
- return rewriteValueAMD64_OpAMD64BTCLconst(v)
case OpAMD64BTCQconst:
return rewriteValueAMD64_OpAMD64BTCQconst(v)
case OpAMD64BTLconst:
return rewriteValueAMD64_OpAMD64BTLconst(v)
case OpAMD64BTQconst:
return rewriteValueAMD64_OpAMD64BTQconst(v)
- case OpAMD64BTRLconst:
- return rewriteValueAMD64_OpAMD64BTRLconst(v)
case OpAMD64BTRQconst:
return rewriteValueAMD64_OpAMD64BTRQconst(v)
- case OpAMD64BTSLconst:
- return rewriteValueAMD64_OpAMD64BTSLconst(v)
case OpAMD64BTSQconst:
return rewriteValueAMD64_OpAMD64BTSQconst(v)
case OpAMD64CMOVLCC:
@@ -871,6 +865,14 @@ func rewriteValueAMD64(v *Value) bool {
return rewriteValueAMD64_OpLsh8x64(v)
case OpLsh8x8:
return rewriteValueAMD64_OpLsh8x8(v)
+ case OpMax32F:
+ return rewriteValueAMD64_OpMax32F(v)
+ case OpMax64F:
+ return rewriteValueAMD64_OpMax64F(v)
+ case OpMin32F:
+ return rewriteValueAMD64_OpMin32F(v)
+ case OpMin64F:
+ return rewriteValueAMD64_OpMin64F(v)
case OpMod16:
return rewriteValueAMD64_OpMod16(v)
case OpMod16u:
@@ -2618,26 +2620,6 @@ func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
}
break
}
- // match: (ANDL (MOVLconst [c]) x)
- // cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
- // result: (BTRLconst [int8(log32(^c))] x)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64MOVLconst {
- continue
- }
- c := auxIntToInt32(v_0.AuxInt)
- x := v_1
- if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
- continue
- }
- v.reset(OpAMD64BTRLconst)
- v.AuxInt = int8ToAuxInt(int8(log32(^c)))
- v.AddArg(x)
- return true
- }
- break
- }
// match: (ANDL x (MOVLconst [c]))
// result: (ANDLconst [c] x)
for {
@@ -2746,20 +2728,6 @@ func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
}
func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (ANDLconst [c] x)
- // cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
- // result: (BTRLconst [int8(log32(^c))] x)
- for {
- c := auxIntToInt32(v.AuxInt)
- x := v_0
- if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
- break
- }
- v.reset(OpAMD64BTRLconst)
- v.AuxInt = int8ToAuxInt(int8(log32(^c)))
- v.AddArg(x)
- return true
- }
// match: (ANDLconst [c] (ANDLconst [d] x))
// result: (ANDLconst [c & d] x)
for {
@@ -2774,20 +2742,6 @@ func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ANDLconst [c] (BTRLconst [d] x))
- // result: (ANDLconst [c &^ (1<<uint32(d))] x)
- for {
- c := auxIntToInt32(v.AuxInt)
- if v_0.Op != OpAMD64BTRLconst {
- break
- }
- d := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- v.reset(OpAMD64ANDLconst)
- v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
- v.AddArg(x)
- return true
- }
// match: (ANDLconst [ 0xFF] x)
// result: (MOVBQZX x)
for {
@@ -3091,7 +3045,7 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
break
}
// match: (ANDQ (MOVQconst [c]) x)
- // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128
+ // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31
// result: (BTRQconst [int8(log64(^c))] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -3100,7 +3054,7 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
}
c := auxIntToInt64(v_0.AuxInt)
x := v_1
- if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) {
+ if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31) {
continue
}
v.reset(OpAMD64BTRQconst)
@@ -3222,20 +3176,6 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
}
func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (ANDQconst [c] x)
- // cond: isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128
- // result: (BTRQconst [int8(log32(^c))] x)
- for {
- c := auxIntToInt32(v.AuxInt)
- x := v_0
- if !(isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
- break
- }
- v.reset(OpAMD64BTRQconst)
- v.AuxInt = int8ToAuxInt(int8(log32(^c)))
- v.AddArg(x)
- return true
- }
// match: (ANDQconst [c] (ANDQconst [d] x))
// result: (ANDQconst [c & d] x)
for {
@@ -3250,24 +3190,6 @@ func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ANDQconst [c] (BTRQconst [d] x))
- // cond: is32Bit(int64(c) &^ (1<<uint32(d)))
- // result: (ANDQconst [c &^ (1<<uint32(d))] x)
- for {
- c := auxIntToInt32(v.AuxInt)
- if v_0.Op != OpAMD64BTRQconst {
- break
- }
- d := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- if !(is32Bit(int64(c) &^ (1 << uint32(d)))) {
- break
- }
- v.reset(OpAMD64ANDQconst)
- v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
- v.AddArg(x)
- return true
- }
// match: (ANDQconst [ 0xFF] x)
// result: (MOVBQZX x)
for {
@@ -3669,88 +3591,8 @@ func rewriteValueAMD64_OpAMD64BSWAPQ(v *Value) bool {
}
return false
}
-func rewriteValueAMD64_OpAMD64BTCLconst(v *Value) bool {
- v_0 := v.Args[0]
- // match: (BTCLconst [c] (XORLconst [d] x))
- // result: (XORLconst [d ^ 1<<uint32(c)] x)
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64XORLconst {
- break
- }
- d := auxIntToInt32(v_0.AuxInt)
- x := v_0.Args[0]
- v.reset(OpAMD64XORLconst)
- v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
- v.AddArg(x)
- return true
- }
- // match: (BTCLconst [c] (BTCLconst [d] x))
- // result: (XORLconst [1<<uint32(c) | 1<<uint32(d)] x)
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64BTCLconst {
- break
- }
- d := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- v.reset(OpAMD64XORLconst)
- v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
- v.AddArg(x)
- return true
- }
- // match: (BTCLconst [c] (MOVLconst [d]))
- // result: (MOVLconst [d^(1<<uint32(c))])
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64MOVLconst {
- break
- }
- d := auxIntToInt32(v_0.AuxInt)
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = int32ToAuxInt(d ^ (1 << uint32(c)))
- return true
- }
- return false
-}
func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (BTCQconst [c] (XORQconst [d] x))
- // cond: is32Bit(int64(d) ^ 1<<uint32(c))
- // result: (XORQconst [d ^ 1<<uint32(c)] x)
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64XORQconst {
- break
- }
- d := auxIntToInt32(v_0.AuxInt)
- x := v_0.Args[0]
- if !(is32Bit(int64(d) ^ 1<<uint32(c))) {
- break
- }
- v.reset(OpAMD64XORQconst)
- v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
- v.AddArg(x)
- return true
- }
- // match: (BTCQconst [c] (BTCQconst [d] x))
- // cond: is32Bit(1<<uint32(c) ^ 1<<uint32(d))
- // result: (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64BTCQconst {
- break
- }
- d := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- if !(is32Bit(1<<uint32(c) ^ 1<<uint32(d))) {
- break
- }
- v.reset(OpAMD64XORQconst)
- v.AuxInt = int32ToAuxInt(1<<uint32(c) ^ 1<<uint32(d))
- v.AddArg(x)
- return true
- }
// match: (BTCQconst [c] (MOVQconst [d]))
// result: (MOVQconst [d^(1<<uint32(c))])
for {
@@ -3945,76 +3787,6 @@ func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
}
return false
}
-func rewriteValueAMD64_OpAMD64BTRLconst(v *Value) bool {
- v_0 := v.Args[0]
- // match: (BTRLconst [c] (BTSLconst [c] x))
- // result: (BTRLconst [c] x)
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64BTSLconst || auxIntToInt8(v_0.AuxInt) != c {
- break
- }
- x := v_0.Args[0]
- v.reset(OpAMD64BTRLconst)
- v.AuxInt = int8ToAuxInt(c)
- v.AddArg(x)
- return true
- }
- // match: (BTRLconst [c] (BTCLconst [c] x))
- // result: (BTRLconst [c] x)
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
- break
- }
- x := v_0.Args[0]
- v.reset(OpAMD64BTRLconst)
- v.AuxInt = int8ToAuxInt(c)
- v.AddArg(x)
- return true
- }
- // match: (BTRLconst [c] (ANDLconst [d] x))
- // result: (ANDLconst [d &^ (1<<uint32(c))] x)
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64ANDLconst {
- break
- }
- d := auxIntToInt32(v_0.AuxInt)
- x := v_0.Args[0]
- v.reset(OpAMD64ANDLconst)
- v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
- v.AddArg(x)
- return true
- }
- // match: (BTRLconst [c] (BTRLconst [d] x))
- // result: (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64BTRLconst {
- break
- }
- d := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- v.reset(OpAMD64ANDLconst)
- v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
- v.AddArg(x)
- return true
- }
- // match: (BTRLconst [c] (MOVLconst [d]))
- // result: (MOVLconst [d&^(1<<uint32(c))])
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64MOVLconst {
- break
- }
- d := auxIntToInt32(v_0.AuxInt)
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
- return true
- }
- return false
-}
func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
v_0 := v.Args[0]
// match: (BTRQconst [c] (BTSQconst [c] x))
@@ -4043,42 +3815,6 @@ func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (BTRQconst [c] (ANDQconst [d] x))
- // cond: is32Bit(int64(d) &^ (1<<uint32(c)))
- // result: (ANDQconst [d &^ (1<<uint32(c))] x)
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64ANDQconst {
- break
- }
- d := auxIntToInt32(v_0.AuxInt)
- x := v_0.Args[0]
- if !(is32Bit(int64(d) &^ (1 << uint32(c)))) {
- break
- }
- v.reset(OpAMD64ANDQconst)
- v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
- v.AddArg(x)
- return true
- }
- // match: (BTRQconst [c] (BTRQconst [d] x))
- // cond: is32Bit(^(1<<uint32(c) | 1<<uint32(d)))
- // result: (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64BTRQconst {
- break
- }
- d := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- if !(is32Bit(^(1<<uint32(c) | 1<<uint32(d)))) {
- break
- }
- v.reset(OpAMD64ANDQconst)
- v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
- v.AddArg(x)
- return true
- }
// match: (BTRQconst [c] (MOVQconst [d]))
// result: (MOVQconst [d&^(1<<uint32(c))])
for {
@@ -4093,76 +3829,6 @@ func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
}
return false
}
-func rewriteValueAMD64_OpAMD64BTSLconst(v *Value) bool {
- v_0 := v.Args[0]
- // match: (BTSLconst [c] (BTRLconst [c] x))
- // result: (BTSLconst [c] x)
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64BTRLconst || auxIntToInt8(v_0.AuxInt) != c {
- break
- }
- x := v_0.Args[0]
- v.reset(OpAMD64BTSLconst)
- v.AuxInt = int8ToAuxInt(c)
- v.AddArg(x)
- return true
- }
- // match: (BTSLconst [c] (BTCLconst [c] x))
- // result: (BTSLconst [c] x)
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
- break
- }
- x := v_0.Args[0]
- v.reset(OpAMD64BTSLconst)
- v.AuxInt = int8ToAuxInt(c)
- v.AddArg(x)
- return true
- }
- // match: (BTSLconst [c] (ORLconst [d] x))
- // result: (ORLconst [d | 1<<uint32(c)] x)
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64ORLconst {
- break
- }
- d := auxIntToInt32(v_0.AuxInt)
- x := v_0.Args[0]
- v.reset(OpAMD64ORLconst)
- v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
- v.AddArg(x)
- return true
- }
- // match: (BTSLconst [c] (BTSLconst [d] x))
- // result: (ORLconst [1<<uint32(c) | 1<<uint32(d)] x)
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64BTSLconst {
- break
- }
- d := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- v.reset(OpAMD64ORLconst)
- v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
- v.AddArg(x)
- return true
- }
- // match: (BTSLconst [c] (MOVLconst [d]))
- // result: (MOVLconst [d|(1<<uint32(c))])
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64MOVLconst {
- break
- }
- d := auxIntToInt32(v_0.AuxInt)
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = int32ToAuxInt(d | (1 << uint32(c)))
- return true
- }
- return false
-}
func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
v_0 := v.Args[0]
// match: (BTSQconst [c] (BTRQconst [c] x))
@@ -4191,42 +3857,6 @@ func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (BTSQconst [c] (ORQconst [d] x))
- // cond: is32Bit(int64(d) | 1<<uint32(c))
- // result: (ORQconst [d | 1<<uint32(c)] x)
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64ORQconst {
- break
- }
- d := auxIntToInt32(v_0.AuxInt)
- x := v_0.Args[0]
- if !(is32Bit(int64(d) | 1<<uint32(c))) {
- break
- }
- v.reset(OpAMD64ORQconst)
- v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
- v.AddArg(x)
- return true
- }
- // match: (BTSQconst [c] (BTSQconst [d] x))
- // cond: is32Bit(1<<uint32(c) | 1<<uint32(d))
- // result: (ORQconst [1<<uint32(c) | 1<<uint32(d)] x)
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64BTSQconst {
- break
- }
- d := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- if !(is32Bit(1<<uint32(c) | 1<<uint32(d))) {
- break
- }
- v.reset(OpAMD64ORQconst)
- v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
- v.AddArg(x)
- return true
- }
// match: (BTSQconst [c] (MOVQconst [d]))
// result: (MOVQconst [d|(1<<uint32(c))])
for {
@@ -10181,8 +9811,6 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
// match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
// cond: y.Uses == 1
// result: (SETLstore [off] {sym} ptr x mem)
@@ -10516,47 +10144,6 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.AddArg3(base, val, mem)
return true
}
- // match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem))
- // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
- // result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
- for {
- i := auxIntToInt32(v.AuxInt)
- s := auxToSym(v.Aux)
- p := v_0
- x1 := v_1
- if x1.Op != OpAMD64MOVBload {
- break
- }
- j := auxIntToInt32(x1.AuxInt)
- s2 := auxToSym(x1.Aux)
- mem := x1.Args[1]
- p2 := x1.Args[0]
- mem2 := v_2
- if mem2.Op != OpAMD64MOVBstore || auxIntToInt32(mem2.AuxInt) != i-1 || auxToSym(mem2.Aux) != s {
- break
- }
- _ = mem2.Args[2]
- if p != mem2.Args[0] {
- break
- }
- x2 := mem2.Args[1]
- if x2.Op != OpAMD64MOVBload || auxIntToInt32(x2.AuxInt) != j-1 || auxToSym(x2.Aux) != s2 {
- break
- }
- _ = x2.Args[1]
- if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
- break
- }
- v.reset(OpAMD64MOVWstore)
- v.AuxInt = int32ToAuxInt(i - 1)
- v.Aux = symToAux(s)
- v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16)
- v0.AuxInt = int32ToAuxInt(j - 1)
- v0.Aux = symToAux(s2)
- v0.AddArg2(p2, mem)
- v.AddArg3(p, v0, mem)
- return true
- }
return false
}
func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
@@ -11069,8 +10656,6 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
// result: (MOVLstore [off] {sym} ptr x mem)
for {
@@ -11184,47 +10769,6 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.AddArg3(base, val, mem)
return true
}
- // match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem))
- // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
- // result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
- for {
- i := auxIntToInt32(v.AuxInt)
- s := auxToSym(v.Aux)
- p := v_0
- x1 := v_1
- if x1.Op != OpAMD64MOVLload {
- break
- }
- j := auxIntToInt32(x1.AuxInt)
- s2 := auxToSym(x1.Aux)
- mem := x1.Args[1]
- p2 := x1.Args[0]
- mem2 := v_2
- if mem2.Op != OpAMD64MOVLstore || auxIntToInt32(mem2.AuxInt) != i-4 || auxToSym(mem2.Aux) != s {
- break
- }
- _ = mem2.Args[2]
- if p != mem2.Args[0] {
- break
- }
- x2 := mem2.Args[1]
- if x2.Op != OpAMD64MOVLload || auxIntToInt32(x2.AuxInt) != j-4 || auxToSym(x2.Aux) != s2 {
- break
- }
- _ = x2.Args[1]
- if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
- break
- }
- v.reset(OpAMD64MOVQstore)
- v.AuxInt = int32ToAuxInt(i - 4)
- v.Aux = symToAux(s)
- v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64)
- v0.AuxInt = int32ToAuxInt(j - 4)
- v0.Aux = symToAux(s2)
- v0.AddArg2(p2, mem)
- v.AddArg3(p, v0, mem)
- return true
- }
// match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
// cond: y.Uses==1 && clobber(y)
// result: (ADDLmodify [off] {sym} ptr x mem)
@@ -12384,6 +11928,84 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
}
break
}
+ // match: (MOVQstore {sym} [off] ptr x:(BTSQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem)
+ // cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
+ // result: (BTSQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ if x.Op != OpAMD64BTSQconst {
+ break
+ }
+ c := auxIntToInt8(x.AuxInt)
+ l := x.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
+ break
+ }
+ v.reset(OpAMD64BTSQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr x:(BTRQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem)
+ // cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
+ // result: (BTRQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ if x.Op != OpAMD64BTRQconst {
+ break
+ }
+ c := auxIntToInt8(x.AuxInt)
+ l := x.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
+ break
+ }
+ v.reset(OpAMD64BTRQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr x:(BTCQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem)
+ // cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
+ // result: (BTCQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ if x.Op != OpAMD64BTCQconst {
+ break
+ }
+ c := auxIntToInt8(x.AuxInt)
+ l := x.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
+ break
+ }
+ v.reset(OpAMD64BTCQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
// match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
// result: (ADDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
@@ -12582,7 +12204,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
return true
}
// match: (MOVQstoreconst [c] {s} p1 x:(MOVQstoreconst [a] {s} p0 mem))
- // cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && clobber(x)
+ // cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)
// result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
for {
c := auxIntToValAndOff(v.AuxInt)
@@ -12598,7 +12220,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
}
mem := x.Args[1]
p0 := x.Args[0]
- if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && clobber(x)) {
+ if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) {
break
}
v.reset(OpAMD64MOVOstoreconst)
@@ -12608,7 +12230,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
return true
}
// match: (MOVQstoreconst [a] {s} p0 x:(MOVQstoreconst [c] {s} p1 mem))
- // cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && clobber(x)
+ // cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)
// result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
for {
a := auxIntToValAndOff(v.AuxInt)
@@ -12624,7 +12246,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
}
mem := x.Args[1]
p1 := x.Args[0]
- if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && clobber(x)) {
+ if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) {
break
}
v.reset(OpAMD64MOVOstoreconst)
@@ -13270,8 +12892,6 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
// match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
// result: (MOVWstore [off] {sym} ptr x mem)
for {
@@ -13385,47 +13005,6 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
v.AddArg3(base, val, mem)
return true
}
- // match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem))
- // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
- // result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
- for {
- i := auxIntToInt32(v.AuxInt)
- s := auxToSym(v.Aux)
- p := v_0
- x1 := v_1
- if x1.Op != OpAMD64MOVWload {
- break
- }
- j := auxIntToInt32(x1.AuxInt)
- s2 := auxToSym(x1.Aux)
- mem := x1.Args[1]
- p2 := x1.Args[0]
- mem2 := v_2
- if mem2.Op != OpAMD64MOVWstore || auxIntToInt32(mem2.AuxInt) != i-2 || auxToSym(mem2.Aux) != s {
- break
- }
- _ = mem2.Args[2]
- if p != mem2.Args[0] {
- break
- }
- x2 := mem2.Args[1]
- if x2.Op != OpAMD64MOVWload || auxIntToInt32(x2.AuxInt) != j-2 || auxToSym(x2.Aux) != s2 {
- break
- }
- _ = x2.Args[1]
- if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
- break
- }
- v.reset(OpAMD64MOVLstore)
- v.AuxInt = int32ToAuxInt(i - 2)
- v.Aux = symToAux(s)
- v0 := b.NewValue0(x2.Pos, OpAMD64MOVLload, typ.UInt32)
- v0.AuxInt = int32ToAuxInt(j - 2)
- v0.Aux = symToAux(s2)
- v0.AddArg2(p2, mem)
- v.AddArg3(p, v0, mem)
- return true
- }
// match: (MOVWstore [i] {s} p x:(ROLWconst [8] w) mem)
// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
// result: (MOVBEWstore [i] {s} p w mem)
@@ -14764,26 +14343,6 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
}
break
}
- // match: (ORL (MOVLconst [c]) x)
- // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
- // result: (BTSLconst [int8(log32(c))] x)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64MOVLconst {
- continue
- }
- c := auxIntToInt32(v_0.AuxInt)
- x := v_1
- if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
- continue
- }
- v.reset(OpAMD64BTSLconst)
- v.AuxInt = int8ToAuxInt(int8(log32(c)))
- v.AddArg(x)
- return true
- }
- break
- }
// match: (ORL x (MOVLconst [c]))
// result: (ORLconst [c] x)
for {
@@ -14839,20 +14398,6 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
}
func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (ORLconst [c] x)
- // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
- // result: (BTSLconst [int8(log32(c))] x)
- for {
- c := auxIntToInt32(v.AuxInt)
- x := v_0
- if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
- break
- }
- v.reset(OpAMD64BTSLconst)
- v.AuxInt = int8ToAuxInt(int8(log32(c)))
- v.AddArg(x)
- return true
- }
// match: (ORLconst [c] (ORLconst [d] x))
// result: (ORLconst [c | d] x)
for {
@@ -14867,20 +14412,6 @@ func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ORLconst [c] (BTSLconst [d] x))
- // result: (ORLconst [c | 1<<uint32(d)] x)
- for {
- c := auxIntToInt32(v.AuxInt)
- if v_0.Op != OpAMD64BTSLconst {
- break
- }
- d := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- v.reset(OpAMD64ORLconst)
- v.AuxInt = int32ToAuxInt(c | 1<<uint32(d))
- v.AddArg(x)
- return true
- }
// match: (ORLconst [c] x)
// cond: c==0
// result: x
@@ -15114,7 +14645,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
break
}
// match: (ORQ (MOVQconst [c]) x)
- // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
+ // cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31
// result: (BTSQconst [int8(log64(c))] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -15123,7 +14654,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
c := auxIntToInt64(v_0.AuxInt)
x := v_1
- if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
+ if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
continue
}
v.reset(OpAMD64BTSQconst)
@@ -15322,20 +14853,6 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (ORQconst [c] x)
- // cond: isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
- // result: (BTSQconst [int8(log32(c))] x)
- for {
- c := auxIntToInt32(v.AuxInt)
- x := v_0
- if !(isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128) {
- break
- }
- v.reset(OpAMD64BTSQconst)
- v.AuxInt = int8ToAuxInt(int8(log32(c)))
- v.AddArg(x)
- return true
- }
// match: (ORQconst [c] (ORQconst [d] x))
// result: (ORQconst [c | d] x)
for {
@@ -15350,24 +14867,6 @@ func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ORQconst [c] (BTSQconst [d] x))
- // cond: is32Bit(int64(c) | 1<<uint32(d))
- // result: (ORQconst [c | 1<<uint32(d)] x)
- for {
- c := auxIntToInt32(v.AuxInt)
- if v_0.Op != OpAMD64BTSQconst {
- break
- }
- d := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- if !(is32Bit(int64(c) | 1<<uint32(d))) {
- break
- }
- v.reset(OpAMD64ORQconst)
- v.AuxInt = int32ToAuxInt(c | 1<<uint32(d))
- v.AddArg(x)
- return true
- }
// match: (ORQconst [0] x)
// result: x
for {
@@ -21179,14 +20678,14 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
v_0 := v.Args[0]
// match: (SHLLconst [1] (SHRLconst [1] x))
- // result: (BTRLconst [0] x)
+ // result: (ANDLconst [-2] x)
for {
if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRLconst || auxIntToInt8(v_0.AuxInt) != 1 {
break
}
x := v_0.Args[0]
- v.reset(OpAMD64BTRLconst)
- v.AuxInt = int8ToAuxInt(0)
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(-2)
v.AddArg(x)
return true
}
@@ -21435,14 +20934,14 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
v_0 := v.Args[0]
// match: (SHLQconst [1] (SHRQconst [1] x))
- // result: (BTRQconst [0] x)
+ // result: (ANDQconst [-2] x)
for {
if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRQconst || auxIntToInt8(v_0.AuxInt) != 1 {
break
}
x := v_0.Args[0]
- v.reset(OpAMD64BTRQconst)
- v.AuxInt = int8ToAuxInt(0)
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = int32ToAuxInt(-2)
v.AddArg(x)
return true
}
@@ -21862,14 +21361,14 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
v_0 := v.Args[0]
// match: (SHRLconst [1] (SHLLconst [1] x))
- // result: (BTRLconst [31] x)
+ // result: (ANDLconst [0x7fffffff] x)
for {
if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
break
}
x := v_0.Args[0]
- v.reset(OpAMD64BTRLconst)
- v.AuxInt = int8ToAuxInt(31)
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(0x7fffffff)
v.AddArg(x)
return true
}
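The three hunks above replace the BTRLconst/BTRQconst forms of the shift-pair patterns with plain AND masks; the rewrites are simple bit clears. A minimal sketch of the identities involved (illustration only, not part of this CL):

package shiftmask

// (SHLLconst [1] (SHRLconst [1] x)) clears the low bit:
// x>>1<<1 == x &^ 1, i.e. the ANDLconst [-2] form above.
func clearLowBit(x uint32) uint32 { return (x >> 1) << 1 }

// (SHRLconst [1] (SHLLconst [1] x)) clears the top bit:
// x<<1>>1 == x & 0x7fffffff, the ANDLconst [0x7fffffff] form above.
func clearHighBit(x uint32) uint32 { return (x << 1) >> 1 }

The SHLQconst variant in the middle hunk is the same low-bit identity at 64 bits.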
@@ -23571,26 +23070,6 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool {
}
break
}
- // match: (XORL (MOVLconst [c]) x)
- // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
- // result: (BTCLconst [int8(log32(c))] x)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64MOVLconst {
- continue
- }
- c := auxIntToInt32(v_0.AuxInt)
- x := v_1
- if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
- continue
- }
- v.reset(OpAMD64BTCLconst)
- v.AuxInt = int8ToAuxInt(int8(log32(c)))
- v.AddArg(x)
- return true
- }
- break
- }
// match: (XORL x (MOVLconst [c]))
// result: (XORLconst [c] x)
for {
@@ -23662,20 +23141,6 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool {
}
func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (XORLconst [c] x)
- // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
- // result: (BTCLconst [int8(log32(c))] x)
- for {
- c := auxIntToInt32(v.AuxInt)
- x := v_0
- if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
- break
- }
- v.reset(OpAMD64BTCLconst)
- v.AuxInt = int8ToAuxInt(int8(log32(c)))
- v.AddArg(x)
- return true
- }
// match: (XORLconst [1] (SETNE x))
// result: (SETEQ x)
for {
@@ -23800,20 +23265,6 @@ func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (XORLconst [c] (BTCLconst [d] x))
- // result: (XORLconst [c ^ 1<<uint32(d)] x)
- for {
- c := auxIntToInt32(v.AuxInt)
- if v_0.Op != OpAMD64BTCLconst {
- break
- }
- d := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- v.reset(OpAMD64XORLconst)
- v.AuxInt = int32ToAuxInt(c ^ 1<<uint32(d))
- v.AddArg(x)
- return true
- }
// match: (XORLconst [c] x)
// cond: c==0
// result: x
@@ -24035,7 +23486,7 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
break
}
// match: (XORQ (MOVQconst [c]) x)
- // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
+ // cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31
// result: (BTCQconst [int8(log64(c))] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -24044,7 +23495,7 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
}
c := auxIntToInt64(v_0.AuxInt)
x := v_1
- if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
+ if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
continue
}
v.reset(OpAMD64BTCQconst)
@@ -24129,20 +23580,6 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
}
func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (XORQconst [c] x)
- // cond: isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
- // result: (BTCQconst [int8(log32(c))] x)
- for {
- c := auxIntToInt32(v.AuxInt)
- x := v_0
- if !(isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128) {
- break
- }
- v.reset(OpAMD64BTCQconst)
- v.AuxInt = int8ToAuxInt(int8(log32(c)))
- v.AddArg(x)
- return true
- }
// match: (XORQconst [c] (XORQconst [d] x))
// result: (XORQconst [c ^ d] x)
for {
@@ -24157,24 +23594,6 @@ func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (XORQconst [c] (BTCQconst [d] x))
- // cond: is32Bit(int64(c) ^ 1<<uint32(d))
- // result: (XORQconst [c ^ 1<<uint32(d)] x)
- for {
- c := auxIntToInt32(v.AuxInt)
- if v_0.Op != OpAMD64BTCQconst {
- break
- }
- d := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- if !(is32Bit(int64(c) ^ 1<<uint32(d))) {
- break
- }
- v.reset(OpAMD64XORQconst)
- v.AuxInt = int32ToAuxInt(c ^ 1<<uint32(d))
- v.AddArg(x)
- return true
- }
// match: (XORQconst [0] x)
// result: x
for {
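Taken together, the OR and XOR hunks above delete every rule that produced or consumed BTSLconst/BTCLconst (including the follow-on folds such as ORLconst of BTSLconst), and raise the 64-bit BTSQconst/BTCQconst threshold from 128 to 1<<31. A plausible reading, not stated in the diff: an ORQconst/XORQconst immediate is sign-extended from 32 bits, so any single-bit constant below 1<<31 can already be handled by the ordinary immediate form, and the BTS/BTC encoding only earns its keep once the bit sits at position 31 or above. A sketch of the updated guard (helper names here are hypothetical):

package btguard

func isPow2(c uint64) bool { return c != 0 && c&(c-1) == 0 }

// useBTSQ mirrors the condition in the ORQ/XORQ rules above: reach for
// BTSQconst/BTCQconst only when the constant is a single bit that cannot be
// expressed as a sign-extended 32-bit immediate.
func useBTSQ(c uint64) bool { return isPow2(c) && c >= 1<<31 }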
@@ -25791,12 +25210,12 @@ func rewriteValueAMD64_OpCtz16(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types
// match: (Ctz16 x)
- // result: (BSFL (BTSLconst <typ.UInt32> [16] x))
+ // result: (BSFL (ORLconst <typ.UInt32> [1<<16] x))
for {
x := v_0
v.reset(OpAMD64BSFL)
- v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
- v0.AuxInt = int8ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(1 << 16)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -25969,12 +25388,12 @@ func rewriteValueAMD64_OpCtz8(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types
// match: (Ctz8 x)
- // result: (BSFL (BTSLconst <typ.UInt32> [ 8] x))
+ // result: (BSFL (ORLconst <typ.UInt32> [1<<8 ] x))
for {
x := v_0
v.reset(OpAMD64BSFL)
- v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
- v0.AuxInt = int8ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(1 << 8)
v0.AddArg(x)
v.AddArg(v0)
return true
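With the BT*const ops gone, Ctz16 and Ctz8 plant their guard bit with an ORLconst instead of a BTSLconst; the guard bit sits just past the operand width, so BSFL always finds a set bit and the answer for nonzero inputs is unchanged. A stand-alone model of the same computation (not compiler code):

package ctzmodel

import "math/bits"

// ctz16 mirrors the (BSFL (ORLconst [1<<16] x)) lowering above.
func ctz16(x uint16) int { return bits.TrailingZeros32(uint32(x) | 1<<16) }

// ctz8 mirrors the (BSFL (ORLconst [1<<8] x)) lowering above.
func ctz8(x uint8) int { return bits.TrailingZeros32(uint32(x) | 1<<8) }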
@@ -27481,6 +26900,88 @@ func rewriteValueAMD64_OpLsh8x8(v *Value) bool {
}
return false
}
+func rewriteValueAMD64_OpMax32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Max32F <t> x y)
+ // result: (Neg32F <t> (Min32F <t> (Neg32F <t> x) (Neg32F <t> y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpNeg32F)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpMin32F, t)
+ v1 := b.NewValue0(v.Pos, OpNeg32F, t)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpNeg32F, t)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMax64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Max64F <t> x y)
+ // result: (Neg64F <t> (Min64F <t> (Neg64F <t> x) (Neg64F <t> y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpNeg64F)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpMin64F, t)
+ v1 := b.NewValue0(v.Pos, OpNeg64F, t)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpNeg64F, t)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMin32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Min32F <t> x y)
+ // result: (POR (MINSS <t> (MINSS <t> x y) x) (MINSS <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64POR)
+ v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t)
+ v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t)
+ v1.AddArg2(x, y)
+ v0.AddArg2(v1, x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMin64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Min64F <t> x y)
+ // result: (POR (MINSD <t> (MINSD <t> x y) x) (MINSD <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64POR)
+ v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t)
+ v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t)
+ v1.AddArg2(x, y)
+ v0.AddArg2(v1, x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
func rewriteValueAMD64_OpMod16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
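The block above adds the AMD64 lowerings for the new generic Min32F/Min64F/Max32F/Max64F ops: min is built from two MINSS/MINSD plus a POR, and max is min applied to negated operands and negated back. SSE MINSS/MINSD alone do not match the language semantics, since they return their second operand when either input is NaN or when the inputs compare equal; ORing the two intermediate results lets a NaN payload or a negative-zero sign bit from either side survive, which is what the built-ins require (this is a reading of the pattern, not a statement from the CL; the ARM64 file later in this diff maps the same ops straight to FMIN/FMAX). For reference, the contract being implemented, spelled out in plain Go (not compiler code):

package minmodel

import "math"

// goMin64 spells out Go's min for float64: NaN propagates, and -0 is
// preferred over +0. This is what the MINSD/MINSD/POR sequence above
// has to preserve.
func goMin64(x, y float64) float64 {
	switch {
	case math.IsNaN(x) || math.IsNaN(y):
		return math.NaN()
	case x == 0 && y == 0: // covers -0 vs +0
		if math.Signbit(x) {
			return x
		}
		return y
	case x < y:
		return x
	default:
		return y
	}
}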
@@ -27955,6 +27456,27 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
v.AddArg3(dst, v0, v1)
return true
}
+ // match: (Move [11] dst src mem)
+ // result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 11 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(7)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(7)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
// match: (Move [12] dst src mem)
// result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
for {
@@ -27977,14 +27499,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
return true
}
// match: (Move [s] dst src mem)
- // cond: s == 11 || s >= 13 && s <= 15
+ // cond: s >= 13 && s <= 15
// result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem))
for {
s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
- if !(s == 11 || s >= 13 && s <= 15) {
+ if !(s >= 13 && s <= 15) {
break
}
v.reset(OpAMD64MOVQstore)
@@ -30443,14 +29965,94 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
v.AddArg2(destptr, v0)
return true
}
+ // match: (Zero [9] destptr mem)
+ // cond: config.useSSE
+ // result: (MOVBstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 9 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [10] destptr mem)
+ // cond: config.useSSE
+ // result: (MOVWstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 10 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [11] destptr mem)
+ // cond: config.useSSE
+ // result: (MOVLstoreconst [makeValAndOff(0,7)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 11 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 7))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [12] destptr mem)
+ // cond: config.useSSE
+ // result: (MOVLstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
// match: (Zero [s] destptr mem)
- // cond: s > 8 && s < 16 && config.useSSE
+ // cond: s > 12 && s < 16 && config.useSSE
// result: (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
for {
s := auxIntToInt64(v.AuxInt)
destptr := v_0
mem := v_1
- if !(s > 8 && s < 16 && config.useSSE) {
+ if !(s > 12 && s < 16 && config.useSSE) {
break
}
v.reset(OpAMD64MOVQstoreconst)
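The new Move [11] and Zero [9] through [12] cases above stop funnelling these sizes through the generic paths and instead emit two fixed stores, overlapping them when the size is not a sum of power-of-two widths: 9, 10 and 12 bytes get an exact-width tail store at offset 8, while 11 bytes reuse a 4-byte store at offset 7 so that byte 7 is simply written twice. A stand-alone illustration of the 11-byte case (not compiler output):

package overlapstore

import "encoding/binary"

// zero11 mirrors the Zero [11] rule above: an 8-byte store at offset 0 and a
// 4-byte store at offset 7; the one overlapping byte is harmlessly written twice.
func zero11(p []byte) {
	binary.LittleEndian.PutUint64(p[0:8], 0)
	binary.LittleEndian.PutUint32(p[7:11], 0)
}

The Move [11] rule has the same shape for copies: MOVQload/MOVQstore at offset 0 followed by MOVLload/MOVLstore at offset 7.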
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index 70cacb90ed..971c9a5d55 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -1496,7 +1496,7 @@ func rewriteValueARM_OpARMADDD(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ADDD a (MULD x y))
- // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
// result: (MULAD a x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -1506,7 +1506,7 @@ func rewriteValueARM_OpARMADDD(v *Value) bool {
}
y := v_1.Args[1]
x := v_1.Args[0]
- if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
continue
}
v.reset(OpARMMULAD)
@@ -1516,7 +1516,7 @@ func rewriteValueARM_OpARMADDD(v *Value) bool {
break
}
// match: (ADDD a (NMULD x y))
- // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
// result: (MULSD a x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -1526,7 +1526,7 @@ func rewriteValueARM_OpARMADDD(v *Value) bool {
}
y := v_1.Args[1]
x := v_1.Args[0]
- if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
continue
}
v.reset(OpARMMULSD)
@@ -1541,7 +1541,7 @@ func rewriteValueARM_OpARMADDF(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ADDF a (MULF x y))
- // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
// result: (MULAF a x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -1551,7 +1551,7 @@ func rewriteValueARM_OpARMADDF(v *Value) bool {
}
y := v_1.Args[1]
x := v_1.Args[0]
- if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
continue
}
v.reset(OpARMMULAF)
@@ -1561,7 +1561,7 @@ func rewriteValueARM_OpARMADDF(v *Value) bool {
break
}
// match: (ADDF a (NMULF x y))
- // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
// result: (MULSF a x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -1571,7 +1571,7 @@ func rewriteValueARM_OpARMADDF(v *Value) bool {
}
y := v_1.Args[1]
x := v_1.Args[0]
- if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
continue
}
v.reset(OpARMMULSF)
@@ -1979,12 +1979,12 @@ func rewriteValueARM_OpARMADDconst(v *Value) bool {
return true
}
// match: (ADDconst [c] x)
- // cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff
+ // cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff
// result: (SUBconst [-c] x)
for {
c := auxIntToInt32(v.AuxInt)
x := v_0
- if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) {
+ if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) {
break
}
v.reset(OpARMSUBconst)
@@ -2099,7 +2099,7 @@ func rewriteValueARM_OpARMADDshiftLL(v *Value) bool {
return true
}
// match: (ADDshiftLL <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x)
- // cond: buildcfg.GOARM>=6
+ // cond: buildcfg.GOARM.Version>=6
// result: (REV16 x)
for {
if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 {
@@ -2110,7 +2110,7 @@ func rewriteValueARM_OpARMADDshiftLL(v *Value) bool {
break
}
x := v_0_0.Args[0]
- if x != v_1 || !(buildcfg.GOARM >= 6) {
+ if x != v_1 || !(buildcfg.GOARM.Version >= 6) {
break
}
v.reset(OpARMREV16)
@@ -2551,12 +2551,12 @@ func rewriteValueARM_OpARMANDconst(v *Value) bool {
return true
}
// match: (ANDconst [c] x)
- // cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff
+ // cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff
// result: (BICconst [int32(^uint32(c))] x)
for {
c := auxIntToInt32(v.AuxInt)
x := v_0
- if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) {
+ if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) {
break
}
v.reset(OpARMBICconst)
@@ -3052,12 +3052,12 @@ func rewriteValueARM_OpARMBICconst(v *Value) bool {
return true
}
// match: (BICconst [c] x)
- // cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff
+ // cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff
// result: (ANDconst [int32(^uint32(c))] x)
for {
c := auxIntToInt32(v.AuxInt)
x := v_0
- if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) {
+ if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) {
break
}
v.reset(OpARMANDconst)
@@ -7590,7 +7590,7 @@ func rewriteValueARM_OpARMMULD(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MULD (NEGD x) y)
- // cond: buildcfg.GOARM >= 6
+ // cond: buildcfg.GOARM.Version >= 6
// result: (NMULD x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -7599,7 +7599,7 @@ func rewriteValueARM_OpARMMULD(v *Value) bool {
}
x := v_0.Args[0]
y := v_1
- if !(buildcfg.GOARM >= 6) {
+ if !(buildcfg.GOARM.Version >= 6) {
continue
}
v.reset(OpARMNMULD)
@@ -7614,7 +7614,7 @@ func rewriteValueARM_OpARMMULF(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MULF (NEGF x) y)
- // cond: buildcfg.GOARM >= 6
+ // cond: buildcfg.GOARM.Version >= 6
// result: (NMULF x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -7623,7 +7623,7 @@ func rewriteValueARM_OpARMMULF(v *Value) bool {
}
x := v_0.Args[0]
y := v_1
- if !(buildcfg.GOARM >= 6) {
+ if !(buildcfg.GOARM.Version >= 6) {
continue
}
v.reset(OpARMNMULF)
@@ -8247,7 +8247,7 @@ func rewriteValueARM_OpARMMVNshiftRLreg(v *Value) bool {
func rewriteValueARM_OpARMNEGD(v *Value) bool {
v_0 := v.Args[0]
// match: (NEGD (MULD x y))
- // cond: buildcfg.GOARM >= 6
+ // cond: buildcfg.GOARM.Version >= 6
// result: (NMULD x y)
for {
if v_0.Op != OpARMMULD {
@@ -8255,7 +8255,7 @@ func rewriteValueARM_OpARMNEGD(v *Value) bool {
}
y := v_0.Args[1]
x := v_0.Args[0]
- if !(buildcfg.GOARM >= 6) {
+ if !(buildcfg.GOARM.Version >= 6) {
break
}
v.reset(OpARMNMULD)
@@ -8267,7 +8267,7 @@ func rewriteValueARM_OpARMNEGD(v *Value) bool {
func rewriteValueARM_OpARMNEGF(v *Value) bool {
v_0 := v.Args[0]
// match: (NEGF (MULF x y))
- // cond: buildcfg.GOARM >= 6
+ // cond: buildcfg.GOARM.Version >= 6
// result: (NMULF x y)
for {
if v_0.Op != OpARMMULF {
@@ -8275,7 +8275,7 @@ func rewriteValueARM_OpARMNEGF(v *Value) bool {
}
y := v_0.Args[1]
x := v_0.Args[0]
- if !(buildcfg.GOARM >= 6) {
+ if !(buildcfg.GOARM.Version >= 6) {
break
}
v.reset(OpARMNMULF)
@@ -8583,7 +8583,7 @@ func rewriteValueARM_OpARMORshiftLL(v *Value) bool {
return true
}
// match: (ORshiftLL <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x)
- // cond: buildcfg.GOARM>=6
+ // cond: buildcfg.GOARM.Version>=6
// result: (REV16 x)
for {
if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 {
@@ -8594,7 +8594,7 @@ func rewriteValueARM_OpARMORshiftLL(v *Value) bool {
break
}
x := v_0_0.Args[0]
- if x != v_1 || !(buildcfg.GOARM >= 6) {
+ if x != v_1 || !(buildcfg.GOARM.Version >= 6) {
break
}
v.reset(OpARMREV16)
@@ -9048,7 +9048,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool {
return true
}
// match: (RSB (MUL x y) a)
- // cond: buildcfg.GOARM == 7
+ // cond: buildcfg.GOARM.Version == 7
// result: (MULS x y a)
for {
if v_0.Op != OpARMMUL {
@@ -9057,7 +9057,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool {
y := v_0.Args[1]
x := v_0.Args[0]
a := v_1
- if !(buildcfg.GOARM == 7) {
+ if !(buildcfg.GOARM.Version == 7) {
break
}
v.reset(OpARMMULS)
@@ -10534,7 +10534,7 @@ func rewriteValueARM_OpARMSRAconst(v *Value) bool {
return true
}
// match: (SRAconst (SLLconst x [c]) [d])
- // cond: buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31
+ // cond: buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31
// result: (BFX [(d-c)|(32-d)<<8] x)
for {
d := auxIntToInt32(v.AuxInt)
@@ -10543,7 +10543,7 @@ func rewriteValueARM_OpARMSRAconst(v *Value) bool {
}
c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(buildcfg.GOARM == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) {
+ if !(buildcfg.GOARM.Version == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) {
break
}
v.reset(OpARMBFX)
@@ -10590,7 +10590,7 @@ func rewriteValueARM_OpARMSRLconst(v *Value) bool {
return true
}
// match: (SRLconst (SLLconst x [c]) [d])
- // cond: buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31
+ // cond: buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31
// result: (BFXU [(d-c)|(32-d)<<8] x)
for {
d := auxIntToInt32(v.AuxInt)
@@ -10599,7 +10599,7 @@ func rewriteValueARM_OpARMSRLconst(v *Value) bool {
}
c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(buildcfg.GOARM == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) {
+ if !(buildcfg.GOARM.Version == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) {
break
}
v.reset(OpARMBFXU)
@@ -10830,7 +10830,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool {
return true
}
// match: (SUB a (MUL x y))
- // cond: buildcfg.GOARM == 7
+ // cond: buildcfg.GOARM.Version == 7
// result: (MULS x y a)
for {
a := v_0
@@ -10839,7 +10839,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool {
}
y := v_1.Args[1]
x := v_1.Args[0]
- if !(buildcfg.GOARM == 7) {
+ if !(buildcfg.GOARM.Version == 7) {
break
}
v.reset(OpARMMULS)
@@ -10852,7 +10852,7 @@ func rewriteValueARM_OpARMSUBD(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (SUBD a (MULD x y))
- // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
// result: (MULSD a x y)
for {
a := v_0
@@ -10861,7 +10861,7 @@ func rewriteValueARM_OpARMSUBD(v *Value) bool {
}
y := v_1.Args[1]
x := v_1.Args[0]
- if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
break
}
v.reset(OpARMMULSD)
@@ -10869,7 +10869,7 @@ func rewriteValueARM_OpARMSUBD(v *Value) bool {
return true
}
// match: (SUBD a (NMULD x y))
- // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
// result: (MULAD a x y)
for {
a := v_0
@@ -10878,7 +10878,7 @@ func rewriteValueARM_OpARMSUBD(v *Value) bool {
}
y := v_1.Args[1]
x := v_1.Args[0]
- if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
break
}
v.reset(OpARMMULAD)
@@ -10891,7 +10891,7 @@ func rewriteValueARM_OpARMSUBF(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (SUBF a (MULF x y))
- // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
// result: (MULSF a x y)
for {
a := v_0
@@ -10900,7 +10900,7 @@ func rewriteValueARM_OpARMSUBF(v *Value) bool {
}
y := v_1.Args[1]
x := v_1.Args[0]
- if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
break
}
v.reset(OpARMMULSF)
@@ -10908,7 +10908,7 @@ func rewriteValueARM_OpARMSUBF(v *Value) bool {
return true
}
// match: (SUBF a (NMULF x y))
- // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
// result: (MULAF a x y)
for {
a := v_0
@@ -10917,7 +10917,7 @@ func rewriteValueARM_OpARMSUBF(v *Value) bool {
}
y := v_1.Args[1]
x := v_1.Args[0]
- if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
break
}
v.reset(OpARMMULAF)
@@ -11383,12 +11383,12 @@ func rewriteValueARM_OpARMSUBconst(v *Value) bool {
return true
}
// match: (SUBconst [c] x)
- // cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff
+ // cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff
// result: (ADDconst [-c] x)
for {
c := auxIntToInt32(v.AuxInt)
x := v_0
- if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) {
+ if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) {
break
}
v.reset(OpARMADDconst)
@@ -12710,7 +12710,7 @@ func rewriteValueARM_OpARMXORshiftLL(v *Value) bool {
return true
}
// match: (XORshiftLL <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x)
- // cond: buildcfg.GOARM>=6
+ // cond: buildcfg.GOARM.Version>=6
// result: (REV16 x)
for {
if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 {
@@ -12721,7 +12721,7 @@ func rewriteValueARM_OpARMXORshiftLL(v *Value) bool {
break
}
x := v_0_0.Args[0]
- if x != v_1 || !(buildcfg.GOARM >= 6) {
+ if x != v_1 || !(buildcfg.GOARM.Version >= 6) {
break
}
v.reset(OpARMREV16)
@@ -13062,12 +13062,12 @@ func rewriteValueARM_OpBswap32(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
// match: (Bswap32 <t> x)
- // cond: buildcfg.GOARM==5
+ // cond: buildcfg.GOARM.Version==5
// result: (XOR <t> (SRLconst <t> (BICconst <t> (XOR <t> x (SRRconst <t> [16] x)) [0xff0000]) [8]) (SRRconst <t> x [8]))
for {
t := v.Type
x := v_0
- if !(buildcfg.GOARM == 5) {
+ if !(buildcfg.GOARM.Version == 5) {
break
}
v.reset(OpARMXOR)
@@ -13090,11 +13090,11 @@ func rewriteValueARM_OpBswap32(v *Value) bool {
return true
}
// match: (Bswap32 x)
- // cond: buildcfg.GOARM>=6
+ // cond: buildcfg.GOARM.Version>=6
// result: (REV x)
for {
x := v_0
- if !(buildcfg.GOARM >= 6) {
+ if !(buildcfg.GOARM.Version >= 6) {
break
}
v.reset(OpARMREV)
@@ -13177,12 +13177,12 @@ func rewriteValueARM_OpCtz16(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types
// match: (Ctz16 <t> x)
- // cond: buildcfg.GOARM<=6
+ // cond: buildcfg.GOARM.Version<=6
// result: (RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x10000] x))) [1])))
for {
t := v.Type
x := v_0
- if !(buildcfg.GOARM <= 6) {
+ if !(buildcfg.GOARM.Version <= 6) {
break
}
v.reset(OpARMRSBconst)
@@ -13204,12 +13204,12 @@ func rewriteValueARM_OpCtz16(v *Value) bool {
return true
}
// match: (Ctz16 <t> x)
- // cond: buildcfg.GOARM==7
+ // cond: buildcfg.GOARM.Version==7
// result: (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
for {
t := v.Type
x := v_0
- if !(buildcfg.GOARM == 7) {
+ if !(buildcfg.GOARM.Version == 7) {
break
}
v.reset(OpARMCLZ)
@@ -13228,12 +13228,12 @@ func rewriteValueARM_OpCtz32(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
// match: (Ctz32 <t> x)
- // cond: buildcfg.GOARM<=6
+ // cond: buildcfg.GOARM.Version<=6
// result: (RSBconst [32] (CLZ <t> (SUBconst <t> (AND <t> x (RSBconst <t> [0] x)) [1])))
for {
t := v.Type
x := v_0
- if !(buildcfg.GOARM <= 6) {
+ if !(buildcfg.GOARM.Version <= 6) {
break
}
v.reset(OpARMRSBconst)
@@ -13252,12 +13252,12 @@ func rewriteValueARM_OpCtz32(v *Value) bool {
return true
}
// match: (Ctz32 <t> x)
- // cond: buildcfg.GOARM==7
+ // cond: buildcfg.GOARM.Version==7
// result: (CLZ <t> (RBIT <t> x))
for {
t := v.Type
x := v_0
- if !(buildcfg.GOARM == 7) {
+ if !(buildcfg.GOARM.Version == 7) {
break
}
v.reset(OpARMCLZ)
@@ -13274,12 +13274,12 @@ func rewriteValueARM_OpCtz8(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types
// match: (Ctz8 <t> x)
- // cond: buildcfg.GOARM<=6
+ // cond: buildcfg.GOARM.Version<=6
// result: (RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x100] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x100] x))) [1])))
for {
t := v.Type
x := v_0
- if !(buildcfg.GOARM <= 6) {
+ if !(buildcfg.GOARM.Version <= 6) {
break
}
v.reset(OpARMRSBconst)
@@ -13301,12 +13301,12 @@ func rewriteValueARM_OpCtz8(v *Value) bool {
return true
}
// match: (Ctz8 <t> x)
- // cond: buildcfg.GOARM==7
+ // cond: buildcfg.GOARM.Version==7
// result: (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
for {
t := v.Type
x := v_0
- if !(buildcfg.GOARM == 7) {
+ if !(buildcfg.GOARM.Version == 7) {
break
}
v.reset(OpARMCLZ)
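Every GOARM check in this file now reads buildcfg.GOARM.Version rather than comparing buildcfg.GOARM itself, so the architecture level is no longer a bare integer. The real definition lives in internal/buildcfg and is not part of this diff; below is a minimal stand-in with the shape these conditions assume (anything beyond the Version field is a guess):

package buildcfgmodel

// goarmFeatures carries the ARM architecture level the rewrite conditions
// read; the real type likely has further fields (for example a soft-float
// flag), which is an assumption here.
type goarmFeatures struct {
	Version int // 5, 6 or 7
}

var GOARM goarmFeatures

// Example of the kind of predicate the rules above now express.
func canUseREV16() bool { return GOARM.Version >= 6 }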
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index 3b8fe30371..f0a4425502 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -820,6 +820,18 @@ func rewriteValueARM64(v *Value) bool {
return rewriteValueARM64_OpLsh8x64(v)
case OpLsh8x8:
return rewriteValueARM64_OpLsh8x8(v)
+ case OpMax32F:
+ v.Op = OpARM64FMAXS
+ return true
+ case OpMax64F:
+ v.Op = OpARM64FMAXD
+ return true
+ case OpMin32F:
+ v.Op = OpARM64FMINS
+ return true
+ case OpMin64F:
+ v.Op = OpARM64FMIND
+ return true
case OpMod16:
return rewriteValueARM64_OpMod16(v)
case OpMod16u:
@@ -1249,7 +1261,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
break
}
// match: (ADD a l:(MULW x y))
- // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
+ // cond: v.Type.Size() <= 4 && l.Uses==1 && clobber(l)
// result: (MADDW a x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -1260,7 +1272,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
}
y := l.Args[1]
x := l.Args[0]
- if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
+ if !(v.Type.Size() <= 4 && l.Uses == 1 && clobber(l)) {
continue
}
v.reset(OpARM64MADDW)
@@ -1270,7 +1282,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
break
}
// match: (ADD a l:(MNEGW x y))
- // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
+ // cond: v.Type.Size() <= 4 && l.Uses==1 && clobber(l)
// result: (MSUBW a x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -1281,7 +1293,7 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
}
y := l.Args[1]
x := l.Args[0]
- if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
+ if !(v.Type.Size() <= 4 && l.Uses == 1 && clobber(l)) {
continue
}
v.reset(OpARM64MSUBW)
@@ -3736,7 +3748,7 @@ func rewriteValueARM64_OpARM64DIVW(v *Value) bool {
v_0 := v.Args[0]
// match: (DIVW (MOVDconst [c]) (MOVDconst [d]))
// cond: d != 0
- // result: (MOVDconst [int64(int32(c)/int32(d))])
+ // result: (MOVDconst [int64(uint32(int32(c)/int32(d)))])
for {
if v_0.Op != OpARM64MOVDconst {
break
@@ -3750,7 +3762,7 @@ func rewriteValueARM64_OpARM64DIVW(v *Value) bool {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = int64ToAuxInt(int64(int32(c) / int32(d)))
+ v.AuxInt = int64ToAuxInt(int64(uint32(int32(c) / int32(d))))
return true
}
return false
@@ -5962,18 +5974,19 @@ func rewriteValueARM64_OpARM64GreaterEqualNoov(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualNoov (InvertFlags x))
- // result: (OR (LessThanNoov <typ.Bool> x) (Equal <typ.Bool> x))
+ // result: (CSINC [OpARM64NotEqual] (LessThanNoov <typ.Bool> x) (MOVDconst [0]) x)
for {
if v_0.Op != OpARM64InvertFlags {
break
}
x := v_0.Args[0]
- v.reset(OpARM64OR)
+ v.reset(OpARM64CSINC)
+ v.AuxInt = opToAuxInt(OpARM64NotEqual)
v0 := b.NewValue0(v.Pos, OpARM64LessThanNoov, typ.Bool)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpARM64Equal, typ.Bool)
- v1.AddArg(x)
- v.AddArg2(v0, v1)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(v0, v1, x)
return true
}
return false
@@ -6697,18 +6710,17 @@ func rewriteValueARM64_OpARM64LessThanNoov(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types
// match: (LessThanNoov (InvertFlags x))
- // result: (BIC (GreaterEqualNoov <typ.Bool> x) (Equal <typ.Bool> x))
+ // result: (CSEL0 [OpARM64NotEqual] (GreaterEqualNoov <typ.Bool> x) x)
for {
if v_0.Op != OpARM64InvertFlags {
break
}
x := v_0.Args[0]
- v.reset(OpARM64BIC)
+ v.reset(OpARM64CSEL0)
+ v.AuxInt = opToAuxInt(OpARM64NotEqual)
v0 := b.NewValue0(v.Pos, OpARM64GreaterEqualNoov, typ.Bool)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpARM64Equal, typ.Bool)
- v1.AddArg(x)
- v.AddArg2(v0, v1)
+ v.AddArg2(v0, x)
return true
}
return false
@@ -7138,7 +7150,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
b := v.Block
// match: (MADDW a x (MOVDconst [c]))
// cond: int32(c)==-1
- // result: (SUB a x)
+ // result: (MOVWUreg (SUB <a.Type> a x))
for {
a := v_0
x := v_1
@@ -7149,13 +7161,15 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
if !(int32(c) == -1) {
break
}
- v.reset(OpARM64SUB)
- v.AddArg2(a, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
return true
}
// match: (MADDW a _ (MOVDconst [c]))
// cond: int32(c)==0
- // result: a
+ // result: (MOVWUreg a)
for {
a := v_0
if v_2.Op != OpARM64MOVDconst {
@@ -7165,12 +7179,13 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
if !(int32(c) == 0) {
break
}
- v.copyOf(a)
+ v.reset(OpARM64MOVWUreg)
+ v.AddArg(a)
return true
}
// match: (MADDW a x (MOVDconst [c]))
// cond: int32(c)==1
- // result: (ADD a x)
+ // result: (MOVWUreg (ADD <a.Type> a x))
for {
a := v_0
x := v_1
@@ -7181,13 +7196,15 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
if !(int32(c) == 1) {
break
}
- v.reset(OpARM64ADD)
- v.AddArg2(a, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
return true
}
// match: (MADDW a x (MOVDconst [c]))
// cond: isPowerOfTwo64(c)
- // result: (ADDshiftLL a x [log64(c)])
+ // result: (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)]))
for {
a := v_0
x := v_1
@@ -7198,14 +7215,16 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
if !(isPowerOfTwo64(c)) {
break
}
- v.reset(OpARM64ADDshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c))
- v.AddArg2(a, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
return true
}
// match: (MADDW a x (MOVDconst [c]))
// cond: isPowerOfTwo64(c-1) && int32(c)>=3
- // result: (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ // result: (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
for {
a := v_0
x := v_1
@@ -7216,16 +7235,18 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
break
}
- v.reset(OpARM64ADD)
- v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(log64(c - 1))
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c - 1))
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MADDW a x (MOVDconst [c]))
// cond: isPowerOfTwo64(c+1) && int32(c)>=7
- // result: (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ // result: (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
for {
a := v_0
x := v_1
@@ -7236,16 +7257,18 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
break
}
- v.reset(OpARM64SUB)
- v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(log64(c + 1))
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c + 1))
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MADDW a x (MOVDconst [c]))
// cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
- // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ // result: (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
for {
a := v_0
x := v_1
@@ -7256,17 +7279,19 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
break
}
- v.reset(OpARM64SUBshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c / 3))
- v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(2)
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 3))
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MADDW a x (MOVDconst [c]))
// cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
- // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ // result: (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
for {
a := v_0
x := v_1
@@ -7277,17 +7302,19 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
break
}
- v.reset(OpARM64ADDshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c / 5))
- v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(2)
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 5))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MADDW a x (MOVDconst [c]))
// cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
- // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ // result: (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
for {
a := v_0
x := v_1
@@ -7298,17 +7325,19 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
break
}
- v.reset(OpARM64SUBshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c / 7))
- v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(3)
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 7))
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MADDW a x (MOVDconst [c]))
// cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
- // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ // result: (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
for {
a := v_0
x := v_1
@@ -7319,17 +7348,19 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
break
}
- v.reset(OpARM64ADDshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c / 9))
- v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(3)
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 9))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MADDW a (MOVDconst [c]) x)
// cond: int32(c)==-1
- // result: (SUB a x)
+ // result: (MOVWUreg (SUB <a.Type> a x))
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -7340,13 +7371,15 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
if !(int32(c) == -1) {
break
}
- v.reset(OpARM64SUB)
- v.AddArg2(a, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
return true
}
// match: (MADDW a (MOVDconst [c]) _)
// cond: int32(c)==0
- // result: a
+ // result: (MOVWUreg a)
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -7356,12 +7389,13 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
if !(int32(c) == 0) {
break
}
- v.copyOf(a)
+ v.reset(OpARM64MOVWUreg)
+ v.AddArg(a)
return true
}
// match: (MADDW a (MOVDconst [c]) x)
// cond: int32(c)==1
- // result: (ADD a x)
+ // result: (MOVWUreg (ADD <a.Type> a x))
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -7372,13 +7406,15 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
if !(int32(c) == 1) {
break
}
- v.reset(OpARM64ADD)
- v.AddArg2(a, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
return true
}
// match: (MADDW a (MOVDconst [c]) x)
// cond: isPowerOfTwo64(c)
- // result: (ADDshiftLL a x [log64(c)])
+ // result: (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)]))
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -7389,14 +7425,16 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
if !(isPowerOfTwo64(c)) {
break
}
- v.reset(OpARM64ADDshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c))
- v.AddArg2(a, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
return true
}
// match: (MADDW a (MOVDconst [c]) x)
// cond: isPowerOfTwo64(c-1) && int32(c)>=3
- // result: (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ // result: (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -7407,16 +7445,18 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
break
}
- v.reset(OpARM64ADD)
- v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(log64(c - 1))
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c - 1))
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MADDW a (MOVDconst [c]) x)
// cond: isPowerOfTwo64(c+1) && int32(c)>=7
- // result: (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ // result: (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -7427,16 +7467,18 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
break
}
- v.reset(OpARM64SUB)
- v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(log64(c + 1))
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c + 1))
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MADDW a (MOVDconst [c]) x)
// cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
- // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ // result: (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -7447,17 +7489,19 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
break
}
- v.reset(OpARM64SUBshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c / 3))
- v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(2)
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 3))
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MADDW a (MOVDconst [c]) x)
// cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
- // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ // result: (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -7468,17 +7512,19 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
break
}
- v.reset(OpARM64ADDshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c / 5))
- v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(2)
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 5))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MADDW a (MOVDconst [c]) x)
// cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
- // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ // result: (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -7489,17 +7535,19 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
break
}
- v.reset(OpARM64SUBshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c / 7))
- v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(3)
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 7))
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MADDW a (MOVDconst [c]) x)
// cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
- // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ // result: (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -7510,16 +7558,18 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
break
}
- v.reset(OpARM64ADDshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c / 9))
- v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(3)
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 9))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MADDW (MOVDconst [c]) x y)
- // result: (ADDconst [c] (MULW <x.Type> x y))
+ // result: (MOVWUreg (ADDconst <x.Type> [c] (MULW <x.Type> x y)))
for {
if v_0.Op != OpARM64MOVDconst {
break
@@ -7527,15 +7577,17 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
c := auxIntToInt64(v_0.AuxInt)
x := v_1
y := v_2
- v.reset(OpARM64ADDconst)
- v.AuxInt = int64ToAuxInt(c)
- v0 := b.NewValue0(v.Pos, OpARM64MULW, x.Type)
- v0.AddArg2(x, y)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
v.AddArg(v0)
return true
}
// match: (MADDW a (MOVDconst [c]) (MOVDconst [d]))
- // result: (ADDconst [int64(int32(c)*int32(d))] a)
+ // result: (MOVWUreg (ADDconst <a.Type> [c*d] a))
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -7546,9 +7598,11 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
break
}
d := auxIntToInt64(v_2.AuxInt)
- v.reset(OpARM64ADDconst)
- v.AuxInt = int64ToAuxInt(int64(int32(c) * int32(d)))
- v.AddArg(a)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDconst, a.Type)
+ v0.AuxInt = int64ToAuxInt(c * d)
+ v0.AddArg(a)
+ v.AddArg(v0)
return true
}
return false
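All of the MADDW strength-reduction rules above now wrap their replacement in MOVWUreg, and the pure constant folds switch to int64(uint32(...)) (DIVW earlier, and MNEGW/MODW below, get the same treatment). The pattern reads as a 32-bit correctness fix: MADDW writes a W register, which zero-extends into the full 64-bit register, so any full-width replacement such as ADD or ADDshiftLL has to re-establish that zero-extension explicitly. A small model of the invariant (illustration, not compiler source):

package maddwmodel

// maddw models what the instruction leaves in a 64-bit register: the 32-bit
// result of a + x*c, zero-extended.
func maddw(a, x, c uint64) uint64 {
	return uint64(uint32(a) + uint32(x)*uint32(c))
}

// A full-width rewrite such as a + x<<k only stays equivalent if its result
// is truncated and re-extended, which is the job of the MOVWUreg wrapper in
// the rules above.
func rewritten(a, x uint64, k uint) uint64 {
	return uint64(uint32(a + x<<k))
}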
@@ -7789,7 +7843,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
b := v.Block
// match: (MNEGW x (MOVDconst [c]))
// cond: int32(c)==-1
- // result: x
+ // result: (MOVWUreg x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
@@ -7800,7 +7854,8 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
if !(int32(c) == -1) {
continue
}
- v.copyOf(x)
+ v.reset(OpARM64MOVWUreg)
+ v.AddArg(x)
return true
}
break
@@ -7825,7 +7880,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
}
// match: (MNEGW x (MOVDconst [c]))
// cond: int32(c)==1
- // result: (NEG x)
+ // result: (MOVWUreg (NEG <x.Type> x))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
@@ -7836,8 +7891,10 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
if !(int32(c) == 1) {
continue
}
- v.reset(OpARM64NEG)
- v.AddArg(x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
break
@@ -7866,7 +7923,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
}
// match: (MNEGW x (MOVDconst [c]))
// cond: isPowerOfTwo64(c-1) && int32(c) >= 3
- // result: (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ // result: (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> x x [log64(c-1)])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
@@ -7877,10 +7934,12 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
continue
}
- v.reset(OpARM64NEG)
- v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(log64(c - 1))
- v0.AddArg2(x, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c - 1))
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
v.AddArg(v0)
return true
}
@@ -7888,7 +7947,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
}
// match: (MNEGW x (MOVDconst [c]))
// cond: isPowerOfTwo64(c+1) && int32(c) >= 7
- // result: (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
+ // result: (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
@@ -7899,12 +7958,14 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
continue
}
- v.reset(OpARM64NEG)
- v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(log64(c + 1))
- v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
- v1.AddArg(x)
- v0.AddArg2(v1, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c + 1))
+ v2 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v2.AddArg(x)
+ v1.AddArg2(v2, x)
+ v0.AddArg(v1)
v.AddArg(v0)
return true
}
@@ -7912,7 +7973,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
}
// match: (MNEGW x (MOVDconst [c]))
// cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
- // result: (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
+ // result: (MOVWUreg (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
@@ -7923,12 +7984,13 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
continue
}
- v.reset(OpARM64SLLconst)
- v.Type = x.Type
- v.AuxInt = int64ToAuxInt(log64(c / 3))
- v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(2)
- v0.AddArg2(x, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 3))
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
v.AddArg(v0)
return true
}
@@ -7936,7 +7998,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
}
// match: (MNEGW x (MOVDconst [c]))
// cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
- // result: (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
+ // result: (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
@@ -7947,12 +8009,14 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
continue
}
- v.reset(OpARM64NEG)
- v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
- v0.AuxInt = int64ToAuxInt(log64(c / 5))
- v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
- v1.AuxInt = int64ToAuxInt(2)
- v1.AddArg2(x, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c / 5))
+ v2 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v2.AuxInt = int64ToAuxInt(2)
+ v2.AddArg2(x, x)
+ v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
@@ -7961,7 +8025,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
}
// match: (MNEGW x (MOVDconst [c]))
// cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
- // result: (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
+ // result: (MOVWUreg (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
@@ -7972,12 +8036,13 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
continue
}
- v.reset(OpARM64SLLconst)
- v.Type = x.Type
- v.AuxInt = int64ToAuxInt(log64(c / 7))
- v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(3)
- v0.AddArg2(x, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 7))
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
v.AddArg(v0)
return true
}
@@ -7985,7 +8050,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
}
// match: (MNEGW x (MOVDconst [c]))
// cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
- // result: (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
+ // result: (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
@@ -7996,12 +8061,14 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
continue
}
- v.reset(OpARM64NEG)
- v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
- v0.AuxInt = int64ToAuxInt(log64(c / 9))
- v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
- v1.AuxInt = int64ToAuxInt(3)
- v1.AddArg2(x, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c / 9))
+ v2 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v2.AuxInt = int64ToAuxInt(3)
+ v2.AddArg2(x, x)
+ v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
@@ -8009,7 +8076,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
break
}
// match: (MNEGW (MOVDconst [c]) (MOVDconst [d]))
- // result: (MOVDconst [-int64(int32(c)*int32(d))])
+ // result: (MOVDconst [int64(uint32(-c*d))])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpARM64MOVDconst {
@@ -8021,7 +8088,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
}
d := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64MOVDconst)
- v.AuxInt = int64ToAuxInt(-int64(int32(c) * int32(d)))
+ v.AuxInt = int64ToAuxInt(int64(uint32(-c * d)))
return true
}
break
@@ -8057,7 +8124,7 @@ func rewriteValueARM64_OpARM64MODW(v *Value) bool {
v_0 := v.Args[0]
// match: (MODW (MOVDconst [c]) (MOVDconst [d]))
// cond: d != 0
- // result: (MOVDconst [int64(int32(c)%int32(d))])
+ // result: (MOVDconst [int64(uint32(int32(c)%int32(d)))])
for {
if v_0.Op != OpARM64MOVDconst {
break
@@ -8071,7 +8138,7 @@ func rewriteValueARM64_OpARM64MODW(v *Value) bool {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = int64ToAuxInt(int64(int32(c) % int32(d)))
+ v.AuxInt = int64ToAuxInt(int64(uint32(int32(c) % int32(d))))
return true
}
return false
@@ -8451,6 +8518,17 @@ func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (MOVBUreg x)
+ // cond: v.Type.Size() <= 1
+ // result: x
+ for {
+ x := v_0
+ if !(v.Type.Size() <= 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
// match: (MOVBUreg (SLLconst [lc] x))
// cond: lc >= 8
// result: (MOVDconst [0])
@@ -8714,6 +8792,36 @@ func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool {
v.AuxInt = int64ToAuxInt(int64(int8(c)))
return true
}
+ // match: (MOVBreg x)
+ // cond: v.Type.Size() <= 1
+ // result: x
+ for {
+ x := v_0
+ if !(v.Type.Size() <= 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBreg <t> (ANDconst x [c]))
+ // cond: uint64(c) & uint64(0xffffffffffffff80) == 0
+ // result: (ANDconst <t> x [c])
+ for {
+ t := v.Type
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(uint64(c)&uint64(0xffffffffffffff80) == 0) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
// match: (MOVBreg (SLLconst [lc] x))
// cond: lc < 8
// result: (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
@@ -9759,7 +9867,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool {
b := v.Block
config := b.Func.Config
// match: (MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i+8] ptr mem))
- // cond: x.Uses == 1 && clobber(x)
+ // cond: x.Uses == 1 && setPos(v, x.Pos) && clobber(x)
// result: (MOVQstorezero {s} [i] ptr mem)
for {
i := auxIntToInt32(v.AuxInt)
@@ -9770,7 +9878,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool {
break
}
mem := x.Args[1]
- if ptr != x.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if ptr != x.Args[0] || !(x.Uses == 1 && setPos(v, x.Pos) && clobber(x)) {
break
}
v.reset(OpARM64MOVQstorezero)
@@ -9780,7 +9888,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool {
return true
}
// match: (MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i-8] ptr mem))
- // cond: x.Uses == 1 && clobber(x)
+ // cond: x.Uses == 1 && setPos(v, x.Pos) && clobber(x)
// result: (MOVQstorezero {s} [i-8] ptr mem)
for {
i := auxIntToInt32(v.AuxInt)
@@ -9791,7 +9899,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool {
break
}
mem := x.Args[1]
- if ptr != x.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if ptr != x.Args[0] || !(x.Uses == 1 && setPos(v, x.Pos) && clobber(x)) {
break
}
v.reset(OpARM64MOVQstorezero)
@@ -10346,6 +10454,17 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool {
v.AuxInt = int64ToAuxInt(int64(uint16(c)))
return true
}
+ // match: (MOVHUreg x)
+ // cond: v.Type.Size() <= 2
+ // result: x
+ for {
+ x := v_0
+ if !(v.Type.Size() <= 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
// match: (MOVHUreg (SLLconst [lc] x))
// cond: lc >= 16
// result: (MOVDconst [0])
@@ -10792,6 +10911,36 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool {
v.AuxInt = int64ToAuxInt(int64(int16(c)))
return true
}
+ // match: (MOVHreg x)
+ // cond: v.Type.Size() <= 2
+ // result: x
+ for {
+ x := v_0
+ if !(v.Type.Size() <= 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg <t> (ANDconst x [c]))
+ // cond: uint64(c) & uint64(0xffffffffffff8000) == 0
+ // result: (ANDconst <t> x [c])
+ for {
+ t := v.Type
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(uint64(c)&uint64(0xffffffffffff8000) == 0) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
// match: (MOVHreg (SLLconst [lc] x))
// cond: lc < 16
// result: (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
@@ -11951,6 +12100,28 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool {
v.AuxInt = int64ToAuxInt(int64(uint32(c)))
return true
}
+ // match: (MOVWUreg x)
+ // cond: v.Type.Size() <= 4
+ // result: x
+ for {
+ x := v_0
+ if !(v.Type.Size() <= 4) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWUreg x)
+ // cond: zeroUpper32Bits(x, 3)
+ // result: x
+ for {
+ x := v_0
+ if !(zeroUpper32Bits(x, 3)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
// match: (MOVWUreg (SLLconst [lc] x))
// cond: lc >= 32
// result: (MOVDconst [0])
@@ -12455,6 +12626,36 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool {
v.AuxInt = int64ToAuxInt(int64(int32(c)))
return true
}
+ // match: (MOVWreg x)
+ // cond: v.Type.Size() <= 4
+ // result: x
+ for {
+ x := v_0
+ if !(v.Type.Size() <= 4) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg <t> (ANDconst x [c]))
+ // cond: uint64(c) & uint64(0xffffffff80000000) == 0
+ // result: (ANDconst <t> x [c])
+ for {
+ t := v.Type
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(uint64(c)&uint64(0xffffffff80000000) == 0) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
// match: (MOVWreg (SLLconst [lc] x))
// cond: lc < 32
// result: (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
@@ -13411,7 +13612,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
b := v.Block
// match: (MSUBW a x (MOVDconst [c]))
// cond: int32(c)==-1
- // result: (ADD a x)
+ // result: (MOVWUreg (ADD <a.Type> a x))
for {
a := v_0
x := v_1
@@ -13422,13 +13623,15 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
if !(int32(c) == -1) {
break
}
- v.reset(OpARM64ADD)
- v.AddArg2(a, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
return true
}
// match: (MSUBW a _ (MOVDconst [c]))
// cond: int32(c)==0
- // result: a
+ // result: (MOVWUreg a)
for {
a := v_0
if v_2.Op != OpARM64MOVDconst {
@@ -13438,12 +13641,13 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
if !(int32(c) == 0) {
break
}
- v.copyOf(a)
+ v.reset(OpARM64MOVWUreg)
+ v.AddArg(a)
return true
}
// match: (MSUBW a x (MOVDconst [c]))
// cond: int32(c)==1
- // result: (SUB a x)
+ // result: (MOVWUreg (SUB <a.Type> a x))
for {
a := v_0
x := v_1
@@ -13454,13 +13658,15 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
if !(int32(c) == 1) {
break
}
- v.reset(OpARM64SUB)
- v.AddArg2(a, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
return true
}
// match: (MSUBW a x (MOVDconst [c]))
// cond: isPowerOfTwo64(c)
- // result: (SUBshiftLL a x [log64(c)])
+ // result: (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
for {
a := v_0
x := v_1
@@ -13471,14 +13677,16 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
if !(isPowerOfTwo64(c)) {
break
}
- v.reset(OpARM64SUBshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c))
- v.AddArg2(a, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
return true
}
// match: (MSUBW a x (MOVDconst [c]))
// cond: isPowerOfTwo64(c-1) && int32(c)>=3
- // result: (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ // result: (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
for {
a := v_0
x := v_1
@@ -13489,16 +13697,18 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
break
}
- v.reset(OpARM64SUB)
- v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(log64(c - 1))
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c - 1))
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MSUBW a x (MOVDconst [c]))
// cond: isPowerOfTwo64(c+1) && int32(c)>=7
- // result: (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ // result: (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
for {
a := v_0
x := v_1
@@ -13509,16 +13719,18 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
break
}
- v.reset(OpARM64ADD)
- v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(log64(c + 1))
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c + 1))
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MSUBW a x (MOVDconst [c]))
// cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
- // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ // result: (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
for {
a := v_0
x := v_1
@@ -13529,17 +13741,19 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
break
}
- v.reset(OpARM64ADDshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c / 3))
- v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(2)
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 3))
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MSUBW a x (MOVDconst [c]))
// cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
- // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ // result: (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
for {
a := v_0
x := v_1
@@ -13550,17 +13764,19 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
break
}
- v.reset(OpARM64SUBshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c / 5))
- v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(2)
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 5))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MSUBW a x (MOVDconst [c]))
// cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
- // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ // result: (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
for {
a := v_0
x := v_1
@@ -13571,17 +13787,19 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
break
}
- v.reset(OpARM64ADDshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c / 7))
- v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(3)
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 7))
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MSUBW a x (MOVDconst [c]))
// cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
- // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ // result: (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
for {
a := v_0
x := v_1
@@ -13592,17 +13810,19 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
break
}
- v.reset(OpARM64SUBshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c / 9))
- v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(3)
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 9))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MSUBW a (MOVDconst [c]) x)
// cond: int32(c)==-1
- // result: (ADD a x)
+ // result: (MOVWUreg (ADD <a.Type> a x))
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -13613,13 +13833,15 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
if !(int32(c) == -1) {
break
}
- v.reset(OpARM64ADD)
- v.AddArg2(a, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
return true
}
// match: (MSUBW a (MOVDconst [c]) _)
// cond: int32(c)==0
- // result: a
+ // result: (MOVWUreg a)
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -13629,12 +13851,13 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
if !(int32(c) == 0) {
break
}
- v.copyOf(a)
+ v.reset(OpARM64MOVWUreg)
+ v.AddArg(a)
return true
}
// match: (MSUBW a (MOVDconst [c]) x)
// cond: int32(c)==1
- // result: (SUB a x)
+ // result: (MOVWUreg (SUB <a.Type> a x))
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -13645,13 +13868,15 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
if !(int32(c) == 1) {
break
}
- v.reset(OpARM64SUB)
- v.AddArg2(a, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
return true
}
// match: (MSUBW a (MOVDconst [c]) x)
// cond: isPowerOfTwo64(c)
- // result: (SUBshiftLL a x [log64(c)])
+ // result: (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -13662,14 +13887,16 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
if !(isPowerOfTwo64(c)) {
break
}
- v.reset(OpARM64SUBshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c))
- v.AddArg2(a, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
return true
}
// match: (MSUBW a (MOVDconst [c]) x)
// cond: isPowerOfTwo64(c-1) && int32(c)>=3
- // result: (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ // result: (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -13680,16 +13907,18 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
break
}
- v.reset(OpARM64SUB)
- v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(log64(c - 1))
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c - 1))
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MSUBW a (MOVDconst [c]) x)
// cond: isPowerOfTwo64(c+1) && int32(c)>=7
- // result: (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ // result: (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -13700,16 +13929,18 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
break
}
- v.reset(OpARM64ADD)
- v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(log64(c + 1))
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c + 1))
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MSUBW a (MOVDconst [c]) x)
// cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
- // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ // result: (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -13720,17 +13951,19 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
break
}
- v.reset(OpARM64ADDshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c / 3))
- v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(2)
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 3))
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MSUBW a (MOVDconst [c]) x)
// cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
- // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ // result: (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -13741,17 +13974,19 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
break
}
- v.reset(OpARM64SUBshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c / 5))
- v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(2)
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 5))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MSUBW a (MOVDconst [c]) x)
// cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
- // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ // result: (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -13762,17 +13997,19 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
break
}
- v.reset(OpARM64ADDshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c / 7))
- v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(3)
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 7))
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MSUBW a (MOVDconst [c]) x)
// cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
- // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ // result: (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -13783,16 +14020,18 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
break
}
- v.reset(OpARM64SUBshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c / 9))
- v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(3)
- v0.AddArg2(x, x)
- v.AddArg2(a, v0)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 9))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
return true
}
// match: (MSUBW (MOVDconst [c]) x y)
- // result: (ADDconst [c] (MNEGW <x.Type> x y))
+ // result: (MOVWUreg (ADDconst <x.Type> [c] (MNEGW <x.Type> x y)))
for {
if v_0.Op != OpARM64MOVDconst {
break
@@ -13800,15 +14039,17 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
c := auxIntToInt64(v_0.AuxInt)
x := v_1
y := v_2
- v.reset(OpARM64ADDconst)
- v.AuxInt = int64ToAuxInt(c)
- v0 := b.NewValue0(v.Pos, OpARM64MNEGW, x.Type)
- v0.AddArg2(x, y)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARM64MNEGW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
v.AddArg(v0)
return true
}
// match: (MSUBW a (MOVDconst [c]) (MOVDconst [d]))
- // result: (SUBconst [int64(int32(c)*int32(d))] a)
+ // result: (MOVWUreg (SUBconst <a.Type> [c*d] a))
for {
a := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -13819,9 +14060,11 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
break
}
d := auxIntToInt64(v_2.AuxInt)
- v.reset(OpARM64SUBconst)
- v.AuxInt = int64ToAuxInt(int64(int32(c) * int32(d)))
- v.AddArg(a)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBconst, a.Type)
+ v0.AuxInt = int64ToAuxInt(c * d)
+ v0.AddArg(a)
+ v.AddArg(v0)
return true
}
return false
@@ -14082,7 +14325,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
}
// match: (MULW x (MOVDconst [c]))
// cond: int32(c)==-1
- // result: (NEG x)
+ // result: (MOVWUreg (NEG <x.Type> x))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
@@ -14093,8 +14336,10 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
if !(int32(c) == -1) {
continue
}
- v.reset(OpARM64NEG)
- v.AddArg(x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
break
@@ -14119,7 +14364,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
}
// match: (MULW x (MOVDconst [c]))
// cond: int32(c)==1
- // result: x
+ // result: (MOVWUreg x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
@@ -14130,14 +14375,15 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
if !(int32(c) == 1) {
continue
}
- v.copyOf(x)
+ v.reset(OpARM64MOVWUreg)
+ v.AddArg(x)
return true
}
break
}
// match: (MULW x (MOVDconst [c]))
// cond: isPowerOfTwo64(c)
- // result: (SLLconst [log64(c)] x)
+ // result: (MOVWUreg (SLLconst <x.Type> [log64(c)] x))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
@@ -14148,16 +14394,18 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
if !(isPowerOfTwo64(c)) {
continue
}
- v.reset(OpARM64SLLconst)
- v.AuxInt = int64ToAuxInt(log64(c))
- v.AddArg(x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
break
}
// match: (MULW x (MOVDconst [c]))
// cond: isPowerOfTwo64(c-1) && int32(c) >= 3
- // result: (ADDshiftLL x x [log64(c-1)])
+ // result: (MOVWUreg (ADDshiftLL <x.Type> x x [log64(c-1)]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
@@ -14168,16 +14416,18 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
continue
}
- v.reset(OpARM64ADDshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c - 1))
- v.AddArg2(x, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
return true
}
break
}
// match: (MULW x (MOVDconst [c]))
// cond: isPowerOfTwo64(c+1) && int32(c) >= 7
- // result: (ADDshiftLL (NEG <x.Type> x) x [log64(c+1)])
+ // result: (MOVWUreg (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
@@ -14188,18 +14438,20 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
continue
}
- v.reset(OpARM64ADDshiftLL)
- v.AuxInt = int64ToAuxInt(log64(c + 1))
- v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
- v0.AddArg(x)
- v.AddArg2(v0, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
return true
}
break
}
// match: (MULW x (MOVDconst [c]))
// cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
- // result: (SLLconst [log64(c/3)] (ADDshiftLL <x.Type> x x [1]))
+ // result: (MOVWUreg (SLLconst <x.Type> [log64(c/3)] (ADDshiftLL <x.Type> x x [1])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
@@ -14210,11 +14462,13 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
continue
}
- v.reset(OpARM64SLLconst)
- v.AuxInt = int64ToAuxInt(log64(c / 3))
- v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(1)
- v0.AddArg2(x, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 3))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(1)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
v.AddArg(v0)
return true
}
@@ -14222,7 +14476,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
}
// match: (MULW x (MOVDconst [c]))
// cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
- // result: (SLLconst [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))
+ // result: (MOVWUreg (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
@@ -14233,11 +14487,13 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
continue
}
- v.reset(OpARM64SLLconst)
- v.AuxInt = int64ToAuxInt(log64(c / 5))
- v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(2)
- v0.AddArg2(x, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 5))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
v.AddArg(v0)
return true
}
@@ -14245,7 +14501,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
}
// match: (MULW x (MOVDconst [c]))
// cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
- // result: (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+ // result: (MOVWUreg (SLLconst <x.Type> [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
@@ -14256,13 +14512,15 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
continue
}
- v.reset(OpARM64SLLconst)
- v.AuxInt = int64ToAuxInt(log64(c / 7))
- v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(3)
- v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
- v1.AddArg(x)
- v0.AddArg2(v1, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 7))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v2 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v2.AddArg(x)
+ v1.AddArg2(v2, x)
+ v0.AddArg(v1)
v.AddArg(v0)
return true
}
@@ -14270,7 +14528,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
}
// match: (MULW x (MOVDconst [c]))
// cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
- // result: (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
+ // result: (MOVWUreg (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
@@ -14281,18 +14539,20 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
continue
}
- v.reset(OpARM64SLLconst)
- v.AuxInt = int64ToAuxInt(log64(c / 9))
- v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
- v0.AuxInt = int64ToAuxInt(3)
- v0.AddArg2(x, x)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 9))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
v.AddArg(v0)
return true
}
break
}
// match: (MULW (MOVDconst [c]) (MOVDconst [d]))
- // result: (MOVDconst [int64(int32(c)*int32(d))])
+ // result: (MOVDconst [int64(uint32(c*d))])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpARM64MOVDconst {
@@ -14304,7 +14564,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
}
d := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64MOVDconst)
- v.AuxInt = int64ToAuxInt(int64(int32(c) * int32(d)))
+ v.AuxInt = int64ToAuxInt(int64(uint32(c * d)))
return true
}
break
@@ -14489,6 +14749,7 @@ func rewriteValueARM64_OpARM64NEG(v *Value) bool {
return true
}
// match: (NEG (MULW x y))
+ // cond: v.Type.Size() <= 4
// result: (MNEGW x y)
for {
if v_0.Op != OpARM64MULW {
@@ -14496,6 +14757,9 @@ func rewriteValueARM64_OpARM64NEG(v *Value) bool {
}
y := v_0.Args[1]
x := v_0.Args[0]
+ if !(v.Type.Size() <= 4) {
+ break
+ }
v.reset(OpARM64MNEGW)
v.AddArg2(x, y)
return true
@@ -16745,7 +17009,7 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool {
return true
}
// match: (SUB a l:(MULW x y))
- // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
+ // cond: v.Type.Size() <= 4 && l.Uses==1 && clobber(l)
// result: (MSUBW a x y)
for {
a := v_0
@@ -16755,7 +17019,7 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool {
}
y := l.Args[1]
x := l.Args[0]
- if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
+ if !(v.Type.Size() <= 4 && l.Uses == 1 && clobber(l)) {
break
}
v.reset(OpARM64MSUBW)
@@ -16763,7 +17027,7 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool {
return true
}
// match: (SUB a l:(MNEGW x y))
- // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
+ // cond: v.Type.Size() <= 4 && l.Uses==1 && clobber(l)
// result: (MADDW a x y)
for {
a := v_0
@@ -16773,7 +17037,7 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool {
}
y := l.Args[1]
x := l.Args[0]
- if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
+ if !(v.Type.Size() <= 4 && l.Uses == 1 && clobber(l)) {
break
}
v.reset(OpARM64MADDW)
@@ -17516,9 +17780,10 @@ func rewriteValueARM64_OpARM64UDIV(v *Value) bool {
func rewriteValueARM64_OpARM64UDIVW(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
// match: (UDIVW x (MOVDconst [c]))
// cond: uint32(c)==1
- // result: x
+ // result: (MOVWUreg x)
for {
x := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -17528,12 +17793,13 @@ func rewriteValueARM64_OpARM64UDIVW(v *Value) bool {
if !(uint32(c) == 1) {
break
}
- v.copyOf(x)
+ v.reset(OpARM64MOVWUreg)
+ v.AddArg(x)
return true
}
// match: (UDIVW x (MOVDconst [c]))
// cond: isPowerOfTwo64(c) && is32Bit(c)
- // result: (SRLconst [log64(c)] x)
+ // result: (SRLconst [log64(c)] (MOVWUreg <v.Type> x))
for {
x := v_0
if v_1.Op != OpARM64MOVDconst {
@@ -17545,7 +17811,9 @@ func rewriteValueARM64_OpARM64UDIVW(v *Value) bool {
}
v.reset(OpARM64SRLconst)
v.AuxInt = int64ToAuxInt(log64(c))
- v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVWUreg, v.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
// match: (UDIVW (MOVDconst [c]) (MOVDconst [d]))
diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64.go b/src/cmd/compile/internal/ssa/rewriteLOONG64.go
index e88b74cb22..edd3ffe6b9 100644
--- a/src/cmd/compile/internal/ssa/rewriteLOONG64.go
+++ b/src/cmd/compile/internal/ssa/rewriteLOONG64.go
@@ -1724,8 +1724,10 @@ func rewriteValueLOONG64_OpLOONG64MASKNEZ(v *Value) bool {
func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem)
- // cond: is32Bit(int64(off1)+off2)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVBUload [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -1736,7 +1738,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(int64(off1) + off2)) {
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVBUload)
@@ -1746,7 +1748,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool {
return true
}
// match: (MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -1758,7 +1760,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool {
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVBUload)
@@ -1771,6 +1773,26 @@ func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool {
}
func rewriteValueLOONG64_OpLOONG64MOVBUreg(v *Value) bool {
v_0 := v.Args[0]
+ // match: (MOVBUreg x:(SGT _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpLOONG64SGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg x:(SGTU _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpLOONG64SGTU {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
// match: (MOVBUreg x:(MOVBUload _ _))
// result: (MOVVreg x)
for {
@@ -1809,8 +1831,10 @@ func rewriteValueLOONG64_OpLOONG64MOVBUreg(v *Value) bool {
func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem)
- // cond: is32Bit(int64(off1)+off2)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVBload [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -1821,7 +1845,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(int64(off1) + off2)) {
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVBload)
@@ -1831,7 +1855,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool {
return true
}
// match: (MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -1843,7 +1867,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool {
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVBload)
@@ -1895,8 +1919,10 @@ func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
- // cond: is32Bit(int64(off1)+off2)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -1908,7 +1934,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool {
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(int64(off1) + off2)) {
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVBstore)
@@ -1918,7 +1944,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool {
return true
}
// match: (MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -1931,7 +1957,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool {
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVBstore)
@@ -2047,8 +2073,10 @@ func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool {
func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
- // cond: is32Bit(int64(off1)+off2)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2059,7 +2087,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(int64(off1) + off2)) {
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVBstorezero)
@@ -2069,7 +2097,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool {
return true
}
// match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2081,7 +2109,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool {
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVBstorezero)
@@ -2095,8 +2123,10 @@ func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool {
func rewriteValueLOONG64_OpLOONG64MOVDload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem)
- // cond: is32Bit(int64(off1)+off2)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVDload [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2107,7 +2137,7 @@ func rewriteValueLOONG64_OpLOONG64MOVDload(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(int64(off1) + off2)) {
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVDload)
@@ -2117,7 +2147,7 @@ func rewriteValueLOONG64_OpLOONG64MOVDload(v *Value) bool {
return true
}
// match: (MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2129,7 +2159,7 @@ func rewriteValueLOONG64_OpLOONG64MOVDload(v *Value) bool {
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVDload)
@@ -2144,8 +2174,10 @@ func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
- // cond: is32Bit(int64(off1)+off2)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2157,7 +2189,7 @@ func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool {
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(int64(off1) + off2)) {
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVDstore)
@@ -2167,7 +2199,7 @@ func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool {
return true
}
// match: (MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2180,7 +2212,7 @@ func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool {
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVDstore)
@@ -2194,8 +2226,10 @@ func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool {
func rewriteValueLOONG64_OpLOONG64MOVFload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem)
- // cond: is32Bit(int64(off1)+off2)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVFload [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2206,7 +2240,7 @@ func rewriteValueLOONG64_OpLOONG64MOVFload(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(int64(off1) + off2)) {
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVFload)
@@ -2216,7 +2250,7 @@ func rewriteValueLOONG64_OpLOONG64MOVFload(v *Value) bool {
return true
}
// match: (MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2228,7 +2262,7 @@ func rewriteValueLOONG64_OpLOONG64MOVFload(v *Value) bool {
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVFload)
@@ -2243,8 +2277,10 @@ func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
- // cond: is32Bit(int64(off1)+off2)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVFstore [off1+int32(off2)] {sym} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2256,7 +2292,7 @@ func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool {
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(int64(off1) + off2)) {
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVFstore)
@@ -2266,7 +2302,7 @@ func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool {
return true
}
// match: (MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2279,7 +2315,7 @@ func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool {
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVFstore)
@@ -2293,8 +2329,10 @@ func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool {
func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem)
- // cond: is32Bit(int64(off1)+off2)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVHUload [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2305,7 +2343,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(int64(off1) + off2)) {
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVHUload)
@@ -2315,7 +2353,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool {
return true
}
// match: (MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2327,7 +2365,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool {
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVHUload)
@@ -2400,8 +2438,10 @@ func rewriteValueLOONG64_OpLOONG64MOVHUreg(v *Value) bool {
func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem)
- // cond: is32Bit(int64(off1)+off2)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVHload [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2412,7 +2452,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(int64(off1) + off2)) {
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVHload)
@@ -2422,7 +2462,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool {
return true
}
// match: (MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2434,7 +2474,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool {
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVHload)
@@ -2530,8 +2570,10 @@ func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
- // cond: is32Bit(int64(off1)+off2)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2543,7 +2585,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool {
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(int64(off1) + off2)) {
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVHstore)
@@ -2553,7 +2595,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool {
return true
}
// match: (MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2566,7 +2608,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool {
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVHstore)
@@ -2648,8 +2690,10 @@ func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool {
func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
- // cond: is32Bit(int64(off1)+off2)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2660,7 +2704,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(int64(off1) + off2)) {
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVHstorezero)
@@ -2670,7 +2714,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool {
return true
}
// match: (MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2682,7 +2726,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool {
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVHstorezero)
@@ -2696,8 +2740,10 @@ func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool {
func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem)
- // cond: is32Bit(int64(off1)+off2)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVVload [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2708,7 +2754,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(int64(off1) + off2)) {
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVVload)
@@ -2718,7 +2764,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool {
return true
}
// match: (MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2730,7 +2776,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool {
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVVload)
@@ -2772,8 +2818,10 @@ func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
- // cond: is32Bit(int64(off1)+off2)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVVstore [off1+int32(off2)] {sym} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2785,7 +2833,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool {
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(int64(off1) + off2)) {
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVVstore)
@@ -2795,7 +2843,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool {
return true
}
// match: (MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2808,7 +2856,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool {
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVVstore)
@@ -2822,8 +2870,10 @@ func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool {
func rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
- // cond: is32Bit(int64(off1)+off2)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVVstorezero [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2834,7 +2884,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(int64(off1) + off2)) {
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVVstorezero)
@@ -2844,7 +2894,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool {
return true
}
// match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2856,7 +2906,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool {
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVVstorezero)
@@ -2870,8 +2920,10 @@ func rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool {
func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem)
- // cond: is32Bit(int64(off1)+off2)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVWUload [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2882,7 +2934,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(int64(off1) + off2)) {
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVWUload)
@@ -2892,7 +2944,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool {
return true
}
// match: (MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -2904,7 +2956,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool {
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVWUload)
@@ -2999,8 +3051,10 @@ func rewriteValueLOONG64_OpLOONG64MOVWUreg(v *Value) bool {
func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem)
- // cond: is32Bit(int64(off1)+off2)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVWload [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -3011,7 +3065,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(int64(off1) + off2)) {
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVWload)
@@ -3021,7 +3075,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool {
return true
}
// match: (MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -3033,7 +3087,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool {
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVWload)
@@ -3162,8 +3216,10 @@ func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
- // cond: is32Bit(int64(off1)+off2)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -3175,7 +3231,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool {
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(int64(off1) + off2)) {
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVWstore)
@@ -3185,7 +3241,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool {
return true
}
// match: (MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -3198,7 +3254,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool {
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVWstore)
@@ -3246,8 +3302,10 @@ func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool {
func rewriteValueLOONG64_OpLOONG64MOVWstorezero(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
- // cond: is32Bit(int64(off1)+off2)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -3258,7 +3316,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWstorezero(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(int64(off1) + off2)) {
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVWstorezero)
@@ -3268,7 +3326,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWstorezero(v *Value) bool {
return true
}
// match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
@@ -3280,7 +3338,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWstorezero(v *Value) bool {
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
break
}
v.reset(OpLOONG64MOVWstorezero)
@@ -7570,6 +7628,7 @@ func rewriteValueLOONG64_OpZero(v *Value) bool {
return false
}
func rewriteBlockLOONG64(b *Block) bool {
+ typ := &b.Func.Config.Types
switch b.Kind {
case BlockLOONG64EQ:
// match: (EQ (FPFlagTrue cmp) yes no)
@@ -7769,10 +7828,12 @@ func rewriteBlockLOONG64(b *Block) bool {
}
case BlockIf:
// match: (If cond yes no)
- // result: (NE cond yes no)
+ // result: (NE (MOVBUreg <typ.UInt64> cond) yes no)
for {
cond := b.Controls[0]
- b.resetWithControl(BlockLOONG64NE, cond)
+ v0 := b.NewValue0(cond.Pos, OpLOONG64MOVBUreg, typ.UInt64)
+ v0.AddArg(cond)
+ b.resetWithControl(BlockLOONG64NE, v0)
return true
}
case BlockLOONG64LEZ:
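The LOONG64 hunks above make two changes: the load/store offset-folding rules now refuse to fold into an SB-relative address when dynamic linking is in effect (the added ptr.Op != OpSB || !config.ctxt.Flag_dynlink guard), and the generic If block is lowered through MOVBUreg, so the branch tests only the zero-extended low byte of the condition. A minimal sketch of the second point, assuming (as the rewrite does) that only the low byte of a boolean SSA value is meaningful; this is plain Go for illustration, not compiler code:

package main

import "fmt"

// branchTaken mirrors what (NE (MOVBUreg <typ.UInt64> cond)) computes: the
// branch is decided by the zero-extended low byte of cond, never by stale
// upper bits left in the register.
func branchTaken(cond uint64) bool {
	return uint8(cond) != 0
}

func main() {
	fmt.Println(branchTaken(0))     // false
	fmt.Println(branchTaken(1))     // true
	fmt.Println(branchTaken(0x100)) // false: only bits 0..7 are consulted
}
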
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
index de316e9678..764465d0b7 100644
--- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
@@ -4797,6 +4797,17 @@ func rewriteValueMIPS64_OpMIPS64SGT(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (SGT x x)
+ // result: (MOVVconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
return false
}
func rewriteValueMIPS64_OpMIPS64SGTU(v *Value) bool {
@@ -4819,6 +4830,17 @@ func rewriteValueMIPS64_OpMIPS64SGTU(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (SGTU x x)
+ // result: (MOVVconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
return false
}
func rewriteValueMIPS64_OpMIPS64SGTUconst(v *Value) bool {
@@ -7315,6 +7337,38 @@ func rewriteValueMIPS64_OpSelect0(v *Value) bool {
v.AddArg(v0)
return true
}
+ // match: (Select0 <t> (Add64carry x y c))
+ // result: (ADDV (ADDV <t> x y) c)
+ for {
+ t := v.Type
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpMIPS64ADDV)
+ v0 := b.NewValue0(v.Pos, OpMIPS64ADDV, t)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, c)
+ return true
+ }
+ // match: (Select0 <t> (Sub64borrow x y c))
+ // result: (SUBV (SUBV <t> x y) c)
+ for {
+ t := v.Type
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpMIPS64SUBV)
+ v0 := b.NewValue0(v.Pos, OpMIPS64SUBV, t)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, c)
+ return true
+ }
// match: (Select0 (DIVVU _ (MOVVconst [1])))
// result: (MOVVconst [0])
for {
@@ -7427,6 +7481,50 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool {
v.AddArg2(v0, v2)
return true
}
+ // match: (Select1 <t> (Add64carry x y c))
+ // result: (OR (SGTU <t> x s:(ADDV <t> x y)) (SGTU <t> s (ADDV <t> s c)))
+ for {
+ t := v.Type
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpMIPS64OR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64SGTU, t)
+ s := b.NewValue0(v.Pos, OpMIPS64ADDV, t)
+ s.AddArg2(x, y)
+ v0.AddArg2(x, s)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64ADDV, t)
+ v3.AddArg2(s, c)
+ v2.AddArg2(s, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Select1 <t> (Sub64borrow x y c))
+ // result: (OR (SGTU <t> s:(SUBV <t> x y) x) (SGTU <t> (SUBV <t> s c) s))
+ for {
+ t := v.Type
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpMIPS64OR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64SGTU, t)
+ s := b.NewValue0(v.Pos, OpMIPS64SUBV, t)
+ s.AddArg2(x, y)
+ v0.AddArg2(s, x)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SUBV, t)
+ v3.AddArg2(s, c)
+ v2.AddArg2(v3, s)
+ v.AddArg2(v0, v2)
+ return true
+ }
// match: (Select1 (MULVU x (MOVVconst [-1])))
// result: (NEGV x)
for {
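The MIPS64 rules just added lower Add64carry and Sub64borrow with ordinary ADDV/SUBV plus unsigned compares (SGTU): with s = x+y, the carry-out is (x > s) | (s > s+c), and because c is at most 1 the two terms can never both be set, so OR is safe. A small sketch of that shape checked against math/bits; the helper names are illustrative, not taken from the compiler:

package main

import (
	"fmt"
	"math/bits"
)

func b2u(b bool) uint64 {
	if b {
		return 1
	}
	return 0
}

// add64carry mirrors the lowering: Select0 is (x+y)+c, Select1 is
// SGTU(x, x+y) OR SGTU(x+y, x+y+c).
func add64carry(x, y, c uint64) (sum, carry uint64) {
	s := x + y
	sum = s + c
	carry = b2u(x > s) | b2u(s > sum)
	return
}

func main() {
	for _, tc := range [][3]uint64{{^uint64(0), 1, 0}, {^uint64(0), ^uint64(0), 1}, {3, 4, 1}} {
		s1, c1 := add64carry(tc[0], tc[1], tc[2])
		s2, c2 := bits.Add64(tc[0], tc[1], tc[2])
		fmt.Println(s1 == s2, c1 == c2) // both true for every case
	}
}
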
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index d1c0c2b07f..473a8ff9d9 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -533,6 +533,8 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpPPC64MOVBstoreidx(v)
case OpPPC64MOVBstorezero:
return rewriteValuePPC64_OpPPC64MOVBstorezero(v)
+ case OpPPC64MOVDaddr:
+ return rewriteValuePPC64_OpPPC64MOVDaddr(v)
case OpPPC64MOVDload:
return rewriteValuePPC64_OpPPC64MOVDload(v)
case OpPPC64MOVDloadidx:
@@ -1176,7 +1178,7 @@ func rewriteValuePPC64_OpBswap16(v *Value) bool {
return true
}
// match: (Bswap16 x:(MOVHZloadidx ptr idx mem))
- // result: @x.Block (MOVHZreg (MOVHBRloadidx ptr idx mem))
+ // result: @x.Block (MOVHBRloadidx ptr idx mem)
for {
x := v_0
if x.Op != OpPPC64MOVHZloadidx {
@@ -1186,11 +1188,9 @@ func rewriteValuePPC64_OpBswap16(v *Value) bool {
ptr := x.Args[0]
idx := x.Args[1]
b = x.Block
- v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHBRloadidx, typ.Int16)
v.copyOf(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64MOVHBRloadidx, typ.Int16)
- v1.AddArg3(ptr, idx, mem)
- v0.AddArg(v1)
+ v0.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -1233,7 +1233,7 @@ func rewriteValuePPC64_OpBswap32(v *Value) bool {
return true
}
// match: (Bswap32 x:(MOVWZloadidx ptr idx mem))
- // result: @x.Block (MOVWZreg (MOVWBRloadidx ptr idx mem))
+ // result: @x.Block (MOVWBRloadidx ptr idx mem)
for {
x := v_0
if x.Op != OpPPC64MOVWZloadidx {
@@ -1243,11 +1243,9 @@ func rewriteValuePPC64_OpBswap32(v *Value) bool {
ptr := x.Args[0]
idx := x.Args[1]
b = x.Block
- v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRloadidx, typ.Int32)
v.copyOf(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64MOVWBRloadidx, typ.Int32)
- v1.AddArg3(ptr, idx, mem)
- v0.AddArg(v1)
+ v0.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -4226,6 +4224,19 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool {
}
break
}
+ // match: (AND x (MOVDconst [-1]))
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
// match: (AND x (MOVDconst [c]))
// cond: isU16Bit(c)
// result: (Select0 (ANDCCconst [c] x))
@@ -4431,7 +4442,7 @@ func rewriteValuePPC64_OpPPC64BRH(v *Value) bool {
}
// match: (BRH x:(MOVHZloadidx ptr idx mem))
// cond: x.Uses == 1
- // result: @x.Block (MOVHZreg (MOVHBRloadidx ptr idx mem))
+ // result: @x.Block (MOVHBRloadidx ptr idx mem)
for {
x := v_0
if x.Op != OpPPC64MOVHZloadidx {
@@ -4444,11 +4455,9 @@ func rewriteValuePPC64_OpPPC64BRH(v *Value) bool {
break
}
b = x.Block
- v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHBRloadidx, typ.Int16)
v.copyOf(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64MOVHBRloadidx, typ.Int16)
- v1.AddArg3(ptr, idx, mem)
- v0.AddArg(v1)
+ v0.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -4484,7 +4493,7 @@ func rewriteValuePPC64_OpPPC64BRW(v *Value) bool {
}
// match: (BRW x:(MOVWZloadidx ptr idx mem))
// cond: x.Uses == 1
- // result: @x.Block (MOVWZreg (MOVWBRloadidx ptr idx mem))
+ // result: @x.Block (MOVWBRloadidx ptr idx mem)
for {
x := v_0
if x.Op != OpPPC64MOVWZloadidx {
@@ -4497,11 +4506,9 @@ func rewriteValuePPC64_OpPPC64BRW(v *Value) bool {
break
}
b = x.Block
- v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRloadidx, typ.Int32)
v.copyOf(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64MOVWBRloadidx, typ.Int32)
- v1.AddArg3(ptr, idx, mem)
- v0.AddArg(v1)
+ v0.AddArg3(ptr, idx, mem)
return true
}
return false
@@ -7765,6 +7772,39 @@ func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value) bool {
}
return false
}
+func rewriteValuePPC64_OpPPC64MOVDaddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVDaddr {sym} [n] p:(ADD x y))
+ // cond: sym == nil && n == 0
+ // result: p
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ if !(sym == nil && n == 0) {
+ break
+ }
+ v.copyOf(p)
+ return true
+ }
+ // match: (MOVDaddr {sym} [n] ptr)
+ // cond: sym == nil && n == 0 && (ptr.Op == OpArgIntReg || ptr.Op == OpPhi)
+ // result: ptr
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if !(sym == nil && n == 0 && (ptr.Op == OpArgIntReg || ptr.Op == OpPhi)) {
+ break
+ }
+ v.copyOf(ptr)
+ return true
+ }
+ return false
+}
func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -14512,6 +14552,26 @@ func rewriteValuePPC64_OpSelect0(v *Value) bool {
v.AddArg(v0)
return true
}
+ // match: (Select0 (ANDCCconst [1] z:(SRADconst [63] x)))
+ // cond: z.Uses == 1
+ // result: (SRDconst [63] x)
+ for {
+ if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64SRADconst || auxIntToInt64(z.AuxInt) != 63 {
+ break
+ }
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValuePPC64_OpSelect1(v *Value) bool {
@@ -15223,56 +15283,6 @@ func rewriteBlockPPC64(b *Block) bool {
typ := &b.Func.Config.Types
switch b.Kind {
case BlockPPC64EQ:
- // match: (EQ (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no)
- // result: (EQ (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
- for b.Controls[0].Op == OpPPC64CMPconst {
- v_0 := b.Controls[0]
- if auxIntToInt64(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(v_0_0_0.AuxInt)
- x := v_0_0_0.Args[0]
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(c)
- v1.AddArg(x)
- v0.AddArg(v1)
- b.resetWithControl(BlockPPC64EQ, v0)
- return true
- }
- // match: (EQ (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no)
- // result: (EQ (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
- v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(v_0_0_0.AuxInt)
- x := v_0_0_0.Args[0]
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(c)
- v1.AddArg(x)
- v0.AddArg(v1)
- b.resetWithControl(BlockPPC64EQ, v0)
- return true
- }
// match: (EQ (FlagEQ) yes no)
// result: (First yes no)
for b.Controls[0].Op == OpPPC64FlagEQ {
@@ -15301,8 +15311,8 @@ func rewriteBlockPPC64(b *Block) bool {
b.resetWithControl(BlockPPC64EQ, cmp)
return true
}
- // match: (EQ (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no)
- // result: (EQ (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+ // match: (EQ (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (EQ (Select1 <types.TypeFlags> z) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 0 {
@@ -15312,22 +15322,17 @@ func rewriteBlockPPC64(b *Block) bool {
if v_0_0.Op != OpSelect0 {
break
}
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst {
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
break
}
- c := auxIntToInt64(v_0_0_0.AuxInt)
- x := v_0_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(c)
- v1.AddArg(x)
- v0.AddArg(v1)
+ v0.AddArg(z)
b.resetWithControl(BlockPPC64EQ, v0)
return true
}
- // match: (EQ (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no)
- // result: (EQ (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+ // match: (EQ (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (EQ (Select1 <types.TypeFlags> z) yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
if auxIntToInt32(v_0.AuxInt) != 0 {
@@ -15337,17 +15342,12 @@ func rewriteBlockPPC64(b *Block) bool {
if v_0_0.Op != OpSelect0 {
break
}
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst {
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
break
}
- c := auxIntToInt64(v_0_0_0.AuxInt)
- x := v_0_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(c)
- v1.AddArg(x)
- v0.AddArg(v1)
+ v0.AddArg(z)
b.resetWithControl(BlockPPC64EQ, v0)
return true
}
@@ -15469,8 +15469,8 @@ func rewriteBlockPPC64(b *Block) bool {
b.resetWithControl(BlockPPC64LE, cmp)
return true
}
- // match: (GE (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no)
- // result: (GE (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+ // match: (GE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (GE (Select1 <types.TypeFlags> z) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 0 {
@@ -15480,22 +15480,17 @@ func rewriteBlockPPC64(b *Block) bool {
if v_0_0.Op != OpSelect0 {
break
}
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst {
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
break
}
- c := auxIntToInt64(v_0_0_0.AuxInt)
- x := v_0_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(c)
- v1.AddArg(x)
- v0.AddArg(v1)
+ v0.AddArg(z)
b.resetWithControl(BlockPPC64GE, v0)
return true
}
- // match: (GE (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no)
- // result: (GE (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+ // match: (GE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (GE (Select1 <types.TypeFlags> z) yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
if auxIntToInt32(v_0.AuxInt) != 0 {
@@ -15505,17 +15500,12 @@ func rewriteBlockPPC64(b *Block) bool {
if v_0_0.Op != OpSelect0 {
break
}
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst {
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
break
}
- c := auxIntToInt64(v_0_0_0.AuxInt)
- x := v_0_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(c)
- v1.AddArg(x)
- v0.AddArg(v1)
+ v0.AddArg(z)
b.resetWithControl(BlockPPC64GE, v0)
return true
}
@@ -15638,8 +15628,8 @@ func rewriteBlockPPC64(b *Block) bool {
b.resetWithControl(BlockPPC64LT, cmp)
return true
}
- // match: (GT (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no)
- // result: (GT (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+ // match: (GT (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (GT (Select1 <types.TypeFlags> z) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 0 {
@@ -15649,22 +15639,17 @@ func rewriteBlockPPC64(b *Block) bool {
if v_0_0.Op != OpSelect0 {
break
}
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst {
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
break
}
- c := auxIntToInt64(v_0_0_0.AuxInt)
- x := v_0_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(c)
- v1.AddArg(x)
- v0.AddArg(v1)
+ v0.AddArg(z)
b.resetWithControl(BlockPPC64GT, v0)
return true
}
- // match: (GT (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no)
- // result: (GT (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+ // match: (GT (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (GT (Select1 <types.TypeFlags> z) yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
if auxIntToInt32(v_0.AuxInt) != 0 {
@@ -15674,17 +15659,12 @@ func rewriteBlockPPC64(b *Block) bool {
if v_0_0.Op != OpSelect0 {
break
}
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst {
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
break
}
- c := auxIntToInt64(v_0_0_0.AuxInt)
- x := v_0_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(c)
- v1.AddArg(x)
- v0.AddArg(v1)
+ v0.AddArg(z)
b.resetWithControl(BlockPPC64GT, v0)
return true
}
@@ -15902,8 +15882,8 @@ func rewriteBlockPPC64(b *Block) bool {
b.resetWithControl(BlockPPC64GE, cmp)
return true
}
- // match: (LE (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no)
- // result: (LE (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+ // match: (LE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (LE (Select1 <types.TypeFlags> z) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 0 {
@@ -15913,22 +15893,17 @@ func rewriteBlockPPC64(b *Block) bool {
if v_0_0.Op != OpSelect0 {
break
}
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst {
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
break
}
- c := auxIntToInt64(v_0_0_0.AuxInt)
- x := v_0_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(c)
- v1.AddArg(x)
- v0.AddArg(v1)
+ v0.AddArg(z)
b.resetWithControl(BlockPPC64LE, v0)
return true
}
- // match: (LE (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no)
- // result: (LE (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+ // match: (LE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (LE (Select1 <types.TypeFlags> z) yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
if auxIntToInt32(v_0.AuxInt) != 0 {
@@ -15938,17 +15913,12 @@ func rewriteBlockPPC64(b *Block) bool {
if v_0_0.Op != OpSelect0 {
break
}
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst {
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
break
}
- c := auxIntToInt64(v_0_0_0.AuxInt)
- x := v_0_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(c)
- v1.AddArg(x)
- v0.AddArg(v1)
+ v0.AddArg(z)
b.resetWithControl(BlockPPC64LE, v0)
return true
}
@@ -16071,8 +16041,8 @@ func rewriteBlockPPC64(b *Block) bool {
b.resetWithControl(BlockPPC64GT, cmp)
return true
}
- // match: (LT (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no)
- // result: (LT (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+ // match: (LT (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (LT (Select1 <types.TypeFlags> z) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 0 {
@@ -16082,22 +16052,17 @@ func rewriteBlockPPC64(b *Block) bool {
if v_0_0.Op != OpSelect0 {
break
}
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst {
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
break
}
- c := auxIntToInt64(v_0_0_0.AuxInt)
- x := v_0_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(c)
- v1.AddArg(x)
- v0.AddArg(v1)
+ v0.AddArg(z)
b.resetWithControl(BlockPPC64LT, v0)
return true
}
- // match: (LT (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no)
- // result: (LT (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+ // match: (LT (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (LT (Select1 <types.TypeFlags> z) yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
if auxIntToInt32(v_0.AuxInt) != 0 {
@@ -16107,17 +16072,12 @@ func rewriteBlockPPC64(b *Block) bool {
if v_0_0.Op != OpSelect0 {
break
}
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst {
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
break
}
- c := auxIntToInt64(v_0_0_0.AuxInt)
- x := v_0_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(c)
- v1.AddArg(x)
- v0.AddArg(v1)
+ v0.AddArg(z)
b.resetWithControl(BlockPPC64LT, v0)
return true
}
@@ -16442,56 +16402,6 @@ func rewriteBlockPPC64(b *Block) bool {
b.resetWithControl(BlockPPC64FGE, cc)
return true
}
- // match: (NE (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no)
- // result: (NE (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
- for b.Controls[0].Op == OpPPC64CMPconst {
- v_0 := b.Controls[0]
- if auxIntToInt64(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(v_0_0_0.AuxInt)
- x := v_0_0_0.Args[0]
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(c)
- v1.AddArg(x)
- v0.AddArg(v1)
- b.resetWithControl(BlockPPC64NE, v0)
- return true
- }
- // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no)
- // result: (NE (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
- v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(v_0_0_0.AuxInt)
- x := v_0_0_0.Args[0]
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(c)
- v1.AddArg(x)
- v0.AddArg(v1)
- b.resetWithControl(BlockPPC64NE, v0)
- return true
- }
// match: (NE (FlagEQ) yes no)
// result: (First no yes)
for b.Controls[0].Op == OpPPC64FlagEQ {
@@ -16519,8 +16429,8 @@ func rewriteBlockPPC64(b *Block) bool {
b.resetWithControl(BlockPPC64NE, cmp)
return true
}
- // match: (NE (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no)
- // result: (NE (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+ // match: (NE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (NE (Select1 <types.TypeFlags> z) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 0 {
@@ -16530,22 +16440,17 @@ func rewriteBlockPPC64(b *Block) bool {
if v_0_0.Op != OpSelect0 {
break
}
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst {
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
break
}
- c := auxIntToInt64(v_0_0_0.AuxInt)
- x := v_0_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(c)
- v1.AddArg(x)
- v0.AddArg(v1)
+ v0.AddArg(z)
b.resetWithControl(BlockPPC64NE, v0)
return true
}
- // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no)
- // result: (NE (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+ // match: (NE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (NE (Select1 <types.TypeFlags> z) yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
if auxIntToInt32(v_0.AuxInt) != 0 {
@@ -16555,17 +16460,12 @@ func rewriteBlockPPC64(b *Block) bool {
if v_0_0.Op != OpSelect0 {
break
}
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpPPC64ANDCCconst {
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
break
}
- c := auxIntToInt64(v_0_0_0.AuxInt)
- x := v_0_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(c)
- v1.AddArg(x)
- v0.AddArg(v1)
+ v0.AddArg(z)
b.resetWithControl(BlockPPC64NE, v0)
return true
}
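Two recurring simplifications stand out in the PPC64 diff above: the branch rewrites now reuse the existing ANDCCconst value (the z: binding) instead of rebuilding an identical one, and the Bswap16/Bswap32 and BRH/BRW rules drop the MOVHZreg/MOVWZreg wrapper around the byte-reversing indexed load. The latter is sound assuming, as the rule relies on, that the byte-reversing load already leaves the upper register bits clear; a byte-swapped N-bit value is still an N-bit value, so the extra zero-extension changed nothing. A plain-Go illustration of that invariant (not compiler code):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	v := uint16(0x12ab)
	r := bits.ReverseBytes16(v) // 0xab12: byte-swapping a 16-bit value yields a 16-bit value
	// Zero-extending r cannot change it, which is why wrapping the
	// byte-reversed load in an extra zero-extension was redundant.
	fmt.Printf("%#x %#x\n", r, uint64(r))
}
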
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64latelower.go b/src/cmd/compile/internal/ssa/rewritePPC64latelower.go
index 56acbe403b..771dd6aaa2 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64latelower.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64latelower.go
@@ -3,15 +3,501 @@
package ssa
import "internal/buildcfg"
+import "cmd/compile/internal/types"
func rewriteValuePPC64latelower(v *Value) bool {
switch v.Op {
+ case OpPPC64ADD:
+ return rewriteValuePPC64latelower_OpPPC64ADD(v)
+ case OpPPC64AND:
+ return rewriteValuePPC64latelower_OpPPC64AND(v)
+ case OpPPC64CMPconst:
+ return rewriteValuePPC64latelower_OpPPC64CMPconst(v)
case OpPPC64ISEL:
return rewriteValuePPC64latelower_OpPPC64ISEL(v)
+ case OpPPC64RLDICL:
+ return rewriteValuePPC64latelower_OpPPC64RLDICL(v)
case OpPPC64SETBC:
return rewriteValuePPC64latelower_OpPPC64SETBC(v)
case OpPPC64SETBCR:
return rewriteValuePPC64latelower_OpPPC64SETBCR(v)
+ case OpSelect0:
+ return rewriteValuePPC64latelower_OpSelect0(v)
+ }
+ return false
+}
+func rewriteValuePPC64latelower_OpPPC64ADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADD (MOVDconst [m]) x)
+ // cond: supportsPPC64PCRel() && (m<<30)>>30 == m
+ // result: (ADDconst [m] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(supportsPPC64PCRel() && (m<<30)>>30 == m) {
+ continue
+ }
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = int64ToAuxInt(m)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64latelower_OpPPC64AND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AND <t> x:(MOVDconst [m]) n)
+ // cond: t.Size() <= 2
+ // result: (Select0 (ANDCCconst [int64(int16(m))] n))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if x.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(x.AuxInt)
+ n := v_1
+ if !(t.Size() <= 2) {
+ continue
+ }
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v0.AuxInt = int64ToAuxInt(int64(int16(m)))
+ v0.AddArg(n)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (AND x:(MOVDconst [m]) n)
+ // cond: isPPC64ValidShiftMask(m)
+ // result: (RLDICL [encodePPC64RotateMask(0,m,64)] n)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if x.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(x.AuxInt)
+ n := v_1
+ if !(isPPC64ValidShiftMask(m)) {
+ continue
+ }
+ v.reset(OpPPC64RLDICL)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 64))
+ v.AddArg(n)
+ return true
+ }
+ break
+ }
+ // match: (AND x:(MOVDconst [m]) n)
+ // cond: m != 0 && isPPC64ValidShiftMask(^m)
+ // result: (RLDICR [encodePPC64RotateMask(0,m,64)] n)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if x.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(x.AuxInt)
+ n := v_1
+ if !(m != 0 && isPPC64ValidShiftMask(^m)) {
+ continue
+ }
+ v.reset(OpPPC64RLDICR)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 64))
+ v.AddArg(n)
+ return true
+ }
+ break
+ }
+ // match: (AND <t> x:(MOVDconst [m]) n)
+ // cond: t.Size() == 4 && isPPC64WordRotateMask(m)
+ // result: (RLWINM [encodePPC64RotateMask(0,m,32)] n)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if x.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(x.AuxInt)
+ n := v_1
+ if !(t.Size() == 4 && isPPC64WordRotateMask(m)) {
+ continue
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32))
+ v.AddArg(n)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64latelower_OpPPC64CMPconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPconst [0] z:(ADD x y))
+ // cond: v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64ADD {
+ break
+ }
+ if !(v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
+ // match: (CMPconst [0] z:(AND x y))
+ // cond: v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64AND {
+ break
+ }
+ if !(v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
+ // match: (CMPconst [0] z:(ANDN x y))
+ // cond: v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64ANDN {
+ break
+ }
+ if !(v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
+ // match: (CMPconst [0] z:(OR x y))
+ // cond: v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64OR {
+ break
+ }
+ if !(v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
+ // match: (CMPconst [0] z:(SUB x y))
+ // cond: v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64SUB {
+ break
+ }
+ if !(v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
+ // match: (CMPconst [0] z:(NOR x y))
+ // cond: v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64NOR {
+ break
+ }
+ if !(v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
+ // match: (CMPconst [0] z:(XOR x y))
+ // cond: v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64XOR {
+ break
+ }
+ if !(v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
+ // match: (CMPconst [0] z:(NEG x))
+ // cond: v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64NEG {
+ break
+ }
+ if !(v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
+ // match: (CMPconst [0] z:(CNTLZD x))
+ // cond: v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64CNTLZD {
+ break
+ }
+ if !(v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
+ // match: (CMPconst [0] z:(ADDconst [c] x))
+ // cond: int64(int16(c)) == c && v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64ADDconst {
+ break
+ }
+ c := auxIntToInt64(z.AuxInt)
+ if !(int64(int16(c)) == c && v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
+ // match: (CMPconst <t> [0] (Select0 z:(ADDCC x y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64ADDCC {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMPconst <t> [0] (Select0 z:(ANDCC x y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64ANDCC {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMPconst <t> [0] (Select0 z:(ANDNCC x y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64ANDNCC {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMPconst <t> [0] (Select0 z:(ORCC x y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64ORCC {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMPconst <t> [0] (Select0 z:(SUBCC x y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64SUBCC {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMPconst <t> [0] (Select0 z:(NORCC x y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64NORCC {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMPconst <t> [0] (Select0 z:(XORCC x y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64XORCC {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMPconst <t> [0] (Select0 z:(ADDCCconst y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64ADDCCconst {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMPconst <t> [0] (Select0 z:(NEGCC y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64NEGCC {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMPconst <t> [0] (Select0 z:(CNTLZDCC y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64CNTLZDCC {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
}
return false
}
@@ -49,6 +535,29 @@ func rewriteValuePPC64latelower_OpPPC64ISEL(v *Value) bool {
}
return false
}
+func rewriteValuePPC64latelower_OpPPC64RLDICL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (RLDICL [em] x:(SRDconst [s] a))
+ // cond: (em&0xFF0000) == 0
+ // result: (RLDICL [mergePPC64RLDICLandSRDconst(em, s)] a)
+ for {
+ em := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if x.Op != OpPPC64SRDconst {
+ break
+ }
+ s := auxIntToInt64(x.AuxInt)
+ a := x.Args[0]
+ if !((em & 0xFF0000) == 0) {
+ break
+ }
+ v.reset(OpPPC64RLDICL)
+ v.AuxInt = int64ToAuxInt(mergePPC64RLDICLandSRDconst(em, s))
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
func rewriteValuePPC64latelower_OpPPC64SETBC(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
@@ -169,6 +678,28 @@ func rewriteValuePPC64latelower_OpPPC64SETBCR(v *Value) bool {
}
return false
}
+func rewriteValuePPC64latelower_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Select0 z:(ANDCCconst [m] x))
+ // cond: z.Uses == 1 && isPPC64ValidShiftMask(m)
+ // result: (RLDICL [encodePPC64RotateMask(0,m,64)] x)
+ for {
+ z := v_0
+ if z.Op != OpPPC64ANDCCconst {
+ break
+ }
+ m := auxIntToInt64(z.AuxInt)
+ x := z.Args[0]
+ if !(z.Uses == 1 && isPPC64ValidShiftMask(m)) {
+ break
+ }
+ v.reset(OpPPC64RLDICL)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 64))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteBlockPPC64latelower(b *Block) bool {
return false
}
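The new PPC64 late-lowering rules above fold an explicit CMPconst [0] of an ALU result into the CC-setting form of the same operation (via convertPPC64OpToOpCC), and turn CMPconst [0] of Select0 of an existing *CC value into Select1 of that value, so the comparison simply reads the condition-register output the operation already produces. A rough scalar analogue of the Select0/Select1 split, with illustrative names rather than anything from the compiler:

package main

import "fmt"

// addCC stands in for a CC-setting op: Select0 is the arithmetic result and
// Select1 is the record of comparing that result against zero.
func addCC(x, y int64) (res int64, isZero bool) {
	res = x + y
	return res, res == 0
}

func main() {
	// Instead of computing x+y and then issuing a separate compare with 0,
	// the flag is taken directly from the CC-setting operation.
	_, zero := addCC(2, -2)
	fmt.Println(zero) // true
}
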
diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
index ffbeb1df47..52ddca1c7d 100644
--- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go
+++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
@@ -132,9 +132,6 @@ func rewriteValueRISCV64(v *Value) bool {
return rewriteValueRISCV64_OpConstBool(v)
case OpConstNil:
return rewriteValueRISCV64_OpConstNil(v)
- case OpConvert:
- v.Op = OpRISCV64MOVconvert
- return true
case OpCopysign:
v.Op = OpRISCV64FSGNJD
return true
@@ -432,6 +429,9 @@ func rewriteValueRISCV64(v *Value) bool {
return true
case OpPanicBounds:
return rewriteValueRISCV64_OpPanicBounds(v)
+ case OpPubBarrier:
+ v.Op = OpRISCV64LoweredPubBarrier
+ return true
case OpRISCV64ADD:
return rewriteValueRISCV64_OpRISCV64ADD(v)
case OpRISCV64ADDI:
@@ -440,14 +440,30 @@ func rewriteValueRISCV64(v *Value) bool {
return rewriteValueRISCV64_OpRISCV64AND(v)
case OpRISCV64ANDI:
return rewriteValueRISCV64_OpRISCV64ANDI(v)
+ case OpRISCV64FADDD:
+ return rewriteValueRISCV64_OpRISCV64FADDD(v)
+ case OpRISCV64FADDS:
+ return rewriteValueRISCV64_OpRISCV64FADDS(v)
case OpRISCV64FMADDD:
return rewriteValueRISCV64_OpRISCV64FMADDD(v)
+ case OpRISCV64FMADDS:
+ return rewriteValueRISCV64_OpRISCV64FMADDS(v)
case OpRISCV64FMSUBD:
return rewriteValueRISCV64_OpRISCV64FMSUBD(v)
+ case OpRISCV64FMSUBS:
+ return rewriteValueRISCV64_OpRISCV64FMSUBS(v)
case OpRISCV64FNMADDD:
return rewriteValueRISCV64_OpRISCV64FNMADDD(v)
+ case OpRISCV64FNMADDS:
+ return rewriteValueRISCV64_OpRISCV64FNMADDS(v)
case OpRISCV64FNMSUBD:
return rewriteValueRISCV64_OpRISCV64FNMSUBD(v)
+ case OpRISCV64FNMSUBS:
+ return rewriteValueRISCV64_OpRISCV64FNMSUBS(v)
+ case OpRISCV64FSUBD:
+ return rewriteValueRISCV64_OpRISCV64FSUBD(v)
+ case OpRISCV64FSUBS:
+ return rewriteValueRISCV64_OpRISCV64FSUBS(v)
case OpRISCV64MOVBUload:
return rewriteValueRISCV64_OpRISCV64MOVBUload(v)
case OpRISCV64MOVBUreg:
@@ -522,10 +538,14 @@ func rewriteValueRISCV64(v *Value) bool {
return rewriteValueRISCV64_OpRISCV64SRA(v)
case OpRISCV64SRAI:
return rewriteValueRISCV64_OpRISCV64SRAI(v)
+ case OpRISCV64SRAW:
+ return rewriteValueRISCV64_OpRISCV64SRAW(v)
case OpRISCV64SRL:
return rewriteValueRISCV64_OpRISCV64SRL(v)
case OpRISCV64SRLI:
return rewriteValueRISCV64_OpRISCV64SRLI(v)
+ case OpRISCV64SRLW:
+ return rewriteValueRISCV64_OpRISCV64SRLW(v)
case OpRISCV64SUB:
return rewriteValueRISCV64_OpRISCV64SUB(v)
case OpRISCV64SUBW:
@@ -541,10 +561,10 @@ func rewriteValueRISCV64(v *Value) bool {
case OpRotateLeft8:
return rewriteValueRISCV64_OpRotateLeft8(v)
case OpRound32F:
- v.Op = OpCopy
+ v.Op = OpRISCV64LoweredRound32F
return true
case OpRound64F:
- v.Op = OpCopy
+ v.Op = OpRISCV64LoweredRound64F
return true
case OpRsh16Ux16:
return rewriteValueRISCV64_OpRsh16Ux16(v)
@@ -1081,20 +1101,50 @@ func rewriteValueRISCV64_OpEq32(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types
// match: (Eq32 x y)
+ // cond: x.Type.IsSigned()
+ // result: (SEQZ (SUB <x.Type> (SignExt32to64 x) (SignExt32to64 y)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ y := v_1
+ if !(x.Type.IsSigned()) {
+ continue
+ }
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Eq32 x y)
+ // cond: !x.Type.IsSigned()
// result: (SEQZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
for {
- x := v_0
- y := v_1
- v.reset(OpRISCV64SEQZ)
- v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v1.AddArg(x)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v2.AddArg(y)
- v0.AddArg2(v1, v2)
- v.AddArg(v0)
- return true
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ y := v_1
+ if !(!x.Type.IsSigned()) {
+ continue
+ }
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+ break
}
+ return false
}
func rewriteValueRISCV64_OpEq64(v *Value) bool {
v_1 := v.Args[1]
@@ -2942,17 +2992,13 @@ func rewriteValueRISCV64_OpNeq16(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types
// match: (Neq16 x y)
- // result: (SNEZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ // result: (Not (Eq16 x y))
for {
x := v_0
y := v_1
- v.reset(OpRISCV64SNEZ)
- v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
- v1.AddArg(x)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
- v2.AddArg(y)
- v0.AddArg2(v1, v2)
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpEq16, typ.Bool)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -2963,17 +3009,13 @@ func rewriteValueRISCV64_OpNeq32(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types
// match: (Neq32 x y)
- // result: (SNEZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ // result: (Not (Eq32 x y))
for {
x := v_0
y := v_1
- v.reset(OpRISCV64SNEZ)
- v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v1.AddArg(x)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v2.AddArg(y)
- v0.AddArg2(v1, v2)
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -2982,13 +3024,14 @@ func rewriteValueRISCV64_OpNeq64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
+ typ := &b.Func.Config.Types
// match: (Neq64 x y)
- // result: (SNEZ (SUB <x.Type> x y))
+ // result: (Not (Eq64 x y))
for {
x := v_0
y := v_1
- v.reset(OpRISCV64SNEZ)
- v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
@@ -3000,17 +3043,13 @@ func rewriteValueRISCV64_OpNeq8(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types
// match: (Neq8 x y)
- // result: (SNEZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ // result: (Not (Eq8 x y))
for {
x := v_0
y := v_1
- v.reset(OpRISCV64SNEZ)
- v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
- v1.AddArg(x)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
- v2.AddArg(y)
- v0.AddArg2(v1, v2)
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpEq8, typ.Bool)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
@@ -3038,12 +3077,12 @@ func rewriteValueRISCV64_OpNeqPtr(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types
// match: (NeqPtr x y)
- // result: (SNEZ (SUB <typ.Uintptr> x y))
+ // result: (Not (EqPtr x y))
for {
x := v_0
y := v_1
- v.reset(OpRISCV64SNEZ)
- v0 := b.NewValue0(v.Pos, OpRISCV64SUB, typ.Uintptr)
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpEqPtr, typ.Bool)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
@@ -3316,6 +3355,56 @@ func rewriteValueRISCV64_OpRISCV64ANDI(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64FADDD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FADDD a (FMULD x y))
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FMADDD x y a)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpRISCV64FMULD {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Block.Func.useFMA(v)) {
+ continue
+ }
+ v.reset(OpRISCV64FMADDD)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FADDS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FADDS a (FMULS x y))
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FMADDS x y a)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpRISCV64FMULS {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Block.Func.useFMA(v)) {
+ continue
+ }
+ v.reset(OpRISCV64FMADDS)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ break
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64FMADDD(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
@@ -3361,6 +3450,51 @@ func rewriteValueRISCV64_OpRISCV64FMADDD(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64FMADDS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMADDS neg:(FNEGS x) y z)
+ // cond: neg.Uses == 1
+ // result: (FNMSUBS x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ neg := v_0
+ if neg.Op != OpRISCV64FNEGS {
+ continue
+ }
+ x := neg.Args[0]
+ y := v_1
+ z := v_2
+ if !(neg.Uses == 1) {
+ continue
+ }
+ v.reset(OpRISCV64FNMSUBS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (FMADDS x y neg:(FNEGS z))
+ // cond: neg.Uses == 1
+ // result: (FMSUBS x y z)
+ for {
+ x := v_0
+ y := v_1
+ neg := v_2
+ if neg.Op != OpRISCV64FNEGS {
+ break
+ }
+ z := neg.Args[0]
+ if !(neg.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64FMSUBS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64FMSUBD(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
@@ -3406,6 +3540,51 @@ func rewriteValueRISCV64_OpRISCV64FMSUBD(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64FMSUBS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMSUBS neg:(FNEGS x) y z)
+ // cond: neg.Uses == 1
+ // result: (FNMADDS x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ neg := v_0
+ if neg.Op != OpRISCV64FNEGS {
+ continue
+ }
+ x := neg.Args[0]
+ y := v_1
+ z := v_2
+ if !(neg.Uses == 1) {
+ continue
+ }
+ v.reset(OpRISCV64FNMADDS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (FMSUBS x y neg:(FNEGS z))
+ // cond: neg.Uses == 1
+ // result: (FMADDS x y z)
+ for {
+ x := v_0
+ y := v_1
+ neg := v_2
+ if neg.Op != OpRISCV64FNEGS {
+ break
+ }
+ z := neg.Args[0]
+ if !(neg.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64FMADDS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64FNMADDD(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
@@ -3451,6 +3630,51 @@ func rewriteValueRISCV64_OpRISCV64FNMADDD(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64FNMADDS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FNMADDS neg:(FNEGS x) y z)
+ // cond: neg.Uses == 1
+ // result: (FMSUBS x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ neg := v_0
+ if neg.Op != OpRISCV64FNEGS {
+ continue
+ }
+ x := neg.Args[0]
+ y := v_1
+ z := v_2
+ if !(neg.Uses == 1) {
+ continue
+ }
+ v.reset(OpRISCV64FMSUBS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (FNMADDS x y neg:(FNEGS z))
+ // cond: neg.Uses == 1
+ // result: (FNMSUBS x y z)
+ for {
+ x := v_0
+ y := v_1
+ neg := v_2
+ if neg.Op != OpRISCV64FNEGS {
+ break
+ }
+ z := neg.Args[0]
+ if !(neg.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64FNMSUBS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64FNMSUBD(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
@@ -3496,6 +3720,129 @@ func rewriteValueRISCV64_OpRISCV64FNMSUBD(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64FNMSUBS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FNMSUBS neg:(FNEGS x) y z)
+ // cond: neg.Uses == 1
+ // result: (FMADDS x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ neg := v_0
+ if neg.Op != OpRISCV64FNEGS {
+ continue
+ }
+ x := neg.Args[0]
+ y := v_1
+ z := v_2
+ if !(neg.Uses == 1) {
+ continue
+ }
+ v.reset(OpRISCV64FMADDS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (FNMSUBS x y neg:(FNEGS z))
+ // cond: neg.Uses == 1
+ // result: (FNMADDS x y z)
+ for {
+ x := v_0
+ y := v_1
+ neg := v_2
+ if neg.Op != OpRISCV64FNEGS {
+ break
+ }
+ z := neg.Args[0]
+ if !(neg.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64FNMADDS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FSUBD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FSUBD a (FMULD x y))
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FNMSUBD x y a)
+ for {
+ a := v_0
+ if v_1.Op != OpRISCV64FMULD {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Block.Func.useFMA(v)) {
+ break
+ }
+ v.reset(OpRISCV64FNMSUBD)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ // match: (FSUBD (FMULD x y) a)
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FMSUBD x y a)
+ for {
+ if v_0.Op != OpRISCV64FMULD {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ if !(a.Block.Func.useFMA(v)) {
+ break
+ }
+ v.reset(OpRISCV64FMSUBD)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FSUBS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FSUBS a (FMULS x y))
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FNMSUBS x y a)
+ for {
+ a := v_0
+ if v_1.Op != OpRISCV64FMULS {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Block.Func.useFMA(v)) {
+ break
+ }
+ v.reset(OpRISCV64FNMSUBS)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ // match: (FSUBS (FMULS x y) a)
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FMSUBS x y a)
+ for {
+ if v_0.Op != OpRISCV64FMULS {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ if !(a.Block.Func.useFMA(v)) {
+ break
+ }
+ v.reset(OpRISCV64FMSUBS)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64MOVBUload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -5930,6 +6277,85 @@ func rewriteValueRISCV64_OpRISCV64SRA(v *Value) bool {
}
func rewriteValueRISCV64_OpRISCV64SRAI(v *Value) bool {
v_0 := v.Args[0]
+ b := v.Block
+ // match: (SRAI <t> [x] (MOVWreg y))
+ // cond: x >= 0 && x <= 31
+ // result: (SRAIW <t> [int64(x)] y)
+ for {
+ t := v.Type
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVWreg {
+ break
+ }
+ y := v_0.Args[0]
+ if !(x >= 0 && x <= 31) {
+ break
+ }
+ v.reset(OpRISCV64SRAIW)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(int64(x))
+ v.AddArg(y)
+ return true
+ }
+ // match: (SRAI <t> [x] (MOVBreg y))
+ // cond: x >= 8
+ // result: (SRAI [63] (SLLI <t> [56] y))
+ for {
+ t := v.Type
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVBreg {
+ break
+ }
+ y := v_0.Args[0]
+ if !(x >= 8) {
+ break
+ }
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = int64ToAuxInt(56)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SRAI <t> [x] (MOVHreg y))
+ // cond: x >= 16
+ // result: (SRAI [63] (SLLI <t> [48] y))
+ for {
+ t := v.Type
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVHreg {
+ break
+ }
+ y := v_0.Args[0]
+ if !(x >= 16) {
+ break
+ }
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = int64ToAuxInt(48)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SRAI <t> [x] (MOVWreg y))
+ // cond: x >= 32
+ // result: (SRAIW [31] y)
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVWreg {
+ break
+ }
+ y := v_0.Args[0]
+ if !(x >= 32) {
+ break
+ }
+ v.reset(OpRISCV64SRAIW)
+ v.AuxInt = int64ToAuxInt(31)
+ v.AddArg(y)
+ return true
+ }
// match: (SRAI [x] (MOVDconst [y]))
// result: (MOVDconst [int64(y) >> uint32(x)])
for {
@@ -5944,6 +6370,24 @@ func rewriteValueRISCV64_OpRISCV64SRAI(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64SRAW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRAW x (MOVDconst [val]))
+ // result: (SRAIW [int64(val&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ val := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpRISCV64SRAIW)
+ v.AuxInt = int64ToAuxInt(int64(val & 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64SRL(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -5964,6 +6408,76 @@ func rewriteValueRISCV64_OpRISCV64SRL(v *Value) bool {
}
func rewriteValueRISCV64_OpRISCV64SRLI(v *Value) bool {
v_0 := v.Args[0]
+ // match: (SRLI <t> [x] (MOVWUreg y))
+ // cond: x >= 0 && x <= 31
+ // result: (SRLIW <t> [int64(x)] y)
+ for {
+ t := v.Type
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ y := v_0.Args[0]
+ if !(x >= 0 && x <= 31) {
+ break
+ }
+ v.reset(OpRISCV64SRLIW)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(int64(x))
+ v.AddArg(y)
+ return true
+ }
+ // match: (SRLI <t> [x] (MOVBUreg y))
+ // cond: x >= 8
+ // result: (MOVDconst <t> [0])
+ for {
+ t := v.Type
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ if !(x >= 8) {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRLI <t> [x] (MOVHUreg y))
+ // cond: x >= 16
+ // result: (MOVDconst <t> [0])
+ for {
+ t := v.Type
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ if !(x >= 16) {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRLI <t> [x] (MOVWUreg y))
+ // cond: x >= 32
+ // result: (MOVDconst <t> [0])
+ for {
+ t := v.Type
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ if !(x >= 32) {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
// match: (SRLI [x] (MOVDconst [y]))
// result: (MOVDconst [int64(uint64(y) >> uint32(x))])
for {
@@ -5978,6 +6492,24 @@ func rewriteValueRISCV64_OpRISCV64SRLI(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64SRLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRLW x (MOVDconst [val]))
+ // result: (SRLIW [int64(val&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ val := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpRISCV64SRLIW)
+ v.AuxInt = int64ToAuxInt(int64(val & 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64SUB(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -6594,7 +7126,7 @@ func rewriteValueRISCV64_OpRsh32Ux16(v *Value) bool {
typ := &b.Func.Config.Types
// match: (Rsh32Ux16 <t> x y)
// cond: !shiftIsBounded(v)
- // result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ // result: (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt16to64 y))))
for {
t := v.Type
x := v_0
@@ -6603,33 +7135,29 @@ func rewriteValueRISCV64_OpRsh32Ux16(v *Value) bool {
break
}
v.reset(OpRISCV64AND)
- v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v1.AddArg(x)
- v0.AddArg2(v1, y)
- v2 := b.NewValue0(v.Pos, OpNeg32, t)
- v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v3.AuxInt = int64ToAuxInt(64)
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
- v4.AddArg(y)
- v3.AddArg(v4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
v2.AddArg(v3)
- v.AddArg2(v0, v2)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh32Ux16 x y)
// cond: shiftIsBounded(v)
- // result: (SRL (ZeroExt32to64 x) y)
+ // result: (SRLW x y)
for {
x := v_0
y := v_1
if !(shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRL)
- v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v0.AddArg(x)
- v.AddArg2(v0, y)
+ v.reset(OpRISCV64SRLW)
+ v.AddArg2(x, y)
return true
}
return false
@@ -6641,7 +7169,7 @@ func rewriteValueRISCV64_OpRsh32Ux32(v *Value) bool {
typ := &b.Func.Config.Types
// match: (Rsh32Ux32 <t> x y)
// cond: !shiftIsBounded(v)
- // result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ // result: (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt32to64 y))))
for {
t := v.Type
x := v_0
@@ -6650,33 +7178,29 @@ func rewriteValueRISCV64_OpRsh32Ux32(v *Value) bool {
break
}
v.reset(OpRISCV64AND)
- v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v1.AddArg(x)
- v0.AddArg2(v1, y)
- v2 := b.NewValue0(v.Pos, OpNeg32, t)
- v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v3.AuxInt = int64ToAuxInt(64)
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v4.AddArg(y)
- v3.AddArg(v4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
v2.AddArg(v3)
- v.AddArg2(v0, v2)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh32Ux32 x y)
// cond: shiftIsBounded(v)
- // result: (SRL (ZeroExt32to64 x) y)
+ // result: (SRLW x y)
for {
x := v_0
y := v_1
if !(shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRL)
- v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v0.AddArg(x)
- v.AddArg2(v0, y)
+ v.reset(OpRISCV64SRLW)
+ v.AddArg2(x, y)
return true
}
return false
@@ -6685,10 +7209,9 @@ func rewriteValueRISCV64_OpRsh32Ux64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- typ := &b.Func.Config.Types
// match: (Rsh32Ux64 <t> x y)
// cond: !shiftIsBounded(v)
- // result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] y)))
+ // result: (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] y)))
for {
t := v.Type
x := v_0
@@ -6697,31 +7220,27 @@ func rewriteValueRISCV64_OpRsh32Ux64(v *Value) bool {
break
}
v.reset(OpRISCV64AND)
- v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v1.AddArg(x)
- v0.AddArg2(v1, y)
- v2 := b.NewValue0(v.Pos, OpNeg32, t)
- v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v3.AuxInt = int64ToAuxInt(64)
- v3.AddArg(y)
- v2.AddArg(v3)
- v.AddArg2(v0, v2)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh32Ux64 x y)
// cond: shiftIsBounded(v)
- // result: (SRL (ZeroExt32to64 x) y)
+ // result: (SRLW x y)
for {
x := v_0
y := v_1
if !(shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRL)
- v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v0.AddArg(x)
- v.AddArg2(v0, y)
+ v.reset(OpRISCV64SRLW)
+ v.AddArg2(x, y)
return true
}
return false
@@ -6733,7 +7252,7 @@ func rewriteValueRISCV64_OpRsh32Ux8(v *Value) bool {
typ := &b.Func.Config.Types
// match: (Rsh32Ux8 <t> x y)
// cond: !shiftIsBounded(v)
- // result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ // result: (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt8to64 y))))
for {
t := v.Type
x := v_0
@@ -6742,33 +7261,29 @@ func rewriteValueRISCV64_OpRsh32Ux8(v *Value) bool {
break
}
v.reset(OpRISCV64AND)
- v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v1.AddArg(x)
- v0.AddArg2(v1, y)
- v2 := b.NewValue0(v.Pos, OpNeg32, t)
- v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v3.AuxInt = int64ToAuxInt(64)
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
- v4.AddArg(y)
- v3.AddArg(v4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
v2.AddArg(v3)
- v.AddArg2(v0, v2)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh32Ux8 x y)
// cond: shiftIsBounded(v)
- // result: (SRL (ZeroExt32to64 x) y)
+ // result: (SRLW x y)
for {
x := v_0
y := v_1
if !(shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRL)
- v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v0.AddArg(x)
- v.AddArg2(v0, y)
+ v.reset(OpRISCV64SRLW)
+ v.AddArg2(x, y)
return true
}
return false
@@ -6780,7 +7295,7 @@ func rewriteValueRISCV64_OpRsh32x16(v *Value) bool {
typ := &b.Func.Config.Types
// match: (Rsh32x16 <t> x y)
// cond: !shiftIsBounded(v)
- // result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+ // result: (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt16to64 y)))))
for {
t := v.Type
x := v_0
@@ -6788,36 +7303,32 @@ func rewriteValueRISCV64_OpRsh32x16(v *Value) bool {
if !(!shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRA)
+ v.reset(OpRISCV64SRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
- v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v2.AuxInt = int64ToAuxInt(-1)
- v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v3.AuxInt = int64ToAuxInt(64)
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
- v4.AddArg(y)
- v3.AddArg(v4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
v2.AddArg(v3)
- v1.AddArg2(y, v2)
- v.AddArg2(v0, v1)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh32x16 x y)
// cond: shiftIsBounded(v)
- // result: (SRA (SignExt32to64 x) y)
+ // result: (SRAW x y)
for {
x := v_0
y := v_1
if !(shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
- v0.AddArg(x)
- v.AddArg2(v0, y)
+ v.reset(OpRISCV64SRAW)
+ v.AddArg2(x, y)
return true
}
return false
@@ -6829,7 +7340,7 @@ func rewriteValueRISCV64_OpRsh32x32(v *Value) bool {
typ := &b.Func.Config.Types
// match: (Rsh32x32 <t> x y)
// cond: !shiftIsBounded(v)
- // result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+ // result: (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt32to64 y)))))
for {
t := v.Type
x := v_0
@@ -6837,36 +7348,32 @@ func rewriteValueRISCV64_OpRsh32x32(v *Value) bool {
if !(!shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRA)
+ v.reset(OpRISCV64SRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
- v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v2.AuxInt = int64ToAuxInt(-1)
- v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v3.AuxInt = int64ToAuxInt(64)
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v4.AddArg(y)
- v3.AddArg(v4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
v2.AddArg(v3)
- v1.AddArg2(y, v2)
- v.AddArg2(v0, v1)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh32x32 x y)
// cond: shiftIsBounded(v)
- // result: (SRA (SignExt32to64 x) y)
+ // result: (SRAW x y)
for {
x := v_0
y := v_1
if !(shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
- v0.AddArg(x)
- v.AddArg2(v0, y)
+ v.reset(OpRISCV64SRAW)
+ v.AddArg2(x, y)
return true
}
return false
@@ -6875,10 +7382,9 @@ func rewriteValueRISCV64_OpRsh32x64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- typ := &b.Func.Config.Types
// match: (Rsh32x64 <t> x y)
// cond: !shiftIsBounded(v)
- // result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+ // result: (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] y))))
for {
t := v.Type
x := v_0
@@ -6886,34 +7392,30 @@ func rewriteValueRISCV64_OpRsh32x64(v *Value) bool {
if !(!shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRA)
+ v.reset(OpRISCV64SRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
- v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v2.AuxInt = int64ToAuxInt(-1)
- v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v3.AuxInt = int64ToAuxInt(64)
- v3.AddArg(y)
- v2.AddArg(v3)
- v1.AddArg2(y, v2)
- v.AddArg2(v0, v1)
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh32x64 x y)
// cond: shiftIsBounded(v)
- // result: (SRA (SignExt32to64 x) y)
+ // result: (SRAW x y)
for {
x := v_0
y := v_1
if !(shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
- v0.AddArg(x)
- v.AddArg2(v0, y)
+ v.reset(OpRISCV64SRAW)
+ v.AddArg2(x, y)
return true
}
return false
@@ -6925,7 +7427,7 @@ func rewriteValueRISCV64_OpRsh32x8(v *Value) bool {
typ := &b.Func.Config.Types
// match: (Rsh32x8 <t> x y)
// cond: !shiftIsBounded(v)
- // result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+ // result: (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt8to64 y)))))
for {
t := v.Type
x := v_0
@@ -6933,36 +7435,32 @@ func rewriteValueRISCV64_OpRsh32x8(v *Value) bool {
if !(!shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRA)
+ v.reset(OpRISCV64SRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
- v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v2.AuxInt = int64ToAuxInt(-1)
- v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v3.AuxInt = int64ToAuxInt(64)
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
- v4.AddArg(y)
- v3.AddArg(v4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
v2.AddArg(v3)
- v1.AddArg2(y, v2)
- v.AddArg2(v0, v1)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh32x8 x y)
// cond: shiftIsBounded(v)
- // result: (SRA (SignExt32to64 x) y)
+ // result: (SRAW x y)
for {
x := v_0
y := v_1
if !(shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
- v0.AddArg(x)
- v.AddArg2(v0, y)
+ v.reset(OpRISCV64SRAW)
+ v.AddArg2(x, y)
return true
}
return false
diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go
index a3d621898f..c2342c944d 100644
--- a/src/cmd/compile/internal/ssa/rewriteS390X.go
+++ b/src/cmd/compile/internal/ssa/rewriteS390X.go
@@ -9060,7 +9060,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool {
return true
}
// match: (MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem))
- // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)
+ // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)
// result: (STMG2 [i-8] {s} p w0 w1 mem)
for {
i := auxIntToInt32(v.AuxInt)
@@ -9076,7 +9076,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool {
break
}
w0 := x.Args[1]
- if !(p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)) {
+ if !(p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)) {
break
}
v.reset(OpS390XSTMG2)
@@ -9086,7 +9086,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool {
return true
}
// match: (MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem))
- // cond: x.Uses == 1 && is20Bit(int64(i)-16) && clobber(x)
+ // cond: x.Uses == 1 && is20Bit(int64(i)-16) && setPos(v, x.Pos) && clobber(x)
// result: (STMG3 [i-16] {s} p w0 w1 w2 mem)
for {
i := auxIntToInt32(v.AuxInt)
@@ -9103,7 +9103,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool {
}
w0 := x.Args[1]
w1 := x.Args[2]
- if !(x.Uses == 1 && is20Bit(int64(i)-16) && clobber(x)) {
+ if !(x.Uses == 1 && is20Bit(int64(i)-16) && setPos(v, x.Pos) && clobber(x)) {
break
}
v.reset(OpS390XSTMG3)
@@ -9113,7 +9113,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool {
return true
}
// match: (MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem))
- // cond: x.Uses == 1 && is20Bit(int64(i)-24) && clobber(x)
+ // cond: x.Uses == 1 && is20Bit(int64(i)-24) && setPos(v, x.Pos) && clobber(x)
// result: (STMG4 [i-24] {s} p w0 w1 w2 w3 mem)
for {
i := auxIntToInt32(v.AuxInt)
@@ -9131,7 +9131,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool {
w0 := x.Args[1]
w1 := x.Args[2]
w2 := x.Args[3]
- if !(x.Uses == 1 && is20Bit(int64(i)-24) && clobber(x)) {
+ if !(x.Uses == 1 && is20Bit(int64(i)-24) && setPos(v, x.Pos) && clobber(x)) {
break
}
v.reset(OpS390XSTMG4)
@@ -10595,7 +10595,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool {
return true
}
// match: (MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem))
- // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-4) && clobber(x)
+ // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-4) && setPos(v, x.Pos) && clobber(x)
// result: (STM2 [i-4] {s} p w0 w1 mem)
for {
i := auxIntToInt32(v.AuxInt)
@@ -10611,7 +10611,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool {
break
}
w0 := x.Args[1]
- if !(p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-4) && clobber(x)) {
+ if !(p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-4) && setPos(v, x.Pos) && clobber(x)) {
break
}
v.reset(OpS390XSTM2)
@@ -10621,7 +10621,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool {
return true
}
// match: (MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem))
- // cond: x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)
+ // cond: x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)
// result: (STM3 [i-8] {s} p w0 w1 w2 mem)
for {
i := auxIntToInt32(v.AuxInt)
@@ -10638,7 +10638,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool {
}
w0 := x.Args[1]
w1 := x.Args[2]
- if !(x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)) {
+ if !(x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)) {
break
}
v.reset(OpS390XSTM3)
@@ -10648,7 +10648,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool {
return true
}
// match: (MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem))
- // cond: x.Uses == 1 && is20Bit(int64(i)-12) && clobber(x)
+ // cond: x.Uses == 1 && is20Bit(int64(i)-12) && setPos(v, x.Pos) && clobber(x)
// result: (STM4 [i-12] {s} p w0 w1 w2 w3 mem)
for {
i := auxIntToInt32(v.AuxInt)
@@ -10666,7 +10666,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool {
w0 := x.Args[1]
w1 := x.Args[2]
w2 := x.Args[3]
- if !(x.Uses == 1 && is20Bit(int64(i)-12) && clobber(x)) {
+ if !(x.Uses == 1 && is20Bit(int64(i)-12) && setPos(v, x.Pos) && clobber(x)) {
break
}
v.reset(OpS390XSTM4)
@@ -13107,7 +13107,7 @@ func rewriteValueS390X_OpS390XSTM2(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem))
- // cond: x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)
+ // cond: x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)
// result: (STM4 [i-8] {s} p w0 w1 w2 w3 mem)
for {
i := auxIntToInt32(v.AuxInt)
@@ -13125,7 +13125,7 @@ func rewriteValueS390X_OpS390XSTM2(v *Value) bool {
}
w0 := x.Args[1]
w1 := x.Args[2]
- if !(x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)) {
+ if !(x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)) {
break
}
v.reset(OpS390XSTM4)
@@ -13162,7 +13162,7 @@ func rewriteValueS390X_OpS390XSTMG2(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem))
- // cond: x.Uses == 1 && is20Bit(int64(i)-16) && clobber(x)
+ // cond: x.Uses == 1 && is20Bit(int64(i)-16) && setPos(v, x.Pos) && clobber(x)
// result: (STMG4 [i-16] {s} p w0 w1 w2 w3 mem)
for {
i := auxIntToInt32(v.AuxInt)
@@ -13180,7 +13180,7 @@ func rewriteValueS390X_OpS390XSTMG2(v *Value) bool {
}
w0 := x.Args[1]
w1 := x.Args[2]
- if !(x.Uses == 1 && is20Bit(int64(i)-16) && clobber(x)) {
+ if !(x.Uses == 1 && is20Bit(int64(i)-16) && setPos(v, x.Pos) && clobber(x)) {
break
}
v.reset(OpS390XSTMG4)
diff --git a/src/cmd/compile/internal/ssa/rewritedec.go b/src/cmd/compile/internal/ssa/rewritedec.go
index 5c04708b27..3c481adc15 100644
--- a/src/cmd/compile/internal/ssa/rewritedec.go
+++ b/src/cmd/compile/internal/ssa/rewritedec.go
@@ -6,12 +6,18 @@ import "cmd/compile/internal/types"
func rewriteValuedec(v *Value) bool {
switch v.Op {
+ case OpArrayMake1:
+ return rewriteValuedec_OpArrayMake1(v)
+ case OpArraySelect:
+ return rewriteValuedec_OpArraySelect(v)
case OpComplexImag:
return rewriteValuedec_OpComplexImag(v)
case OpComplexReal:
return rewriteValuedec_OpComplexReal(v)
case OpIData:
return rewriteValuedec_OpIData(v)
+ case OpIMake:
+ return rewriteValuedec_OpIMake(v)
case OpITab:
return rewriteValuedec_OpITab(v)
case OpLoad:
@@ -30,11 +36,92 @@ func rewriteValuedec(v *Value) bool {
return rewriteValuedec_OpStringLen(v)
case OpStringPtr:
return rewriteValuedec_OpStringPtr(v)
+ case OpStructMake1:
+ return rewriteValuedec_OpStructMake1(v)
+ case OpStructSelect:
+ return rewriteValuedec_OpStructSelect(v)
+ }
+ return false
+}
+func rewriteValuedec_OpArrayMake1(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ArrayMake1 x)
+ // cond: x.Type.IsPtrShaped()
+ // result: x
+ for {
+ x := v_0
+ if !(x.Type.IsPtrShaped()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpArraySelect(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ArraySelect [0] x)
+ // cond: x.Type.IsPtrShaped()
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ if !(x.Type.IsPtrShaped()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ArraySelect (ArrayMake1 x))
+ // result: x
+ for {
+ if v_0.Op != OpArrayMake1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (ArraySelect [0] (IData x))
+ // result: (IData x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpIData {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpIData)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ArraySelect [i] x:(Load <t> ptr mem))
+ // result: @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.Elem().Size()*i] ptr) mem)
+ for {
+ i := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, v.Type.PtrTo())
+ v1.AuxInt = int64ToAuxInt(t.Elem().Size() * i)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
}
return false
}
func rewriteValuedec_OpComplexImag(v *Value) bool {
v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
// match: (ComplexImag (ComplexMake _ imag ))
// result: imag
for {
@@ -45,10 +132,58 @@ func rewriteValuedec_OpComplexImag(v *Value) bool {
v.copyOf(imag)
return true
}
+ // match: (ComplexImag x:(Load <t> ptr mem))
+ // cond: t.IsComplex() && t.Size() == 8
+ // result: @x.Block (Load <typ.Float32> (OffPtr <typ.Float32Ptr> [4] ptr) mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsComplex() && t.Size() == 8) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Float32)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, typ.Float32Ptr)
+ v1.AuxInt = int64ToAuxInt(4)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (ComplexImag x:(Load <t> ptr mem))
+ // cond: t.IsComplex() && t.Size() == 16
+ // result: @x.Block (Load <typ.Float64> (OffPtr <typ.Float64Ptr> [8] ptr) mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsComplex() && t.Size() == 16) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Float64)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, typ.Float64Ptr)
+ v1.AuxInt = int64ToAuxInt(8)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
return false
}
func rewriteValuedec_OpComplexReal(v *Value) bool {
v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
// match: (ComplexReal (ComplexMake real _ ))
// result: real
for {
@@ -59,10 +194,53 @@ func rewriteValuedec_OpComplexReal(v *Value) bool {
v.copyOf(real)
return true
}
+ // match: (ComplexReal x:(Load <t> ptr mem))
+ // cond: t.IsComplex() && t.Size() == 8
+ // result: @x.Block (Load <typ.Float32> ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsComplex() && t.Size() == 8) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Float32)
+ v.copyOf(v0)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (ComplexReal x:(Load <t> ptr mem))
+ // cond: t.IsComplex() && t.Size() == 16
+ // result: @x.Block (Load <typ.Float64> ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsComplex() && t.Size() == 16) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Float64)
+ v.copyOf(v0)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
return false
}
func rewriteValuedec_OpIData(v *Value) bool {
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
// match: (IData (IMake _ data))
// result: data
for {
@@ -73,10 +251,52 @@ func rewriteValuedec_OpIData(v *Value) bool {
v.copyOf(data)
return true
}
+ // match: (IData x:(Load <t> ptr mem))
+ // cond: t.IsInterface()
+ // result: @x.Block (Load <typ.BytePtr> (OffPtr <typ.BytePtrPtr> [config.PtrSize] ptr) mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsInterface()) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr)
+ v1.AuxInt = int64ToAuxInt(config.PtrSize)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpIMake(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (IMake _typ (StructMake1 val))
+ // result: (IMake _typ val)
+ for {
+ _typ := v_0
+ if v_1.Op != OpStructMake1 {
+ break
+ }
+ val := v_1.Args[0]
+ v.reset(OpIMake)
+ v.AddArg2(_typ, val)
+ return true
+ }
return false
}
func rewriteValuedec_OpITab(v *Value) bool {
v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
// match: (ITab (IMake itab _))
// result: itab
for {
@@ -87,6 +307,26 @@ func rewriteValuedec_OpITab(v *Value) bool {
v.copyOf(itab)
return true
}
+ // match: (ITab x:(Load <t> ptr mem))
+ // cond: t.IsInterface()
+ // result: @x.Block (Load <typ.Uintptr> ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsInterface()) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Uintptr)
+ v.copyOf(v0)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
return false
}
func rewriteValuedec_OpLoad(v *Value) bool {
@@ -209,6 +449,9 @@ func rewriteValuedec_OpLoad(v *Value) bool {
}
func rewriteValuedec_OpSliceCap(v *Value) bool {
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
// match: (SliceCap (SliceMake _ _ cap))
// result: cap
for {
@@ -219,10 +462,36 @@ func rewriteValuedec_OpSliceCap(v *Value) bool {
v.copyOf(cap)
return true
}
+ // match: (SliceCap x:(Load <t> ptr mem))
+ // cond: t.IsSlice()
+ // result: @x.Block (Load <typ.Int> (OffPtr <typ.IntPtr> [2*config.PtrSize] ptr) mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsSlice()) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v1.AuxInt = int64ToAuxInt(2 * config.PtrSize)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
return false
}
func rewriteValuedec_OpSliceLen(v *Value) bool {
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
// match: (SliceLen (SliceMake _ len _))
// result: len
for {
@@ -233,10 +502,34 @@ func rewriteValuedec_OpSliceLen(v *Value) bool {
v.copyOf(len)
return true
}
+ // match: (SliceLen x:(Load <t> ptr mem))
+ // cond: t.IsSlice()
+ // result: @x.Block (Load <typ.Int> (OffPtr <typ.IntPtr> [config.PtrSize] ptr) mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsSlice()) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v1.AuxInt = int64ToAuxInt(config.PtrSize)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
return false
}
func rewriteValuedec_OpSlicePtr(v *Value) bool {
v_0 := v.Args[0]
+ b := v.Block
// match: (SlicePtr (SliceMake ptr _ _ ))
// result: ptr
for {
@@ -247,6 +540,26 @@ func rewriteValuedec_OpSlicePtr(v *Value) bool {
v.copyOf(ptr)
return true
}
+ // match: (SlicePtr x:(Load <t> ptr mem))
+ // cond: t.IsSlice()
+ // result: @x.Block (Load <t.Elem().PtrTo()> ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsSlice()) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, t.Elem().PtrTo())
+ v.copyOf(v0)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
return false
}
func rewriteValuedec_OpSlicePtrUnchecked(v *Value) bool {
@@ -270,6 +583,18 @@ func rewriteValuedec_OpStore(v *Value) bool {
b := v.Block
config := b.Func.Config
typ := &b.Func.Config.Types
+ // match: (Store {t} _ _ mem)
+ // cond: t.Size() == 0
+ // result: mem
+ for {
+ t := auxToType(v.Aux)
+ mem := v_2
+ if !(t.Size() == 0) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
// match: (Store {t} dst (ComplexMake real imag) mem)
// cond: t.Size() == 8
// result: (Store {typ.Float32} (OffPtr <typ.Float32Ptr> [4] dst) imag (Store {typ.Float32} dst real mem))
@@ -393,10 +718,141 @@ func rewriteValuedec_OpStore(v *Value) bool {
v.AddArg3(v0, data, v1)
return true
}
+ // match: (Store dst (StructMake1 <t> f0) mem)
+ // result: (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem)
+ for {
+ dst := v_0
+ if v_1.Op != OpStructMake1 {
+ break
+ }
+ t := v_1.Type
+ f0 := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(t.FieldType(0))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v0.AuxInt = int64ToAuxInt(0)
+ v0.AddArg(dst)
+ v.AddArg3(v0, f0, mem)
+ return true
+ }
+ // match: (Store dst (StructMake2 <t> f0 f1) mem)
+ // result: (Store {t.FieldType(1)} (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem))
+ for {
+ dst := v_0
+ if v_1.Op != OpStructMake2 {
+ break
+ }
+ t := v_1.Type
+ f1 := v_1.Args[1]
+ f0 := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(t.FieldType(1))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v0.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t.FieldType(0))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v2.AuxInt = int64ToAuxInt(0)
+ v2.AddArg(dst)
+ v1.AddArg3(v2, f0, mem)
+ v.AddArg3(v0, f1, v1)
+ return true
+ }
+ // match: (Store dst (StructMake3 <t> f0 f1 f2) mem)
+ // result: (Store {t.FieldType(2)} (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst) f2 (Store {t.FieldType(1)} (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem)))
+ for {
+ dst := v_0
+ if v_1.Op != OpStructMake3 {
+ break
+ }
+ t := v_1.Type
+ f2 := v_1.Args[2]
+ f0 := v_1.Args[0]
+ f1 := v_1.Args[1]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(t.FieldType(2))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo())
+ v0.AuxInt = int64ToAuxInt(t.FieldOff(2))
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t.FieldType(1))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v2.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t.FieldType(0))
+ v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v4.AuxInt = int64ToAuxInt(0)
+ v4.AddArg(dst)
+ v3.AddArg3(v4, f0, mem)
+ v1.AddArg3(v2, f1, v3)
+ v.AddArg3(v0, f2, v1)
+ return true
+ }
+ // match: (Store dst (StructMake4 <t> f0 f1 f2 f3) mem)
+ // result: (Store {t.FieldType(3)} (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] dst) f3 (Store {t.FieldType(2)} (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst) f2 (Store {t.FieldType(1)} (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem))))
+ for {
+ dst := v_0
+ if v_1.Op != OpStructMake4 {
+ break
+ }
+ t := v_1.Type
+ f3 := v_1.Args[3]
+ f0 := v_1.Args[0]
+ f1 := v_1.Args[1]
+ f2 := v_1.Args[2]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(t.FieldType(3))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(3).PtrTo())
+ v0.AuxInt = int64ToAuxInt(t.FieldOff(3))
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t.FieldType(2))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo())
+ v2.AuxInt = int64ToAuxInt(t.FieldOff(2))
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t.FieldType(1))
+ v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v4.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v5.Aux = typeToAux(t.FieldType(0))
+ v6 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v6.AuxInt = int64ToAuxInt(0)
+ v6.AddArg(dst)
+ v5.AddArg3(v6, f0, mem)
+ v3.AddArg3(v4, f1, v5)
+ v1.AddArg3(v2, f2, v3)
+ v.AddArg3(v0, f3, v1)
+ return true
+ }
+ // match: (Store dst (ArrayMake1 e) mem)
+ // result: (Store {e.Type} dst e mem)
+ for {
+ dst := v_0
+ if v_1.Op != OpArrayMake1 {
+ break
+ }
+ e := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(e.Type)
+ v.AddArg3(dst, e, mem)
+ return true
+ }
return false
}
func rewriteValuedec_OpStringLen(v *Value) bool {
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
// match: (StringLen (StringMake _ len))
// result: len
for {
@@ -407,10 +863,35 @@ func rewriteValuedec_OpStringLen(v *Value) bool {
v.copyOf(len)
return true
}
+ // match: (StringLen x:(Load <t> ptr mem))
+ // cond: t.IsString()
+ // result: @x.Block (Load <typ.Int> (OffPtr <typ.IntPtr> [config.PtrSize] ptr) mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsString()) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v1.AuxInt = int64ToAuxInt(config.PtrSize)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
return false
}
func rewriteValuedec_OpStringPtr(v *Value) bool {
v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
// match: (StringPtr (StringMake ptr _))
// result: ptr
for {
@@ -421,6 +902,191 @@ func rewriteValuedec_OpStringPtr(v *Value) bool {
v.copyOf(ptr)
return true
}
+ // match: (StringPtr x:(Load <t> ptr mem))
+ // cond: t.IsString()
+ // result: @x.Block (Load <typ.BytePtr> ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsString()) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr)
+ v.copyOf(v0)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpStructMake1(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (StructMake1 x)
+ // cond: x.Type.IsPtrShaped()
+ // result: x
+ for {
+ x := v_0
+ if !(x.Type.IsPtrShaped()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpStructSelect(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (StructSelect [0] (IData x))
+ // result: (IData x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpIData {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpIData)
+ v.AddArg(x)
+ return true
+ }
+ // match: (StructSelect (StructMake1 x))
+ // result: x
+ for {
+ if v_0.Op != OpStructMake1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [0] (StructMake2 x _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake2 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [1] (StructMake2 _ x))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake2 {
+ break
+ }
+ x := v_0.Args[1]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [0] (StructMake3 x _ _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake3 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [1] (StructMake3 _ x _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake3 {
+ break
+ }
+ x := v_0.Args[1]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [2] (StructMake3 _ _ x))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpStructMake3 {
+ break
+ }
+ x := v_0.Args[2]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [0] (StructMake4 x _ _ _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake4 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [1] (StructMake4 _ x _ _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake4 {
+ break
+ }
+ x := v_0.Args[1]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [2] (StructMake4 _ _ x _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpStructMake4 {
+ break
+ }
+ x := v_0.Args[2]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [3] (StructMake4 _ _ _ x))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 || v_0.Op != OpStructMake4 {
+ break
+ }
+ x := v_0.Args[3]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [0] x)
+ // cond: x.Type.IsPtrShaped()
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ if !(x.Type.IsPtrShaped()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [i] x:(Load <t> ptr mem))
+ // result: @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
+ for {
+ i := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, v.Type.PtrTo())
+ v1.AuxInt = int64ToAuxInt(t.FieldOff(int(i)))
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
return false
}
func rewriteBlockdec(b *Block) bool {
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index e5bd8bc36f..a018ca04b6 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -397,6 +397,8 @@ func rewriteValuegeneric(v *Value) bool {
return rewriteValuegeneric_OpSlicemask(v)
case OpSqrt:
return rewriteValuegeneric_OpSqrt(v)
+ case OpStaticCall:
+ return rewriteValuegeneric_OpStaticCall(v)
case OpStaticLECall:
return rewriteValuegeneric_OpStaticLECall(v)
case OpStore:
@@ -12585,7 +12587,6 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
- fe := b.Func.fe
// match: (Load <t1> p1 (Store {t2} p2 x _))
// cond: isSamePtr(p1, p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size()
// result: x
@@ -12797,7 +12798,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
return true
}
// match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ mem:(Zero [n] p3 _)))
- // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size())
+ // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size())
// result: @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p3) mem)
for {
t1 := v.Type
@@ -12819,7 +12820,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
}
n := auxIntToInt64(mem.AuxInt)
p3 := mem.Args[0]
- if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size())) {
+ if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size())) {
break
}
b = mem.Block
@@ -12832,7 +12833,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
return true
}
// match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ mem:(Zero [n] p4 _))))
- // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())
+ // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())
// result: @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p4) mem)
for {
t1 := v.Type
@@ -12861,7 +12862,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
}
n := auxIntToInt64(mem.AuxInt)
p4 := mem.Args[0]
- if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())) {
+ if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())) {
break
}
b = mem.Block
@@ -12874,7 +12875,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
return true
}
// match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ mem:(Zero [n] p5 _)))))
- // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())
+ // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())
// result: @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p5) mem)
for {
t1 := v.Type
@@ -12910,7 +12911,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
}
n := auxIntToInt64(mem.AuxInt)
p5 := mem.Args[0]
- if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())) {
+ if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())) {
break
}
b = mem.Block
@@ -12923,7 +12924,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
return true
}
// match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ mem:(Zero [n] p6 _))))))
- // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size()) && disjoint(op, t1.Size(), p5, t5.Size())
+ // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size()) && disjoint(op, t1.Size(), p5, t5.Size())
// result: @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p6) mem)
for {
t1 := v.Type
@@ -12966,7 +12967,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
}
n := auxIntToInt64(mem.AuxInt)
p6 := mem.Args[0]
- if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size()) && disjoint(op, t1.Size(), p5, t5.Size())) {
+ if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size()) && disjoint(op, t1.Size(), p5, t5.Size())) {
break
}
b = mem.Block
@@ -13133,24 +13134,24 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
return true
}
// match: (Load <t> _ _)
- // cond: t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)
+ // cond: t.IsStruct() && t.NumFields() == 0 && CanSSA(t)
// result: (StructMake0)
for {
t := v.Type
- if !(t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)) {
+ if !(t.IsStruct() && t.NumFields() == 0 && CanSSA(t)) {
break
}
v.reset(OpStructMake0)
return true
}
// match: (Load <t> ptr mem)
- // cond: t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)
+ // cond: t.IsStruct() && t.NumFields() == 1 && CanSSA(t)
// result: (StructMake1 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem))
for {
t := v.Type
ptr := v_0
mem := v_1
- if !(t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)) {
+ if !(t.IsStruct() && t.NumFields() == 1 && CanSSA(t)) {
break
}
v.reset(OpStructMake1)
@@ -13163,13 +13164,13 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
return true
}
// match: (Load <t> ptr mem)
- // cond: t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)
+ // cond: t.IsStruct() && t.NumFields() == 2 && CanSSA(t)
// result: (StructMake2 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem) (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem))
for {
t := v.Type
ptr := v_0
mem := v_1
- if !(t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)) {
+ if !(t.IsStruct() && t.NumFields() == 2 && CanSSA(t)) {
break
}
v.reset(OpStructMake2)
@@ -13187,13 +13188,13 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
return true
}
// match: (Load <t> ptr mem)
- // cond: t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)
+ // cond: t.IsStruct() && t.NumFields() == 3 && CanSSA(t)
// result: (StructMake3 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem) (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem) (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem))
for {
t := v.Type
ptr := v_0
mem := v_1
- if !(t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)) {
+ if !(t.IsStruct() && t.NumFields() == 3 && CanSSA(t)) {
break
}
v.reset(OpStructMake3)
@@ -13216,13 +13217,13 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
return true
}
// match: (Load <t> ptr mem)
- // cond: t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)
+ // cond: t.IsStruct() && t.NumFields() == 4 && CanSSA(t)
// result: (StructMake4 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem) (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem) (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem) (Load <t.FieldType(3)> (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] ptr) mem))
for {
t := v.Type
ptr := v_0
mem := v_1
- if !(t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)) {
+ if !(t.IsStruct() && t.NumFields() == 4 && CanSSA(t)) {
break
}
v.reset(OpStructMake4)
@@ -13261,13 +13262,13 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
return true
}
// match: (Load <t> ptr mem)
- // cond: t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)
+ // cond: t.IsArray() && t.NumElem() == 1 && CanSSA(t)
// result: (ArrayMake1 (Load <t.Elem()> ptr mem))
for {
t := v.Type
ptr := v_0
mem := v_1
- if !(t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)) {
+ if !(t.IsArray() && t.NumElem() == 1 && CanSSA(t)) {
break
}
v.reset(OpArrayMake1)
@@ -18967,79 +18968,84 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
fe := b.Func.fe
- // match: (NilCheck (GetG mem) mem)
- // result: mem
+ // match: (NilCheck ptr:(GetG mem) mem)
+ // result: ptr
for {
- if v_0.Op != OpGetG {
+ ptr := v_0
+ if ptr.Op != OpGetG {
break
}
- mem := v_0.Args[0]
+ mem := ptr.Args[0]
if mem != v_1 {
break
}
- v.copyOf(mem)
+ v.copyOf(ptr)
return true
}
- // match: (NilCheck (SelectN [0] call:(StaticLECall _ _)) _)
+ // match: (NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _)
// cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")
- // result: (Invalid)
+ // result: ptr
for {
- if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 {
+ ptr := v_0
+ if ptr.Op != OpSelectN || auxIntToInt64(ptr.AuxInt) != 0 {
break
}
- call := v_0.Args[0]
+ call := ptr.Args[0]
if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
break
}
- v.reset(OpInvalid)
+ v.copyOf(ptr)
return true
}
- // match: (NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
+ // match: (NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
// cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")
- // result: (Invalid)
+ // result: ptr
for {
- if v_0.Op != OpOffPtr {
+ ptr := v_0
+ if ptr.Op != OpOffPtr {
break
}
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelectN || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ ptr_0 := ptr.Args[0]
+ if ptr_0.Op != OpSelectN || auxIntToInt64(ptr_0.AuxInt) != 0 {
break
}
- call := v_0_0.Args[0]
+ call := ptr_0.Args[0]
if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
break
}
- v.reset(OpInvalid)
+ v.copyOf(ptr)
return true
}
- // match: (NilCheck (Addr {_} (SB)) _)
- // result: (Invalid)
+ // match: (NilCheck ptr:(Addr {_} (SB)) _)
+ // result: ptr
for {
- if v_0.Op != OpAddr {
+ ptr := v_0
+ if ptr.Op != OpAddr {
break
}
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSB {
+ ptr_0 := ptr.Args[0]
+ if ptr_0.Op != OpSB {
break
}
- v.reset(OpInvalid)
+ v.copyOf(ptr)
return true
}
- // match: (NilCheck (Convert (Addr {_} (SB)) _) _)
- // result: (Invalid)
+ // match: (NilCheck ptr:(Convert (Addr {_} (SB)) _) _)
+ // result: ptr
for {
- if v_0.Op != OpConvert {
+ ptr := v_0
+ if ptr.Op != OpConvert {
break
}
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpAddr {
+ ptr_0 := ptr.Args[0]
+ if ptr_0.Op != OpAddr {
break
}
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpSB {
+ ptr_0_0 := ptr_0.Args[0]
+ if ptr_0_0.Op != OpSB {
break
}
- v.reset(OpInvalid)
+ v.copyOf(ptr)
return true
}
return false
@@ -28219,6 +28225,31 @@ func rewriteValuegeneric_OpSqrt(v *Value) bool {
}
return false
}
+func rewriteValuegeneric_OpStaticCall(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (StaticCall {callAux} p q _ mem)
+ // cond: isSameCall(callAux, "runtime.memequal") && isSamePtr(p, q)
+ // result: (MakeResult (ConstBool <typ.Bool> [true]) mem)
+ for {
+ if len(v.Args) != 4 {
+ break
+ }
+ callAux := auxToCall(v.Aux)
+ mem := v.Args[3]
+ p := v.Args[0]
+ q := v.Args[1]
+ if !(isSameCall(callAux, "runtime.memequal") && isSamePtr(p, q)) {
+ break
+ }
+ v.reset(OpMakeResult)
+ v0 := b.NewValue0(v.Pos, OpConstBool, typ.Bool)
+ v0.AuxInt = boolToAuxInt(true)
+ v.AddArg2(v0, mem)
+ return true
+ }
+ return false
+}
func rewriteValuegeneric_OpStaticLECall(v *Value) bool {
b := v.Block
config := b.Func.Config
@@ -28506,6 +28537,26 @@ func rewriteValuegeneric_OpStaticLECall(v *Value) bool {
v.AddArg2(v0, mem)
return true
}
+ // match: (StaticLECall {callAux} p q _ mem)
+ // cond: isSameCall(callAux, "runtime.memequal") && isSamePtr(p, q)
+ // result: (MakeResult (ConstBool <typ.Bool> [true]) mem)
+ for {
+ if len(v.Args) != 4 {
+ break
+ }
+ callAux := auxToCall(v.Aux)
+ mem := v.Args[3]
+ p := v.Args[0]
+ q := v.Args[1]
+ if !(isSameCall(callAux, "runtime.memequal") && isSamePtr(p, q)) {
+ break
+ }
+ v.reset(OpMakeResult)
+ v0 := b.NewValue0(v.Pos, OpConstBool, typ.Bool)
+ v0.AuxInt = boolToAuxInt(true)
+ v.AddArg2(v0, mem)
+ return true
+ }
// match: (StaticLECall {callAux} _ (Const64 [0]) (Const64 [0]) mem)
// cond: isSameCall(callAux, "runtime.makeslice")
// result: (MakeResult (Addr <v.Type.FieldType(0)> {ir.Syms.Zerobase} (SB)) mem)
@@ -28563,7 +28614,6 @@ func rewriteValuegeneric_OpStore(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- fe := b.Func.fe
// match: (Store {t1} p1 (Load <t2> p2 mem) mem)
// cond: isSamePtr(p1, p2) && t2.Size() == t1.Size()
// result: mem
@@ -28940,7 +28990,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool {
return true
}
// match: (Store {t} dst (Load src mem) mem)
- // cond: !fe.CanSSA(t)
+ // cond: !CanSSA(t)
// result: (Move {t} [t.Size()] dst src mem)
for {
t := auxToType(v.Aux)
@@ -28950,7 +29000,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool {
}
mem := v_1.Args[1]
src := v_1.Args[0]
- if mem != v_2 || !(!fe.CanSSA(t)) {
+ if mem != v_2 || !(!CanSSA(t)) {
break
}
v.reset(OpMove)
@@ -28960,7 +29010,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool {
return true
}
// match: (Store {t} dst (Load src mem) (VarDef {x} mem))
- // cond: !fe.CanSSA(t)
+ // cond: !CanSSA(t)
// result: (Move {t} [t.Size()] dst src (VarDef {x} mem))
for {
t := auxToType(v.Aux)
@@ -28974,7 +29024,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool {
break
}
x := auxToSym(v_2.Aux)
- if mem != v_2.Args[0] || !(!fe.CanSSA(t)) {
+ if mem != v_2.Args[0] || !(!CanSSA(t)) {
break
}
v.reset(OpMove)
@@ -29450,7 +29500,6 @@ func rewriteValuegeneric_OpStringPtr(v *Value) bool {
func rewriteValuegeneric_OpStructSelect(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
- fe := b.Func.fe
// match: (StructSelect (StructMake1 x))
// result: x
for {
@@ -29552,7 +29601,7 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool {
return true
}
// match: (StructSelect [i] x:(Load <t> ptr mem))
- // cond: !fe.CanSSA(t)
+ // cond: !CanSSA(t)
// result: @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
for {
i := auxIntToInt64(v.AuxInt)
@@ -29563,7 +29612,7 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool {
t := x.Type
mem := x.Args[1]
ptr := x.Args[0]
- if !(!fe.CanSSA(t)) {
+ if !(!CanSSA(t)) {
break
}
b = x.Block
diff --git a/src/cmd/compile/internal/ssa/sccp.go b/src/cmd/compile/internal/ssa/sccp.go
new file mode 100644
index 0000000000..77a6f50961
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/sccp.go
@@ -0,0 +1,585 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "fmt"
+)
+
+// ----------------------------------------------------------------------------
+// Sparse Conditional Constant Propagation
+//
+// Described in
+// Mark N. Wegman, F. Kenneth Zadeck: Constant Propagation with Conditional Branches.
+// TOPLAS 1991.
+//
+// This algorithm uses a three-level lattice for SSA values
+//
+// Top undefined
+// / | \
+// .. 1 2 3 .. constant
+// \ | /
+// Bottom not constant
+//
+// It starts by optimistically assuming that all SSA values are initially Top
+// and then propagates constant facts only along reachable control flow paths.
+// Since some basic blocks may not have been visited yet, the corresponding
+// inputs of a phi are still Top; we use meet(phi) to compute its lattice.
+//
+// Top ∩ any = any
+// Bottom ∩ any = Bottom
+// ConstantA ∩ ConstantA = ConstantA
+// ConstantA ∩ ConstantB = Bottom
+//
+// Each lattice value is lowered at most twice (Top to Constant, Constant to Bottom)
+// because of the lattice depth, so the algorithm converges quickly.
+// In this way, sccp can discover optimization opportunities that cannot be found
+// by combining constant folding, constant propagation, and dead code
+// elimination separately.
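+//
+// As an illustrative sketch (hypothetical values, not taken from real code), the
+// meet rules play out for a phi whose reachable inputs are:
+//
+//	phi(Const64 [5], Const64 [5])  -> Constant 5   (ConstantA ∩ ConstantA = ConstantA)
+//	phi(Const64 [5], Const64 [7])  -> Bottom       (ConstantA ∩ ConstantB = Bottom)
+//	phi(Const64 [5], <unvisited>)  -> Constant 5   (Top ∩ any = any)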
+
+// Three-level lattice holding compile-time knowledge about an SSA value
+const (
+ top int8 = iota // undefined
+ constant // constant
+ bottom // not a constant
+)
+
+type lattice struct {
+ tag int8 // lattice type
+ val *Value // constant value
+}
+
+type worklist struct {
+ f *Func // the target function to be optimized
+ edges []Edge // propagate constant facts through edges
+ uses []*Value // re-visiting set
+ visited map[Edge]bool // visited edges
+ latticeCells map[*Value]lattice // constant lattices
+ defUse map[*Value][]*Value // def-use chains for some values
+ defBlock map[*Value][]*Block // use blocks of def
+ visitedBlock []bool // visited blocks
+}
+
+// sccp stands for sparse conditional constant propagation. It propagates constants
+// through the CFG conditionally and applies constant folding, constant replacement and
+// dead code elimination all together.
+func sccp(f *Func) {
+ var t worklist
+ t.f = f
+ t.edges = make([]Edge, 0)
+ t.visited = make(map[Edge]bool)
+ t.edges = append(t.edges, Edge{f.Entry, 0})
+ t.defUse = make(map[*Value][]*Value)
+ t.defBlock = make(map[*Value][]*Block)
+ t.latticeCells = make(map[*Value]lattice)
+ t.visitedBlock = f.Cache.allocBoolSlice(f.NumBlocks())
+ defer f.Cache.freeBoolSlice(t.visitedBlock)
+
+ // build it early since we rely heavily on the def-use chain later
+ t.buildDefUses()
+
+ // pick up either an edge or an SSA value from the worklist and process it
+ for {
+ if len(t.edges) > 0 {
+ edge := t.edges[0]
+ t.edges = t.edges[1:]
+ if _, exist := t.visited[edge]; !exist {
+ dest := edge.b
+ destVisited := t.visitedBlock[dest.ID]
+
+ // mark edge as visited
+ t.visited[edge] = true
+ t.visitedBlock[dest.ID] = true
+ for _, val := range dest.Values {
+ if val.Op == OpPhi || !destVisited {
+ t.visitValue(val)
+ }
+ }
+ // propagate constant facts through the CFG, taking the condition
+ // test into account
+ if !destVisited {
+ t.propagate(dest)
+ }
+ }
+ continue
+ }
+ if len(t.uses) > 0 {
+ use := t.uses[0]
+ t.uses = t.uses[1:]
+ t.visitValue(use)
+ continue
+ }
+ break
+ }
+
+ // apply optimizations based on discovered constants
+ constCnt, rewireCnt := t.replaceConst()
+ if f.pass.debug > 0 {
+ if constCnt > 0 || rewireCnt > 0 {
+ fmt.Printf("Phase SCCP for %v : %v constants, %v dce\n", f.Name, constCnt, rewireCnt)
+ }
+ }
+}
+
+func equals(a, b lattice) bool {
+ if a == b {
+ // fast path
+ return true
+ }
+ if a.tag != b.tag {
+ return false
+ }
+ if a.tag == constant {
+ // Different Values may represent the same constant, so compare
+ // their Op and AuxInt instead of pointer identity
+ v1 := a.val
+ v2 := b.val
+ if v1.Op == v2.Op && v1.AuxInt == v2.AuxInt {
+ return true
+ } else {
+ return false
+ }
+ }
+ return true
+}
+
+// possibleConst reports whether the Value can be folded to a constant. For Values
+// that can never become constants (e.g. StaticCall), we don't make futile efforts.
+func possibleConst(val *Value) bool {
+ if isConst(val) {
+ return true
+ }
+ switch val.Op {
+ case OpCopy:
+ return true
+ case OpPhi:
+ return true
+ case
+ // negate
+ OpNeg8, OpNeg16, OpNeg32, OpNeg64, OpNeg32F, OpNeg64F,
+ OpCom8, OpCom16, OpCom32, OpCom64,
+ // math
+ OpFloor, OpCeil, OpTrunc, OpRoundToEven, OpSqrt,
+ // conversion
+ OpTrunc16to8, OpTrunc32to8, OpTrunc32to16, OpTrunc64to8,
+ OpTrunc64to16, OpTrunc64to32, OpCvt32to32F, OpCvt32to64F,
+ OpCvt64to32F, OpCvt64to64F, OpCvt32Fto32, OpCvt32Fto64,
+ OpCvt64Fto32, OpCvt64Fto64, OpCvt32Fto64F, OpCvt64Fto32F,
+ OpCvtBoolToUint8,
+ OpZeroExt8to16, OpZeroExt8to32, OpZeroExt8to64, OpZeroExt16to32,
+ OpZeroExt16to64, OpZeroExt32to64, OpSignExt8to16, OpSignExt8to32,
+ OpSignExt8to64, OpSignExt16to32, OpSignExt16to64, OpSignExt32to64,
+ // bit
+ OpCtz8, OpCtz16, OpCtz32, OpCtz64,
+ // mask
+ OpSlicemask,
+ // safety check
+ OpIsNonNil,
+ // not
+ OpNot:
+ return true
+ case
+ // add
+ OpAdd64, OpAdd32, OpAdd16, OpAdd8,
+ OpAdd32F, OpAdd64F,
+ // sub
+ OpSub64, OpSub32, OpSub16, OpSub8,
+ OpSub32F, OpSub64F,
+ // mul
+ OpMul64, OpMul32, OpMul16, OpMul8,
+ OpMul32F, OpMul64F,
+ // div
+ OpDiv32F, OpDiv64F,
+ OpDiv8, OpDiv16, OpDiv32, OpDiv64,
+ OpDiv8u, OpDiv16u, OpDiv32u, OpDiv64u,
+ OpMod8, OpMod16, OpMod32, OpMod64,
+ OpMod8u, OpMod16u, OpMod32u, OpMod64u,
+ // compare
+ OpEq64, OpEq32, OpEq16, OpEq8,
+ OpEq32F, OpEq64F,
+ OpLess64, OpLess32, OpLess16, OpLess8,
+ OpLess64U, OpLess32U, OpLess16U, OpLess8U,
+ OpLess32F, OpLess64F,
+ OpLeq64, OpLeq32, OpLeq16, OpLeq8,
+ OpLeq64U, OpLeq32U, OpLeq16U, OpLeq8U,
+ OpLeq32F, OpLeq64F,
+ OpEqB, OpNeqB,
+ // shift
+ OpLsh64x64, OpRsh64x64, OpRsh64Ux64, OpLsh32x64,
+ OpRsh32x64, OpRsh32Ux64, OpLsh16x64, OpRsh16x64,
+ OpRsh16Ux64, OpLsh8x64, OpRsh8x64, OpRsh8Ux64,
+ // safety check
+ OpIsInBounds, OpIsSliceInBounds,
+ // bit
+ OpAnd8, OpAnd16, OpAnd32, OpAnd64,
+ OpOr8, OpOr16, OpOr32, OpOr64,
+ OpXor8, OpXor16, OpXor32, OpXor64:
+ return true
+ default:
+ return false
+ }
+}
+
+func (t *worklist) getLatticeCell(val *Value) lattice {
+ if !possibleConst(val) {
+ // such values can never become constants, so they are always Bottom
+ return lattice{bottom, nil}
+ }
+ lt, exist := t.latticeCells[val]
+ if !exist {
+ return lattice{top, nil} // optimistically assume Top for unvisited values
+ }
+ return lt
+}
+
+func isConst(val *Value) bool {
+ switch val.Op {
+ case OpConst64, OpConst32, OpConst16, OpConst8,
+ OpConstBool, OpConst32F, OpConst64F:
+ return true
+ default:
+ return false
+ }
+}
+
+// buildDefUses builds def-use chains for some values early, because once the
+// lattice of a value changes, we need to update the lattices of its uses. We don't
+// need all of its uses, though: only uses that can become constants are added to the
+// re-visit worklist, since no matter how many times they are revisited, the lattice
+// of a use that can never become a constant remains unchanged, i.e. Bottom.
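+//
+// For example (illustrative only), for t3 = Add64 <int64> t1 t2, t3 is recorded as a
+// use of both t1 and t2, provided all three values can possibly become constants.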
+func (t *worklist) buildDefUses() {
+ for _, block := range t.f.Blocks {
+ for _, val := range block.Values {
+ for _, arg := range val.Args {
+ // find its uses; only uses that can become constants are taken into account
+ if possibleConst(arg) && possibleConst(val) {
+ if _, exist := t.defUse[arg]; !exist {
+ t.defUse[arg] = make([]*Value, 0, arg.Uses)
+ }
+ t.defUse[arg] = append(t.defUse[arg], val)
+ }
+ }
+ }
+ for _, ctl := range block.ControlValues() {
+ // for control values that can become constants, find their use blocks
+ if possibleConst(ctl) {
+ t.defBlock[ctl] = append(t.defBlock[ctl], block)
+ }
+ }
+ }
+}
+
+// addUses finds all uses of the value and appends them to the worklist for further processing
+func (t *worklist) addUses(val *Value) {
+ for _, use := range t.defUse[val] {
+ if val == use {
+ // A phi may refer to itself as one of its uses; ignore it to avoid
+ // re-visiting the phi, for performance reasons
+ continue
+ }
+ t.uses = append(t.uses, use)
+ }
+ for _, block := range t.defBlock[val] {
+ if t.visitedBlock[block.ID] {
+ t.propagate(block)
+ }
+ }
+}
+
+// meet meets all of the phi's arguments and computes the resulting lattice
+func (t *worklist) meet(val *Value) lattice {
+ optimisticLt := lattice{top, nil}
+ for i := 0; i < len(val.Args); i++ {
+ edge := Edge{val.Block, i}
+ // If the incoming edge for the phi has not been visited, assume Top optimistically.
+ // According to the rules of meet:
+ // Top ∩ any = any
+ // Top participates in meet() but does not affect the result, so here
+ // we ignore Top and only take the other lattices into consideration.
+ if _, exist := t.visited[edge]; exist {
+ lt := t.getLatticeCell(val.Args[i])
+ if lt.tag == constant {
+ if optimisticLt.tag == top {
+ optimisticLt = lt
+ } else {
+ if !equals(optimisticLt, lt) {
+ // ConstantA ∩ ConstantB = Bottom
+ return lattice{bottom, nil}
+ }
+ }
+ } else if lt.tag == bottom {
+ // Bottom ∩ any = Bottom
+ return lattice{bottom, nil}
+ } else {
+ // Top ∩ any = any
+ }
+ } else {
+ // Top ∩ any = any
+ }
+ }
+
+ // ConstantA ∩ ConstantA = ConstantA or Top ∩ any = any
+ return optimisticLt
+}
+
+func computeLattice(f *Func, val *Value, args ...*Value) lattice {
+ // In general, we need to perform constant evaluation based on constant args:
+ //
+ // res := lattice{constant, nil}
+ // switch op {
+ // case OpAdd16:
+ // res.val = newConst(argLt1.val.AuxInt16() + argLt2.val.AuxInt16())
+ // case OpAdd32:
+ // res.val = newConst(argLt1.val.AuxInt32() + argLt2.val.AuxInt32())
+ // case OpDiv8:
+ // if !isDivideByZero(argLt2.val.AuxInt8()) {
+ // res.val = newConst(argLt1.val.AuxInt8() / argLt2.val.AuxInt8())
+ // }
+ // ...
+ // }
+ //
+ // However, this would create a huge switch for all opcodes that can be
+ // evaluated at compile time. Moreover, some operations can be evaluated
+ // only if their arguments satisfy additional conditions (e.g. no division by zero).
+ // That is fragile and error prone. Instead, we reuse the existing generic
+ // rewrite rules for compile-time evaluation. But generic rules rewrite the
+ // original value; that behavior is undesired here, because the lattice of a value
+ // may change multiple times, and once the value is rewritten we lose the chance
+ // to change it again, which can lead to errors. For example, we cannot
+ // rewrite a phi immediately after visiting it, because some of its input
+ // edges may not have been visited yet.
+ constValue := f.newValue(val.Op, val.Type, f.Entry, val.Pos)
+ constValue.AddArgs(args...)
+ matched := rewriteValuegeneric(constValue)
+ if matched {
+ if isConst(constValue) {
+ return lattice{constant, constValue}
+ }
+ }
+ // Either we cannot match a generic rule for the given value or it does not
+ // satisfy the additional constraints (e.g. division by zero); in these cases,
+ // clean up the temporary value immediately in case it is not dominated by its args.
+ constValue.reset(OpInvalid)
+ return lattice{bottom, nil}
+}
+
+func (t *worklist) visitValue(val *Value) {
+ if !possibleConst(val) {
+ // fast fail for Values that are always worst-case, i.e. no lowering ever
+ // happens on them; their lattices start out as Bottom.
+ return
+ }
+
+ oldLt := t.getLatticeCell(val)
+ defer func() {
+ // re-visit all uses of the value if its lattice has changed
+ newLt := t.getLatticeCell(val)
+ if !equals(newLt, oldLt) {
+ if int8(oldLt.tag) > int8(newLt.tag) {
+ t.f.Fatalf("Must lower lattice\n")
+ }
+ t.addUses(val)
+ }
+ }()
+
+ switch val.Op {
+ // they are constant values, aren't they?
+ case OpConst64, OpConst32, OpConst16, OpConst8,
+ OpConstBool, OpConst32F, OpConst64F: //TODO: support ConstNil ConstString etc
+ t.latticeCells[val] = lattice{constant, val}
+ // lattice value of copy(x) actually means lattice value of (x)
+ case OpCopy:
+ t.latticeCells[val] = t.getLatticeCell(val.Args[0])
+ // phi should be processed specially
+ case OpPhi:
+ t.latticeCells[val] = t.meet(val)
+ // fold 1-input operations:
+ case
+ // negate
+ OpNeg8, OpNeg16, OpNeg32, OpNeg64, OpNeg32F, OpNeg64F,
+ OpCom8, OpCom16, OpCom32, OpCom64,
+ // math
+ OpFloor, OpCeil, OpTrunc, OpRoundToEven, OpSqrt,
+ // conversion
+ OpTrunc16to8, OpTrunc32to8, OpTrunc32to16, OpTrunc64to8,
+ OpTrunc64to16, OpTrunc64to32, OpCvt32to32F, OpCvt32to64F,
+ OpCvt64to32F, OpCvt64to64F, OpCvt32Fto32, OpCvt32Fto64,
+ OpCvt64Fto32, OpCvt64Fto64, OpCvt32Fto64F, OpCvt64Fto32F,
+ OpCvtBoolToUint8,
+ OpZeroExt8to16, OpZeroExt8to32, OpZeroExt8to64, OpZeroExt16to32,
+ OpZeroExt16to64, OpZeroExt32to64, OpSignExt8to16, OpSignExt8to32,
+ OpSignExt8to64, OpSignExt16to32, OpSignExt16to64, OpSignExt32to64,
+ // bit
+ OpCtz8, OpCtz16, OpCtz32, OpCtz64,
+ // mask
+ OpSlicemask,
+ // safety check
+ OpIsNonNil,
+ // not
+ OpNot:
+ lt1 := t.getLatticeCell(val.Args[0])
+
+ if lt1.tag == constant {
+ // here we take a shortcut by reusing generic rules to fold constants
+ t.latticeCells[val] = computeLattice(t.f, val, lt1.val)
+ } else {
+ t.latticeCells[val] = lattice{lt1.tag, nil}
+ }
+ // fold 2-input operations
+ case
+ // add
+ OpAdd64, OpAdd32, OpAdd16, OpAdd8,
+ OpAdd32F, OpAdd64F,
+ // sub
+ OpSub64, OpSub32, OpSub16, OpSub8,
+ OpSub32F, OpSub64F,
+ // mul
+ OpMul64, OpMul32, OpMul16, OpMul8,
+ OpMul32F, OpMul64F,
+ // div
+ OpDiv32F, OpDiv64F,
+ OpDiv8, OpDiv16, OpDiv32, OpDiv64,
+ OpDiv8u, OpDiv16u, OpDiv32u, OpDiv64u, //TODO: support div128u
+ // mod
+ OpMod8, OpMod16, OpMod32, OpMod64,
+ OpMod8u, OpMod16u, OpMod32u, OpMod64u,
+ // compare
+ OpEq64, OpEq32, OpEq16, OpEq8,
+ OpEq32F, OpEq64F,
+ OpLess64, OpLess32, OpLess16, OpLess8,
+ OpLess64U, OpLess32U, OpLess16U, OpLess8U,
+ OpLess32F, OpLess64F,
+ OpLeq64, OpLeq32, OpLeq16, OpLeq8,
+ OpLeq64U, OpLeq32U, OpLeq16U, OpLeq8U,
+ OpLeq32F, OpLeq64F,
+ OpEqB, OpNeqB,
+ // shift
+ OpLsh64x64, OpRsh64x64, OpRsh64Ux64, OpLsh32x64,
+ OpRsh32x64, OpRsh32Ux64, OpLsh16x64, OpRsh16x64,
+ OpRsh16Ux64, OpLsh8x64, OpRsh8x64, OpRsh8Ux64,
+ // safety check
+ OpIsInBounds, OpIsSliceInBounds,
+ // bit
+ OpAnd8, OpAnd16, OpAnd32, OpAnd64,
+ OpOr8, OpOr16, OpOr32, OpOr64,
+ OpXor8, OpXor16, OpXor32, OpXor64:
+ lt1 := t.getLatticeCell(val.Args[0])
+ lt2 := t.getLatticeCell(val.Args[1])
+
+ if lt1.tag == constant && lt2.tag == constant {
+ // here we take a shortcut by reusing generic rules to fold constants
+ t.latticeCells[val] = computeLattice(t.f, val, lt1.val, lt2.val)
+ } else {
+ if lt1.tag == bottom || lt2.tag == bottom {
+ t.latticeCells[val] = lattice{bottom, nil}
+ } else {
+ t.latticeCells[val] = lattice{top, nil}
+ }
+ }
+ default:
+ // Any other kind of value can never be a constant; it is always worst-case (Bottom)
+ }
+}
+
+// propagate propagates constant facts through the CFG. If the block has a single
+// successor, add that successor unconditionally. If the block has multiple successors,
+// only add the branch destination corresponding to the lattice value of the condition.
+func (t *worklist) propagate(block *Block) {
+ switch block.Kind {
+ case BlockExit, BlockRet, BlockRetJmp, BlockInvalid:
+ // control flow ends; nothing to do
+ break
+ case BlockDefer:
+ // we know nothing about control flow, add all branch destinations
+ t.edges = append(t.edges, block.Succs...)
+ case BlockFirst:
+ fallthrough // always takes the first branch
+ case BlockPlain:
+ t.edges = append(t.edges, block.Succs[0])
+ case BlockIf, BlockJumpTable:
+ cond := block.ControlValues()[0]
+ condLattice := t.getLatticeCell(cond)
+ if condLattice.tag == bottom {
+ // we know nothing about control flow, add all branch destinations
+ t.edges = append(t.edges, block.Succs...)
+ } else if condLattice.tag == constant {
+ // add only the branch destination selected by the constant condition
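+ // For BlockIf, Succs[0] is taken when the control value is true, so a
+ // constant true (AuxInt == 1) selects index 0, hence 1 - AuxInt; for
+ // BlockJumpTable, the constant is the successor index itself.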
+ var branchIdx int64
+ if block.Kind == BlockIf {
+ branchIdx = 1 - condLattice.val.AuxInt
+ } else {
+ branchIdx = condLattice.val.AuxInt
+ }
+ t.edges = append(t.edges, block.Succs[branchIdx])
+ } else {
+ // condition value is not visited yet, don't propagate it now
+ }
+ default:
+ t.f.Fatalf("All kind of block should be processed above.")
+ }
+}
+
+// rewireSuccessor rewires the block's successors according to the constant value
+// discovered by the preceding analysis. As a result, some successors become unreachable
+// and thus can be removed by the later deadcode phase.
+func rewireSuccessor(block *Block, constVal *Value) bool {
+ switch block.Kind {
+ case BlockIf:
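+ // The constant is a boolean: a true condition (AuxInt == 1) removes the
+ // false edge at index 1, and a false condition removes the true edge at
+ // index 0, leaving the taken successor as the block's only edge.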
+ block.removeEdge(int(constVal.AuxInt))
+ block.Kind = BlockPlain
+ block.Likely = BranchUnknown
+ block.ResetControls()
+ return true
+ case BlockJumpTable:
+ // Remove everything but the known taken branch.
+ idx := int(constVal.AuxInt)
+ if idx < 0 || idx >= len(block.Succs) {
+ // This can only happen in unreachable code,
+ // as an invariant of jump tables is that their
+ // input index is in range.
+ // See issue 64826.
+ return false
+ }
+ block.swapSuccessorsByIdx(0, idx)
+ for len(block.Succs) > 1 {
+ block.removeEdge(1)
+ }
+ block.Kind = BlockPlain
+ block.Likely = BranchUnknown
+ block.ResetControls()
+ return true
+ default:
+ return false
+ }
+}
+
+// replaceConst will replace non-constant values that have been proven by sccp
+// to be constants.
+func (t *worklist) replaceConst() (int, int) {
+ constCnt, rewireCnt := 0, 0
+ for val, lt := range t.latticeCells {
+ if lt.tag == constant {
+ if !isConst(val) {
+ if t.f.pass.debug > 0 {
+ fmt.Printf("Replace %v with %v\n", val.LongString(), lt.val.LongString())
+ }
+ val.reset(lt.val.Op)
+ val.AuxInt = lt.val.AuxInt
+ constCnt++
+ }
+ // If the const value controls a block, rewire that block's successors according to its value
+ ctrlBlock := t.defBlock[val]
+ for _, block := range ctrlBlock {
+ if rewireSuccessor(block, lt.val) {
+ rewireCnt++
+ if t.f.pass.debug > 0 {
+ fmt.Printf("Rewire %v %v successors\n", block.Kind, block)
+ }
+ }
+ }
+ }
+ }
+ return constCnt, rewireCnt
+}
diff --git a/src/cmd/compile/internal/ssa/sccp_test.go b/src/cmd/compile/internal/ssa/sccp_test.go
new file mode 100644
index 0000000000..70c23e7527
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/sccp_test.go
@@ -0,0 +1,95 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "strings"
+ "testing"
+)
+
+func TestSCCPBasic(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("b1",
+ Bloc("b1",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("v1", OpConst64, c.config.Types.Int64, 20, nil),
+ Valu("v2", OpConst64, c.config.Types.Int64, 21, nil),
+ Valu("v3", OpConst64F, c.config.Types.Float64, 21.0, nil),
+ Valu("v4", OpConstBool, c.config.Types.Bool, 1, nil),
+ Valu("t1", OpAdd64, c.config.Types.Int64, 0, nil, "v1", "v2"),
+ Valu("t2", OpDiv64, c.config.Types.Int64, 0, nil, "t1", "v1"),
+ Valu("t3", OpAdd64, c.config.Types.Int64, 0, nil, "t1", "t2"),
+ Valu("t4", OpSub64, c.config.Types.Int64, 0, nil, "t3", "v2"),
+ Valu("t5", OpMul64, c.config.Types.Int64, 0, nil, "t4", "v2"),
+ Valu("t6", OpMod64, c.config.Types.Int64, 0, nil, "t5", "v2"),
+ Valu("t7", OpAnd64, c.config.Types.Int64, 0, nil, "t6", "v2"),
+ Valu("t8", OpOr64, c.config.Types.Int64, 0, nil, "t7", "v2"),
+ Valu("t9", OpXor64, c.config.Types.Int64, 0, nil, "t8", "v2"),
+ Valu("t10", OpNeg64, c.config.Types.Int64, 0, nil, "t9"),
+ Valu("t11", OpCom64, c.config.Types.Int64, 0, nil, "t10"),
+ Valu("t12", OpNeg64, c.config.Types.Int64, 0, nil, "t11"),
+ Valu("t13", OpFloor, c.config.Types.Float64, 0, nil, "v3"),
+ Valu("t14", OpSqrt, c.config.Types.Float64, 0, nil, "t13"),
+ Valu("t15", OpCeil, c.config.Types.Float64, 0, nil, "t14"),
+ Valu("t16", OpTrunc, c.config.Types.Float64, 0, nil, "t15"),
+ Valu("t17", OpRoundToEven, c.config.Types.Float64, 0, nil, "t16"),
+ Valu("t18", OpTrunc64to32, c.config.Types.Int64, 0, nil, "t12"),
+ Valu("t19", OpCvt64Fto64, c.config.Types.Float64, 0, nil, "t17"),
+ Valu("t20", OpCtz64, c.config.Types.Int64, 0, nil, "v2"),
+ Valu("t21", OpSlicemask, c.config.Types.Int64, 0, nil, "t20"),
+ Valu("t22", OpIsNonNil, c.config.Types.Int64, 0, nil, "v2"),
+ Valu("t23", OpNot, c.config.Types.Bool, 0, nil, "v4"),
+ Valu("t24", OpEq64, c.config.Types.Bool, 0, nil, "v1", "v2"),
+ Valu("t25", OpLess64, c.config.Types.Bool, 0, nil, "v1", "v2"),
+ Valu("t26", OpLeq64, c.config.Types.Bool, 0, nil, "v1", "v2"),
+ Valu("t27", OpEqB, c.config.Types.Bool, 0, nil, "v4", "v4"),
+ Valu("t28", OpLsh64x64, c.config.Types.Int64, 0, nil, "v2", "v1"),
+ Valu("t29", OpIsInBounds, c.config.Types.Int64, 0, nil, "v2", "v1"),
+ Valu("t30", OpIsSliceInBounds, c.config.Types.Int64, 0, nil, "v2", "v1"),
+ Goto("b2")),
+ Bloc("b2",
+ Exit("mem")))
+ sccp(fun.f)
+ CheckFunc(fun.f)
+ for name, value := range fun.values {
+ if strings.HasPrefix(name, "t") {
+ if !isConst(value) {
+ t.Errorf("Must be constant: %v", value.LongString())
+ }
+ }
+ }
+}
+
+func TestSCCPIf(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("b1",
+ Bloc("b1",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("v1", OpConst64, c.config.Types.Int64, 0, nil),
+ Valu("v2", OpConst64, c.config.Types.Int64, 1, nil),
+ Valu("cmp", OpLess64, c.config.Types.Bool, 0, nil, "v1", "v2"),
+ If("cmp", "b2", "b3")),
+ Bloc("b2",
+ Valu("v3", OpConst64, c.config.Types.Int64, 3, nil),
+ Goto("b4")),
+ Bloc("b3",
+ Valu("v4", OpConst64, c.config.Types.Int64, 4, nil),
+ Goto("b4")),
+ Bloc("b4",
+ Valu("merge", OpPhi, c.config.Types.Int64, 0, nil, "v3", "v4"),
+ Exit("mem")))
+ sccp(fun.f)
+ CheckFunc(fun.f)
+ for _, b := range fun.blocks {
+ for _, v := range b.Values {
+ if v == fun.values["merge"] {
+ if !isConst(v) {
+ t.Errorf("Must be constant: %v", v.LongString())
+ }
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go
index 19b98cc4b8..fb38f40d63 100644
--- a/src/cmd/compile/internal/ssa/schedule.go
+++ b/src/cmd/compile/internal/ssa/schedule.go
@@ -7,7 +7,6 @@ package ssa
import (
"cmd/compile/internal/base"
"cmd/compile/internal/types"
- "cmd/internal/src"
"container/heap"
"sort"
)
@@ -65,10 +64,6 @@ func (h ValHeap) Less(i, j int) bool {
}
if x.Pos != y.Pos { // Favor in-order line stepping
- if x.Block == x.Block.Func.Entry && x.Pos.IsStmt() != y.Pos.IsStmt() {
- // In the entry block, put statement-marked instructions earlier.
- return x.Pos.IsStmt() == src.PosIsStmt && y.Pos.IsStmt() != src.PosIsStmt
- }
return x.Pos.Before(y.Pos)
}
if x.Op != OpPhi {
@@ -312,14 +307,21 @@ func schedule(f *Func) {
}
// Remove SPanchored now that we've scheduled.
+ // Also unlink nil checks now that ordering is assured
+ // between the nil check and the uses of the nil-checked pointer.
for _, b := range f.Blocks {
for _, v := range b.Values {
for i, a := range v.Args {
- if a.Op == OpSPanchored {
+ if a.Op == OpSPanchored || opcodeTable[a.Op].nilCheck {
v.SetArg(i, a.Args[0])
}
}
}
+ for i, c := range b.ControlValues() {
+ if c.Op == OpSPanchored || opcodeTable[c.Op].nilCheck {
+ b.ReplaceControl(i, c.Args[0])
+ }
+ }
}
for _, b := range f.Blocks {
i := 0
@@ -332,6 +334,15 @@ func schedule(f *Func) {
v.resetArgs()
f.freeValue(v)
} else {
+ if opcodeTable[v.Op].nilCheck {
+ if v.Uses != 0 {
+ base.Fatalf("nilcheck still has %d uses", v.Uses)
+ }
+ // We can't delete the nil check, but we mark
+ // it as having void type so regalloc won't
+ // try to allocate a register for it.
+ v.Type = types.TypeVoid
+ }
b.Values[i] = v
i++
}
diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go
index 3e24b48a69..c9ca778b3a 100644
--- a/src/cmd/compile/internal/ssa/stackalloc.go
+++ b/src/cmd/compile/internal/ssa/stackalloc.go
@@ -280,7 +280,7 @@ func (s *stackAllocState) stackalloc() {
// If there is no unused stack slot, allocate a new one.
if i == len(locs) {
s.nAuto++
- locs = append(locs, LocalSlot{N: f.fe.Auto(v.Pos, v.Type), Type: v.Type, Off: 0})
+ locs = append(locs, LocalSlot{N: f.NewLocal(v.Pos, v.Type), Type: v.Type, Off: 0})
locations[v.Type] = locs
}
// Use the stack variable at that index for v.
diff --git a/src/cmd/compile/internal/ssa/stackframe.go b/src/cmd/compile/internal/ssa/stackframe.go
deleted file mode 100644
index 08be62a051..0000000000
--- a/src/cmd/compile/internal/ssa/stackframe.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// stackframe calls back into the frontend to assign frame offsets.
-func stackframe(f *Func) {
- f.fe.AllocFrame(f)
-}
diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go
index e89024b3c6..4eaab40354 100644
--- a/src/cmd/compile/internal/ssa/value.go
+++ b/src/cmd/compile/internal/ssa/value.go
@@ -552,7 +552,11 @@ func (v *Value) LackingPos() bool {
// if its use count drops to 0.
func (v *Value) removeable() bool {
if v.Type.IsVoid() {
- // Void ops, like nil pointer checks, must stay.
+ // Void ops (inline marks) must stay.
+ return false
+ }
+ if opcodeTable[v.Op].nilCheck {
+ // Nil pointer checks must stay.
return false
}
if v.Type.IsMemory() {
@@ -581,3 +585,36 @@ func AutoVar(v *Value) (*ir.Name, int64) {
nameOff := v.Aux.(*AuxNameOffset)
return nameOff.Name, nameOff.Offset
}
+
+// CanSSA reports whether values of type t can be represented as a Value.
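+//
+// For example (illustrative only; the exact cutoffs depend on MaxStruct and the
+// target's pointer size): on a 64-bit target an int, a string, or a small struct
+// such as struct{ a, b int } can be SSA'd, while [4]int (more than one array
+// element) or a struct with more than MaxStruct fields cannot.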
+func CanSSA(t *types.Type) bool {
+ types.CalcSize(t)
+ if t.Size() > int64(4*types.PtrSize) {
+ // 4*Widthptr is an arbitrary constant. We want it
+ // to be at least 3*Widthptr so slices can be registerized.
+ // Too big and we'll introduce too much register pressure.
+ return false
+ }
+ switch t.Kind() {
+ case types.TARRAY:
+ // We can't do larger arrays because dynamic indexing is
+ // not supported on SSA variables.
+ // TODO: allow if all indexes are constant.
+ if t.NumElem() <= 1 {
+ return CanSSA(t.Elem())
+ }
+ return false
+ case types.TSTRUCT:
+ if t.NumFields() > MaxStruct {
+ return false
+ }
+ for _, t1 := range t.Fields() {
+ if !CanSSA(t1.Type) {
+ return false
+ }
+ }
+ return true
+ default:
+ return true
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go
index bd9e0b8268..1caccb7c18 100644
--- a/src/cmd/compile/internal/ssa/writebarrier.go
+++ b/src/cmd/compile/internal/ssa/writebarrier.go
@@ -250,6 +250,7 @@ func writebarrier(f *Func) {
// to a new block.
var last *Value
var start, end int
+ var nonPtrStores int
values := b.Values
FindSeq:
for i := len(values) - 1; i >= 0; i-- {
@@ -261,8 +262,17 @@ func writebarrier(f *Func) {
last = w
end = i + 1
}
+ nonPtrStores = 0
case OpVarDef, OpVarLive:
continue
+ case OpStore:
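+ // A store of a non-pointer value. Tolerate a couple of these between
+ // the pointer stores so that nearby write-barrier stores can share a
+ // single barrier check; past that limit, end the sequence here.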
+ if last == nil {
+ continue
+ }
+ nonPtrStores++
+ if nonPtrStores > 2 {
+ break FindSeq
+ }
default:
if last == nil {
continue
@@ -309,7 +319,7 @@ func writebarrier(f *Func) {
}
t := val.Type.Elem()
- tmp := f.fe.Auto(w.Pos, t)
+ tmp := f.NewLocal(w.Pos, t)
mem = b.NewValue1A(w.Pos, OpVarDef, types.TypeMem, tmp, mem)
tmpaddr := b.NewValue2A(w.Pos, OpLocalAddr, t.PtrTo(), tmp, sp, mem)
siz := t.Size()
@@ -353,7 +363,7 @@ func writebarrier(f *Func) {
memThen := mem
var curCall *Value
var curPtr *Value
- addEntry := func(v *Value) {
+ addEntry := func(pos src.XPos, v *Value) {
if curCall == nil || curCall.AuxInt == maxEntries {
t := types.NewTuple(types.Types[types.TUINTPTR].PtrTo(), types.TypeMem)
curCall = bThen.NewValue1(pos, OpWB, t, memThen)
@@ -394,7 +404,7 @@ func writebarrier(f *Func) {
val := w.Args[1]
if !srcs.contains(val.ID) && needWBsrc(val) {
srcs.add(val.ID)
- addEntry(val)
+ addEntry(pos, val)
}
if !dsts.contains(ptr.ID) && needWBdst(ptr, w.Args[2], zeroes) {
dsts.add(ptr.ID)
@@ -407,7 +417,7 @@ func writebarrier(f *Func) {
// combine the read and the write.
oldVal := bThen.NewValue2(pos, OpLoad, types.Types[types.TUINTPTR], ptr, memThen)
// Save old value to write buffer.
- addEntry(oldVal)
+ addEntry(pos, oldVal)
}
f.fe.Func().SetWBPos(pos)
nWBops--
@@ -449,6 +459,7 @@ func writebarrier(f *Func) {
// Do raw stores after merge point.
for _, w := range stores {
+ pos := w.Pos
switch w.Op {
case OpStoreWB:
ptr := w.Args[0]
@@ -483,6 +494,10 @@ func writebarrier(f *Func) {
mem.Aux = w.Aux
case OpVarDef, OpVarLive:
mem = bEnd.NewValue1A(pos, w.Op, types.TypeMem, w.Aux, mem)
+ case OpStore:
+ ptr := w.Args[0]
+ val := w.Args[1]
+ mem = bEnd.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, ptr, val, mem)
}
}
@@ -656,7 +671,7 @@ func wbcall(pos src.XPos, b *Block, fn *obj.LSym, sp, mem *Value, args ...*Value
for i := 0; i < nargs; i++ {
argTypes[i] = typ
}
- call := b.NewValue0A(pos, OpStaticCall, types.TypeResultMem, StaticAuxCall(fn, b.Func.ABIDefault.ABIAnalyzeTypes(nil, argTypes, nil)))
+ call := b.NewValue0A(pos, OpStaticCall, types.TypeResultMem, StaticAuxCall(fn, b.Func.ABIDefault.ABIAnalyzeTypes(argTypes, nil)))
call.AddArgs(args...)
call.AuxInt = int64(nargs) * typ.Size()
return b.NewValue1I(pos, OpSelectN, types.TypeMem, 0, call)
diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go
index c97d60b996..56af9ce781 100644
--- a/src/cmd/compile/internal/ssagen/abi.go
+++ b/src/cmd/compile/internal/ssagen/abi.go
@@ -41,12 +41,8 @@ func NewSymABIs() *SymABIs {
// both to use the full path, which matches compiler-generated linker
// symbol names.
func (s *SymABIs) canonicalize(linksym string) string {
- // If the symbol is already prefixed with "", rewrite it to start
- // with LocalPkg.Prefix.
- //
- // TODO(mdempsky): Have cmd/asm stop writing out symbols like this.
if strings.HasPrefix(linksym, `"".`) {
- return types.LocalPkg.Prefix + linksym[2:]
+ panic("non-canonical symbol name: " + linksym)
}
return linksym
}
@@ -125,11 +121,7 @@ func (s *SymABIs) GenABIWrappers() {
// This may generate new decls for the wrappers, but we
// specifically *don't* want to visit those, lest we create
// wrappers for wrappers.
- for _, fn := range typecheck.Target.Decls {
- if fn.Op() != ir.ODCLFUNC {
- continue
- }
- fn := fn.(*ir.Func)
+ for _, fn := range typecheck.Target.Funcs {
nam := fn.Nname
if ir.IsBlank(nam) {
continue
@@ -241,11 +233,10 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
// Q: is this needed?
savepos := base.Pos
- savedclcontext := typecheck.DeclContext
savedcurfn := ir.CurFunc
- base.Pos = base.AutogeneratedPos
- typecheck.DeclContext = ir.PEXTERN
+ pos := base.AutogeneratedPos
+ base.Pos = pos
// At the moment we don't support wrapping a method, we'd need machinery
// below to handle the receiver. Panic if we see this scenario.
@@ -256,10 +247,12 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
}
// Reuse f's types.Sym to create a new ODCLFUNC/function.
- fn := typecheck.DeclFunc(f.Nname.Sym(), nil,
- typecheck.NewFuncParams(ft.Params(), true),
- typecheck.NewFuncParams(ft.Results(), false))
+ // TODO(mdempsky): Means we can't set sym.Def in Declfunc, ugh.
+ fn := ir.NewFunc(pos, pos, f.Sym(), types.NewSignature(nil,
+ typecheck.NewFuncParams(ft.Params()),
+ typecheck.NewFuncParams(ft.Results())))
fn.ABI = wrapperABI
+ typecheck.DeclFunc(fn)
fn.SetABIWrapper(true)
fn.SetDupok(true)
@@ -328,15 +321,11 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
typecheck.FinishFuncBody()
- typecheck.Func(fn)
ir.CurFunc = fn
typecheck.Stmts(fn.Body)
- typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
-
// Restore previous context.
base.Pos = savepos
- typecheck.DeclContext = savedclcontext
ir.CurFunc = savedcurfn
}
@@ -443,7 +432,7 @@ func setupWasmABI(f *ir.Func) {
//
// (import "a_module" "add" (func (param i32 i32) (result i32)))
abiConfig := AbiForBodylessFuncStackMap(f)
- abiInfo := abiConfig.ABIAnalyzeFuncType(f.Type().FuncType())
+ abiInfo := abiConfig.ABIAnalyzeFuncType(f.Type())
wi.Params = paramsToWasmFields(f, abiInfo, abiInfo.InParams())
wi.Results = resultsToWasmFields(f, abiInfo, abiInfo.OutParams())
}
diff --git a/src/cmd/compile/internal/ssagen/nowb.go b/src/cmd/compile/internal/ssagen/nowb.go
index 9de1b9ae36..b8756eea61 100644
--- a/src/cmd/compile/internal/ssagen/nowb.go
+++ b/src/cmd/compile/internal/ssagen/nowb.go
@@ -56,11 +56,8 @@ func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
// important to handle it for this check, so we model it
// directly. This has to happen before transforming closures in walk since
// it's a lot harder to work out the argument after.
- for _, n := range typecheck.Target.Decls {
- if n.Op() != ir.ODCLFUNC {
- continue
- }
- c.curfn = n.(*ir.Func)
+ for _, n := range typecheck.Target.Funcs {
+ c.curfn = n
if c.curfn.ABIWrapper() {
// We only want "real" calls to these
// functions, not the generated ones within
@@ -78,14 +75,14 @@ func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) {
return
}
n := nn.(*ir.CallExpr)
- if n.X == nil || n.X.Op() != ir.ONAME {
+ if n.Fun == nil || n.Fun.Op() != ir.ONAME {
return
}
- fn := n.X.(*ir.Name)
+ fn := n.Fun.(*ir.Name)
if fn.Class != ir.PFUNC || fn.Defn == nil {
return
}
- if !types.IsRuntimePkg(fn.Sym().Pkg) || fn.Sym().Name != "systemstack" {
+ if types.RuntimeSymName(fn.Sym()) != "systemstack" {
return
}
@@ -101,9 +98,6 @@ func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) {
default:
base.Fatalf("expected ONAME or OCLOSURE node, got %+v", arg)
}
- if callee.Op() != ir.ODCLFUNC {
- base.Fatalf("expected ODCLFUNC node, got %+v", callee)
- }
c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos()})
}
@@ -139,12 +133,7 @@ func (c *nowritebarrierrecChecker) check() {
// q is the queue of ODCLFUNC Nodes to visit in BFS order.
var q ir.NameQueue
- for _, n := range typecheck.Target.Decls {
- if n.Op() != ir.ODCLFUNC {
- continue
- }
- fn := n.(*ir.Func)
-
+ for _, fn := range typecheck.Target.Funcs {
symToFunc[fn.LSym] = fn
// Make nowritebarrierrec functions BFS roots.
diff --git a/src/cmd/compile/internal/ssagen/pgen.go b/src/cmd/compile/internal/ssagen/pgen.go
index 9fd3f2aee4..e7a0699641 100644
--- a/src/cmd/compile/internal/ssagen/pgen.go
+++ b/src/cmd/compile/internal/ssagen/pgen.go
@@ -22,44 +22,65 @@ import (
)
// cmpstackvarlt reports whether the stack variable a sorts before b.
-//
-// Sort the list of stack variables. Autos after anything else,
-// within autos, unused after used, within used, things with
-// pointers first, zeroed things first, and then decreasing size.
-// Because autos are laid out in decreasing addresses
-// on the stack, pointers first, zeroed things first and decreasing size
-// really means, in memory, things with pointers needing zeroing at
-// the top of the stack and increasing in size.
-// Non-autos sort on offset.
func cmpstackvarlt(a, b *ir.Name) bool {
+ // Sort non-autos before autos.
if needAlloc(a) != needAlloc(b) {
return needAlloc(b)
}
+ // If both are non-auto (e.g., parameters, results), then sort by
+ // frame offset (defined by ABI).
if !needAlloc(a) {
return a.FrameOffset() < b.FrameOffset()
}
+ // From here on, a and b are both autos (i.e., local variables).
+
+ // Sort used before unused (so AllocFrame can truncate unused
+ // variables).
if a.Used() != b.Used() {
return a.Used()
}
+ // Sort pointer-typed before non-pointer types.
+ // Keeps the stack's GC bitmap compact.
ap := a.Type().HasPointers()
bp := b.Type().HasPointers()
if ap != bp {
return ap
}
+ // Group variables that need zeroing, so we can efficiently zero
+ // them altogether.
ap = a.Needzero()
bp = b.Needzero()
if ap != bp {
return ap
}
- if a.Type().Size() != b.Type().Size() {
- return a.Type().Size() > b.Type().Size()
+ // Sort variables in descending alignment order, so we can optimally
+ // pack variables into the frame.
+ if a.Type().Alignment() != b.Type().Alignment() {
+ return a.Type().Alignment() > b.Type().Alignment()
+ }
+
+ // Sort normal variables before open-coded-defer slots, so that the
+ // latter are grouped together and near the top of the frame (to
+ // minimize varint encoding of their varp offset).
+ if a.OpenDeferSlot() != b.OpenDeferSlot() {
+ return a.OpenDeferSlot()
+ }
+
+ // If a and b are both open-coded defer slots, then order them by
+ // index in descending order, so they'll be laid out in the frame in
+ // ascending order.
+ //
+ // Their index was saved in FrameOffset in state.openDeferSave.
+ if a.OpenDeferSlot() {
+ return a.FrameOffset() > b.FrameOffset()
}
+ // Tie breaker for stable results.
return a.Sym().Name < b.Sym().Name
}
@@ -100,6 +121,14 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
// Mark the PAUTO's unused.
for _, ln := range fn.Dcl {
+ if ln.OpenDeferSlot() {
+ // Open-coded defer slots have indices that were assigned
+ // upfront during SSA construction, but the defer statement can
+ // later get removed during deadcode elimination (#61895). To
+ // keep their relative offsets correct, treat them all as used.
+ continue
+ }
+
if needAlloc(ln) {
ln.SetUsed(false)
}
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index 597a196ba8..c794d6ffd9 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -27,6 +27,7 @@ import (
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
+ "cmd/internal/objabi"
"cmd/internal/src"
"cmd/internal/sys"
@@ -86,6 +87,8 @@ func InitConfig() {
_ = types.NewPtr(types.Types[types.TINT16]) // *int16
_ = types.NewPtr(types.Types[types.TINT64]) // *int64
_ = types.NewPtr(types.ErrorType) // *error
+ _ = types.NewPtr(reflectdata.MapType()) // *runtime.hmap
+ _ = types.NewPtr(deferstruct()) // *runtime._defer
types.NewPtrCacheEnabled = false
ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0, Arch.SoftFloat)
ssaConfig.Race = base.Flag.Race
@@ -100,6 +103,7 @@ func InitConfig() {
ir.Syms.CgoCheckPtrWrite = typecheck.LookupRuntimeFunc("cgoCheckPtrWrite")
ir.Syms.CheckPtrAlignment = typecheck.LookupRuntimeFunc("checkptrAlignment")
ir.Syms.Deferproc = typecheck.LookupRuntimeFunc("deferproc")
+ ir.Syms.Deferprocat = typecheck.LookupRuntimeFunc("deferprocat")
ir.Syms.DeferprocStack = typecheck.LookupRuntimeFunc("deferprocStack")
ir.Syms.Deferreturn = typecheck.LookupRuntimeFunc("deferreturn")
ir.Syms.Duffcopy = typecheck.LookupRuntimeFunc("duffcopy")
@@ -114,6 +118,7 @@ func InitConfig() {
ir.Syms.GCWriteBarrier[7] = typecheck.LookupRuntimeFunc("gcWriteBarrier8")
ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded")
ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice")
+ ir.Syms.InterfaceSwitch = typecheck.LookupRuntimeFunc("interfaceSwitch")
ir.Syms.Memmove = typecheck.LookupRuntimeFunc("memmove")
ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread")
ir.Syms.Msanwrite = typecheck.LookupRuntimeFunc("msanwrite")
@@ -128,10 +133,13 @@ func InitConfig() {
ir.Syms.Panicnildottype = typecheck.LookupRuntimeFunc("panicnildottype")
ir.Syms.Panicoverflow = typecheck.LookupRuntimeFunc("panicoverflow")
ir.Syms.Panicshift = typecheck.LookupRuntimeFunc("panicshift")
+ ir.Syms.Racefuncenter = typecheck.LookupRuntimeFunc("racefuncenter")
+ ir.Syms.Racefuncexit = typecheck.LookupRuntimeFunc("racefuncexit")
ir.Syms.Raceread = typecheck.LookupRuntimeFunc("raceread")
ir.Syms.Racereadrange = typecheck.LookupRuntimeFunc("racereadrange")
ir.Syms.Racewrite = typecheck.LookupRuntimeFunc("racewrite")
ir.Syms.Racewriterange = typecheck.LookupRuntimeFunc("racewriterange")
+ ir.Syms.TypeAssert = typecheck.LookupRuntimeFunc("typeAssert")
ir.Syms.WBZero = typecheck.LookupRuntimeFunc("wbZero")
ir.Syms.WBMove = typecheck.LookupRuntimeFunc("wbMove")
ir.Syms.X86HasPOPCNT = typecheck.LookupRuntimeVar("x86HasPOPCNT") // bool
@@ -248,30 +256,6 @@ func abiForFunc(fn *ir.Func, abi0, abi1 *abi.ABIConfig) *abi.ABIConfig {
return a
}
-// dvarint writes a varint v to the funcdata in symbol x and returns the new offset.
-func dvarint(x *obj.LSym, off int, v int64) int {
- if v < 0 || v > 1e9 {
- panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v))
- }
- if v < 1<<7 {
- return objw.Uint8(x, off, uint8(v))
- }
- off = objw.Uint8(x, off, uint8((v&127)|128))
- if v < 1<<14 {
- return objw.Uint8(x, off, uint8(v>>7))
- }
- off = objw.Uint8(x, off, uint8(((v>>7)&127)|128))
- if v < 1<<21 {
- return objw.Uint8(x, off, uint8(v>>14))
- }
- off = objw.Uint8(x, off, uint8(((v>>14)&127)|128))
- if v < 1<<28 {
- return objw.Uint8(x, off, uint8(v>>21))
- }
- off = objw.Uint8(x, off, uint8(((v>>21)&127)|128))
- return objw.Uint8(x, off, uint8(v>>28))
-}
-
// emitOpenDeferInfo emits FUNCDATA information about the defers in a function
// that is using open-coded defers. This funcdata is used to determine the active
// defers in a function and execute those defers during panic processing.
@@ -282,47 +266,59 @@ func dvarint(x *obj.LSym, off int, v int64) int {
// top of the local variables) for their starting address. The format is:
//
// - Offset of the deferBits variable
-// - Number of defers in the function
-// - Information about each defer call, in reverse order of appearance in the function:
-// - Offset of the closure value to call
+// - Offset of the first closure slot (the rest are laid out consecutively).
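+//
+// For example (illustrative offsets on a 64-bit target): with deferBits at frame
+// offset -8 and three closure slots starting at -32, the funcdata encodes uvarint(8)
+// followed by uvarint(32), and the slots live at -32, -24 and -16.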
func (s *state) emitOpenDeferInfo() {
+ firstOffset := s.openDefers[0].closureNode.FrameOffset()
+
+ // Verify that cmpstackvarlt laid out the slots in order.
+ for i, r := range s.openDefers {
+ have := r.closureNode.FrameOffset()
+ want := firstOffset + int64(i)*int64(types.PtrSize)
+ if have != want {
+ base.FatalfAt(s.curfn.Pos(), "unexpected frame offset for open-coded defer slot #%v: have %v, want %v", i, have, want)
+ }
+ }
+
x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
x.Set(obj.AttrContentAddressable, true)
s.curfn.LSym.Func().OpenCodedDeferInfo = x
- off := 0
- off = dvarint(x, off, -s.deferBitsTemp.FrameOffset())
- off = dvarint(x, off, int64(len(s.openDefers)))
-
- // Write in reverse-order, for ease of running in that order at runtime
- for i := len(s.openDefers) - 1; i >= 0; i-- {
- r := s.openDefers[i]
- off = dvarint(x, off, -r.closureNode.FrameOffset())
- }
-}
-func okOffset(offset int64) int64 {
- if offset == types.BOGUS_FUNARG_OFFSET {
- panic(fmt.Errorf("Bogus offset %d", offset))
- }
- return offset
+ off := 0
+ off = objw.Uvarint(x, off, uint64(-s.deferBitsTemp.FrameOffset()))
+ off = objw.Uvarint(x, off, uint64(-firstOffset))
}
// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
func buildssa(fn *ir.Func, worker int) *ssa.Func {
name := ir.FuncName(fn)
+
+ abiSelf := abiForFunc(fn, ssaConfig.ABI0, ssaConfig.ABI1)
+
printssa := false
- if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", package.name e.g. "compress/gzip.(*Reader).Reset", or subpackage name "gzip.(*Reader).Reset"
- pkgDotName := base.Ctxt.Pkgpath + "." + name
- printssa = name == ssaDump ||
- strings.HasSuffix(pkgDotName, ssaDump) && (pkgDotName == ssaDump || strings.HasSuffix(pkgDotName, "/"+ssaDump))
+ // match either a simple name e.g. "(*Reader).Reset", package.name e.g. "compress/gzip.(*Reader).Reset", or subpackage name "gzip.(*Reader).Reset"
+ // optionally allows an ABI suffix specification in the GOSSAFUNC value, e.g. "(*Reader).Reset<0>" etc
+ if strings.Contains(ssaDump, name) { // in all the cases the function name is entirely contained within the GOSSAFUNC string.
+ nameOptABI := name
+ if strings.Contains(ssaDump, ",") { // ABI specification
+ nameOptABI = ssa.FuncNameABI(name, abiSelf.Which())
+ } else if strings.HasSuffix(ssaDump, ">") { // if they use the linker syntax instead....
+ l := len(ssaDump)
+ if l >= 3 && ssaDump[l-3] == '<' {
+ nameOptABI = ssa.FuncNameABI(name, abiSelf.Which())
+ ssaDump = ssaDump[:l-3] + "," + ssaDump[l-2:l-1]
+ }
+ }
+ pkgDotName := base.Ctxt.Pkgpath + "." + nameOptABI
+ printssa = nameOptABI == ssaDump || // "(*Reader).Reset"
+ pkgDotName == ssaDump || // "compress/gzip.(*Reader).Reset"
+ strings.HasSuffix(pkgDotName, ssaDump) && strings.HasSuffix(pkgDotName, "/"+ssaDump) // "gzip.(*Reader).Reset"
}
+
var astBuf *bytes.Buffer
if printssa {
astBuf = &bytes.Buffer{}
- ir.FDumpList(astBuf, "buildssa-enter", fn.Enter)
ir.FDumpList(astBuf, "buildssa-body", fn.Body)
- ir.FDumpList(astBuf, "buildssa-exit", fn.Exit)
if ssaDumpStdout {
fmt.Println("generating SSA for", name)
fmt.Print(astBuf.String())
@@ -339,27 +335,36 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
}
s.checkPtrEnabled = ir.ShouldCheckPtr(fn, 1)
+ if base.Flag.Cfg.Instrumenting && fn.Pragma&ir.Norace == 0 && !fn.Linksym().ABIWrapper() {
+ if !base.Flag.Race || !objabi.LookupPkgSpecial(fn.Sym().Pkg.Path).NoRaceFunc {
+ s.instrumentMemory = true
+ }
+ if base.Flag.Race {
+ s.instrumentEnterExit = true
+ }
+ }
+
fe := ssafn{
curfn: fn,
log: printssa && ssaDumpStdout,
}
s.curfn = fn
- s.f = ssa.NewFunc(&fe)
+ cache := &ssaCaches[worker]
+ cache.Reset()
+
+ s.f = ssaConfig.NewFunc(&fe, cache)
s.config = ssaConfig
s.f.Type = fn.Type()
- s.f.Config = ssaConfig
- s.f.Cache = &ssaCaches[worker]
- s.f.Cache.Reset()
s.f.Name = name
s.f.PrintOrHtmlSSA = printssa
if fn.Pragma&ir.Nosplit != 0 {
s.f.NoSplit = true
}
- s.f.ABI0 = ssaConfig.ABI0.Copy() // Make a copy to avoid racy map operations in type-register-width cache.
- s.f.ABI1 = ssaConfig.ABI1.Copy()
- s.f.ABIDefault = abiForFunc(nil, s.f.ABI0, s.f.ABI1)
- s.f.ABISelf = abiForFunc(fn, s.f.ABI0, s.f.ABI1)
+ s.f.ABI0 = ssaConfig.ABI0
+ s.f.ABI1 = ssaConfig.ABI1
+ s.f.ABIDefault = abiForFunc(nil, ssaConfig.ABI0, ssaConfig.ABI1)
+ s.f.ABISelf = abiSelf
s.panics = map[funcLine]*ssa.Block{}
s.softFloat = s.config.SoftFloat
@@ -371,7 +376,7 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
if printssa {
ssaDF := ssaDumpFile
if ssaDir != "" {
- ssaDF = filepath.Join(ssaDir, base.Ctxt.Pkgpath+"."+name+".html")
+ ssaDF = filepath.Join(ssaDir, base.Ctxt.Pkgpath+"."+s.f.NameABI()+".html")
ssaD := filepath.Dir(ssaDF)
os.MkdirAll(ssaD, 0755)
}
@@ -396,16 +401,16 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
// preceding the deferreturn/ret code that we don't track correctly.
s.hasOpenDefers = false
}
- if s.hasOpenDefers && len(s.curfn.Exit) > 0 {
- // Skip doing open defers if there is any extra exit code (likely
- // race detection), since we will not generate that code in the
- // case of the extra deferreturn/ret segment.
+ if s.hasOpenDefers && s.instrumentEnterExit {
+ // Skip doing open defers if we need to instrument function
+ // returns for the race detector, since we will not generate that
+ // code in the case of the extra deferreturn/ret segment.
s.hasOpenDefers = false
}
if s.hasOpenDefers {
// Similarly, skip if there are any heap-allocated result
// parameters that need to be copied back to their stack slots.
- for _, f := range s.curfn.Type().Results().FieldSlice() {
+ for _, f := range s.curfn.Type().Results() {
if !f.Nname.(*ir.Name).OnStack() {
s.hasOpenDefers = false
break
@@ -492,12 +497,12 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
} else { // address was taken AND/OR too large for SSA
paramAssignment := ssa.ParamAssignmentForArgName(s.f, n)
if len(paramAssignment.Registers) > 0 {
- if TypeOK(n.Type()) { // SSA-able type, so address was taken -- receive value in OpArg, DO NOT bind to var, store immediately to memory.
+ if ssa.CanSSA(n.Type()) { // SSA-able type, so address was taken -- receive value in OpArg, DO NOT bind to var, store immediately to memory.
v := s.newValue0A(ssa.OpArg, n.Type(), n)
s.store(n.Type(), s.decladdrs[n], v)
} else { // Too big for SSA.
// Brute force, and early, do a bunch of stores from registers
- // TODO fix the nasty storeArgOrLoad recursion in ssa/expand_calls.go so this Just Works with store of a big Arg.
+ // Note that expand calls knows about this and doesn't trouble itself with larger-than-SSA-able Args in registers.
s.storeParameterRegsToStack(s.f.ABISelf, paramAssignment, n, s.decladdrs[n], false)
}
}
@@ -527,7 +532,7 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
// runtime calls that did (#43701). Since we don't
// convert Addrtaken variables to SSA anyway, no point
// in promoting them either.
- if n.Byval() && !n.Addrtaken() && TypeOK(n.Type()) {
+ if n.Byval() && !n.Addrtaken() && ssa.CanSSA(n.Type()) {
n.Class = ir.PAUTO
fn.Dcl = append(fn.Dcl, n)
s.assign(n, s.load(n.Type(), ptr), false, 0)
@@ -542,7 +547,9 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
}
// Convert the AST-based IR to the SSA-based IR
- s.stmtList(fn.Enter)
+ if s.instrumentEnterExit {
+ s.rtcall(ir.Syms.Racefuncenter, true, nil, s.newValue0(ssa.OpGetCallerPC, types.Types[types.TUINTPTR]))
+ }
s.zeroResults()
s.paramsToHeap()
s.stmtList(fn.Body)
@@ -567,7 +574,9 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
// Main call to ssa package to compile function
ssa.Compile(s.f)
- if s.hasOpenDefers {
+ fe.AllocFrame(s.f)
+
+ if len(s.openDefers) != 0 {
s.emitOpenDeferInfo()
}
@@ -613,7 +622,7 @@ func (s *state) storeParameterRegsToStack(abi *abi.ABIConfig, paramAssignment *a
// are always live, so we need to zero them before any allocations,
// even allocations to move params/results to the heap.
func (s *state) zeroResults() {
- for _, f := range s.curfn.Type().Results().FieldSlice() {
+ for _, f := range s.curfn.Type().Results() {
n := f.Nname.(*ir.Name)
if !n.OnStack() {
// The local which points to the return value is the
@@ -622,7 +631,7 @@ func (s *state) zeroResults() {
continue
}
// Zero the stack location containing f.
- if typ := n.Type(); TypeOK(typ) {
+ if typ := n.Type(); ssa.CanSSA(typ) {
s.assign(n, s.zeroVal(typ), false, 0)
} else {
if typ.HasPointers() {
@@ -636,8 +645,8 @@ func (s *state) zeroResults() {
// paramsToHeap produces code to allocate memory for heap-escaped parameters
// and to copy non-result parameters' values from the stack.
func (s *state) paramsToHeap() {
- do := func(params *types.Type) {
- for _, f := range params.FieldSlice() {
+ do := func(params []*types.Field) {
+ for _, f := range params {
if f.Nname == nil {
continue // anonymous or blank parameter
}
@@ -671,12 +680,9 @@ func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) {
}
// Declare variable to hold address.
- addr := ir.NewNameAt(pos, &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg})
- addr.SetType(types.NewPtr(n.Type()))
- addr.Class = ir.PAUTO
+ sym := &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg}
+ addr := s.curfn.NewLocal(pos, sym, types.NewPtr(n.Type()))
addr.SetUsed(true)
- addr.Curfn = s.curfn
- s.curfn.Dcl = append(s.curfn.Dcl, addr)
types.CalcSize(addr.Type())
if n.Class == ir.PPARAMOUT {
@@ -883,11 +889,13 @@ type state struct {
// Used to deduplicate panic calls.
panics map[funcLine]*ssa.Block
- cgoUnsafeArgs bool
- hasdefer bool // whether the function contains a defer statement
- softFloat bool
- hasOpenDefers bool // whether we are doing open-coded defers
- checkPtrEnabled bool // whether to insert checkptr instrumentation
+ cgoUnsafeArgs bool
+ hasdefer bool // whether the function contains a defer statement
+ softFloat bool
+ hasOpenDefers bool // whether we are doing open-coded defers
+ checkPtrEnabled bool // whether to insert checkptr instrumentation
+ instrumentEnterExit bool // whether to instrument function enter/exit
+ instrumentMemory bool // whether to instrument memory operations
// If doing open-coded defers, list of info about the defer calls in
// scanning order. Hence, at exit we should run these defers in reverse
@@ -935,7 +943,7 @@ func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl
func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() }
func ssaMarker(name string) *ir.Name {
- return typecheck.NewName(&types.Sym{Name: name})
+ return ir.NewNameAt(base.Pos, &types.Sym{Name: name}, nil)
}
var (
@@ -949,6 +957,7 @@ var (
typVar = ssaMarker("typ")
okVar = ssaMarker("ok")
deferBitsVar = ssaMarker("deferBits")
+ hashVar = ssaMarker("hash")
)
// startBlock sets the current block we're generating code in to b.
@@ -1245,7 +1254,7 @@ func (s *state) instrumentFields(t *types.Type, addr *ssa.Value, kind instrument
s.instrument(t, addr, kind)
return
}
- for _, f := range t.Fields().Slice() {
+ for _, f := range t.Fields() {
if f.Sym.IsBlank() {
continue
}
@@ -1264,7 +1273,7 @@ func (s *state) instrumentMove(t *types.Type, dst, src *ssa.Value) {
}
func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrumentKind) {
- if !s.curfn.InstrumentBody() {
+ if !s.instrumentMemory {
return
}
@@ -1436,8 +1445,7 @@ func (s *state) stmt(n ir.Node) {
n := n.(*ir.BlockStmt)
s.stmtList(n.List)
- // No-ops
- case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL:
+ case ir.OFALL: // no-op
// Expression statements
case ir.OCALLFUNC:
@@ -1451,9 +1459,9 @@ func (s *state) stmt(n ir.Node) {
case ir.OCALLINTER:
n := n.(*ir.CallExpr)
s.callResult(n, callNormal)
- if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class == ir.PFUNC {
- if fn := n.X.Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
- n.X.Sym().Pkg == ir.Pkgs.Runtime && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap" || fn == "panicunsafeslicelen" || fn == "panicunsafeslicenilptr" || fn == "panicunsafestringlen" || fn == "panicunsafestringnilptr") {
+ if n.Op() == ir.OCALLFUNC && n.Fun.Op() == ir.ONAME && n.Fun.(*ir.Name).Class == ir.PFUNC {
+ if fn := n.Fun.Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
+ n.Fun.Sym().Pkg == ir.Pkgs.Runtime && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap" || fn == "panicunsafeslicelen" || fn == "panicunsafeslicenilptr" || fn == "panicunsafestringlen" || fn == "panicunsafestringnilptr") {
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockExit
@@ -1480,10 +1488,10 @@ func (s *state) stmt(n ir.Node) {
s.openDeferRecord(n.Call.(*ir.CallExpr))
} else {
d := callDefer
- if n.Esc() == ir.EscNever {
+ if n.Esc() == ir.EscNever && n.DeferAt == nil {
d = callDeferStack
}
- s.callResult(n.Call.(*ir.CallExpr), d)
+ s.call(n.Call.(*ir.CallExpr), d, false, n.DeferAt)
}
case ir.OGO:
n := n.(*ir.GoDeferStmt)
@@ -1498,7 +1506,7 @@ func (s *state) stmt(n ir.Node) {
res, resok = s.dynamicDottype(n.Rhs[0].(*ir.DynamicTypeAssertExpr), true)
}
deref := false
- if !TypeOK(n.Rhs[0].Type()) {
+ if !ssa.CanSSA(n.Rhs[0].Type()) {
if res.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
}
@@ -1657,7 +1665,7 @@ func (s *state) stmt(n ir.Node) {
}
var r *ssa.Value
- deref := !TypeOK(t)
+ deref := !ssa.CanSSA(t)
if deref {
if rhs == nil {
r = nil // Signal assign to use OpZero.
@@ -1988,10 +1996,119 @@ func (s *state) stmt(n ir.Node) {
s.startBlock(bEnd)
+ case ir.OINTERFACESWITCH:
+ n := n.(*ir.InterfaceSwitchStmt)
+ typs := s.f.Config.Types
+
+ t := s.expr(n.RuntimeType)
+ h := s.expr(n.Hash)
+ d := s.newValue1A(ssa.OpAddr, typs.BytePtr, n.Descriptor, s.sb)
+
+ // Check the cache first.
+ var merge *ssa.Block
+ if base.Flag.N == 0 && rtabi.UseInterfaceSwitchCache(Arch.LinkArch.Name) {
+ // Note: we can only use the cache if we have the right atomic load instruction.
+ // Double-check that here.
+ if _, ok := intrinsics[intrinsicKey{Arch.LinkArch.Arch, "runtime/internal/atomic", "Loadp"}]; !ok {
+ s.Fatalf("atomic load not available")
+ }
+ merge = s.f.NewBlock(ssa.BlockPlain)
+ cacheHit := s.f.NewBlock(ssa.BlockPlain)
+ cacheMiss := s.f.NewBlock(ssa.BlockPlain)
+ loopHead := s.f.NewBlock(ssa.BlockPlain)
+ loopBody := s.f.NewBlock(ssa.BlockPlain)
+
+ // Pick right size ops.
+ var mul, and, add, zext ssa.Op
+ if s.config.PtrSize == 4 {
+ mul = ssa.OpMul32
+ and = ssa.OpAnd32
+ add = ssa.OpAdd32
+ zext = ssa.OpCopy
+ } else {
+ mul = ssa.OpMul64
+ and = ssa.OpAnd64
+ add = ssa.OpAdd64
+ zext = ssa.OpZeroExt32to64
+ }
+
+ // Load cache pointer out of descriptor, with an atomic load so
+ // we ensure that we see a fully written cache.
+ atomicLoad := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(typs.BytePtr, types.TypeMem), d, s.mem())
+ cache := s.newValue1(ssa.OpSelect0, typs.BytePtr, atomicLoad)
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, atomicLoad)
+
+ // Initialize hash variable.
+ s.vars[hashVar] = s.newValue1(zext, typs.Uintptr, h)
+
+ // Load mask from cache.
+ mask := s.newValue2(ssa.OpLoad, typs.Uintptr, cache, s.mem())
+ // Jump to loop head.
+ b := s.endBlock()
+ b.AddEdgeTo(loopHead)
+
+ // At loop head, get pointer to the cache entry.
+ // e := &cache.Entries[hash&mask]
+ s.startBlock(loopHead)
+ entries := s.newValue2(ssa.OpAddPtr, typs.UintptrPtr, cache, s.uintptrConstant(uint64(s.config.PtrSize)))
+ idx := s.newValue2(and, typs.Uintptr, s.variable(hashVar, typs.Uintptr), mask)
+ idx = s.newValue2(mul, typs.Uintptr, idx, s.uintptrConstant(uint64(3*s.config.PtrSize)))
+ e := s.newValue2(ssa.OpAddPtr, typs.UintptrPtr, entries, idx)
+ // hash++
+ s.vars[hashVar] = s.newValue2(add, typs.Uintptr, s.variable(hashVar, typs.Uintptr), s.uintptrConstant(1))
+
+ // Look for a cache hit.
+ // if e.Typ == t { goto hit }
+ eTyp := s.newValue2(ssa.OpLoad, typs.Uintptr, e, s.mem())
+ cmp1 := s.newValue2(ssa.OpEqPtr, typs.Bool, t, eTyp)
+ b = s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp1)
+ b.AddEdgeTo(cacheHit)
+ b.AddEdgeTo(loopBody)
+
+ // Look for an empty entry, the tombstone for this hash table.
+ // if e.Typ == nil { goto miss }
+ s.startBlock(loopBody)
+ cmp2 := s.newValue2(ssa.OpEqPtr, typs.Bool, eTyp, s.constNil(typs.BytePtr))
+ b = s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp2)
+ b.AddEdgeTo(cacheMiss)
+ b.AddEdgeTo(loopHead)
+
+ // On a hit, load the data fields of the cache entry.
+ // Case = e.Case
+ // Itab = e.Itab
+ s.startBlock(cacheHit)
+ eCase := s.newValue2(ssa.OpLoad, typs.Int, s.newValue1I(ssa.OpOffPtr, typs.IntPtr, s.config.PtrSize, e), s.mem())
+ eItab := s.newValue2(ssa.OpLoad, typs.BytePtr, s.newValue1I(ssa.OpOffPtr, typs.BytePtrPtr, 2*s.config.PtrSize, e), s.mem())
+ s.assign(n.Case, eCase, false, 0)
+ s.assign(n.Itab, eItab, false, 0)
+ b = s.endBlock()
+ b.AddEdgeTo(merge)
+
+ // On a miss, call into the runtime to get the answer.
+ s.startBlock(cacheMiss)
+ }
+
+ r := s.rtcall(ir.Syms.InterfaceSwitch, true, []*types.Type{typs.Int, typs.BytePtr}, d, t)
+ s.assign(n.Case, r[0], false, 0)
+ s.assign(n.Itab, r[1], false, 0)
+
+ if merge != nil {
+ // Cache hits merge in here.
+ b := s.endBlock()
+ b.Kind = ssa.BlockPlain
+ b.AddEdgeTo(merge)
+ s.startBlock(merge)
+ }
+
case ir.OCHECKNIL:
n := n.(*ir.UnaryExpr)
p := s.expr(n.X)
- s.nilCheck(p)
+ _ = s.nilCheck(p)
+ // TODO: check that throwing away the nilcheck result is ok.
case ir.OINLMARK:
n := n.(*ir.InlineMarkStmt)
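
A note on the OINTERFACESWITCH lowering above: before calling ir.Syms.InterfaceSwitch, the generated code probes a per-call-site cache reached through n.Descriptor. A rough Go picture of the data it walks, as a sketch only — the field names and order are inferred from the load offsets in the SSA, not copied from internal/abi:

	package sketch

	// interfaceSwitchCacheEntry is three pointer-sized words, matching the
	// 3*PtrSize stride used when indexing above.
	type interfaceSwitchCacheEntry struct {
		Typ  uintptr // dynamic type being switched on; 0 marks an empty slot (miss)
		Case uintptr // index of the case that matched for Typ
		Itab uintptr // itab to install for that case
	}

	type interfaceSwitchCache struct {
		Mask uintptr // power-of-two entry count minus one
		// In the real layout the entries sit inline after Mask; a slice is
		// used here only for readability. The probe starts at hash&Mask
		// (hash of the dynamic type) and advances one slot per iteration,
		// which is the hash++ in the loop head above.
		Entries []interfaceSwitchCacheEntry
	}
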
@@ -2026,13 +2143,10 @@ func (s *state) exit() *ssa.Block {
}
}
- var b *ssa.Block
- var m *ssa.Value
// Do actual return.
// These currently turn into self-copies (in many cases).
- resultFields := s.curfn.Type().Results().FieldSlice()
+ resultFields := s.curfn.Type().Results()
results := make([]*ssa.Value, len(resultFields)+1, len(resultFields)+1)
- m = s.newValue0(ssa.OpMakeResult, s.f.OwnAux.LateExpansionResultType())
// Store SSAable and heap-escaped PPARAMOUT variables back to stack locations.
for i, f := range resultFields {
n := f.Nname.(*ir.Name)
@@ -2058,15 +2172,18 @@ func (s *state) exit() *ssa.Block {
}
}
- // Run exit code. Today, this is just racefuncexit, in -race mode.
- // TODO(register args) this seems risky here with a register-ABI, but not clear it is right to do it earlier either.
- // Spills in register allocation might just fix it.
- s.stmtList(s.curfn.Exit)
+ // In -race mode, we need to call racefuncexit.
+ // Note: This has to happen after we load any heap-allocated results,
+ // otherwise races will be attributed to the caller instead.
+ if s.instrumentEnterExit {
+ s.rtcall(ir.Syms.Racefuncexit, true, nil)
+ }
results[len(results)-1] = s.mem()
+ m := s.newValue0(ssa.OpMakeResult, s.f.OwnAux.LateExpansionResultType())
m.AddArgs(results...)
- b = s.endBlock()
+ b := s.endBlock()
b.Kind = ssa.BlockRet
b.SetControl(m)
if s.hasdefer && s.hasOpenDefers {
@@ -2659,6 +2776,14 @@ func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value {
n := n.(*ir.ConvExpr)
str := s.expr(n.X)
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
+ if !n.NonNil() {
+ // We need to ensure []byte("") evaluates to []byte{}, and not []byte(nil).
+ //
+ // TODO(mdempsky): Investigate using "len != 0" instead of "ptr != nil".
+ cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], ptr, s.constNil(ptr.Type))
+ zerobase := s.newValue1A(ssa.OpAddr, ptr.Type, ir.Syms.Zerobase, s.sb)
+ ptr = s.ternary(cond, ptr, zerobase)
+ }
len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str)
return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len)
case ir.OCFUNC:
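
The ptr != nil guard added above is what keeps []byte("") from becoming []byte(nil). A minimal sketch of the equivalent pointer selection, assuming a zerobase stand-in for the runtime's zero-size allocation base:

	package sketch

	import "unsafe"

	var zerobase byte // stand-in for the runtime's zero-size allocation base (assumption)

	// stringToBytes mirrors the conversion lowering above: an empty string
	// still yields a non-nil data pointer, so the result is empty but non-nil.
	func stringToBytes(s string) []byte {
		ptr := unsafe.StringData(s)
		if ptr == nil {
			ptr = &zerobase
		}
		return unsafe.Slice(ptr, len(s))
	}
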
@@ -2794,8 +2919,7 @@ func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value {
}
// map <--> *hmap
- if to.Kind() == types.TMAP && from.IsPtr() &&
- to.MapType().Hmap == from.Elem() {
+ if to.Kind() == types.TMAP && from == types.NewPtr(reflectdata.MapType()) {
return v
}
@@ -3154,7 +3278,7 @@ func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value {
p := s.addr(n)
return s.load(n.X.Type().Elem(), p)
case n.X.Type().IsArray():
- if TypeOK(n.X.Type()) {
+ if ssa.CanSSA(n.X.Type()) {
// SSA can handle arrays of length at most 1.
bound := n.X.Type().NumElem()
a := s.expr(n.X)
@@ -3218,7 +3342,7 @@ func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value {
a := s.expr(n.X)
return s.newValue1(ssa.OpIData, n.Type(), a)
- case ir.OEFACE:
+ case ir.OMAKEFACE:
n := n.(*ir.BinaryExpr)
tab := s.expr(n.X)
data := s.expr(n.Y)
@@ -3357,7 +3481,7 @@ func (s *state) resultOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Valu
pa := aux.ParamAssignmentForResult(which)
// TODO(register args) determine if in-memory TypeOK is better loaded early from SelectNAddr or later when SelectN is expanded.
// SelectN is better for pattern-matching and possible call-aware analysis we might want to do in the future.
- if len(pa.Registers) == 0 && !TypeOK(t) {
+ if len(pa.Registers) == 0 && !ssa.CanSSA(t) {
addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
return s.rawLoad(t, addr)
}
@@ -3462,7 +3586,7 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value {
// Call growslice
s.startBlock(grow)
- taddr := s.expr(n.X)
+ taddr := s.expr(n.Fun)
r := s.rtcall(ir.Syms.Growslice, true, []*types.Type{n.Type()}, p, l, c, nargs, taddr)
// Decompose output slice
@@ -3513,7 +3637,7 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value {
}
args := make([]argRec, 0, len(n.Args[1:]))
for _, n := range n.Args[1:] {
- if TypeOK(n.Type()) {
+ if ssa.CanSSA(n.Type()) {
args = append(args, argRec{v: s.expr(n), store: true})
} else {
v := s.addr(n)
@@ -3567,11 +3691,32 @@ func (s *state) minMax(n *ir.CallExpr) *ssa.Value {
if typ.IsFloat() || typ.IsString() {
// min/max semantics for floats are tricky because of NaNs and
- // negative zero, so we let the runtime handle this instead.
+ // negative zero. Some architectures have instructions which
+ // we can use to generate the right result. For others we must
+ // call into the runtime instead.
//
// Strings are conceptually simpler, but we currently desugar
// string comparisons during walk, not ssagen.
+ if typ.IsFloat() {
+ switch Arch.LinkArch.Family {
+ case sys.AMD64, sys.ARM64:
+ var op ssa.Op
+ switch {
+ case typ.Kind() == types.TFLOAT64 && n.Op() == ir.OMIN:
+ op = ssa.OpMin64F
+ case typ.Kind() == types.TFLOAT64 && n.Op() == ir.OMAX:
+ op = ssa.OpMax64F
+ case typ.Kind() == types.TFLOAT32 && n.Op() == ir.OMIN:
+ op = ssa.OpMin32F
+ case typ.Kind() == types.TFLOAT32 && n.Op() == ir.OMAX:
+ op = ssa.OpMax32F
+ }
+ return fold(func(x, a *ssa.Value) *ssa.Value {
+ return s.newValue2(op, typ, x, a)
+ })
+ }
+ }
var name string
switch typ.Kind() {
case types.TFLOAT32:
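
To see why plain compare-and-select is not enough for the float case above, here are the results the builtins must produce for the tricky inputs the comment mentions (spec semantics for min/max, Go 1.21+):

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		negZero := math.Copysign(0, -1)
		fmt.Println(math.Signbit(min(0.0, negZero)))  // true: min must prefer -0 over +0
		fmt.Println(math.Signbit(max(negZero, 0.0)))  // false: max must prefer +0 over -0
		fmt.Println(math.IsNaN(min(1.0, math.NaN()))) // true: NaN propagates
	}

Only AMD64 and ARM64 take the instruction path in the switch above; every other architecture still falls through to the runtime helpers named below.
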
@@ -4077,7 +4222,6 @@ func InitTables() {
return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
},
sys.AMD64, sys.I386, sys.Loong64, sys.MIPS64, sys.RISCV64, sys.ARM64)
- alias("runtime", "mulUintptr", "runtime/internal/math", "MulUintptr", all...)
add("runtime", "KeepAlive",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
@@ -4108,7 +4252,7 @@ func InitTables() {
s.vars[memVar] = s.newValue1(ssa.OpPubBarrier, types.TypeMem, s.mem())
return nil
},
- sys.ARM64, sys.PPC64)
+ sys.ARM64, sys.PPC64, sys.RISCV64)
brev_arch := []sys.ArchFamily{sys.AMD64, sys.I386, sys.ARM64, sys.ARM, sys.S390X}
if buildcfg.GOPPC64 >= 10 {
@@ -4843,13 +4987,14 @@ func InitTables() {
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
- sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.RISCV64, sys.Loong64)
+ sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.RISCV64, sys.Loong64, sys.MIPS64)
alias("math/bits", "Add", "math/bits", "Add64", p8...)
+ alias("runtime/internal/math", "Add64", "math/bits", "Add64", all...)
addF("math/bits", "Sub64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
- sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.RISCV64, sys.Loong64)
+ sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.RISCV64, sys.Loong64, sys.MIPS64)
alias("math/bits", "Sub", "math/bits", "Sub64", p8...)
addF("math/bits", "Div64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
@@ -4950,7 +5095,7 @@ func IsIntrinsicCall(n *ir.CallExpr) bool {
if n == nil {
return false
}
- name, ok := n.X.(*ir.Name)
+ name, ok := n.Fun.(*ir.Name)
if !ok {
return false
}
@@ -4959,7 +5104,7 @@ func IsIntrinsicCall(n *ir.CallExpr) bool {
// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value {
- v := findIntrinsic(n.X.Sym())(s, n, s.intrinsicArgs(n))
+ v := findIntrinsic(n.Fun.Sym())(s, n, s.intrinsicArgs(n))
if ssa.IntrinsicsDebug > 0 {
x := v
if x == nil {
@@ -4968,7 +5113,7 @@ func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value {
if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
x = x.Args[0]
}
- base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.X.Sym().Name, x.LongString())
+ base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.Fun.Sym().Name, x.LongString())
}
return v
}
@@ -4989,14 +5134,14 @@ func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value {
// (as well as the deferBits variable), and this will enable us to run the proper
// defer calls during panics.
func (s *state) openDeferRecord(n *ir.CallExpr) {
- if len(n.Args) != 0 || n.Op() != ir.OCALLFUNC || n.X.Type().NumResults() != 0 {
+ if len(n.Args) != 0 || n.Op() != ir.OCALLFUNC || n.Fun.Type().NumResults() != 0 {
s.Fatalf("defer call with arguments or results: %v", n)
}
opendefer := &openDeferInfo{
n: n,
}
- fn := n.X
+ fn := n.Fun
// We must always store the function value in a stack slot for the
// runtime panic code to use. But in the defer exit code, we will
// call the function directly if it is a static function.
@@ -5023,7 +5168,7 @@ func (s *state) openDeferRecord(n *ir.CallExpr) {
// (therefore SSAable). val is the value to be stored. The function returns an SSA
// value representing a pointer to the autotmp location.
func (s *state) openDeferSave(t *types.Type, val *ssa.Value) *ssa.Value {
- if !TypeOK(t) {
+ if !ssa.CanSSA(t) {
s.Fatalf("openDeferSave of non-SSA-able type %v val=%v", t, val)
}
if !t.HasPointers() {
@@ -5032,6 +5177,7 @@ func (s *state) openDeferSave(t *types.Type, val *ssa.Value) *ssa.Value {
pos := val.Pos
temp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
temp.SetOpenDeferSlot(true)
+ temp.SetFrameOffset(int64(len(s.openDefers))) // so cmpstackvarlt can order them
var addrTemp *ssa.Value
// Use OpVarLive to make sure stack slot for the closure is not removed by
// dead-store elimination
@@ -5109,7 +5255,7 @@ func (s *state) openDeferExit() {
// Generate code to call the function call of the defer, using the
// closure that were stored in argtmps at the point of the defer
// statement.
- fn := r.n.X
+ fn := r.n.Fun
stksize := fn.Type().ArgWidth()
var callArgs []*ssa.Value
var call *ssa.Value
@@ -5117,10 +5263,10 @@ func (s *state) openDeferExit() {
v := s.load(r.closure.Type.Elem(), r.closure)
s.maybeNilCheckClosure(v, callDefer)
codeptr := s.rawLoad(types.Types[types.TUINTPTR], v)
- aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
+ aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, nil))
call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
} else {
- aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
+ aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, nil))
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
}
callArgs = append(callArgs, s.mem())
@@ -5141,29 +5287,30 @@ func (s *state) openDeferExit() {
}
func (s *state) callResult(n *ir.CallExpr, k callKind) *ssa.Value {
- return s.call(n, k, false)
+ return s.call(n, k, false, nil)
}
func (s *state) callAddr(n *ir.CallExpr, k callKind) *ssa.Value {
- return s.call(n, k, true)
+ return s.call(n, k, true, nil)
}
// Calls the function n using the specified call type.
// Returns the address of the return value (or nil if none).
-func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Value {
+func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool, deferExtra ir.Expr) *ssa.Value {
s.prevCall = nil
- var callee *ir.Name // target function (if static)
- var closure *ssa.Value // ptr to closure to run (if dynamic)
- var codeptr *ssa.Value // ptr to target code (if dynamic)
- var rcvr *ssa.Value // receiver to set
- fn := n.X
+ var calleeLSym *obj.LSym // target function (if static)
+ var closure *ssa.Value // ptr to closure to run (if dynamic)
+ var codeptr *ssa.Value // ptr to target code (if dynamic)
+ var dextra *ssa.Value // defer extra arg
+ var rcvr *ssa.Value // receiver to set
+ fn := n.Fun
var ACArgs []*types.Type // AuxCall args
var ACResults []*types.Type // AuxCall results
var callArgs []*ssa.Value // For late-expansion, the args themselves (not stored, args to the call instead).
callABI := s.f.ABIDefault
- if k != callNormal && k != callTail && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
+ if k != callNormal && k != callTail && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.Fun.Type().NumResults() != 0) {
s.Fatalf("go/defer call with arguments: %v", n)
}
@@ -5171,7 +5318,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
case ir.OCALLFUNC:
if (k == callNormal || k == callTail) && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC {
fn := fn.(*ir.Name)
- callee = fn
+ calleeLSym = callTargetLSym(fn)
if buildcfg.Experiment.RegabiArgs {
// This is a static call, so it may be
// a direct call to a non-ABIInternal
@@ -5210,12 +5357,15 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
closure = iclosure
}
}
+ if deferExtra != nil {
+ dextra = s.expr(deferExtra)
+ }
- params := callABI.ABIAnalyze(n.X.Type(), false /* Do not set (register) nNames from caller side -- can cause races. */)
+ params := callABI.ABIAnalyze(n.Fun.Type(), false /* Do not set (register) nNames from caller side -- can cause races. */)
types.CalcSize(fn.Type())
stksize := params.ArgWidth() // includes receiver, args, and results
- res := n.X.Type().Results()
+ res := n.Fun.Type().Results()
if k == callNormal || k == callTail {
for _, p := range params.OutParams() {
ACResults = append(ACResults, p.Type)
@@ -5224,38 +5374,19 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
var call *ssa.Value
if k == callDeferStack {
- // Make a defer struct d on the stack.
if stksize != 0 {
s.Fatalf("deferprocStack with non-zero stack size %d: %v", stksize, n)
}
-
+ // Make a defer struct on the stack.
t := deferstruct()
- d := typecheck.TempAt(n.Pos(), s.curfn, t)
-
- if t.HasPointers() {
- s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
- }
- addr := s.addr(d)
-
- // Must match deferstruct() below and src/runtime/runtime2.go:_defer.
- // 0: started, set in deferprocStack
- // 1: heap, set in deferprocStack
- // 2: openDefer
- // 3: sp, set in deferprocStack
- // 4: pc, set in deferprocStack
- // 5: fn
+ _, addr := s.temp(n.Pos(), t)
s.store(closure.Type,
- s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(5), addr),
+ s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(deferStructFnField), addr),
closure)
- // 6: panic, set in deferprocStack
- // 7: link, set in deferprocStack
- // 8: fd
- // 9: varp
- // 10: framepc
// Call runtime.deferprocStack with pointer to _defer record.
ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
- aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
+ aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, s.f.ABIDefault.ABIAnalyzeTypes(ACArgs, ACResults))
callArgs = append(callArgs, addr, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
@@ -5271,6 +5402,13 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
callArgs = append(callArgs, closure)
stksize += int64(types.PtrSize)
argStart += int64(types.PtrSize)
+ if dextra != nil {
+ // Extra argument of type any for deferprocat
+ ACArgs = append(ACArgs, types.Types[types.TINTER])
+ callArgs = append(callArgs, dextra)
+ stksize += 2 * int64(types.PtrSize)
+ argStart += 2 * int64(types.PtrSize)
+ }
}
// Set receiver (for interface calls).
@@ -5279,7 +5417,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
}
// Write args.
- t := n.X.Type()
+ t := n.Fun.Type()
args := n.Args
for _, p := range params.InParams() { // includes receiver for interface calls
@@ -5298,7 +5436,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
}
for i, n := range args {
- callArgs = append(callArgs, s.putArg(n, t.Params().Field(i).Type))
+ callArgs = append(callArgs, s.putArg(n, t.Param(i).Type))
}
callArgs = append(callArgs, s.mem())
@@ -5306,11 +5444,15 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
// call target
switch {
case k == callDefer:
- aux := ssa.StaticAuxCall(ir.Syms.Deferproc, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults)) // TODO paramResultInfo for DeferProc
+ sym := ir.Syms.Deferproc
+ if dextra != nil {
+ sym = ir.Syms.Deferprocat
+ }
+ aux := ssa.StaticAuxCall(sym, s.f.ABIDefault.ABIAnalyzeTypes(ACArgs, ACResults)) // TODO paramResultInfo for Deferproc(at)
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
case k == callGo:
- aux := ssa.StaticAuxCall(ir.Syms.Newproc, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
- call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) // TODO paramResultInfo for NewProc
+ aux := ssa.StaticAuxCall(ir.Syms.Newproc, s.f.ABIDefault.ABIAnalyzeTypes(ACArgs, ACResults))
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) // TODO paramResultInfo for Newproc
case closure != nil:
// rawLoad because loading the code pointer from a
// closure is always safe, but IsSanitizerSafeAddr
@@ -5318,14 +5460,14 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
// critical that we not clobber any arguments already
// stored onto the stack.
codeptr = s.rawLoad(types.Types[types.TUINTPTR], closure)
- aux := ssa.ClosureAuxCall(callABI.ABIAnalyzeTypes(nil, ACArgs, ACResults))
+ aux := ssa.ClosureAuxCall(callABI.ABIAnalyzeTypes(ACArgs, ACResults))
call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
case codeptr != nil:
// Note that the "receiver" parameter is nil because the actual receiver is the first input parameter.
aux := ssa.InterfaceAuxCall(params)
call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr)
- case callee != nil:
- aux := ssa.StaticAuxCall(callTargetLSym(callee), params)
+ case calleeLSym != nil:
+ aux := ssa.StaticAuxCall(calleeLSym, params)
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
if k == callTail {
call.Op = ssa.OpTailLECall
@@ -5368,11 +5510,11 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
s.startBlock(bNext)
}
- if res.NumFields() == 0 || k != callNormal {
+ if len(res) == 0 || k != callNormal {
// call has no return value. Continue with the next statement.
return nil
}
- fp := res.Field(0)
+ fp := res[0]
if returnResultAddr {
return s.resultAddrOfCall(call, 0, fp.Type)
}
@@ -5552,7 +5694,7 @@ func (s *state) canSSA(n ir.Node) bool {
if n.Op() != ir.ONAME {
return false
}
- return s.canSSAName(n.(*ir.Name)) && TypeOK(n.Type())
+ return s.canSSAName(n.(*ir.Name)) && ssa.CanSSA(n.Type())
}
func (s *state) canSSAName(name *ir.Name) bool {
@@ -5579,39 +5721,6 @@ func (s *state) canSSAName(name *ir.Name) bool {
// TODO: try to make more variables SSAable?
}
-// TypeOK reports whether variables of type t are SSA-able.
-func TypeOK(t *types.Type) bool {
- types.CalcSize(t)
- if t.Size() > int64(4*types.PtrSize) {
- // 4*Widthptr is an arbitrary constant. We want it
- // to be at least 3*Widthptr so slices can be registerized.
- // Too big and we'll introduce too much register pressure.
- return false
- }
- switch t.Kind() {
- case types.TARRAY:
- // We can't do larger arrays because dynamic indexing is
- // not supported on SSA variables.
- // TODO: allow if all indexes are constant.
- if t.NumElem() <= 1 {
- return TypeOK(t.Elem())
- }
- return false
- case types.TSTRUCT:
- if t.NumFields() > ssa.MaxStruct {
- return false
- }
- for _, t1 := range t.Fields().Slice() {
- if !TypeOK(t1.Type) {
- return false
- }
- }
- return true
- default:
- return true
- }
-}
-
// exprPtr evaluates n to a pointer and nil-checks it.
func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
p := s.expr(n)
@@ -5621,18 +5730,20 @@ func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
}
return p
}
- s.nilCheck(p)
+ p = s.nilCheck(p)
return p
}
// nilCheck generates nil pointer checking code.
// Used only for automatically inserted nil checks,
// not for user code like 'x != nil'.
-func (s *state) nilCheck(ptr *ssa.Value) {
+// Returns a "definitely not nil" copy of x to ensure proper ordering
+// of the uses of the post-nilcheck pointer.
+func (s *state) nilCheck(ptr *ssa.Value) *ssa.Value {
if base.Debug.DisableNil != 0 || s.curfn.NilCheckDisabled() {
- return
+ return ptr
}
- s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
+ return s.newValue2(ssa.OpNilCheck, ptr.Type, ptr, s.mem())
}
// boundsCheck generates bounds checking code. Checks if 0 <= idx <[=] len, branches to exit if not.
@@ -5791,7 +5902,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
// Issue call
var call *ssa.Value
- aux := ssa.StaticAuxCall(fn, s.f.ABIDefault.ABIAnalyzeTypes(nil, callArgTypes, results))
+ aux := ssa.StaticAuxCall(fn, s.f.ABIDefault.ABIAnalyzeTypes(callArgTypes, results))
callArgs = append(callArgs, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
@@ -5937,7 +6048,7 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
// putArg evaluates n for the purpose of passing it as an argument to a function and returns the value for the call.
func (s *state) putArg(n ir.Node, t *types.Type) *ssa.Value {
var a *ssa.Value
- if !TypeOK(t) {
+ if !ssa.CanSSA(t) {
a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem())
} else {
a = s.expr(n)
@@ -5955,7 +6066,7 @@ func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off
addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
}
- if !TypeOK(t) {
+ if !ssa.CanSSA(t) {
a := s.addr(n)
s.move(t, addr, a)
return
@@ -5984,8 +6095,8 @@ func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value)
if !t.Elem().IsArray() {
s.Fatalf("bad ptr to array in slice %v\n", t)
}
- s.nilCheck(v)
- ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), v)
+ nv := s.nilCheck(v)
+ ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), nv)
len = s.constInt(types.Types[types.TINT], t.Elem().NumElem())
cap = len
default:
@@ -6397,7 +6508,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
if n.ITab != nil {
targetItab = s.expr(n.ITab)
}
- return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, nil, target, targetItab, commaok)
+ return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, nil, target, targetItab, commaok, n.Descriptor)
}
func (s *state) dynamicDottype(n *ir.DynamicTypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
@@ -6415,7 +6526,7 @@ func (s *state) dynamicDottype(n *ir.DynamicTypeAssertExpr, commaok bool) (res,
} else {
target = s.expr(n.RType)
}
- return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, source, target, targetItab, commaok)
+ return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, source, target, targetItab, commaok, nil)
}
// dottype1 implements a x.(T) operation. iface is the argument (x), dst is the type we're asserting to (T)
@@ -6424,8 +6535,11 @@ func (s *state) dynamicDottype(n *ir.DynamicTypeAssertExpr, commaok bool) (res,
// target is the *runtime._type of dst.
// If src is a nonempty interface and dst is not an interface, targetItab is an itab representing (dst, src). Otherwise it is nil.
// commaok is true if the caller wants a boolean success value. Otherwise, the generated code panics if the conversion fails.
-func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, source, target, targetItab *ssa.Value, commaok bool) (res, resok *ssa.Value) {
- byteptr := s.f.Config.Types.BytePtr
+// descriptor is a compiler-allocated internal/abi.TypeAssert whose address is passed to runtime.typeAssert when
+// the target type is a compile-time-known non-empty interface. It may be nil.
+func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, source, target, targetItab *ssa.Value, commaok bool, descriptor *obj.LSym) (res, resok *ssa.Value) {
+ typs := s.f.Config.Types
+ byteptr := typs.BytePtr
if dst.IsInterface() {
if dst.IsEmptyInterface() {
// Converting to an empty interface.
@@ -6500,23 +6614,156 @@ func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, source, targ
if base.Debug.TypeAssert > 0 {
base.WarnfAt(pos, "type assertion not inlined")
}
- if !commaok {
- fn := ir.Syms.AssertI2I
- if src.IsEmptyInterface() {
+
+ itab := s.newValue1(ssa.OpITab, byteptr, iface)
+ data := s.newValue1(ssa.OpIData, types.Types[types.TUNSAFEPTR], iface)
+
+ // First, check for nil.
+ bNil := s.f.NewBlock(ssa.BlockPlain)
+ bNonNil := s.f.NewBlock(ssa.BlockPlain)
+ bMerge := s.f.NewBlock(ssa.BlockPlain)
+ cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cond)
+ b.Likely = ssa.BranchLikely
+ b.AddEdgeTo(bNonNil)
+ b.AddEdgeTo(bNil)
+
+ s.startBlock(bNil)
+ if commaok {
+ s.vars[typVar] = itab // which will be nil
+ b := s.endBlock()
+ b.AddEdgeTo(bMerge)
+ } else {
+ // Panic if input is nil.
+ s.rtcall(ir.Syms.Panicnildottype, false, nil, target)
+ }
+
+ // Get typ, possibly by loading out of itab.
+ s.startBlock(bNonNil)
+ typ := itab
+ if !src.IsEmptyInterface() {
+ typ = s.load(byteptr, s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab))
+ }
+
+ // Check the cache first.
+ var d *ssa.Value
+ if descriptor != nil {
+ d = s.newValue1A(ssa.OpAddr, byteptr, descriptor, s.sb)
+ if base.Flag.N == 0 && rtabi.UseInterfaceSwitchCache(Arch.LinkArch.Name) {
+ // Note: we can only use the cache if we have the right atomic load instruction.
+ // Double-check that here.
+ if _, ok := intrinsics[intrinsicKey{Arch.LinkArch.Arch, "runtime/internal/atomic", "Loadp"}]; !ok {
+ s.Fatalf("atomic load not available")
+ }
+ // Pick right size ops.
+ var mul, and, add, zext ssa.Op
+ if s.config.PtrSize == 4 {
+ mul = ssa.OpMul32
+ and = ssa.OpAnd32
+ add = ssa.OpAdd32
+ zext = ssa.OpCopy
+ } else {
+ mul = ssa.OpMul64
+ and = ssa.OpAnd64
+ add = ssa.OpAdd64
+ zext = ssa.OpZeroExt32to64
+ }
+
+ loopHead := s.f.NewBlock(ssa.BlockPlain)
+ loopBody := s.f.NewBlock(ssa.BlockPlain)
+ cacheHit := s.f.NewBlock(ssa.BlockPlain)
+ cacheMiss := s.f.NewBlock(ssa.BlockPlain)
+
+ // Load cache pointer out of descriptor, with an atomic load so
+ // we ensure that we see a fully written cache.
+ atomicLoad := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(typs.BytePtr, types.TypeMem), d, s.mem())
+ cache := s.newValue1(ssa.OpSelect0, typs.BytePtr, atomicLoad)
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, atomicLoad)
+
+ // Load hash from type or itab.
+ var hash *ssa.Value
+ if src.IsEmptyInterface() {
+ hash = s.newValue2(ssa.OpLoad, typs.UInt32, s.newValue1I(ssa.OpOffPtr, typs.UInt32Ptr, 2*s.config.PtrSize, typ), s.mem())
+ } else {
+ hash = s.newValue2(ssa.OpLoad, typs.UInt32, s.newValue1I(ssa.OpOffPtr, typs.UInt32Ptr, 2*s.config.PtrSize, itab), s.mem())
+ }
+ hash = s.newValue1(zext, typs.Uintptr, hash)
+ s.vars[hashVar] = hash
+ // Load mask from cache.
+ mask := s.newValue2(ssa.OpLoad, typs.Uintptr, cache, s.mem())
+ // Jump to loop head.
+ b := s.endBlock()
+ b.AddEdgeTo(loopHead)
+
+ // At loop head, get pointer to the cache entry.
+ // e := &cache.Entries[hash&mask]
+ s.startBlock(loopHead)
+ idx := s.newValue2(and, typs.Uintptr, s.variable(hashVar, typs.Uintptr), mask)
+ idx = s.newValue2(mul, typs.Uintptr, idx, s.uintptrConstant(uint64(2*s.config.PtrSize)))
+ idx = s.newValue2(add, typs.Uintptr, idx, s.uintptrConstant(uint64(s.config.PtrSize)))
+ e := s.newValue2(ssa.OpAddPtr, typs.UintptrPtr, cache, idx)
+ // hash++
+ s.vars[hashVar] = s.newValue2(add, typs.Uintptr, s.variable(hashVar, typs.Uintptr), s.uintptrConstant(1))
+
+ // Look for a cache hit.
+ // if e.Typ == typ { goto hit }
+ eTyp := s.newValue2(ssa.OpLoad, typs.Uintptr, e, s.mem())
+ cmp1 := s.newValue2(ssa.OpEqPtr, typs.Bool, typ, eTyp)
+ b = s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp1)
+ b.AddEdgeTo(cacheHit)
+ b.AddEdgeTo(loopBody)
+
+ // Look for an empty entry, the tombstone for this hash table.
+ // if e.Typ == nil { goto miss }
+ s.startBlock(loopBody)
+ cmp2 := s.newValue2(ssa.OpEqPtr, typs.Bool, eTyp, s.constNil(typs.BytePtr))
+ b = s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp2)
+ b.AddEdgeTo(cacheMiss)
+ b.AddEdgeTo(loopHead)
+
+ // On a hit, load the data fields of the cache entry.
+ // Itab = e.Itab
+ s.startBlock(cacheHit)
+ eItab := s.newValue2(ssa.OpLoad, typs.BytePtr, s.newValue1I(ssa.OpOffPtr, typs.BytePtrPtr, s.config.PtrSize, e), s.mem())
+ s.vars[typVar] = eItab
+ b = s.endBlock()
+ b.AddEdgeTo(bMerge)
+
+ // On a miss, call into the runtime to get the answer.
+ s.startBlock(cacheMiss)
+ }
+ }
+
+ // Call into runtime to get itab for result.
+ if descriptor != nil {
+ itab = s.rtcall(ir.Syms.TypeAssert, true, []*types.Type{byteptr}, d, typ)[0]
+ } else {
+ var fn *obj.LSym
+ if commaok {
+ fn = ir.Syms.AssertE2I2
+ } else {
fn = ir.Syms.AssertE2I
}
- data := s.newValue1(ssa.OpIData, types.Types[types.TUNSAFEPTR], iface)
- tab := s.newValue1(ssa.OpITab, byteptr, iface)
- tab = s.rtcall(fn, true, []*types.Type{byteptr}, target, tab)[0]
- return s.newValue2(ssa.OpIMake, dst, tab, data), nil
+ itab = s.rtcall(fn, true, []*types.Type{byteptr}, target, typ)[0]
}
- fn := ir.Syms.AssertI2I2
- if src.IsEmptyInterface() {
- fn = ir.Syms.AssertE2I2
+ s.vars[typVar] = itab
+ b = s.endBlock()
+ b.AddEdgeTo(bMerge)
+
+ // Build resulting interface.
+ s.startBlock(bMerge)
+ itab = s.variable(typVar, byteptr)
+ var ok *ssa.Value
+ if commaok {
+ ok = s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
}
- res = s.rtcall(fn, true, []*types.Type{dst}, target, iface)[0]
- resok = s.newValue2(ssa.OpNeqInter, types.Types[types.TBOOL], res, s.constInterface(dst))
- return
+ return s.newValue2(ssa.OpIMake, dst, itab, data), ok
}
if base.Debug.TypeAssert > 0 {
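
Taken together, the blocks built above implement roughly this control flow for an assertion to a compile-time-known non-empty interface. This is a hedged pseudo-Go sketch: lookupCache and runtimeTypeAssert are stand-ins for the descriptor cache probe and the ir.Syms.TypeAssert call, and failure handling in the !commaok case is simplified.

	package sketch

	// doAssert mirrors the block structure generated above for i.(I).
	func doAssert(itab, typ uintptr, commaok bool) (resItab uintptr, ok bool) {
		if itab == 0 { // bNil: nil source interface
			if commaok {
				return 0, false
			}
			panic("interface is nil, not I") // Panicnildottype path
		}
		// bNonNil: for empty-interface sources typ is the interface's first
		// word (the *_type); otherwise it is loaded out of the itab, which is
		// the OffPtr load above.
		if hit, found := lookupCache(typ); found {
			resItab = hit // cacheHit
		} else {
			resItab = runtimeTypeAssert(typ) // cacheMiss: runtime fills the cache
		}
		return resItab, resItab != 0 // bMerge: ok is "itab != nil"
	}

	func lookupCache(typ uintptr) (uintptr, bool) { return 0, false } // stub
	func runtimeTypeAssert(typ uintptr) uintptr   { return 0 }        // stub
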
@@ -6540,7 +6787,7 @@ func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, source, targ
var tmp ir.Node // temporary for use with large types
var addr *ssa.Value // address of tmp
- if commaok && !TypeOK(dst) {
+ if commaok && !ssa.CanSSA(dst) {
// unSSAable type, use temporary.
// TODO: get rid of some of these temporaries.
tmp, addr = s.temp(pos, dst)
@@ -6886,7 +7133,7 @@ func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym {
n := 0
writebyte := func(o uint8) { wOff = objw.Uint8(x, wOff, o) }
- // Write one non-aggrgate arg/field/element.
+ // Write one non-aggregate arg/field/element.
write1 := func(sz, offset int64) {
if offset >= _special {
writebyte(_offsetTooLarge)
@@ -6944,7 +7191,7 @@ func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym {
n++ // {} counts as a component
break
}
- for _, field := range t.Fields().Slice() {
+ for _, field := range t.Fields() {
if !visitType(baseOffset+field.Offset, field.Type, depth) {
break
}
@@ -7078,27 +7325,6 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
s.lineRunStart = nil
s.SetPos(s.pp.Pos.WithNotStmt()) // It needs a non-empty Pos, but cannot be a statement boundary (yet).
- // Attach a "default" liveness info. Normally this will be
- // overwritten in the Values loop below for each Value. But
- // for an empty block this will be used for its control
- // instruction. We won't use the actual liveness map on a
- // control instruction. Just mark it something that is
- // preemptible, unless this function is "all unsafe", or
- // the empty block is in a write barrier.
- unsafe := liveness.IsUnsafe(f)
- if b.Kind == ssa.BlockPlain {
- // Empty blocks that are part of write barriers need
- // to have their control instructions marked unsafe.
- c := b.Succs[0].Block()
- for _, v := range c.Values {
- if v.Op == ssa.OpWBend {
- unsafe = true
- break
- }
- }
- }
- s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: unsafe}
-
if idx, ok := argLiveBlockMap[b.ID]; ok && idx != argLiveIdx {
argLiveIdx = idx
p := s.pp.Prog(obj.APCDATA)
@@ -7158,6 +7384,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
// Attach this safe point to the next
// instruction.
s.pp.NextLive = s.livenessMap.Get(v)
+ s.pp.NextUnsafe = s.livenessMap.GetUnsafe(v)
// let the backend handle it
Arch.SSAGenValue(&s, v)
@@ -7192,6 +7419,13 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
}
b.Pos = b.Pos.WithBogusLine() // Debuggers are not good about infinite loops, force a change in line number
}
+
+ // Set unsafe mark for any end-of-block generated instructions
+ // (normally, conditional or unconditional branches).
+ // This is particularly important for empty blocks, as there
+ // are no values to inherit the unsafe mark from.
+ s.pp.NextUnsafe = s.livenessMap.GetUnsafeBlock(b)
+
// Emit control flow instructions for block
var next *ssa.Block
if i < len(f.Blocks)-1 && base.Flag.N == 0 {
@@ -7232,7 +7466,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
// The results are already in memory, because they are not SSA'd
// when the function has defers (see canSSAName).
for _, o := range f.OwnAux.ABIInfo().OutParams() {
- n := o.Name.(*ir.Name)
+ n := o.Name
rts, offs := o.RegisterTypesAndOffsets()
for i := range o.Registers {
Arch.LoadRegResult(&s, f, rts[i], ssa.ObjRegForAbiReg(o.Registers[i], f.Config), n, offs[i])
@@ -7344,9 +7578,9 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
for i, b := range f.Blocks {
idToIdx[b.ID] = i
}
- // Note that at this moment, Prog.Pc is a sequence number; it's
- // not a real PC until after assembly, so this mapping has to
- // be done later.
+ // Register a callback that will be used later to fill in PCs into location
+ // lists. At the moment, Prog.Pc is a sequence number; it's not a real PC
+ // until after assembly, so the translation needs to be deferred.
debugInfo.GetPC = func(b, v ssa.ID) int64 {
switch v {
case ssa.BlockStart.ID:
@@ -7537,7 +7771,7 @@ func defframe(s *State, e *ssafn, f *ssa.Func) {
continue
}
n, off := ssa.AutoVar(v)
- if n.Class != ir.PPARAM || n.Addrtaken() || !TypeOK(n.Type()) || !s.partLiveArgs[n] {
+ if n.Class != ir.PPARAM || n.Addrtaken() || !ssa.CanSSA(n.Type()) || !s.partLiveArgs[n] {
continue
}
partLiveArgsSpilled[nameOff{n, off}] = true
@@ -7545,8 +7779,8 @@ func defframe(s *State, e *ssafn, f *ssa.Func) {
// Then, insert code to spill registers if not already.
for _, a := range f.OwnAux.ABIInfo().InParams() {
- n, ok := a.Name.(*ir.Name)
- if !ok || n.Addrtaken() || !TypeOK(n.Type()) || !s.partLiveArgs[n] || len(a.Registers) <= 1 {
+ n := a.Name
+ if n == nil || n.Addrtaken() || !ssa.CanSSA(n.Type()) || !s.partLiveArgs[n] || len(a.Registers) <= 1 {
continue
}
rts, offs := a.RegisterTypesAndOffsets()
@@ -7665,16 +7899,10 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
case *ir.Name:
if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
a.Name = obj.NAME_PARAM
- a.Sym = ir.Orig(n).(*ir.Name).Linksym()
- a.Offset += n.FrameOffset()
- break
- }
- a.Name = obj.NAME_AUTO
- if n.Class == ir.PPARAMOUT {
- a.Sym = ir.Orig(n).(*ir.Name).Linksym()
} else {
- a.Sym = n.Linksym()
+ a.Name = obj.NAME_AUTO
}
+ a.Sym = n.Linksym()
a.Offset += n.FrameOffset()
default:
v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
@@ -7917,7 +8145,7 @@ func fieldIdx(n *ir.SelectorExpr) int {
panic("ODOT's LHS is not a struct")
}
- for i, f := range t.Fields().Slice() {
+ for i, f := range t.Fields() {
if f.Sym == n.Sel {
if f.Offset != n.Offset() {
panic("field offset doesn't match")
@@ -7962,10 +8190,6 @@ func (e *ssafn) StringData(s string) *obj.LSym {
return data
}
-func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Name {
- return typecheck.TempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
-}
-
// SplitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
node := parent.N
@@ -7975,23 +8199,14 @@ func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t
return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
}
- s := &types.Sym{Name: node.Sym().Name + suffix, Pkg: types.LocalPkg}
- n := ir.NewNameAt(parent.N.Pos(), s)
- s.Def = n
- ir.AsNode(s.Def).Name().SetUsed(true)
- n.SetType(t)
- n.Class = ir.PAUTO
+ sym := &types.Sym{Name: node.Sym().Name + suffix, Pkg: types.LocalPkg}
+ n := e.curfn.NewLocal(parent.N.Pos(), sym, t)
+ n.SetUsed(true)
n.SetEsc(ir.EscNever)
- n.Curfn = e.curfn
- e.curfn.Dcl = append(e.curfn.Dcl, n)
types.CalcSize(t)
return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
}
-func (e *ssafn) CanSSA(t *types.Type) bool {
- return TypeOK(t)
-}
-
// Logf logs a message from the compiler.
func (e *ssafn) Logf(msg string, args ...interface{}) {
if e.log {
@@ -8043,10 +8258,6 @@ func (e *ssafn) Syslook(name string) *obj.LSym {
return nil
}
-func (e *ssafn) MyImportPath() string {
- return base.Ctxt.Pkgpath
-}
-
func (e *ssafn) Func() *ir.Func {
return e.curfn
}
@@ -8093,39 +8304,50 @@ func max8(a, b int8) int8 {
return b
}
-// deferstruct makes a runtime._defer structure.
+// deferStructFnField is the field index of _defer.fn.
+const deferStructFnField = 4
+
+var deferType *types.Type
+
+// deferstruct returns a type interchangeable with runtime._defer.
+// Make sure this stays in sync with runtime/runtime2.go:_defer.
func deferstruct() *types.Type {
- makefield := func(name string, typ *types.Type) *types.Field {
- // Unlike the global makefield function, this one needs to set Pkg
- // because these types might be compared (in SSA CSE sorting).
- // TODO: unify this makefield and the global one above.
- sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
- return types.NewField(src.NoXPos, sym, typ)
- }
- // These fields must match the ones in runtime/runtime2.go:_defer and
- // (*state).call above.
+ if deferType != nil {
+ return deferType
+ }
+
+ makefield := func(name string, t *types.Type) *types.Field {
+ sym := (*types.Pkg)(nil).Lookup(name)
+ return types.NewField(src.NoXPos, sym, t)
+ }
+
fields := []*types.Field{
- makefield("started", types.Types[types.TBOOL]),
makefield("heap", types.Types[types.TBOOL]),
- makefield("openDefer", types.Types[types.TBOOL]),
+ makefield("rangefunc", types.Types[types.TBOOL]),
makefield("sp", types.Types[types.TUINTPTR]),
makefield("pc", types.Types[types.TUINTPTR]),
// Note: the types here don't really matter. Defer structures
// are always scanned explicitly during stack copying and GC,
// so we make them uintptr type even though they are real pointers.
makefield("fn", types.Types[types.TUINTPTR]),
- makefield("_panic", types.Types[types.TUINTPTR]),
makefield("link", types.Types[types.TUINTPTR]),
- makefield("fd", types.Types[types.TUINTPTR]),
- makefield("varp", types.Types[types.TUINTPTR]),
- makefield("framepc", types.Types[types.TUINTPTR]),
+ makefield("head", types.Types[types.TUINTPTR]),
}
+ if name := fields[deferStructFnField].Sym.Name; name != "fn" {
+ base.Fatalf("deferStructFnField is %q, not fn", name)
+ }
+
+ n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("_defer"))
+ typ := types.NewNamed(n)
+ n.SetType(typ)
+ n.SetTypecheck(1)
// build struct holding the above fields
- s := types.NewStruct(fields)
- s.SetNoalg(true)
- types.CalcStructSize(s)
- return s
+ typ.SetUnderlying(types.NewStruct(fields))
+ types.CalcStructSize(typ)
+
+ deferType = typ
+ return typ
}
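
As a reference point, the runtime-side structure these fields shadow looks roughly like this; the field names come from the list above, while the concrete runtime types are assumptions:

	package sketch

	// _defer sketch; the fields must stay in the same order as above so that
	// deferStructFnField (index 4) keeps pointing at fn.
	type _defer struct {
		heap      bool
		rangefunc bool
		sp        uintptr // sp at the time of the defer
		pc        uintptr
		fn        func() // index 4 == deferStructFnField
		link      *_defer
		head      uintptr // used by range-over-func defers (assumption)
	}
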
// SpillSlotAddr uses LocalSlot information to initialize an obj.Addr
diff --git a/src/cmd/compile/internal/staticdata/data.go b/src/cmd/compile/internal/staticdata/data.go
index e39d0ee6a5..78c332eeb8 100644
--- a/src/cmd/compile/internal/staticdata/data.go
+++ b/src/cmd/compile/internal/staticdata/data.go
@@ -17,7 +17,6 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
- "cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/notsha256"
"cmd/internal/obj"
@@ -56,7 +55,7 @@ func InitSliceBytes(nam *ir.Name, off int64, s string) {
if nam.Op() != ir.ONAME {
base.Fatalf("InitSliceBytes %v", nam)
}
- InitSlice(nam, off, slicedata(nam.Pos(), s).Linksym(), int64(len(s)))
+ InitSlice(nam, off, slicedata(nam.Pos(), s), int64(len(s)))
}
const (
@@ -97,6 +96,16 @@ func StringSym(pos src.XPos, s string) (data *obj.LSym) {
return symdata
}
+// StringSymNoCommon is like StringSym, but produces a symbol that is not content-
+// addressable. This symbol is not supposed to appear in the final binary; it is
+// only used to pass string arguments to the linker, as R_USENAMEDMETHOD does.
+func StringSymNoCommon(s string) (data *obj.LSym) {
+ var nameSym obj.LSym
+ nameSym.WriteString(base.Ctxt, 0, len(s), s)
+ objw.Global(&nameSym, int32(len(s)), obj.RODATA)
+ return &nameSym
+}
+
// maxFileSize is the maximum file size permitted by the linker
// (see issue #9862).
const maxFileSize = int64(2e9)
@@ -134,7 +143,7 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.
if readonly {
sym = StringSym(pos, string(data))
} else {
- sym = slicedata(pos, string(data)).Linksym()
+ sym = slicedata(pos, string(data))
}
if len(hash) > 0 {
sum := notsha256.Sum256(data)
@@ -182,7 +191,7 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.
} else {
// Emit a zero-length data symbol
// and then fix up length and content to use file.
- symdata = slicedata(pos, "").Linksym()
+ symdata = slicedata(pos, "")
symdata.Size = size
symdata.Type = objabi.SNOPTRDATA
info := symdata.NewFileInfo()
@@ -195,18 +204,14 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.
var slicedataGen int
-func slicedata(pos src.XPos, s string) *ir.Name {
+func slicedata(pos src.XPos, s string) *obj.LSym {
slicedataGen++
symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
- sym := types.LocalPkg.Lookup(symname)
- symnode := typecheck.NewName(sym)
- sym.Def = symnode
-
- lsym := symnode.Linksym()
+ lsym := types.LocalPkg.Lookup(symname).LinksymABI(obj.ABI0)
off := dstringdata(lsym, 0, s, pos, "slice")
objw.Global(lsym, int32(off), obj.NOPTR|obj.LOCAL)
- return symnode
+ return lsym
}
func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go
index 7d1dfcbbb3..4191f6997e 100644
--- a/src/cmd/compile/internal/staticinit/sched.go
+++ b/src/cmd/compile/internal/staticinit/sched.go
@@ -42,6 +42,11 @@ type Schedule struct {
Plans map[ir.Node]*Plan
Temps map[ir.Node]*ir.Name
+
+ // seenMutation tracks whether we've seen an initialization
+ // expression that may have modified other package-scope variables
+ // within this package.
+ seenMutation bool
}
func (s *Schedule) append(n ir.Node) {
@@ -80,26 +85,57 @@ func recordFuncForVar(v *ir.Name, fn *ir.Func) {
MapInitToVar[fn] = v
}
+// allBlank reports whether every node in exprs is blank.
+func allBlank(exprs []ir.Node) bool {
+ for _, expr := range exprs {
+ if !ir.IsBlank(expr) {
+ return false
+ }
+ }
+ return true
+}
+
// tryStaticInit attempts to statically execute an initialization
// statement and reports whether it succeeded.
-func (s *Schedule) tryStaticInit(nn ir.Node) bool {
- // Only worry about simple "l = r" assignments. Multiple
- // variable/expression OAS2 assignments have already been
- // replaced by multiple simple OAS assignments, and the other
- // OAS2* assignments mostly necessitate dynamic execution
- // anyway.
- if nn.Op() != ir.OAS {
- return false
+func (s *Schedule) tryStaticInit(n ir.Node) bool {
+ var lhs []ir.Node
+ var rhs ir.Node
+
+ switch n.Op() {
+ default:
+ base.FatalfAt(n.Pos(), "unexpected initialization statement: %v", n)
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ lhs, rhs = []ir.Node{n.X}, n.Y
+ case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+ n := n.(*ir.AssignListStmt)
+ if len(n.Lhs) < 2 || len(n.Rhs) != 1 {
+ base.FatalfAt(n.Pos(), "unexpected shape for %v: %v", n.Op(), n)
+ }
+ lhs, rhs = n.Lhs, n.Rhs[0]
+ case ir.OCALLFUNC:
+ return false // outlined map init call; no mutations
}
- n := nn.(*ir.AssignStmt)
- if ir.IsBlank(n.X) && !AnySideEffects(n.Y) {
- // Discard.
- return true
+
+ if !s.seenMutation {
+ s.seenMutation = mayModifyPkgVar(rhs)
+ }
+
+ if allBlank(lhs) && !AnySideEffects(rhs) {
+ return true // discard
}
+
+ // Only worry about simple "l = r" assignments. The OAS2*
+ // assignments mostly necessitate dynamic execution anyway.
+ if len(lhs) > 1 {
+ return false
+ }
+
lno := ir.SetPos(n)
defer func() { base.Pos = lno }()
- nam := n.X.(*ir.Name)
- return s.StaticAssign(nam, 0, n.Y, nam.Type())
+
+ nam := lhs[0].(*ir.Name)
+ return s.StaticAssign(nam, 0, rhs, nam.Type())
}
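
For context, package-level initializers like these hypothetical ones now reach tryStaticInit as multi-value assignments instead of being rejected up front; which branch they take depends on AnySideEffects and StaticAssign:

	package p

	var m = map[string]int{"a": 1}

	var v, ok = m["a"] // OAS2MAPR: len(lhs) > 1, so it falls back to dynamic init
	var _, _ = m["a"]  // all-blank LHS: discarded if AnySideEffects judges the RHS safe
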
// like staticassign but we are copying an already
@@ -113,6 +149,11 @@ func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Ty
if rn.Class != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg {
return false
}
+ if rn.Defn == nil {
+ // No explicit initialization value. Probably zeroed but perhaps
+ // supplied externally and of unknown value.
+ return false
+ }
if rn.Defn.Op() != ir.OAS {
return false
}
@@ -125,8 +166,16 @@ func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Ty
orig := rn
r := rn.Defn.(*ir.AssignStmt).Y
if r == nil {
- // No explicit initialization value. Probably zeroed but perhaps
- // supplied externally and of unknown value.
+ // types2.InitOrder doesn't include default initializers.
+ base.Fatalf("unexpected initializer: %v", rn.Defn)
+ }
+
+ // Variable may have been reassigned by a user-written function call
+ // that was invoked to initialize another global variable (#51913).
+ if s.seenMutation {
+ if base.Debug.StaticCopy != 0 {
+ base.WarnfAt(l.Pos(), "skipping static copy of %v+%v with %v", l, loff, r)
+ }
return false
}
@@ -373,9 +422,9 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty
var itab *ir.AddrExpr
if typ.IsEmptyInterface() {
- itab = reflectdata.TypePtr(val.Type())
+ itab = reflectdata.TypePtrAt(base.Pos, val.Type())
} else {
- itab = reflectdata.ITabAddr(val.Type(), typ)
+ itab = reflectdata.ITabAddrAt(base.Pos, val.Type(), typ)
}
// Create a copy of l to modify while we emit data.
@@ -677,10 +726,15 @@ var statuniqgen int // name generator for static temps
// Use readonlystaticname for read-only node.
func StaticName(t *types.Type) *ir.Name {
// Don't use LookupNum; it interns the resulting string, but these are all unique.
- n := typecheck.NewName(typecheck.Lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
+ sym := typecheck.Lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen))
statuniqgen++
- typecheck.Declare(n, ir.PEXTERN)
- n.SetType(t)
+
+ n := ir.NewNameAt(base.Pos, sym, t)
+ sym.Def = n
+
+ n.Class = ir.PEXTERN
+ typecheck.Target.Externs = append(typecheck.Target.Externs, n)
+
n.Linksym().Set(obj.AttrStatic, true)
return n
}
@@ -821,6 +875,43 @@ func AnySideEffects(n ir.Node) bool {
return ir.Any(n, isSideEffect)
}
+// mayModifyPkgVar reports whether expression n may modify any
+// package-scope variables declared within the current package.
+func mayModifyPkgVar(n ir.Node) bool {
+ // safeLHS reports whether the assigned-to variable lhs is either a
+ // local variable or a global from another package.
+ safeLHS := func(lhs ir.Node) bool {
+ v, ok := ir.OuterValue(lhs).(*ir.Name)
+ return ok && v.Op() == ir.ONAME && !(v.Class == ir.PEXTERN && v.Sym().Pkg == types.LocalPkg)
+ }
+
+ return ir.Any(n, func(n ir.Node) bool {
+ switch n.Op() {
+ case ir.OCALLFUNC, ir.OCALLINTER:
+ return !ir.IsFuncPCIntrinsic(n.(*ir.CallExpr))
+
+ case ir.OAPPEND, ir.OCLEAR, ir.OCOPY:
+ return true // could mutate a global array
+
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ if !safeLHS(n.X) {
+ return true
+ }
+
+ case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+ n := n.(*ir.AssignListStmt)
+ for _, lhs := range n.Lhs {
+ if !safeLHS(lhs) {
+ return true
+ }
+ }
+ }
+
+ return false
+ })
+}
+
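
A concrete shape of the hazard that mayModifyPkgVar and seenMutation guard against, in the spirit of go.dev/issue/51913 cited above: once an initializer calls user code that may write a package-level variable, later initializers must not be statically copied from that variable's syntactic initializer.

	package p

	var a = 1
	var b = bump() // user call: may write a, so seenMutation is set from here on
	var c = a      // must read 2 at init time; a static copy of the literal 1 would be wrong

	func bump() int {
		a = 2
		return 10
	}
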
// canRepeat reports whether executing n multiple times has the same effect as
// assigning n to a single variable and using that variable multiple times.
func canRepeat(n ir.Node) bool {
@@ -942,7 +1033,7 @@ func addStr(n *ir.AddStringExpr) ir.Node {
for _, c := range s {
strs = append(strs, ir.StringVal(c))
}
- return typecheck.OrigConst(n, constant.MakeString(strings.Join(strs, "")))
+ return ir.NewConstExpr(constant.MakeString(strings.Join(strs, "")), n)
}
newList := make([]ir.Node, 0, need)
for i := 0; i < len(s); i++ {
@@ -955,9 +1046,7 @@ func addStr(n *ir.AddStringExpr) ir.Node {
i2++
}
- nl := ir.Copy(n).(*ir.AddStringExpr)
- nl.List = s[i:i2]
- newList = append(newList, typecheck.OrigConst(nl, constant.MakeString(strings.Join(strs, ""))))
+ newList = append(newList, ir.NewConstExpr(constant.MakeString(strings.Join(strs, "")), s[i]))
i = i2 - 1
} else {
newList = append(newList, s[i])
@@ -971,26 +1060,26 @@ func addStr(n *ir.AddStringExpr) ir.Node {
const wrapGlobalMapInitSizeThreshold = 20
-// tryWrapGlobalMapInit examines the node 'n' to see if it is a map
-// variable initialization, and if so, possibly returns the mapvar
-// being assigned, a new function containing the init code, and a call
-// to the function passing the mapvar. Returns will be nil if the
-// assignment is not to a map, or the map init is not big enough,
-// or if the expression being assigned to the map has side effects.
-func tryWrapGlobalMapInit(n ir.Node) (mapvar *ir.Name, genfn *ir.Func, call ir.Node) {
+// tryWrapGlobalInit returns a new outlined function to contain global
+// initializer statement n, if possible and worthwhile. Otherwise, it
+// returns nil.
+//
+// Currently, it outlines map assignment statements with large,
+// side-effect-free RHS expressions.
+func tryWrapGlobalInit(n ir.Node) *ir.Func {
// Look for "X = ..." where X has map type.
// FIXME: might also be worth trying to look for cases where
// the LHS is of interface type but RHS is map type.
if n.Op() != ir.OAS {
- return nil, nil, nil
+ return nil
}
as := n.(*ir.AssignStmt)
if ir.IsBlank(as.X) || as.X.Op() != ir.ONAME {
- return nil, nil, nil
+ return nil
}
nm := as.X.(*ir.Name)
if !nm.Type().IsMap() {
- return nil, nil, nil
+ return nil
}
// Determine size of RHS.
@@ -1010,7 +1099,7 @@ func tryWrapGlobalMapInit(n ir.Node) (mapvar *ir.Name, genfn *ir.Func, call ir.N
fmt.Fprintf(os.Stderr, "=-= skipping %v size too small at %d\n",
nm, rsiz)
}
- return nil, nil, nil
+ return nil
}
// Reject right hand sides with side effects.
@@ -1018,7 +1107,7 @@ func tryWrapGlobalMapInit(n ir.Node) (mapvar *ir.Name, genfn *ir.Func, call ir.N
if base.Debug.WrapGlobalMapDbg > 0 {
fmt.Fprintf(os.Stderr, "=-= rejected %v due to side effects\n", nm)
}
- return nil, nil, nil
+ return nil
}
if base.Debug.WrapGlobalMapDbg > 1 {
@@ -1027,15 +1116,19 @@ func tryWrapGlobalMapInit(n ir.Node) (mapvar *ir.Name, genfn *ir.Func, call ir.N
// Create a new function that will (eventually) have this form:
//
- // func map.init.%d() {
- // globmapvar = <map initialization>
- // }
+ // func map.init.%d() {
+ // globmapvar = <map initialization>
+ // }
//
+ // Note: cmd/link expects the function name to contain "map.init".
minitsym := typecheck.LookupNum("map.init.", mapinitgen)
mapinitgen++
- newfn := typecheck.DeclFunc(minitsym, nil, nil, nil)
+
+ fn := ir.NewFunc(n.Pos(), n.Pos(), minitsym, types.NewSignature(nil, nil, nil))
+ fn.SetInlinabilityChecked(true) // suppress inlining (which would defeat the point)
+ typecheck.DeclFunc(fn)
if base.Debug.WrapGlobalMapDbg > 0 {
- fmt.Fprintf(os.Stderr, "=-= generated func is %v\n", newfn)
+ fmt.Fprintf(os.Stderr, "=-= generated func is %v\n", fn)
}
// NB: we're relying on this phase being run before inlining;
@@ -1043,26 +1136,17 @@ func tryWrapGlobalMapInit(n ir.Node) (mapvar *ir.Name, genfn *ir.Func, call ir.N
// need code here that relocates or duplicates inline temps.
// Insert assignment into function body; mark body finished.
- newfn.Body = append(newfn.Body, as)
+ fn.Body = []ir.Node{as}
typecheck.FinishFuncBody()
- typecheck.Func(newfn)
-
- const no = `
- // Register new function with decls.
- typecheck.Target.Decls = append(typecheck.Target.Decls, newfn)
-`
-
- // Create call to function, passing mapvar.
- fncall := ir.NewCallExpr(n.Pos(), ir.OCALL, newfn.Nname, nil)
-
if base.Debug.WrapGlobalMapDbg > 1 {
fmt.Fprintf(os.Stderr, "=-= mapvar is %v\n", nm)
- fmt.Fprintf(os.Stderr, "=-= newfunc is %+v\n", newfn)
- fmt.Fprintf(os.Stderr, "=-= call is %+v\n", fncall)
+ fmt.Fprintf(os.Stderr, "=-= newfunc is %+v\n", fn)
}
- return nm, newfn, typecheck.Stmt(fncall)
+ recordFuncForVar(nm, fn)
+
+ return fn
}
// mapinitgen is a counter used to uniquify compiler-generated
@@ -1099,31 +1183,28 @@ func AddKeepRelocations() {
varToMapInit = nil
}
-// OutlineMapInits walks through a list of init statements (candidates
-// for inclusion in the package "init" function) and returns an
-// updated list in which items corresponding to map variable
-// initializations have been replaced with calls to outline "map init"
-// functions (if legal/profitable). Return value is an updated list
-// and a list of any newly generated "map init" functions.
-func OutlineMapInits(stmts []ir.Node) ([]ir.Node, []*ir.Func) {
+// OutlineMapInits replaces global map initializers with outlined
+// calls to separate "map init" functions (where possible and
+// profitable), to facilitate better dead-code elimination by the
+// linker.
+func OutlineMapInits(fn *ir.Func) {
if base.Debug.WrapGlobalMapCtl == 1 {
- return stmts, nil
- }
- newfuncs := []*ir.Func{}
- for i := range stmts {
- s := stmts[i]
- // Call the helper tryWrapGlobalMapInit to see if the LHS of
- // this assignment is to a map var, and if so whether the RHS
- // should be outlined into a separate init function. If the
- // outline goes through, then replace the original init
- // statement with the call to the outlined func, and append
- // the new outlined func to our return list.
- if mapvar, genfn, call := tryWrapGlobalMapInit(s); call != nil {
- stmts[i] = call
- newfuncs = append(newfuncs, genfn)
- recordFuncForVar(mapvar, genfn)
+ return
+ }
+
+ outlined := 0
+ for i, stmt := range fn.Body {
+ // Attempt to outline stmt. If successful, replace it with a call
+ // to the returned wrapper function.
+ if wrapperFn := tryWrapGlobalInit(stmt); wrapperFn != nil {
+ ir.WithFunc(fn, func() {
+ fn.Body[i] = typecheck.Call(stmt.Pos(), wrapperFn.Nname, nil, false)
+ })
+ outlined++
}
}
- return stmts, newfuncs
+ if base.Debug.WrapGlobalMapDbg > 1 {
+ fmt.Fprintf(os.Stderr, "=-= outlined %v map initializations\n", outlined)
+ }
}
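
To make the transformation concrete: a package-level assignment like the hypothetical one below is what tryWrapGlobalInit moves into a map.init.N helper, leaving only a call behind so the linker can drop both when the map is unreferenced.

	package p

	// A large, side-effect-free map literal is the outlining target.
	var tab = map[string]int{
		"alpha": 1, "beta": 2, "gamma": 3, // ... imagine well over wrapGlobalMapInitSizeThreshold entries
	}

	// After outlining, the init code behaves roughly like:
	//
	//	func map.init.0() { tab = map[string]int{...} } // outlined; inlining suppressed
	//	func init()       { map.init.0() }              // call emitted in its place
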
diff --git a/src/cmd/compile/internal/syntax/nodes.go b/src/cmd/compile/internal/syntax/nodes.go
index 6580f053c7..de277fc3d8 100644
--- a/src/cmd/compile/internal/syntax/nodes.go
+++ b/src/cmd/compile/internal/syntax/nodes.go
@@ -17,6 +17,7 @@ type Node interface {
// associated with that production; usually the left-most one
// ('[' for IndexExpr, 'if' for IfStmt, etc.)
Pos() Pos
+ SetPos(Pos)
aNode()
}
@@ -26,8 +27,9 @@ type node struct {
pos Pos
}
-func (n *node) Pos() Pos { return n.pos }
-func (*node) aNode() {}
+func (n *node) Pos() Pos { return n.pos }
+func (n *node) SetPos(pos Pos) { n.pos = pos }
+func (*node) aNode() {}
// ----------------------------------------------------------------------------
// Files
@@ -389,8 +391,9 @@ type (
}
CallStmt struct {
- Tok token // Go or Defer
- Call Expr
+ Tok token // Go or Defer
+ Call Expr
+ DeferAt Expr // argument to runtime.deferprocat
stmt
}
diff --git a/src/cmd/compile/internal/syntax/nodes_test.go b/src/cmd/compile/internal/syntax/nodes_test.go
index a39f08c1a4..a86ae87adf 100644
--- a/src/cmd/compile/internal/syntax/nodes_test.go
+++ b/src/cmd/compile/internal/syntax/nodes_test.go
@@ -322,8 +322,5 @@ func stripAt(s string) (string, int) {
func typeOf(n Node) string {
const prefix = "*syntax."
k := fmt.Sprintf("%T", n)
- if strings.HasPrefix(k, prefix) {
- return k[len(prefix):]
- }
- return k
+ return strings.TrimPrefix(k, prefix)
}
diff --git a/src/cmd/compile/internal/syntax/parser.go b/src/cmd/compile/internal/syntax/parser.go
index b5602fcff7..1569b5e987 100644
--- a/src/cmd/compile/internal/syntax/parser.go
+++ b/src/cmd/compile/internal/syntax/parser.go
@@ -181,10 +181,9 @@ func commentText(s string) string {
}
func trailingDigits(text string) (uint, uint, bool) {
- // Want to use LastIndexByte below but it's not defined in Go1.4 and bootstrap fails.
- i := strings.LastIndex(text, ":") // look from right (Windows filenames may contain ':')
+ i := strings.LastIndexByte(text, ':') // look from right (Windows filenames may contain ':')
if i < 0 {
- return 0, 0, false // no ":"
+ return 0, 0, false // no ':'
}
// i >= 0
n, err := strconv.ParseUint(text[i+1:], 10, 0)
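
For reference, a standalone sketch of the right-to-left split performed by trailingDigits; the helper name and example values below are invented, not part of this change. Splitting at the last ':' keeps Windows paths with drive letters intact.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// splitLineDirective splits "file:line" at the LAST colon, so a path such
// as `c:\foo.go:42` yields file `c:\foo.go` and line 42.
func splitLineDirective(text string) (file string, line uint64, ok bool) {
	i := strings.LastIndexByte(text, ':')
	if i < 0 {
		return "", 0, false
	}
	n, err := strconv.ParseUint(text[i+1:], 10, 0)
	if err != nil {
		return "", 0, false
	}
	return text[:i], n, true
}

func main() {
	fmt.Println(splitLineDirective(`c:\foo.go:42`)) // c:\foo.go 42 true
}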
@@ -799,6 +798,9 @@ func (p *parser) funcDeclOrNil() *FuncDecl {
f.Name = p.name()
f.TParamList, f.Type = p.funcType(context)
} else {
+ f.Name = NewName(p.pos(), "_")
+ f.Type = new(FuncType)
+ f.Type.pos = p.pos()
msg := "expected name or ("
if context != "" {
msg = "expected name"
@@ -885,7 +887,7 @@ func (p *parser) unaryExpr() Expr {
p.next()
// unaryExpr may have returned a parenthesized composite literal
// (see comment in operand) - remove parentheses if any
- x.X = unparen(p.unaryExpr())
+ x.X = Unparen(p.unaryExpr())
return x
}
@@ -965,7 +967,7 @@ func (p *parser) callStmt() *CallStmt {
p.next()
x := p.pexpr(nil, p.tok == _Lparen) // keep_parens so we can report error below
- if t := unparen(x); t != x {
+ if t := Unparen(x); t != x {
p.errorAt(x.Pos(), fmt.Sprintf("expression in %s must not be parenthesized", s.Tok))
// already progressed, no need to advance
x = t
@@ -1145,7 +1147,7 @@ loop:
}
// x[i:...
- // For better error message, don't simply use p.want(_Colon) here (issue #47704).
+ // For better error message, don't simply use p.want(_Colon) here (go.dev/issue/47704).
if !p.got(_Colon) {
p.syntaxError("expected comma, : or ]")
p.advance(_Comma, _Colon, _Rbrack)
@@ -1190,7 +1192,7 @@ loop:
case _Lbrace:
// operand may have returned a parenthesized complit
// type; accept it but complain if we have a complit
- t := unparen(x)
+ t := Unparen(x)
// determine if '{' belongs to a composite literal or a block statement
complit_ok := false
switch t.(type) {
@@ -2019,7 +2021,7 @@ func (p *parser) paramList(name *Name, typ Expr, close token, requireNames bool)
// distribute parameter types (len(list) > 0)
if named == 0 && !requireNames {
- // all unnamed => found names are named types
+ // all unnamed and we're not in a type parameter list => found names are named types
for _, par := range list {
if typ := par.Name; typ != nil {
par.Type = typ
@@ -2027,40 +2029,50 @@ func (p *parser) paramList(name *Name, typ Expr, close token, requireNames bool)
}
}
} else if named != len(list) {
- // some named => all must have names and types
- var pos Pos // left-most error position (or unknown)
- var typ Expr // current type (from right to left)
+ // some named or we're in a type parameter list => all must be named
+ var errPos Pos // left-most error position (or unknown)
+ var typ Expr // current type (from right to left)
for i := len(list) - 1; i >= 0; i-- {
par := list[i]
if par.Type != nil {
typ = par.Type
if par.Name == nil {
- pos = StartPos(typ)
- par.Name = NewName(pos, "_")
+ errPos = StartPos(typ)
+ par.Name = NewName(errPos, "_")
}
} else if typ != nil {
par.Type = typ
} else {
// par.Type == nil && typ == nil => we only have a par.Name
- pos = par.Name.Pos()
+ errPos = par.Name.Pos()
t := p.badExpr()
- t.pos = pos // correct position
+ t.pos = errPos // correct position
par.Type = t
}
}
- if pos.IsKnown() {
+ if errPos.IsKnown() {
var msg string
if requireNames {
+ // Not all parameters are named because named != len(list).
+ // If named == typed we must have parameters that have no types,
+ // and they must be at the end of the parameter list, otherwise
+ // the types would have been filled in by the right-to-left sweep
+ // above and we wouldn't have an error. Since we are in a type
+ // parameter list, the missing types are constraints.
if named == typed {
- pos = end // position error at closing ]
+ errPos = end // position error at closing ]
msg = "missing type constraint"
} else {
- msg = "type parameters must be named"
+ msg = "missing type parameter name"
+ // go.dev/issue/60812
+ if len(list) == 1 {
+ msg += " or invalid array length"
+ }
}
} else {
msg = "mixed named and unnamed parameters"
}
- p.syntaxErrorAt(pos, msg)
+ p.syntaxErrorAt(errPos, msg)
}
}
@@ -2320,7 +2332,7 @@ func (p *parser) header(keyword token) (init SimpleStmt, cond Expr, post SimpleS
// asking for a '{' rather than a ';' here leads to a better error message
p.want(_Lbrace)
if p.tok != _Lbrace {
- p.advance(_Lbrace, _Rbrace) // for better synchronization (e.g., issue #22581)
+ p.advance(_Lbrace, _Rbrace) // for better synchronization (e.g., go.dev/issue/22581)
}
}
if keyword == _For {
@@ -2812,8 +2824,8 @@ func (p *parser) typeList(strict bool) (x Expr, comma bool) {
return
}
-// unparen removes all parentheses around an expression.
-func unparen(x Expr) Expr {
+// Unparen returns e with any enclosing parentheses stripped.
+func Unparen(x Expr) Expr {
for {
p, ok := x.(*ParenExpr)
if !ok {
@@ -2823,3 +2835,15 @@ func unparen(x Expr) Expr {
}
return x
}
+
+// UnpackListExpr unpacks a *ListExpr into a []Expr.
+func UnpackListExpr(x Expr) []Expr {
+ switch x := x.(type) {
+ case nil:
+ return nil
+ case *ListExpr:
+ return x.ElemList
+ default:
+ return []Expr{x}
+ }
+}
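
For context, a small hypothetical caller (assumed to live in package syntax) showing the typical use of the newly exported UnpackListExpr: treating the index of an IndexExpr uniformly whether it holds one expression or a comma-separated list.

// indexElems is an illustrative helper, not part of this change.
func indexElems(x *IndexExpr) []Expr {
	// x.Index is either a single Expr or a *ListExpr for x[a, b, c];
	// UnpackListExpr returns a slice in both cases.
	return UnpackListExpr(x.Index)
}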
diff --git a/src/cmd/compile/internal/syntax/parser_test.go b/src/cmd/compile/internal/syntax/parser_test.go
index d5d4290f59..538278b3eb 100644
--- a/src/cmd/compile/internal/syntax/parser_test.go
+++ b/src/cmd/compile/internal/syntax/parser_test.go
@@ -374,3 +374,22 @@ func TestLineDirectives(t *testing.T) {
}
}
}
+
+// Test that typical uses of UnpackListExpr don't allocate.
+func TestUnpackListExprAllocs(t *testing.T) {
+ var x Expr = NewName(Pos{}, "x")
+ allocs := testing.AllocsPerRun(1000, func() {
+ list := UnpackListExpr(x)
+ if len(list) != 1 || list[0] != x {
+ t.Fatalf("unexpected result")
+ }
+ })
+
+ if allocs > 0 {
+ errorf := t.Errorf
+ if testenv.OptimizationOff() {
+ errorf = t.Logf // noopt builder disables inlining
+ }
+ errorf("UnpackListExpr allocated %v times", allocs)
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/printer.go b/src/cmd/compile/internal/syntax/printer.go
index 62de68ed66..9f20db54de 100644
--- a/src/cmd/compile/internal/syntax/printer.go
+++ b/src/cmd/compile/internal/syntax/printer.go
@@ -916,7 +916,7 @@ func (p *printer) printParameterList(list []*Field, tok token) {
}
p.print(blank)
}
- p.printNode(unparen(f.Type)) // no need for (extra) parentheses around parameter types
+ p.printNode(Unparen(f.Type)) // no need for (extra) parentheses around parameter types
}
// A type parameter list [P T] where the name P and the type expression T syntactically
// combine to another valid (value) expression requires a trailing comma, as in [P *T,]
diff --git a/src/cmd/compile/internal/syntax/printer_test.go b/src/cmd/compile/internal/syntax/printer_test.go
index ceb512ef89..99baf7f5b6 100644
--- a/src/cmd/compile/internal/syntax/printer_test.go
+++ b/src/cmd/compile/internal/syntax/printer_test.go
@@ -169,6 +169,7 @@ var exprTests = [][2]string{
dup(`'a'`),
dup(`"foo"`),
dup("`bar`"),
+ dup("any"),
// func and composite literals
dup("func() {}"),
@@ -197,12 +198,18 @@ var exprTests = [][2]string{
// new interfaces
dup("interface{int}"),
dup("interface{~int}"),
- dup("interface{~int}"),
+
+ // generic constraints
+ dup("interface{~a | ~b | ~c; ~int | ~string; float64; m()}"),
dup("interface{int | string}"),
dup("interface{~int | ~string; float64; m()}"),
- dup("interface{~a | ~b | ~c; ~int | ~string; float64; m()}"),
dup("interface{~T[int, string] | string}"),
+ // generic types
+ dup("x[T]"),
+ dup("x[N | A | S]"),
+ dup("x[N, A]"),
+
// non-type expressions
dup("(x)"),
dup("x.f"),
@@ -250,6 +257,12 @@ var exprTests = [][2]string{
dup("f(s...)"),
dup("f(a, s...)"),
+ // generic functions
+ dup("f[T]()"),
+ dup("f[T](T)"),
+ dup("f[T, T1]()"),
+ dup("f[T, T1](T, T1)"),
+
dup("*x"),
dup("&x"),
dup("x + y"),
diff --git a/src/cmd/compile/internal/syntax/testdata/issue23434.go b/src/cmd/compile/internal/syntax/testdata/issue23434.go
index 5a72a7f4ed..e436abfecb 100644
--- a/src/cmd/compile/internal/syntax/testdata/issue23434.go
+++ b/src/cmd/compile/internal/syntax/testdata/issue23434.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Test case for issue 23434: Better synchronization of
+// Test case for go.dev/issue/23434: Better synchronization of
// parser after missing type. There should be exactly
// one error each time, with no follow-on errors.
@@ -12,7 +12,7 @@ type T /* ERROR unexpected newline */
type Map map[int] /* ERROR unexpected newline */
-// Examples from #23434:
+// Examples from go.dev/issue/23434:
func g() {
m := make(map[string] /* ERROR unexpected ! */ !)
diff --git a/src/cmd/compile/internal/syntax/testdata/issue31092.go b/src/cmd/compile/internal/syntax/testdata/issue31092.go
index b1839b8f46..0bd40bd7cd 100644
--- a/src/cmd/compile/internal/syntax/testdata/issue31092.go
+++ b/src/cmd/compile/internal/syntax/testdata/issue31092.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Test cases for issue 31092: Better synchronization of
+// Test cases for go.dev/issue/31092: Better synchronization of
// parser after seeing an := rather than an = in a const,
// type, or variable declaration.
diff --git a/src/cmd/compile/internal/syntax/testdata/issue43527.go b/src/cmd/compile/internal/syntax/testdata/issue43527.go
index dd2c9b1272..99a8c0965d 100644
--- a/src/cmd/compile/internal/syntax/testdata/issue43527.go
+++ b/src/cmd/compile/internal/syntax/testdata/issue43527.go
@@ -7,17 +7,17 @@ package p
type (
// 0 and 1-element []-lists are syntactically valid
_[A, B /* ERROR missing type constraint */ ] int
- _[A, /* ERROR type parameters must be named */ interface{}] int
+ _[A, /* ERROR missing type parameter name */ interface{}] int
_[A, B, C /* ERROR missing type constraint */ ] int
_[A B, C /* ERROR missing type constraint */ ] int
- _[A B, /* ERROR type parameters must be named */ interface{}] int
- _[A B, /* ERROR type parameters must be named */ interface{}, C D] int
- _[A B, /* ERROR type parameters must be named */ interface{}, C, D] int
- _[A B, /* ERROR type parameters must be named */ interface{}, C, interface{}] int
- _[A B, C interface{}, D, /* ERROR type parameters must be named */ interface{}] int
+ _[A B, /* ERROR missing type parameter name */ interface{}] int
+ _[A B, /* ERROR missing type parameter name */ interface{}, C D] int
+ _[A B, /* ERROR missing type parameter name */ interface{}, C, D] int
+ _[A B, /* ERROR missing type parameter name */ interface{}, C, interface{}] int
+ _[A B, C interface{}, D, /* ERROR missing type parameter name */ interface{}] int
)
// function type parameters use the same parsing routine - just have a couple of tests
func _[A, B /* ERROR missing type constraint */ ]() {}
-func _[A, /* ERROR type parameters must be named */ interface{}]() {}
+func _[A, /* ERROR missing type parameter name */ interface{}]() {}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue63835.go b/src/cmd/compile/internal/syntax/testdata/issue63835.go
new file mode 100644
index 0000000000..3d165c016e
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue63835.go
@@ -0,0 +1,9 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func (x string) /* ERROR syntax error: unexpected \[, expected name */ []byte {
+ return []byte(x)
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/map2.go b/src/cmd/compile/internal/syntax/testdata/map2.go
index 2833445662..3d1cbfbd22 100644
--- a/src/cmd/compile/internal/syntax/testdata/map2.go
+++ b/src/cmd/compile/internal/syntax/testdata/map2.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// This file is like map.go2, but instead if importing chans, it contains
+// This file is like map.go, but instead of importing chans, it contains
// the necessary functionality at the end of the file.
// Package orderedmap provides an ordered map, implemented as a binary tree.
@@ -23,7 +23,7 @@ type node[K, V any] struct {
// New returns a new map.
func New[K, V any](compare func(K, K) int) *Map[K, V] {
- return &Map[K, V]{compare: compare}
+ return &Map[K, V]{compare: compare}
}
// find looks up key in the map, and returns either a pointer
@@ -85,7 +85,7 @@ func (m *Map[K, V]) InOrder() *Iterator[K, V] {
// Stop sending values if sender.Send returns false,
// meaning that nothing is listening at the receiver end.
return f(n.left) &&
- sender.Send(keyValue[K, V]{n.key, n.val}) &&
+ sender.Send(keyValue[K, V]{n.key, n.val}) &&
f(n.right)
}
go func() {
@@ -119,7 +119,7 @@ func chans_Ranger[T any]() (*chans_Sender[T], *chans_Receiver[T])
// A sender is used to send values to a Receiver.
type chans_Sender[T any] struct {
values chan<- T
- done <-chan bool
+ done <-chan bool
}
func (s *chans_Sender[T]) Send(v T) bool {
@@ -137,10 +137,10 @@ func (s *chans_Sender[T]) Close() {
type chans_Receiver[T any] struct {
values <-chan T
- done chan<- bool
+ done chan<- bool
}
func (r *chans_Receiver[T]) Next() (T, bool) {
v, ok := <-r.values
return v, ok
-}
\ No newline at end of file
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/tparams.go b/src/cmd/compile/internal/syntax/testdata/tparams.go
index 646fbbebc8..4b68a1585f 100644
--- a/src/cmd/compile/internal/syntax/testdata/tparams.go
+++ b/src/cmd/compile/internal/syntax/testdata/tparams.go
@@ -23,7 +23,7 @@ func f[a t, b t, c /* ERROR missing type constraint */ ]()
func f[a b, /* ERROR expected ] */ 0] ()
-// issue #49482
+// go.dev/issue/49482
type (
t[a *[]int] struct{}
t[a *t,] struct{}
@@ -35,7 +35,7 @@ type (
t[a *struct{}|~t] struct{}
)
-// issue #51488
+// go.dev/issue/51488
type (
t[a *t|t,] struct{}
t[a *t|t, b t] struct{}
@@ -44,3 +44,14 @@ type (
t[a ([]t)] struct{}
t[a ([]t)|t] struct{}
)
+
+// go.dev/issue/60812
+type (
+ t [t]struct{}
+ t [[]t]struct{}
+ t [[t]t]struct{}
+ t [/* ERROR missing type parameter name or invalid array length */ t[t]]struct{}
+ t [t t[t], /* ERROR missing type parameter name */ t[t]]struct{}
+ t [/* ERROR missing type parameter name */ t[t], t t[t]]struct{}
+ t [/* ERROR missing type parameter name */ t[t], t[t]]struct{} // report only first error
+)
diff --git a/src/cmd/compile/internal/syntax/testdata/typeset.go b/src/cmd/compile/internal/syntax/testdata/typeset.go
index fe5c3f45a8..819025c1aa 100644
--- a/src/cmd/compile/internal/syntax/testdata/typeset.go
+++ b/src/cmd/compile/internal/syntax/testdata/typeset.go
@@ -44,15 +44,15 @@ type (
_[_ t|~struct{}] t
_[_ ~t|~struct{}] t
- // test cases for issue #49175
+ // test cases for go.dev/issue/49175
_[_ []t]t
_[_ [1]t]t
_[_ ~[]t]t
_[_ ~[1]t]t
- t [ /* ERROR type parameters must be named */ t[0]]t
+ t [ /* ERROR missing type parameter name */ t[0]]t
)
-// test cases for issue #49174
+// test cases for go.dev/issue/49174
func _[_ t]() {}
func _[_ []t]() {}
func _[_ [1]t]() {}
@@ -81,11 +81,11 @@ type (
type (
_[_ t, t /* ERROR missing type constraint */ ] t
_[_ ~t, t /* ERROR missing type constraint */ ] t
- _[_ t, /* ERROR type parameters must be named */ ~t] t
- _[_ ~t, /* ERROR type parameters must be named */ ~t] t
+ _[_ t, /* ERROR missing type parameter name */ ~t] t
+ _[_ ~t, /* ERROR missing type parameter name */ ~t] t
- _[_ t|t, /* ERROR type parameters must be named */ t|t] t
- _[_ ~t|t, /* ERROR type parameters must be named */ t|t] t
- _[_ t|t, /* ERROR type parameters must be named */ ~t|t] t
- _[_ ~t|t, /* ERROR type parameters must be named */ ~t|t] t
+ _[_ t|t, /* ERROR missing type parameter name */ t|t] t
+ _[_ ~t|t, /* ERROR missing type parameter name */ t|t] t
+ _[_ t|t, /* ERROR missing type parameter name */ ~t|t] t
+ _[_ ~t|t, /* ERROR missing type parameter name */ ~t|t] t
)
diff --git a/src/cmd/compile/internal/syntax/tokens.go b/src/cmd/compile/internal/syntax/tokens.go
index 6dece1aa5b..b08f699582 100644
--- a/src/cmd/compile/internal/syntax/tokens.go
+++ b/src/cmd/compile/internal/syntax/tokens.go
@@ -4,7 +4,9 @@
package syntax
-type token uint
+type Token uint
+
+type token = Token
//go:generate stringer -type token -linecomment tokens.go
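
The alias above is the standard pattern for exporting a previously unexported type while leaving existing in-package references untouched; a minimal sketch with invented names:

package p

type Widget uint     // new, exported name for the type
type widget = Widget // alias: existing uses of "widget" keep compiling,
                     // and the two names are fully interchangeable.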
diff --git a/src/cmd/compile/internal/syntax/type.go b/src/cmd/compile/internal/syntax/type.go
index 01eab7ad04..53132a442d 100644
--- a/src/cmd/compile/internal/syntax/type.go
+++ b/src/cmd/compile/internal/syntax/type.go
@@ -39,25 +39,27 @@ type TypeAndValue struct {
exprFlags
}
-type exprFlags uint8
+type exprFlags uint16
-func (f exprFlags) IsVoid() bool { return f&1 != 0 }
-func (f exprFlags) IsType() bool { return f&2 != 0 }
-func (f exprFlags) IsBuiltin() bool { return f&4 != 0 }
-func (f exprFlags) IsValue() bool { return f&8 != 0 }
-func (f exprFlags) IsNil() bool { return f&16 != 0 }
-func (f exprFlags) Addressable() bool { return f&32 != 0 }
-func (f exprFlags) Assignable() bool { return f&64 != 0 }
-func (f exprFlags) HasOk() bool { return f&128 != 0 }
+func (f exprFlags) IsVoid() bool { return f&1 != 0 }
+func (f exprFlags) IsType() bool { return f&2 != 0 }
+func (f exprFlags) IsBuiltin() bool { return f&4 != 0 } // a language builtin that resembles a function call, e.g., "make, append, new"
+func (f exprFlags) IsValue() bool { return f&8 != 0 }
+func (f exprFlags) IsNil() bool { return f&16 != 0 }
+func (f exprFlags) Addressable() bool { return f&32 != 0 }
+func (f exprFlags) Assignable() bool { return f&64 != 0 }
+func (f exprFlags) HasOk() bool { return f&128 != 0 }
+func (f exprFlags) IsRuntimeHelper() bool { return f&256 != 0 } // a runtime function called from transformed syntax
-func (f *exprFlags) SetIsVoid() { *f |= 1 }
-func (f *exprFlags) SetIsType() { *f |= 2 }
-func (f *exprFlags) SetIsBuiltin() { *f |= 4 }
-func (f *exprFlags) SetIsValue() { *f |= 8 }
-func (f *exprFlags) SetIsNil() { *f |= 16 }
-func (f *exprFlags) SetAddressable() { *f |= 32 }
-func (f *exprFlags) SetAssignable() { *f |= 64 }
-func (f *exprFlags) SetHasOk() { *f |= 128 }
+func (f *exprFlags) SetIsVoid() { *f |= 1 }
+func (f *exprFlags) SetIsType() { *f |= 2 }
+func (f *exprFlags) SetIsBuiltin() { *f |= 4 }
+func (f *exprFlags) SetIsValue() { *f |= 8 }
+func (f *exprFlags) SetIsNil() { *f |= 16 }
+func (f *exprFlags) SetAddressable() { *f |= 32 }
+func (f *exprFlags) SetAssignable() { *f |= 64 }
+func (f *exprFlags) SetHasOk() { *f |= 128 }
+func (f *exprFlags) SetIsRuntimeHelper() { *f |= 256 }
// a typeAndValue contains the results of typechecking an expression.
// It is embedded in expression nodes.
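
A minimal sketch (invented names) of the bit-flag scheme used above, and why the ninth flag forces exprFlags to grow from uint8 to uint16: the new bit is 1<<8 (256), which does not fit in eight bits.

package p

type flags uint16 // was uint8; nine flags need at least nine bits

const (
	isVoid          flags = 1 << 0 // 1
	isType          flags = 1 << 1 // 2
	isRuntimeHelper flags = 1 << 8 // 256, would overflow a uint8
)

func (f flags) hasRuntimeHelper() bool { return f&isRuntimeHelper != 0 }
func (f *flags) setRuntimeHelper()     { *f |= isRuntimeHelper }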
diff --git a/src/cmd/compile/internal/test/abiutils_test.go b/src/cmd/compile/internal/test/abiutils_test.go
index 8ed7622632..b500de9f18 100644
--- a/src/cmd/compile/internal/test/abiutils_test.go
+++ b/src/cmd/compile/internal/test/abiutils_test.go
@@ -22,7 +22,7 @@ import (
// AMD64 registers available:
// - integer: RAX, RBX, RCX, RDI, RSI, R8, R9, r10, R11
// - floating point: X0 - X14
-var configAMD64 = abi.NewABIConfig(9, 15, 0)
+var configAMD64 = abi.NewABIConfig(9, 15, 0, 1)
func TestMain(m *testing.M) {
ssagen.Arch.LinkArch = &x86.Linkamd64
@@ -157,7 +157,7 @@ func TestABIUtilsStruct1(t *testing.T) {
i16 := types.Types[types.TINT16]
i32 := types.Types[types.TINT32]
i64 := types.Types[types.TINT64]
- s := mkstruct([]*types.Type{i8, i8, mkstruct([]*types.Type{}), i8, i16})
+ s := mkstruct(i8, i8, mkstruct(), i8, i16)
ft := mkFuncType(nil, []*types.Type{i8, s, i64},
[]*types.Type{s, i8, i32})
@@ -181,8 +181,8 @@ func TestABIUtilsStruct2(t *testing.T) {
// (r1 fs, r2 fs)
f64 := types.Types[types.TFLOAT64]
i64 := types.Types[types.TINT64]
- s := mkstruct([]*types.Type{i64, mkstruct([]*types.Type{})})
- fs := mkstruct([]*types.Type{f64, s, mkstruct([]*types.Type{})})
+ s := mkstruct(i64, mkstruct())
+ fs := mkstruct(f64, s, mkstruct())
ft := mkFuncType(nil, []*types.Type{s, s, fs},
[]*types.Type{fs, fs})
@@ -213,9 +213,10 @@ func TestABIUtilsEmptyFieldAtEndOfStruct(t *testing.T) {
ab2 := types.NewArray(tb, 2)
a2 := types.NewArray(i64, 2)
a3 := types.NewArray(i16, 3)
- s := mkstruct([]*types.Type{a2, mkstruct([]*types.Type{})})
- s2 := mkstruct([]*types.Type{a3, mkstruct([]*types.Type{})})
- fs := mkstruct([]*types.Type{f64, s, mkstruct([]*types.Type{})})
+ empty := mkstruct()
+ s := mkstruct(a2, empty)
+ s2 := mkstruct(a3, empty)
+ fs := mkstruct(f64, s, empty)
ft := mkFuncType(nil, []*types.Type{s, ab2, s2, fs, fs},
[]*types.Type{fs, ab2, fs})
@@ -233,12 +234,11 @@ func TestABIUtilsEmptyFieldAtEndOfStruct(t *testing.T) {
abitest(t, ft, exp)
- // Check to make sure that NumParamRegs yields 2 and not 3
- // for struct "s" (e.g. that it handles the padding properly).
- nps := configAMD64.NumParamRegs(s)
- if nps != 2 {
- t.Errorf("NumParams(%v) returned %d expected %d\n",
- s, nps, 2)
+ // Test that NumParamRegs doesn't assign registers to trailing padding.
+ typ := mkstruct(i64, i64, mkstruct())
+ have := configAMD64.NumParamRegs(typ)
+ if have != 2 {
+ t.Errorf("NumParams(%v): have %v, want %v", typ, have, 2)
}
}
@@ -279,7 +279,7 @@ func TestABIUtilsMethod(t *testing.T) {
i16 := types.Types[types.TINT16]
i64 := types.Types[types.TINT64]
f64 := types.Types[types.TFLOAT64]
- s1 := mkstruct([]*types.Type{i16, i16, i16})
+ s1 := mkstruct(i16, i16, i16)
ps1 := types.NewPtr(s1)
a7 := types.NewArray(ps1, 7)
ft := mkFuncType(s1, []*types.Type{ps1, a7, f64, i16, i16, i16},
@@ -316,7 +316,7 @@ func TestABIUtilsInterfaces(t *testing.T) {
nei := types.NewInterface([]*types.Field{field})
i16 := types.Types[types.TINT16]
tb := types.Types[types.TBOOL]
- s1 := mkstruct([]*types.Type{i16, i16, tb})
+ s1 := mkstruct(i16, i16, tb)
ft := mkFuncType(nil, []*types.Type{s1, ei, ei, nei, pei, nei, i16},
[]*types.Type{ei, nei, pei})
@@ -347,8 +347,8 @@ func TestABINumParamRegs(t *testing.T) {
c64 := types.Types[types.TCOMPLEX64]
c128 := types.Types[types.TCOMPLEX128]
- s := mkstruct([]*types.Type{i8, i8, mkstruct([]*types.Type{}), i8, i16})
- a := types.NewArray(s, 3)
+ s := mkstruct(i8, i8, mkstruct(), i8, i16)
+ a := mkstruct(s, s, s)
nrtest(t, i8, 1)
nrtest(t, i16, 1)
@@ -360,7 +360,6 @@ func TestABINumParamRegs(t *testing.T) {
nrtest(t, c128, 2)
nrtest(t, s, 4)
nrtest(t, a, 12)
-
}
func TestABIUtilsComputePadding(t *testing.T) {
@@ -369,11 +368,11 @@ func TestABIUtilsComputePadding(t *testing.T) {
i16 := types.Types[types.TINT16]
i32 := types.Types[types.TINT32]
i64 := types.Types[types.TINT64]
- emptys := mkstruct([]*types.Type{})
- s1 := mkstruct([]*types.Type{i8, i16, emptys, i32, i64})
+ emptys := mkstruct()
+ s1 := mkstruct(i8, i16, emptys, i32, i64)
// func (p1 int32, p2 s1, p3 emptys, p4 [1]int32)
a1 := types.NewArray(i32, 1)
- ft := mkFuncType(nil, []*types.Type{i32, s1, emptys, a1}, []*types.Type{})
+ ft := mkFuncType(nil, []*types.Type{i32, s1, emptys, a1}, nil)
// Run abitest() just to document what we're expected to see.
exp := makeExpectedDump(`
diff --git a/src/cmd/compile/internal/test/abiutilsaux_test.go b/src/cmd/compile/internal/test/abiutilsaux_test.go
index 07b8eb7289..fb1c3983a8 100644
--- a/src/cmd/compile/internal/test/abiutilsaux_test.go
+++ b/src/cmd/compile/internal/test/abiutilsaux_test.go
@@ -21,16 +21,15 @@ import (
func mkParamResultField(t *types.Type, s *types.Sym, which ir.Class) *types.Field {
field := types.NewField(src.NoXPos, s, t)
- n := typecheck.NewName(s)
+ n := ir.NewNameAt(src.NoXPos, s, t)
n.Class = which
field.Nname = n
- n.SetType(t)
return field
}
// mkstruct is a helper routine to create a struct type with fields
// of the types specified in 'fieldtypes'.
-func mkstruct(fieldtypes []*types.Type) *types.Type {
+func mkstruct(fieldtypes ...*types.Type) *types.Type {
fields := make([]*types.Field, len(fieldtypes))
for k, t := range fieldtypes {
if t == nil {
@@ -77,7 +76,7 @@ func tokenize(src string) []string {
}
func verifyParamResultOffset(t *testing.T, f *types.Field, r abi.ABIParamAssignment, which string, idx int) int {
- n := ir.AsNode(f.Nname).(*ir.Name)
+ n := f.Nname.(*ir.Name)
if n.FrameOffset() != int64(r.Offset()) {
t.Errorf("%s %d: got offset %d wanted %d t=%v",
which, idx, r.Offset(), n.Offset_, f.Type)
diff --git a/src/cmd/compile/internal/test/iface_test.go b/src/cmd/compile/internal/test/iface_test.go
index ebc4f891c9..db41eb8e55 100644
--- a/src/cmd/compile/internal/test/iface_test.go
+++ b/src/cmd/compile/internal/test/iface_test.go
@@ -124,3 +124,15 @@ func BenchmarkEfaceInteger(b *testing.B) {
func i2int(i interface{}) int {
return i.(int)
}
+
+func BenchmarkTypeAssert(b *testing.B) {
+ e := any(Int(0))
+ r := true
+ for i := 0; i < b.N; i++ {
+ _, ok := e.(I)
+ if !ok {
+ r = false
+ }
+ }
+ sink = r
+}
diff --git a/src/cmd/compile/internal/test/inl_test.go b/src/cmd/compile/internal/test/inl_test.go
index 205b746dd8..0ccc7b3761 100644
--- a/src/cmd/compile/internal/test/inl_test.go
+++ b/src/cmd/compile/internal/test/inl_test.go
@@ -44,15 +44,16 @@ func TestIntendedInlining(t *testing.T) {
"chanbuf",
"evacuated",
"fastlog2",
- "fastrand",
"float64bits",
"funcspdelta",
"getm",
"getMCache",
"isDirectIface",
"itabHashFunc",
+ "nextslicecap",
"noescape",
"pcvalueCacheKey",
+ "rand32",
"readUnaligned32",
"readUnaligned64",
"releasem",
@@ -72,11 +73,13 @@ func TestIntendedInlining(t *testing.T) {
"gclinkptr.ptr",
"guintptr.ptr",
"writeHeapBitsForAddr",
+ "heapBitsSlice",
"markBits.isMarked",
"muintptr.ptr",
"puintptr.ptr",
"spanOf",
"spanOfUnchecked",
+ "typePointers.nextFast",
"(*gcWork).putFast",
"(*gcWork).tryGetFast",
"(*guintptr).set",
@@ -85,10 +88,15 @@ func TestIntendedInlining(t *testing.T) {
"(*mspan).base",
"(*mspan).markBitsForBase",
"(*mspan).markBitsForIndex",
+ "(*mspan).writeUserArenaHeapBits",
"(*muintptr).set",
"(*puintptr).set",
"(*wbBuf).get1",
"(*wbBuf).get2",
+
+ // Trace-related ones.
+ "traceLocker.ok",
+ "traceEnabled",
},
"runtime/internal/sys": {},
"runtime/internal/math": {
@@ -107,6 +115,9 @@ func TestIntendedInlining(t *testing.T) {
"(*Buffer).UnreadByte",
"(*Buffer).tryGrowByReslice",
},
+ "internal/abi": {
+ "UseInterfaceSwitchCache",
+ },
"compress/flate": {
"byLiteral.Len",
"byLiteral.Less",
@@ -242,6 +253,10 @@ func TestIntendedInlining(t *testing.T) {
want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "TrailingZeros32")
want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Bswap32")
}
+ if runtime.GOARCH == "amd64" || runtime.GOARCH == "arm64" || runtime.GOARCH == "loong64" || runtime.GOARCH == "mips" || runtime.GOARCH == "mips64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "riscv64" || runtime.GOARCH == "s390x" {
+ // runtime/internal/atomic.Loaduintptr is only intrinsified on these platforms.
+ want["runtime"] = append(want["runtime"], "traceAcquire")
+ }
if bits.UintSize == 64 {
// mix is only defined on 64-bit architectures
want["runtime"] = append(want["runtime"], "mix")
diff --git a/src/cmd/compile/internal/test/logic_test.go b/src/cmd/compile/internal/test/logic_test.go
index 1d7043ff60..0e46b5faef 100644
--- a/src/cmd/compile/internal/test/logic_test.go
+++ b/src/cmd/compile/internal/test/logic_test.go
@@ -1,3 +1,7 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package test
import "testing"
diff --git a/src/cmd/compile/internal/test/math_test.go b/src/cmd/compile/internal/test/math_test.go
index 6bcb4601ba..1febe9d42b 100644
--- a/src/cmd/compile/internal/test/math_test.go
+++ b/src/cmd/compile/internal/test/math_test.go
@@ -1,3 +1,7 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package test
import (
diff --git a/src/cmd/compile/internal/test/memcombine_test.go b/src/cmd/compile/internal/test/memcombine_test.go
index c7e7a208dd..3fc4a004a3 100644
--- a/src/cmd/compile/internal/test/memcombine_test.go
+++ b/src/cmd/compile/internal/test/memcombine_test.go
@@ -71,3 +71,129 @@ func readUint32be(b []byte) uint64 {
//go:noinline
func nop() {
}
+
+type T32 struct {
+ a, b uint32
+}
+
+//go:noinline
+func (t *T32) bigEndianLoad() uint64 {
+ return uint64(t.a)<<32 | uint64(t.b)
+}
+
+//go:noinline
+func (t *T32) littleEndianLoad() uint64 {
+ return uint64(t.a) | (uint64(t.b) << 32)
+}
+
+//go:noinline
+func (t *T32) bigEndianStore(x uint64) {
+ t.a = uint32(x >> 32)
+ t.b = uint32(x)
+}
+
+//go:noinline
+func (t *T32) littleEndianStore(x uint64) {
+ t.a = uint32(x)
+ t.b = uint32(x >> 32)
+}
+
+type T16 struct {
+ a, b uint16
+}
+
+//go:noinline
+func (t *T16) bigEndianLoad() uint32 {
+ return uint32(t.a)<<16 | uint32(t.b)
+}
+
+//go:noinline
+func (t *T16) littleEndianLoad() uint32 {
+ return uint32(t.a) | (uint32(t.b) << 16)
+}
+
+//go:noinline
+func (t *T16) bigEndianStore(x uint32) {
+ t.a = uint16(x >> 16)
+ t.b = uint16(x)
+}
+
+//go:noinline
+func (t *T16) littleEndianStore(x uint32) {
+ t.a = uint16(x)
+ t.b = uint16(x >> 16)
+}
+
+type T8 struct {
+ a, b uint8
+}
+
+//go:noinline
+func (t *T8) bigEndianLoad() uint16 {
+ return uint16(t.a)<<8 | uint16(t.b)
+}
+
+//go:noinline
+func (t *T8) littleEndianLoad() uint16 {
+ return uint16(t.a) | (uint16(t.b) << 8)
+}
+
+//go:noinline
+func (t *T8) bigEndianStore(x uint16) {
+ t.a = uint8(x >> 8)
+ t.b = uint8(x)
+}
+
+//go:noinline
+func (t *T8) littleEndianStore(x uint16) {
+ t.a = uint8(x)
+ t.b = uint8(x >> 8)
+}
+
+func TestIssue64468(t *testing.T) {
+ t32 := T32{1, 2}
+ if got, want := t32.bigEndianLoad(), uint64(1<<32+2); got != want {
+ t.Errorf("T32.bigEndianLoad got %x want %x\n", got, want)
+ }
+ if got, want := t32.littleEndianLoad(), uint64(1+2<<32); got != want {
+ t.Errorf("T32.littleEndianLoad got %x want %x\n", got, want)
+ }
+ t16 := T16{1, 2}
+ if got, want := t16.bigEndianLoad(), uint32(1<<16+2); got != want {
+ t.Errorf("T16.bigEndianLoad got %x want %x\n", got, want)
+ }
+ if got, want := t16.littleEndianLoad(), uint32(1+2<<16); got != want {
+ t.Errorf("T16.littleEndianLoad got %x want %x\n", got, want)
+ }
+ t8 := T8{1, 2}
+ if got, want := t8.bigEndianLoad(), uint16(1<<8+2); got != want {
+ t.Errorf("T8.bigEndianLoad got %x want %x\n", got, want)
+ }
+ if got, want := t8.littleEndianLoad(), uint16(1+2<<8); got != want {
+ t.Errorf("T8.littleEndianLoad got %x want %x\n", got, want)
+ }
+ t32.bigEndianStore(1<<32 + 2)
+ if got, want := t32, (T32{1, 2}); got != want {
+ t.Errorf("T32.bigEndianStore got %x want %x\n", got, want)
+ }
+ t32.littleEndianStore(1<<32 + 2)
+ if got, want := t32, (T32{2, 1}); got != want {
+ t.Errorf("T32.littleEndianStore got %x want %x\n", got, want)
+ }
+ t16.bigEndianStore(1<<16 + 2)
+ if got, want := t16, (T16{1, 2}); got != want {
+ t.Errorf("T16.bigEndianStore got %x want %x\n", got, want)
+ }
+ t16.littleEndianStore(1<<16 + 2)
+ if got, want := t16, (T16{2, 1}); got != want {
+ t.Errorf("T16.littleEndianStore got %x want %x\n", got, want)
+ }
+ t8.bigEndianStore(1<<8 + 2)
+ if got, want := t8, (T8{1, 2}); got != want {
+ t.Errorf("T8.bigEndianStore got %x want %x\n", got, want)
+ }
+ t8.littleEndianStore(1<<8 + 2)
+ if got, want := t8, (T8{2, 1}); got != want {
+ t.Errorf("T8.littleEndianStore got %x want %x\n", got, want)
+ }
+}
diff --git a/src/cmd/compile/internal/test/pgo_devirtualize_test.go b/src/cmd/compile/internal/test/pgo_devirtualize_test.go
index 49e95e9a80..f451243683 100644
--- a/src/cmd/compile/internal/test/pgo_devirtualize_test.go
+++ b/src/cmd/compile/internal/test/pgo_devirtualize_test.go
@@ -14,8 +14,13 @@ import (
"testing"
)
+type devirtualization struct {
+ pos string
+ callee string
+}
+
// testPGODevirtualize tests that specific PGO devirtualize rewrites are performed.
-func testPGODevirtualize(t *testing.T, dir string) {
+func testPGODevirtualize(t *testing.T, dir string, want []devirtualization) {
testenv.MustHaveGoRun(t)
t.Parallel()
@@ -23,17 +28,27 @@ func testPGODevirtualize(t *testing.T, dir string) {
// Add a go.mod so we have a consistent symbol names in this temp dir.
goMod := fmt.Sprintf(`module %s
-go 1.19
+go 1.21
`, pkg)
if err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644); err != nil {
t.Fatalf("error writing go.mod: %v", err)
}
+ // Run the test without PGO to ensure that the test assertions are
+ // correct even in the non-optimized version.
+ cmd := testenv.CleanCmdEnv(testenv.Command(t, testenv.GoToolPath(t), "test", "."))
+ cmd.Dir = dir
+ b, err := cmd.CombinedOutput()
+ t.Logf("Test without PGO:\n%s", b)
+ if err != nil {
+ t.Fatalf("Test failed without PGO: %v", err)
+ }
+
// Build the test with the profile.
pprof := filepath.Join(dir, "devirt.pprof")
- gcflag := fmt.Sprintf("-gcflags=-m=2 -pgoprofile=%s -d=pgodebug=2", pprof)
+ gcflag := fmt.Sprintf("-gcflags=-m=2 -pgoprofile=%s -d=pgodebug=3", pprof)
out := filepath.Join(dir, "test.exe")
- cmd := testenv.CleanCmdEnv(testenv.Command(t, testenv.GoToolPath(t), "build", "-o", out, gcflag, "."))
+ cmd = testenv.CleanCmdEnv(testenv.Command(t, testenv.GoToolPath(t), "test", "-o", out, gcflag, "."))
cmd.Dir = dir
pr, pw, err := os.Pipe()
@@ -50,25 +65,9 @@ go 1.19
t.Fatalf("error starting go test: %v", err)
}
- type devirtualization struct {
- pos string
- callee string
- }
-
- want := []devirtualization{
- {
- pos: "./devirt.go:61:21",
- callee: "mult.Mult.Multiply",
- },
- {
- pos: "./devirt.go:61:31",
- callee: "Add.Add",
- },
- }
-
got := make(map[devirtualization]struct{})
- devirtualizedLine := regexp.MustCompile(`(.*): PGO devirtualizing .* to (.*)`)
+ devirtualizedLine := regexp.MustCompile(`(.*): PGO devirtualizing \w+ call .* to (.*)`)
scanner := bufio.NewScanner(pr)
for scanner.Scan() {
@@ -102,6 +101,15 @@ go 1.19
}
t.Errorf("devirtualization %v missing; got %v", w, got)
}
+
+ // Run test with PGO to ensure the assertions are still true.
+ cmd = testenv.CleanCmdEnv(testenv.Command(t, out))
+ cmd.Dir = dir
+ b, err = cmd.CombinedOutput()
+ t.Logf("Test with PGO:\n%s", b)
+ if err != nil {
+ t.Fatalf("Test failed with PGO: %v", err)
+ }
}
// TestPGODevirtualize tests that specific functions are devirtualized when PGO
@@ -115,14 +123,139 @@ func TestPGODevirtualize(t *testing.T) {
// Copy the module to a scratch location so we can add a go.mod.
dir := t.TempDir()
- if err := os.Mkdir(filepath.Join(dir, "mult"), 0755); err != nil {
+ if err := os.Mkdir(filepath.Join(dir, "mult.pkg"), 0755); err != nil {
t.Fatalf("error creating dir: %v", err)
}
- for _, file := range []string{"devirt.go", "devirt_test.go", "devirt.pprof", filepath.Join("mult", "mult.go")} {
+ for _, file := range []string{"devirt.go", "devirt_test.go", "devirt.pprof", filepath.Join("mult.pkg", "mult.go")} {
if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil {
t.Fatalf("error copying %s: %v", file, err)
}
}
- testPGODevirtualize(t, dir)
+ want := []devirtualization{
+ // ExerciseIface
+ {
+ pos: "./devirt.go:101:20",
+ callee: "mult.Mult.Multiply",
+ },
+ {
+ pos: "./devirt.go:101:39",
+ callee: "Add.Add",
+ },
+ // ExerciseFuncConcrete
+ {
+ pos: "./devirt.go:173:36",
+ callee: "AddFn",
+ },
+ {
+ pos: "./devirt.go:173:15",
+ callee: "mult.MultFn",
+ },
+ // ExerciseFuncField
+ {
+ pos: "./devirt.go:207:35",
+ callee: "AddFn",
+ },
+ {
+ pos: "./devirt.go:207:19",
+ callee: "mult.MultFn",
+ },
+ // ExerciseFuncClosure
+ // TODO(prattmic): Closure callees not implemented.
+ //{
+ // pos: "./devirt.go:249:27",
+ // callee: "AddClosure.func1",
+ //},
+ //{
+ // pos: "./devirt.go:249:15",
+ // callee: "mult.MultClosure.func1",
+ //},
+ }
+
+ testPGODevirtualize(t, dir, want)
+}
+
+// Regression test for https://go.dev/issue/65615. If a target function changes
+// from non-generic to generic we can't devirtualize it (don't know the type
+// parameters), but the compiler should not crash.
+func TestLookupFuncGeneric(t *testing.T) {
+ wd, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("error getting wd: %v", err)
+ }
+ srcDir := filepath.Join(wd, "testdata", "pgo", "devirtualize")
+
+ // Copy the module to a scratch location so we can add a go.mod.
+ dir := t.TempDir()
+ if err := os.Mkdir(filepath.Join(dir, "mult.pkg"), 0755); err != nil {
+ t.Fatalf("error creating dir: %v", err)
+ }
+ for _, file := range []string{"devirt.go", "devirt_test.go", "devirt.pprof", filepath.Join("mult.pkg", "mult.go")} {
+ if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil {
+ t.Fatalf("error copying %s: %v", file, err)
+ }
+ }
+
+ // Change MultFn from a concrete function to a parameterized function.
+ if err := convertMultToGeneric(filepath.Join(dir, "mult.pkg", "mult.go")); err != nil {
+ t.Fatalf("error editing mult.go: %v", err)
+ }
+
+ // Same as TestPGODevirtualize except for MultFn, which we cannot
+ // devirtualize to because it has become generic.
+ //
+ // Note that the important part of this test is that the build is
+ // successful, not the specific devirtualizations.
+ want := []devirtualization{
+ // ExerciseIface
+ {
+ pos: "./devirt.go:101:20",
+ callee: "mult.Mult.Multiply",
+ },
+ {
+ pos: "./devirt.go:101:39",
+ callee: "Add.Add",
+ },
+ // ExerciseFuncConcrete
+ {
+ pos: "./devirt.go:173:36",
+ callee: "AddFn",
+ },
+ // ExerciseFuncField
+ {
+ pos: "./devirt.go:207:35",
+ callee: "AddFn",
+ },
+ // ExerciseFuncClosure
+ // TODO(prattmic): Closure callees not implemented.
+ //{
+ // pos: "./devirt.go:249:27",
+ // callee: "AddClosure.func1",
+ //},
+ //{
+ // pos: "./devirt.go:249:15",
+ // callee: "mult.MultClosure.func1",
+ //},
+ }
+
+ testPGODevirtualize(t, dir, want)
+}
+
+var multFnRe = regexp.MustCompile(`func MultFn\(a, b int64\) int64`)
+
+func convertMultToGeneric(path string) error {
+ content, err := os.ReadFile(path)
+ if err != nil {
+ return fmt.Errorf("error opening: %w", err)
+ }
+
+ if !multFnRe.Match(content) {
+ return fmt.Errorf("MultFn not found; update regexp?")
+ }
+
+ // Users of MultFn shouldn't need adjustment; type inference should
+ // work OK.
+ content = multFnRe.ReplaceAll(content, []byte(`func MultFn[T int32|int64](a, b T) T`))
+
+ return os.WriteFile(path, content, 0644)
}
diff --git a/src/cmd/compile/internal/test/pgo_inl_test.go b/src/cmd/compile/internal/test/pgo_inl_test.go
index 4d6b5a134a..da6c4a53d3 100644
--- a/src/cmd/compile/internal/test/pgo_inl_test.go
+++ b/src/cmd/compile/internal/test/pgo_inl_test.go
@@ -6,6 +6,7 @@ package test
import (
"bufio"
+ "bytes"
"fmt"
"internal/profile"
"internal/testenv"
@@ -17,11 +18,7 @@ import (
"testing"
)
-// testPGOIntendedInlining tests that specific functions are inlined.
-func testPGOIntendedInlining(t *testing.T, dir string) {
- testenv.MustHaveGoRun(t)
- t.Parallel()
-
+func buildPGOInliningTest(t *testing.T, dir string, gcflag string) []byte {
const pkg = "example.com/pgo/inline"
// Add a go.mod so we have a consistent symbol names in this temp dir.
@@ -32,6 +29,26 @@ go 1.19
t.Fatalf("error writing go.mod: %v", err)
}
+ exe := filepath.Join(dir, "test.exe")
+ args := []string{"test", "-c", "-o", exe, "-gcflags=" + gcflag}
+ cmd := testenv.Command(t, testenv.GoToolPath(t), args...)
+ cmd.Dir = dir
+ cmd = testenv.CleanCmdEnv(cmd)
+ t.Log(cmd)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("build failed: %v, output:\n%s", err, out)
+ }
+ return out
+}
+
+// testPGOIntendedInlining tests that specific functions are inlined.
+func testPGOIntendedInlining(t *testing.T, dir string) {
+ testenv.MustHaveGoRun(t)
+ t.Parallel()
+
+ const pkg = "example.com/pgo/inline"
+
want := []string{
"(*BS).NS",
}
@@ -70,26 +87,10 @@ go 1.19
// Build the test with the profile. Use a smaller threshold to test.
// TODO: maybe adjust the test to work with default threshold.
pprof := filepath.Join(dir, "inline_hot.pprof")
- gcflag := fmt.Sprintf("-gcflags=-m -m -pgoprofile=%s -d=pgoinlinebudget=160,pgoinlinecdfthreshold=90", pprof)
- out := filepath.Join(dir, "test.exe")
- cmd := testenv.CleanCmdEnv(testenv.Command(t, testenv.GoToolPath(t), "test", "-c", "-o", out, gcflag, "."))
- cmd.Dir = dir
+ gcflag := fmt.Sprintf("-m -m -pgoprofile=%s -d=pgoinlinebudget=160,pgoinlinecdfthreshold=90", pprof)
+ out := buildPGOInliningTest(t, dir, gcflag)
- pr, pw, err := os.Pipe()
- if err != nil {
- t.Fatalf("error creating pipe: %v", err)
- }
- defer pr.Close()
- cmd.Stdout = pw
- cmd.Stderr = pw
-
- err = cmd.Start()
- pw.Close()
- if err != nil {
- t.Fatalf("error starting go test: %v", err)
- }
-
- scanner := bufio.NewScanner(pr)
+ scanner := bufio.NewScanner(bytes.NewReader(out))
curPkg := ""
canInline := regexp.MustCompile(`: can inline ([^ ]*)`)
haveInlined := regexp.MustCompile(`: inlining call to ([^ ]*)`)
@@ -128,11 +129,8 @@ go 1.19
continue
}
}
- if err := cmd.Wait(); err != nil {
- t.Fatalf("error running go test: %v", err)
- }
if err := scanner.Err(); err != nil {
- t.Fatalf("error reading go test output: %v", err)
+ t.Fatalf("error reading output: %v", err)
}
for fullName, reason := range notInlinedReason {
t.Errorf("%s was not inlined: %s", fullName, reason)
@@ -297,3 +295,50 @@ func copyFile(dst, src string) error {
_, err = io.Copy(d, s)
return err
}
+
+// TestPGOHash tests that PGO optimization decisions can be selected by pgohash.
+func TestPGOHash(t *testing.T) {
+ testenv.MustHaveGoRun(t)
+ t.Parallel()
+
+ const pkg = "example.com/pgo/inline"
+
+ wd, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("error getting wd: %v", err)
+ }
+ srcDir := filepath.Join(wd, "testdata/pgo/inline")
+
+ // Copy the module to a scratch location so we can add a go.mod.
+ dir := t.TempDir()
+
+ for _, file := range []string{"inline_hot.go", "inline_hot_test.go", "inline_hot.pprof"} {
+ if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil {
+ t.Fatalf("error copying %s: %v", file, err)
+ }
+ }
+
+ pprof := filepath.Join(dir, "inline_hot.pprof")
+ // build with -trimpath so the source location (thus the hash)
+ // does not depend on the temporary directory path.
+ gcflag0 := fmt.Sprintf("-pgoprofile=%s -trimpath %s=>%s -d=pgoinlinebudget=160,pgoinlinecdfthreshold=90,pgodebug=1", pprof, dir, pkg)
+
+ // Check that a hash match allows PGO inlining.
+ const srcPos = "example.com/pgo/inline/inline_hot.go:81:19"
+ const hashMatch = "pgohash triggered " + srcPos + " (inline)"
+ pgoDebugRE := regexp.MustCompile(`hot-budget check allows inlining for call .* at ` + strings.ReplaceAll(srcPos, ".", "\\."))
+ hash := "v1" // 1 matches srcPos, v for verbose (print source location)
+ gcflag := gcflag0 + ",pgohash=" + hash
+ out := buildPGOInliningTest(t, dir, gcflag)
+ if !bytes.Contains(out, []byte(hashMatch)) || !pgoDebugRE.Match(out) {
+ t.Errorf("output does not contain expected source line, out:\n%s", out)
+ }
+
+ // Check that a hash mismatch turns off PGO inlining.
+ hash = "v0" // 0 should not match srcPos
+ gcflag = gcflag0 + ",pgohash=" + hash
+ out = buildPGOInliningTest(t, dir, gcflag)
+ if bytes.Contains(out, []byte(hashMatch)) || pgoDebugRE.Match(out) {
+ t.Errorf("output contains unexpected source line, out:\n%s", out)
+ }
+}
diff --git a/src/cmd/compile/internal/test/ssa_test.go b/src/cmd/compile/internal/test/ssa_test.go
index 5f8acdc72d..7f2faa1140 100644
--- a/src/cmd/compile/internal/test/ssa_test.go
+++ b/src/cmd/compile/internal/test/ssa_test.go
@@ -169,7 +169,7 @@ func TestCode(t *testing.T) {
continue
}
t.Run(fmt.Sprintf("%s%s", test.name[4:], flag), func(t *testing.T) {
- out, err := testenv.Command(t, filepath.Join(tmpdir, "code.test"), "-test.run="+test.name).CombinedOutput()
+ out, err := testenv.Command(t, filepath.Join(tmpdir, "code.test"), "-test.run=^"+test.name+"$").CombinedOutput()
if err != nil || string(out) != "PASS\n" {
t.Errorf("Failed:\n%s\n", out)
}
diff --git a/src/cmd/compile/internal/test/switch_test.go b/src/cmd/compile/internal/test/switch_test.go
index 30dee6257e..1d12361cbb 100644
--- a/src/cmd/compile/internal/test/switch_test.go
+++ b/src/cmd/compile/internal/test/switch_test.go
@@ -120,6 +120,165 @@ func benchmarkSwitchString(b *testing.B, predictable bool) {
sink = n
}
+func BenchmarkSwitchTypePredictable(b *testing.B) {
+ benchmarkSwitchType(b, true)
+}
+func BenchmarkSwitchTypeUnpredictable(b *testing.B) {
+ benchmarkSwitchType(b, false)
+}
+func benchmarkSwitchType(b *testing.B, predictable bool) {
+ a := []any{
+ int8(1),
+ int16(2),
+ int32(3),
+ int64(4),
+ uint8(5),
+ uint16(6),
+ uint32(7),
+ uint64(8),
+ }
+ n := 0
+ rng := newRNG()
+ for i := 0; i < b.N; i++ {
+ rng = rng.next(predictable)
+ switch a[rng.value()&7].(type) {
+ case int8:
+ n += 1
+ case int16:
+ n += 2
+ case int32:
+ n += 3
+ case int64:
+ n += 4
+ case uint8:
+ n += 5
+ case uint16:
+ n += 6
+ case uint32:
+ n += 7
+ case uint64:
+ n += 8
+ }
+ }
+ sink = n
+}
+
+func BenchmarkSwitchInterfaceTypePredictable(b *testing.B) {
+ benchmarkSwitchInterfaceType(b, true)
+}
+func BenchmarkSwitchInterfaceTypeUnpredictable(b *testing.B) {
+ benchmarkSwitchInterfaceType(b, false)
+}
+
+type SI0 interface {
+ si0()
+}
+type ST0 struct {
+}
+
+func (ST0) si0() {
+}
+
+type SI1 interface {
+ si1()
+}
+type ST1 struct {
+}
+
+func (ST1) si1() {
+}
+
+type SI2 interface {
+ si2()
+}
+type ST2 struct {
+}
+
+func (ST2) si2() {
+}
+
+type SI3 interface {
+ si3()
+}
+type ST3 struct {
+}
+
+func (ST3) si3() {
+}
+
+type SI4 interface {
+ si4()
+}
+type ST4 struct {
+}
+
+func (ST4) si4() {
+}
+
+type SI5 interface {
+ si5()
+}
+type ST5 struct {
+}
+
+func (ST5) si5() {
+}
+
+type SI6 interface {
+ si6()
+}
+type ST6 struct {
+}
+
+func (ST6) si6() {
+}
+
+type SI7 interface {
+ si7()
+}
+type ST7 struct {
+}
+
+func (ST7) si7() {
+}
+
+func benchmarkSwitchInterfaceType(b *testing.B, predictable bool) {
+ a := []any{
+ ST0{},
+ ST1{},
+ ST2{},
+ ST3{},
+ ST4{},
+ ST5{},
+ ST6{},
+ ST7{},
+ }
+ n := 0
+ rng := newRNG()
+ for i := 0; i < b.N; i++ {
+ rng = rng.next(predictable)
+ switch a[rng.value()&7].(type) {
+ case SI0:
+ n += 1
+ case SI1:
+ n += 2
+ case SI2:
+ n += 3
+ case SI3:
+ n += 4
+ case SI4:
+ n += 5
+ case SI5:
+ n += 6
+ case SI6:
+ n += 7
+ case SI7:
+ n += 8
+ }
+ }
+ sink = n
+}
+
// A simple random number generator used to make switches conditionally predictable.
type rng uint64
diff --git a/src/cmd/compile/internal/test/test.go b/src/cmd/compile/internal/test/test.go
index 56e5404079..195c65a9ea 100644
--- a/src/cmd/compile/internal/test/test.go
+++ b/src/cmd/compile/internal/test/test.go
@@ -1 +1,5 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package test
diff --git a/src/cmd/compile/internal/test/testdata/arith_test.go b/src/cmd/compile/internal/test/testdata/arith_test.go
index 2b8cd9fad3..cd7b5bc2c4 100644
--- a/src/cmd/compile/internal/test/testdata/arith_test.go
+++ b/src/cmd/compile/internal/test/testdata/arith_test.go
@@ -268,6 +268,70 @@ func testOverflowConstShift(t *testing.T) {
}
}
+//go:noinline
+func rsh64x64ConstOverflow8(x int8) int64 {
+ return int64(x) >> 9
+}
+
+//go:noinline
+func rsh64x64ConstOverflow16(x int16) int64 {
+ return int64(x) >> 17
+}
+
+//go:noinline
+func rsh64x64ConstOverflow32(x int32) int64 {
+ return int64(x) >> 33
+}
+
+func testArithRightShiftConstOverflow(t *testing.T) {
+ allSet := int64(-1)
+ if got, want := rsh64x64ConstOverflow8(0x7f), int64(0); got != want {
+ t.Errorf("rsh64x64ConstOverflow8 failed: got %v, want %v", got, want)
+ }
+ if got, want := rsh64x64ConstOverflow16(0x7fff), int64(0); got != want {
+ t.Errorf("rsh64x64ConstOverflow16 failed: got %v, want %v", got, want)
+ }
+ if got, want := rsh64x64ConstOverflow32(0x7ffffff), int64(0); got != want {
+ t.Errorf("rsh64x64ConstOverflow32 failed: got %v, want %v", got, want)
+ }
+ if got, want := rsh64x64ConstOverflow8(int8(-1)), allSet; got != want {
+ t.Errorf("rsh64x64ConstOverflow8 failed: got %v, want %v", got, want)
+ }
+ if got, want := rsh64x64ConstOverflow16(int16(-1)), allSet; got != want {
+ t.Errorf("rsh64x64ConstOverflow16 failed: got %v, want %v", got, want)
+ }
+ if got, want := rsh64x64ConstOverflow32(int32(-1)), allSet; got != want {
+ t.Errorf("rsh64x64ConstOverflow32 failed: got %v, want %v", got, want)
+ }
+}
+
+//go:noinline
+func rsh64Ux64ConstOverflow8(x uint8) uint64 {
+ return uint64(x) >> 9
+}
+
+//go:noinline
+func rsh64Ux64ConstOverflow16(x uint16) uint64 {
+ return uint64(x) >> 17
+}
+
+//go:noinline
+func rsh64Ux64ConstOverflow32(x uint32) uint64 {
+ return uint64(x) >> 33
+}
+
+func testRightShiftConstOverflow(t *testing.T) {
+ if got, want := rsh64Ux64ConstOverflow8(0xff), uint64(0); got != want {
+ t.Errorf("rsh64Ux64ConstOverflow8 failed: got %v, want %v", got, want)
+ }
+ if got, want := rsh64Ux64ConstOverflow16(0xffff), uint64(0); got != want {
+ t.Errorf("rsh64Ux64ConstOverflow16 failed: got %v, want %v", got, want)
+ }
+ if got, want := rsh64Ux64ConstOverflow32(0xffffffff), uint64(0); got != want {
+ t.Errorf("rsh64Ux64ConstOverflow32 failed: got %v, want %v", got, want)
+ }
+}
+
// test64BitConstMult tests that rewrite rules don't fold 64 bit constants
// into multiply instructions.
func test64BitConstMult(t *testing.T) {
@@ -918,6 +982,8 @@ func TestArithmetic(t *testing.T) {
testShiftCX(t)
testSubConst(t)
testOverflowConstShift(t)
+ testArithRightShiftConstOverflow(t)
+ testRightShiftConstOverflow(t)
testArithConstShift(t)
testArithRshConst(t)
testLargeConst(t)
diff --git a/src/cmd/compile/internal/test/testdata/ctl_test.go b/src/cmd/compile/internal/test/testdata/ctl_test.go
index ff3a1609c5..501f79eee1 100644
--- a/src/cmd/compile/internal/test/testdata/ctl_test.go
+++ b/src/cmd/compile/internal/test/testdata/ctl_test.go
@@ -70,7 +70,6 @@ func switch_ssa(a int) int {
ret += 1
}
return ret
-
}
func fallthrough_ssa(a int) int {
@@ -92,7 +91,6 @@ func fallthrough_ssa(a int) int {
ret++
}
return ret
-
}
func testFallthrough(t *testing.T) {
diff --git a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.go b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.go
index 390b6c350a..ac238f6dea 100644
--- a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.go
+++ b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.go
@@ -11,7 +11,16 @@
package devirt
-import "example.com/pgo/devirtualize/mult"
+// Devirtualization of callees from transitive dependencies should work even if
+// they aren't directly referenced in the package. See #61577.
+//
+// Dots in the last package path component are escaped in symbol names. Use one
+// to ensure the escaping doesn't break lookup.
+import (
+ "fmt"
+
+ "example.com/pgo/devirtualize/mult.pkg"
+)
var sink int
@@ -37,15 +46,46 @@ func (Sub) Add(a, b int) int {
return a - b
}
-// Exercise calls mostly a1 and m1.
+// ExerciseIface calls mostly a1 and m1.
//
//go:noinline
-func Exercise(iter int, a1, a2 Adder, m1, m2 mult.Multiplier) {
+func ExerciseIface(iter int, a1, a2 Adder, m1, m2 mult.Multiplier) int {
+ // The call below must evaluate selectA() to determine the receiver to
+ // use. This should happen exactly once per iteration. Assert that is
+ // the case to ensure the IR manipulation does not result in over- or
+ // under-evaluation.
+ selectI := 0
+ selectA := func(gotI int) Adder {
+ if gotI != selectI {
+ panic(fmt.Sprintf("selectA not called once per iteration; got i %d want %d", gotI, selectI))
+ }
+ selectI++
+
+ if gotI%10 == 0 {
+ return a2
+ }
+ return a1
+ }
+ oneI := 0
+ one := func(gotI int) int {
+ if gotI != oneI {
+ panic(fmt.Sprintf("one not called once per iteration; got i %d want %d", gotI, oneI))
+ }
+ oneI++
+
+ // The function value must be evaluated before arguments, so
+ // selectI must have been incremented already.
+ if selectI != oneI {
+ panic(fmt.Sprintf("selectA not called before one; got i %d want %d", selectI, oneI))
+ }
+
+ return 1
+ }
+
+ val := 0
for i := 0; i < iter; i++ {
- a := a1
m := m1
if i%10 == 0 {
- a = a2
m = m2
}
@@ -58,16 +98,155 @@ func Exercise(iter int, a1, a2 Adder, m1, m2 mult.Multiplier) {
// If they were not mutually exclusive (for example, two Add
// calls), then we could not definitively select the correct
// callee.
- sink += m.Multiply(42, a.Add(1, 2))
+ val += m.Multiply(42, selectA(i).Add(one(i), 2))
+ }
+ return val
+}
+
+type AddFunc func(int, int) int
+
+func AddFn(a, b int) int {
+ for i := 0; i < 1000; i++ {
+ sink++
+ }
+ return a + b
+}
+
+func SubFn(a, b int) int {
+ for i := 0; i < 1000; i++ {
+ sink++
+ }
+ return a - b
+}
+
+// ExerciseFuncConcrete calls mostly a1 and m1.
+//
+//go:noinline
+func ExerciseFuncConcrete(iter int, a1, a2 AddFunc, m1, m2 mult.MultFunc) int {
+ // The call below must evaluate selectA() to determine the function to
+ // call. This should happen exactly once per iteration. Assert that is
+ // the case to ensure the IR manipulation does not result in over- or
+ // under-evaluation.
+ selectI := 0
+ selectA := func(gotI int) AddFunc {
+ if gotI != selectI {
+ panic(fmt.Sprintf("selectA not called once per iteration; got i %d want %d", gotI, selectI))
+ }
+ selectI++
+
+ if gotI%10 == 0 {
+ return a2
+ }
+ return a1
+ }
+ oneI := 0
+ one := func(gotI int) int {
+ if gotI != oneI {
+ panic(fmt.Sprintf("one not called once per iteration; got i %d want %d", gotI, oneI))
+ }
+ oneI++
+
+ // The function value must be evaluated before arguments, so
+ // selectI must have been incremented already.
+ if selectI != oneI {
+ panic(fmt.Sprintf("selectA not called before one; got i %d want %d", selectI, oneI))
+ }
+
+ return 1
+ }
+
+ val := 0
+ for i := 0; i < iter; i++ {
+ m := m1
+ if i%10 == 0 {
+ m = m2
+ }
+
+ // N.B. Profiles only distinguish calls on a per-line level,
+ // making the two calls ambiguous. However because the
+ // function types are mutually exclusive, devirtualization can
+ // still select the correct callee for each.
+ //
+ // If they were not mutually exclusive (for example, two
+ // AddFunc calls), then we could not definitively select the
+ // correct callee.
+ val += int(m(42, int64(selectA(i)(one(i), 2))))
+ }
+ return val
+}
+
+// ExerciseFuncField calls mostly a1 and m1.
+//
+// This is a simplified version of ExerciseFuncConcrete, but accessing the
+// function values via a struct field.
+//
+//go:noinline
+func ExerciseFuncField(iter int, a1, a2 AddFunc, m1, m2 mult.MultFunc) int {
+ ops := struct {
+ a AddFunc
+ m mult.MultFunc
+ }{}
+
+ val := 0
+ for i := 0; i < iter; i++ {
+ ops.a = a1
+ ops.m = m1
+ if i%10 == 0 {
+ ops.a = a2
+ ops.m = m2
+ }
+
+ // N.B. Profiles only distinguish calls on a per-line level,
+ // making the two calls ambiguous. However because the
+ // function types are mutually exclusive, devirtualization can
+ // still select the correct callee for each.
+ //
+ // If they were not mutually exclusive (for example, two
+ // AddFunc calls), then we could not definitively select the
+ // correct callee.
+ val += int(ops.m(42, int64(ops.a(1, 2))))
}
+ return val
}
-func init() {
- // TODO: until https://golang.org/cl/497175 or similar lands,
- // we need to create an explicit reference to callees
- // in another package for devirtualization to work.
- m := mult.Mult{}
- m.Multiply(42, 0)
- n := mult.NegMult{}
- n.Multiply(42, 0)
+//go:noinline
+func AddClosure() AddFunc {
+ // Implicit closure by capturing the receiver.
+ var a Add
+ return a.Add
+}
+
+//go:noinline
+func SubClosure() AddFunc {
+ var s Sub
+ return s.Add
+}
+
+// ExerciseFuncClosure calls mostly a1 and m1.
+//
+// This is a simplified version of ExerciseFuncConcrete, but we need two
+// distinct call sites to test two different types of function values.
+//
+//go:noinline
+func ExerciseFuncClosure(iter int, a1, a2 AddFunc, m1, m2 mult.MultFunc) int {
+ val := 0
+ for i := 0; i < iter; i++ {
+ a := a1
+ m := m1
+ if i%10 == 0 {
+ a = a2
+ m = m2
+ }
+
+ // N.B. Profiles only distinguish calls on a per-line level,
+ // making the two calls ambiguous. However because the
+ // function types are mutually exclusive, devirtualization can
+ // still select the correct callee for each.
+ //
+ // If they were not mutually exclusive (for example, two
+ // AddFunc calls), then we could not definitively select the
+ // correct callee.
+ val += int(m(42, int64(a(1, 2))))
+ }
+ return val
}
diff --git a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.pprof b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.pprof
index 5fe5dd606f..2a27f1bb50 100644
--- a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.pprof
+++ b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.pprof
Binary files differ
diff --git a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt_test.go b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt_test.go
index f4cbbb8069..59b565d77f 100644
--- a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt_test.go
+++ b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt_test.go
@@ -14,10 +14,10 @@ package devirt
import (
"testing"
- "example.com/pgo/devirtualize/mult"
+ "example.com/pgo/devirtualize/mult.pkg"
)
-func BenchmarkDevirt(b *testing.B) {
+func BenchmarkDevirtIface(b *testing.B) {
var (
a1 Add
a2 Sub
@@ -25,5 +25,49 @@ func BenchmarkDevirt(b *testing.B) {
m2 mult.NegMult
)
- Exercise(b.N, a1, a2, m1, m2)
+ ExerciseIface(b.N, a1, a2, m1, m2)
+}
+
+// Verify that devirtualization doesn't result in calls or side effects applying more than once.
+func TestDevirtIface(t *testing.T) {
+ var (
+ a1 Add
+ a2 Sub
+ m1 mult.Mult
+ m2 mult.NegMult
+ )
+
+ if v := ExerciseIface(10, a1, a2, m1, m2); v != 1176 {
+ t.Errorf("ExerciseIface(10) got %d want 1176", v)
+ }
+}
+
+func BenchmarkDevirtFuncConcrete(b *testing.B) {
+ ExerciseFuncConcrete(b.N, AddFn, SubFn, mult.MultFn, mult.NegMultFn)
+}
+
+func TestDevirtFuncConcrete(t *testing.T) {
+ if v := ExerciseFuncConcrete(10, AddFn, SubFn, mult.MultFn, mult.NegMultFn); v != 1176 {
+ t.Errorf("ExerciseFuncConcrete(10) got %d want 1176", v)
+ }
+}
+
+func BenchmarkDevirtFuncField(b *testing.B) {
+ ExerciseFuncField(b.N, AddFn, SubFn, mult.MultFn, mult.NegMultFn)
+}
+
+func TestDevirtFuncField(t *testing.T) {
+ if v := ExerciseFuncField(10, AddFn, SubFn, mult.MultFn, mult.NegMultFn); v != 1176 {
+ t.Errorf("ExerciseFuncField(10) got %d want 1176", v)
+ }
+}
+
+func BenchmarkDevirtFuncClosure(b *testing.B) {
+ ExerciseFuncClosure(b.N, AddClosure(), SubClosure(), mult.MultClosure(), mult.NegMultClosure())
+}
+
+func TestDevirtFuncClosure(t *testing.T) {
+ if v := ExerciseFuncClosure(10, AddClosure(), SubClosure(), mult.MultClosure(), mult.NegMultClosure()); v != 1176 {
+ t.Errorf("ExerciseFuncClosure(10) got %d want 1176", v)
+ }
}
diff --git a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult.pkg/mult.go b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult.pkg/mult.go
new file mode 100644
index 0000000000..113a5e1a7e
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult.pkg/mult.go
@@ -0,0 +1,72 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// WARNING: Please avoid updating this file.
+// See the warning in ../devirt.go for more details.
+
+package mult
+
+var sink int
+
+type Multiplier interface {
+ Multiply(a, b int) int
+}
+
+type Mult struct{}
+
+func (Mult) Multiply(a, b int) int {
+ for i := 0; i < 1000; i++ {
+ sink++
+ }
+ return a * b
+}
+
+type NegMult struct{}
+
+func (NegMult) Multiply(a, b int) int {
+ for i := 0; i < 1000; i++ {
+ sink++
+ }
+ return -1 * a * b
+}
+
+// N.B. Different types than AddFunc to test intra-line disambiguation.
+type MultFunc func(int64, int64) int64
+
+func MultFn(a, b int64) int64 {
+ for i := 0; i < 1000; i++ {
+ sink++
+ }
+ return a * b
+}
+
+func NegMultFn(a, b int64) int64 {
+ for i := 0; i < 1000; i++ {
+ sink++
+ }
+ return -1 * a * b
+}
+
+//go:noinline
+func MultClosure() MultFunc {
+ // Explicit closure to differentiate from AddClosure.
+ c := 1
+ return func(a, b int64) int64 {
+ for i := 0; i < 1000; i++ {
+ sink++
+ }
+ return a * b * int64(c)
+ }
+}
+
+//go:noinline
+func NegMultClosure() MultFunc {
+ c := 1
+ return func(a, b int64) int64 {
+ for i := 0; i < 1000; i++ {
+ sink++
+ }
+ return -1 * a * b * int64(c)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult/mult.go b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult/mult.go
deleted file mode 100644
index 8a026a52f5..0000000000
--- a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult/mult.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// WARNING: Please avoid updating this file.
-// See the warning in ../devirt.go for more details.
-
-package mult
-
-var sink int
-
-type Multiplier interface {
- Multiply(a, b int) int
-}
-
-type Mult struct{}
-
-func (Mult) Multiply(a, b int) int {
- for i := 0; i < 1000; i++ {
- sink++
- }
- return a * b
-}
-
-type NegMult struct{}
-
-func (NegMult) Multiply(a, b int) int {
- for i := 0; i < 1000; i++ {
- sink++
- }
- return -1 * a * b
-}
diff --git a/src/cmd/compile/internal/typebits/typebits.go b/src/cmd/compile/internal/typebits/typebits.go
index b533212e9e..b07f4374c2 100644
--- a/src/cmd/compile/internal/typebits/typebits.go
+++ b/src/cmd/compile/internal/typebits/typebits.go
@@ -86,7 +86,7 @@ func set(t *types.Type, off int64, bv bitvec.BitVec, skip bool) {
}
case types.TSTRUCT:
- for _, f := range t.Fields().Slice() {
+ for _, f := range t.Fields() {
set(f.Type, off+f.Offset, bv, skip)
}
diff --git a/src/cmd/compile/internal/typecheck/_builtin/coverage.go b/src/cmd/compile/internal/typecheck/_builtin/coverage.go
index ea4462dd97..02226356bc 100644
--- a/src/cmd/compile/internal/typecheck/_builtin/coverage.go
+++ b/src/cmd/compile/internal/typecheck/_builtin/coverage.go
@@ -7,7 +7,6 @@
// to avoid depending on having a working compiler binary.
//go:build ignore
-// +build ignore
package coverage
diff --git a/src/cmd/compile/internal/typecheck/_builtin/runtime.go b/src/cmd/compile/internal/typecheck/_builtin/runtime.go
index 2e1e94bbd7..421152967c 100644
--- a/src/cmd/compile/internal/typecheck/_builtin/runtime.go
+++ b/src/cmd/compile/internal/typecheck/_builtin/runtime.go
@@ -82,9 +82,6 @@ func slicecopy(toPtr *any, toLen int, fromPtr *any, fromLen int, wid uintptr) in
func decoderune(string, int) (retv rune, retk int)
func countrunes(string) int
-// Non-empty-interface to non-empty-interface conversion.
-func convI2I(typ *byte, itab *uintptr) (ret *uintptr)
-
// Convert non-interface type to the data word of a (empty or nonempty) interface.
func convT(typ *byte, elem *any) unsafe.Pointer
@@ -105,19 +102,27 @@ func convTslice(val []uint8) unsafe.Pointer
// interface type assertions x.(T)
func assertE2I(inter *byte, typ *byte) *byte
-func assertE2I2(inter *byte, eface any) (ret any)
-func assertI2I(inter *byte, tab *byte) *byte
-func assertI2I2(inter *byte, iface any) (ret any)
+func assertE2I2(inter *byte, typ *byte) *byte
func panicdottypeE(have, want, iface *byte)
func panicdottypeI(have, want, iface *byte)
func panicnildottype(want *byte)
+func typeAssert(s *byte, typ *byte) *byte
+
+// interface switches
+func interfaceSwitch(s *byte, t *byte) (int, *byte)
// interface equality. Type/itab pointers are already known to be equal, so
// we only need to pass one.
func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
-func fastrand() uint32
+// panic for iteration after exit in range func
+func panicrangeexit()
+
+// defer in range over func
+func deferrangefunc() interface{}
+
+func rand32() uint32
// *byte is really *runtime.Type
func makemap64(mapType *byte, hint int64, mapbuf *any) (hmap map[any]any)
@@ -158,7 +163,6 @@ func closechan(hchan any)
var writeBarrier struct {
enabled bool
pad [3]byte
- needed bool
cgo bool
alignme uint64
}
@@ -186,8 +190,6 @@ func unsafestringcheckptr(ptr unsafe.Pointer, len int64)
func panicunsafestringlen()
func panicunsafestringnilptr()
-func mulUintptr(x, y uintptr) (uintptr, bool)
-
func memmove(to *any, frm *any, length uintptr)
func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
func memclrHasPointers(ptr unsafe.Pointer, n uintptr)
@@ -280,3 +282,5 @@ var x86HasSSE41 bool
var x86HasFMA bool
var armHasVFPv4 bool
var arm64HasATOMICS bool
+
+func asanregisterglobals(unsafe.Pointer, uintptr)
diff --git a/src/cmd/compile/internal/typecheck/builtin.go b/src/cmd/compile/internal/typecheck/builtin.go
index bf87b4dec5..09f60c68c0 100644
--- a/src/cmd/compile/internal/typecheck/builtin.go
+++ b/src/cmd/compile/internal/typecheck/builtin.go
@@ -86,24 +86,25 @@ var runtimeDecls = [...]struct {
{"slicecopy", funcTag, 54},
{"decoderune", funcTag, 55},
{"countrunes", funcTag, 56},
- {"convI2I", funcTag, 58},
- {"convT", funcTag, 59},
- {"convTnoptr", funcTag, 59},
- {"convT16", funcTag, 61},
- {"convT32", funcTag, 63},
- {"convT64", funcTag, 64},
- {"convTstring", funcTag, 65},
- {"convTslice", funcTag, 68},
- {"assertE2I", funcTag, 69},
- {"assertE2I2", funcTag, 70},
- {"assertI2I", funcTag, 69},
- {"assertI2I2", funcTag, 70},
- {"panicdottypeE", funcTag, 71},
- {"panicdottypeI", funcTag, 71},
- {"panicnildottype", funcTag, 72},
- {"ifaceeq", funcTag, 73},
- {"efaceeq", funcTag, 73},
- {"fastrand", funcTag, 74},
+ {"convT", funcTag, 57},
+ {"convTnoptr", funcTag, 57},
+ {"convT16", funcTag, 59},
+ {"convT32", funcTag, 61},
+ {"convT64", funcTag, 62},
+ {"convTstring", funcTag, 63},
+ {"convTslice", funcTag, 66},
+ {"assertE2I", funcTag, 67},
+ {"assertE2I2", funcTag, 67},
+ {"panicdottypeE", funcTag, 68},
+ {"panicdottypeI", funcTag, 68},
+ {"panicnildottype", funcTag, 69},
+ {"typeAssert", funcTag, 67},
+ {"interfaceSwitch", funcTag, 70},
+ {"ifaceeq", funcTag, 72},
+ {"efaceeq", funcTag, 72},
+ {"panicrangeexit", funcTag, 9},
+ {"deferrangefunc", funcTag, 73},
+ {"rand32", funcTag, 74},
{"makemap64", funcTag, 76},
{"makemap", funcTag, 77},
{"makemap_small", funcTag, 78},
@@ -155,86 +156,86 @@ var runtimeDecls = [...]struct {
{"unsafestringcheckptr", funcTag, 119},
{"panicunsafestringlen", funcTag, 9},
{"panicunsafestringnilptr", funcTag, 9},
- {"mulUintptr", funcTag, 120},
- {"memmove", funcTag, 121},
- {"memclrNoHeapPointers", funcTag, 122},
- {"memclrHasPointers", funcTag, 122},
- {"memequal", funcTag, 123},
- {"memequal0", funcTag, 124},
- {"memequal8", funcTag, 124},
- {"memequal16", funcTag, 124},
- {"memequal32", funcTag, 124},
- {"memequal64", funcTag, 124},
- {"memequal128", funcTag, 124},
- {"f32equal", funcTag, 125},
- {"f64equal", funcTag, 125},
- {"c64equal", funcTag, 125},
- {"c128equal", funcTag, 125},
- {"strequal", funcTag, 125},
- {"interequal", funcTag, 125},
- {"nilinterequal", funcTag, 125},
- {"memhash", funcTag, 126},
- {"memhash0", funcTag, 127},
- {"memhash8", funcTag, 127},
- {"memhash16", funcTag, 127},
- {"memhash32", funcTag, 127},
- {"memhash64", funcTag, 127},
- {"memhash128", funcTag, 127},
- {"f32hash", funcTag, 128},
- {"f64hash", funcTag, 128},
- {"c64hash", funcTag, 128},
- {"c128hash", funcTag, 128},
- {"strhash", funcTag, 128},
- {"interhash", funcTag, 128},
- {"nilinterhash", funcTag, 128},
- {"int64div", funcTag, 129},
- {"uint64div", funcTag, 130},
- {"int64mod", funcTag, 129},
- {"uint64mod", funcTag, 130},
- {"float64toint64", funcTag, 131},
- {"float64touint64", funcTag, 132},
- {"float64touint32", funcTag, 133},
- {"int64tofloat64", funcTag, 134},
- {"int64tofloat32", funcTag, 136},
- {"uint64tofloat64", funcTag, 137},
- {"uint64tofloat32", funcTag, 138},
- {"uint32tofloat64", funcTag, 139},
- {"complex128div", funcTag, 140},
- {"getcallerpc", funcTag, 141},
- {"getcallersp", funcTag, 141},
+ {"memmove", funcTag, 120},
+ {"memclrNoHeapPointers", funcTag, 121},
+ {"memclrHasPointers", funcTag, 121},
+ {"memequal", funcTag, 122},
+ {"memequal0", funcTag, 123},
+ {"memequal8", funcTag, 123},
+ {"memequal16", funcTag, 123},
+ {"memequal32", funcTag, 123},
+ {"memequal64", funcTag, 123},
+ {"memequal128", funcTag, 123},
+ {"f32equal", funcTag, 124},
+ {"f64equal", funcTag, 124},
+ {"c64equal", funcTag, 124},
+ {"c128equal", funcTag, 124},
+ {"strequal", funcTag, 124},
+ {"interequal", funcTag, 124},
+ {"nilinterequal", funcTag, 124},
+ {"memhash", funcTag, 125},
+ {"memhash0", funcTag, 126},
+ {"memhash8", funcTag, 126},
+ {"memhash16", funcTag, 126},
+ {"memhash32", funcTag, 126},
+ {"memhash64", funcTag, 126},
+ {"memhash128", funcTag, 126},
+ {"f32hash", funcTag, 127},
+ {"f64hash", funcTag, 127},
+ {"c64hash", funcTag, 127},
+ {"c128hash", funcTag, 127},
+ {"strhash", funcTag, 127},
+ {"interhash", funcTag, 127},
+ {"nilinterhash", funcTag, 127},
+ {"int64div", funcTag, 128},
+ {"uint64div", funcTag, 129},
+ {"int64mod", funcTag, 128},
+ {"uint64mod", funcTag, 129},
+ {"float64toint64", funcTag, 130},
+ {"float64touint64", funcTag, 131},
+ {"float64touint32", funcTag, 132},
+ {"int64tofloat64", funcTag, 133},
+ {"int64tofloat32", funcTag, 135},
+ {"uint64tofloat64", funcTag, 136},
+ {"uint64tofloat32", funcTag, 137},
+ {"uint32tofloat64", funcTag, 138},
+ {"complex128div", funcTag, 139},
+ {"getcallerpc", funcTag, 140},
+ {"getcallersp", funcTag, 140},
{"racefuncenter", funcTag, 31},
{"racefuncexit", funcTag, 9},
{"raceread", funcTag, 31},
{"racewrite", funcTag, 31},
- {"racereadrange", funcTag, 142},
- {"racewriterange", funcTag, 142},
- {"msanread", funcTag, 142},
- {"msanwrite", funcTag, 142},
- {"msanmove", funcTag, 143},
- {"asanread", funcTag, 142},
- {"asanwrite", funcTag, 142},
- {"checkptrAlignment", funcTag, 144},
- {"checkptrArithmetic", funcTag, 146},
- {"libfuzzerTraceCmp1", funcTag, 147},
- {"libfuzzerTraceCmp2", funcTag, 148},
- {"libfuzzerTraceCmp4", funcTag, 149},
- {"libfuzzerTraceCmp8", funcTag, 150},
- {"libfuzzerTraceConstCmp1", funcTag, 147},
- {"libfuzzerTraceConstCmp2", funcTag, 148},
- {"libfuzzerTraceConstCmp4", funcTag, 149},
- {"libfuzzerTraceConstCmp8", funcTag, 150},
- {"libfuzzerHookStrCmp", funcTag, 151},
- {"libfuzzerHookEqualFold", funcTag, 151},
- {"addCovMeta", funcTag, 153},
+ {"racereadrange", funcTag, 141},
+ {"racewriterange", funcTag, 141},
+ {"msanread", funcTag, 141},
+ {"msanwrite", funcTag, 141},
+ {"msanmove", funcTag, 142},
+ {"asanread", funcTag, 141},
+ {"asanwrite", funcTag, 141},
+ {"checkptrAlignment", funcTag, 143},
+ {"checkptrArithmetic", funcTag, 145},
+ {"libfuzzerTraceCmp1", funcTag, 146},
+ {"libfuzzerTraceCmp2", funcTag, 147},
+ {"libfuzzerTraceCmp4", funcTag, 148},
+ {"libfuzzerTraceCmp8", funcTag, 149},
+ {"libfuzzerTraceConstCmp1", funcTag, 146},
+ {"libfuzzerTraceConstCmp2", funcTag, 147},
+ {"libfuzzerTraceConstCmp4", funcTag, 148},
+ {"libfuzzerTraceConstCmp8", funcTag, 149},
+ {"libfuzzerHookStrCmp", funcTag, 150},
+ {"libfuzzerHookEqualFold", funcTag, 150},
+ {"addCovMeta", funcTag, 152},
{"x86HasPOPCNT", varTag, 6},
{"x86HasSSE41", varTag, 6},
{"x86HasFMA", varTag, 6},
{"armHasVFPv4", varTag, 6},
{"arm64HasATOMICS", varTag, 6},
+ {"asanregisterglobals", funcTag, 121},
}
func runtimeTypes() []*types.Type {
- var typs [154]*types.Type
+ var typs [153]*types.Type
typs[0] = types.ByteType
typs[1] = types.NewPtr(typs[0])
typs[2] = types.Types[types.TANY]
@@ -292,41 +293,41 @@ func runtimeTypes() []*types.Type {
typs[54] = newSig(params(typs[3], typs[15], typs[3], typs[15], typs[5]), params(typs[15]))
typs[55] = newSig(params(typs[28], typs[15]), params(typs[46], typs[15]))
typs[56] = newSig(params(typs[28]), params(typs[15]))
- typs[57] = types.NewPtr(typs[5])
- typs[58] = newSig(params(typs[1], typs[57]), params(typs[57]))
- typs[59] = newSig(params(typs[1], typs[3]), params(typs[7]))
- typs[60] = types.Types[types.TUINT16]
+ typs[57] = newSig(params(typs[1], typs[3]), params(typs[7]))
+ typs[58] = types.Types[types.TUINT16]
+ typs[59] = newSig(params(typs[58]), params(typs[7]))
+ typs[60] = types.Types[types.TUINT32]
typs[61] = newSig(params(typs[60]), params(typs[7]))
- typs[62] = types.Types[types.TUINT32]
- typs[63] = newSig(params(typs[62]), params(typs[7]))
- typs[64] = newSig(params(typs[24]), params(typs[7]))
- typs[65] = newSig(params(typs[28]), params(typs[7]))
- typs[66] = types.Types[types.TUINT8]
- typs[67] = types.NewSlice(typs[66])
- typs[68] = newSig(params(typs[67]), params(typs[7]))
- typs[69] = newSig(params(typs[1], typs[1]), params(typs[1]))
- typs[70] = newSig(params(typs[1], typs[2]), params(typs[2]))
- typs[71] = newSig(params(typs[1], typs[1], typs[1]), nil)
- typs[72] = newSig(params(typs[1]), nil)
- typs[73] = newSig(params(typs[57], typs[7], typs[7]), params(typs[6]))
- typs[74] = newSig(nil, params(typs[62]))
+ typs[62] = newSig(params(typs[24]), params(typs[7]))
+ typs[63] = newSig(params(typs[28]), params(typs[7]))
+ typs[64] = types.Types[types.TUINT8]
+ typs[65] = types.NewSlice(typs[64])
+ typs[66] = newSig(params(typs[65]), params(typs[7]))
+ typs[67] = newSig(params(typs[1], typs[1]), params(typs[1]))
+ typs[68] = newSig(params(typs[1], typs[1], typs[1]), nil)
+ typs[69] = newSig(params(typs[1]), nil)
+ typs[70] = newSig(params(typs[1], typs[1]), params(typs[15], typs[1]))
+ typs[71] = types.NewPtr(typs[5])
+ typs[72] = newSig(params(typs[71], typs[7], typs[7]), params(typs[6]))
+ typs[73] = newSig(nil, params(typs[10]))
+ typs[74] = newSig(nil, params(typs[60]))
typs[75] = types.NewMap(typs[2], typs[2])
typs[76] = newSig(params(typs[1], typs[22], typs[3]), params(typs[75]))
typs[77] = newSig(params(typs[1], typs[15], typs[3]), params(typs[75]))
typs[78] = newSig(nil, params(typs[75]))
typs[79] = newSig(params(typs[1], typs[75], typs[3]), params(typs[3]))
- typs[80] = newSig(params(typs[1], typs[75], typs[62]), params(typs[3]))
+ typs[80] = newSig(params(typs[1], typs[75], typs[60]), params(typs[3]))
typs[81] = newSig(params(typs[1], typs[75], typs[24]), params(typs[3]))
typs[82] = newSig(params(typs[1], typs[75], typs[28]), params(typs[3]))
typs[83] = newSig(params(typs[1], typs[75], typs[3], typs[1]), params(typs[3]))
typs[84] = newSig(params(typs[1], typs[75], typs[3]), params(typs[3], typs[6]))
- typs[85] = newSig(params(typs[1], typs[75], typs[62]), params(typs[3], typs[6]))
+ typs[85] = newSig(params(typs[1], typs[75], typs[60]), params(typs[3], typs[6]))
typs[86] = newSig(params(typs[1], typs[75], typs[24]), params(typs[3], typs[6]))
typs[87] = newSig(params(typs[1], typs[75], typs[28]), params(typs[3], typs[6]))
typs[88] = newSig(params(typs[1], typs[75], typs[3], typs[1]), params(typs[3], typs[6]))
typs[89] = newSig(params(typs[1], typs[75], typs[7]), params(typs[3]))
typs[90] = newSig(params(typs[1], typs[75], typs[3]), nil)
- typs[91] = newSig(params(typs[1], typs[75], typs[62]), nil)
+ typs[91] = newSig(params(typs[1], typs[75], typs[60]), nil)
typs[92] = newSig(params(typs[1], typs[75], typs[24]), nil)
typs[93] = newSig(params(typs[1], typs[75], typs[28]), nil)
typs[94] = newSig(params(typs[3]), nil)
@@ -340,14 +341,14 @@ func runtimeTypes() []*types.Type {
typs[102] = types.NewChan(typs[2], types.Csend)
typs[103] = newSig(params(typs[102], typs[3]), nil)
typs[104] = types.NewArray(typs[0], 3)
- typs[105] = types.NewStruct([]*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), types.NewField(src.NoXPos, Lookup("pad"), typs[104]), types.NewField(src.NoXPos, Lookup("needed"), typs[6]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), types.NewField(src.NoXPos, Lookup("alignme"), typs[24])})
+ typs[105] = types.NewStruct([]*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), types.NewField(src.NoXPos, Lookup("pad"), typs[104]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), types.NewField(src.NoXPos, Lookup("alignme"), typs[24])})
typs[106] = newSig(params(typs[1], typs[3], typs[3]), nil)
typs[107] = newSig(params(typs[1], typs[3]), nil)
typs[108] = newSig(params(typs[1], typs[3], typs[15], typs[3], typs[15]), params(typs[15]))
typs[109] = newSig(params(typs[102], typs[3]), params(typs[6]))
typs[110] = newSig(params(typs[3], typs[99]), params(typs[6], typs[6]))
- typs[111] = newSig(params(typs[57]), nil)
- typs[112] = newSig(params(typs[1], typs[1], typs[57], typs[15], typs[15], typs[6]), params(typs[15], typs[6]))
+ typs[111] = newSig(params(typs[71]), nil)
+ typs[112] = newSig(params(typs[1], typs[1], typs[71], typs[15], typs[15], typs[6]), params(typs[15], typs[6]))
typs[113] = newSig(params(typs[1], typs[15], typs[15]), params(typs[7]))
typs[114] = newSig(params(typs[1], typs[22], typs[22]), params(typs[7]))
typs[115] = newSig(params(typs[1], typs[15], typs[15], typs[7]), params(typs[7]))
@@ -355,40 +356,39 @@ func runtimeTypes() []*types.Type {
typs[117] = newSig(params(typs[3], typs[15], typs[15], typs[15], typs[1]), params(typs[116]))
typs[118] = newSig(params(typs[1], typs[7], typs[22]), nil)
typs[119] = newSig(params(typs[7], typs[22]), nil)
- typs[120] = newSig(params(typs[5], typs[5]), params(typs[5], typs[6]))
- typs[121] = newSig(params(typs[3], typs[3], typs[5]), nil)
- typs[122] = newSig(params(typs[7], typs[5]), nil)
- typs[123] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6]))
- typs[124] = newSig(params(typs[3], typs[3]), params(typs[6]))
- typs[125] = newSig(params(typs[7], typs[7]), params(typs[6]))
- typs[126] = newSig(params(typs[3], typs[5], typs[5]), params(typs[5]))
- typs[127] = newSig(params(typs[7], typs[5]), params(typs[5]))
- typs[128] = newSig(params(typs[3], typs[5]), params(typs[5]))
- typs[129] = newSig(params(typs[22], typs[22]), params(typs[22]))
- typs[130] = newSig(params(typs[24], typs[24]), params(typs[24]))
- typs[131] = newSig(params(typs[20]), params(typs[22]))
- typs[132] = newSig(params(typs[20]), params(typs[24]))
- typs[133] = newSig(params(typs[20]), params(typs[62]))
- typs[134] = newSig(params(typs[22]), params(typs[20]))
- typs[135] = types.Types[types.TFLOAT32]
- typs[136] = newSig(params(typs[22]), params(typs[135]))
- typs[137] = newSig(params(typs[24]), params(typs[20]))
- typs[138] = newSig(params(typs[24]), params(typs[135]))
- typs[139] = newSig(params(typs[62]), params(typs[20]))
- typs[140] = newSig(params(typs[26], typs[26]), params(typs[26]))
- typs[141] = newSig(nil, params(typs[5]))
- typs[142] = newSig(params(typs[5], typs[5]), nil)
- typs[143] = newSig(params(typs[5], typs[5], typs[5]), nil)
- typs[144] = newSig(params(typs[7], typs[1], typs[5]), nil)
- typs[145] = types.NewSlice(typs[7])
- typs[146] = newSig(params(typs[7], typs[145]), nil)
- typs[147] = newSig(params(typs[66], typs[66], typs[17]), nil)
+ typs[120] = newSig(params(typs[3], typs[3], typs[5]), nil)
+ typs[121] = newSig(params(typs[7], typs[5]), nil)
+ typs[122] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6]))
+ typs[123] = newSig(params(typs[3], typs[3]), params(typs[6]))
+ typs[124] = newSig(params(typs[7], typs[7]), params(typs[6]))
+ typs[125] = newSig(params(typs[3], typs[5], typs[5]), params(typs[5]))
+ typs[126] = newSig(params(typs[7], typs[5]), params(typs[5]))
+ typs[127] = newSig(params(typs[3], typs[5]), params(typs[5]))
+ typs[128] = newSig(params(typs[22], typs[22]), params(typs[22]))
+ typs[129] = newSig(params(typs[24], typs[24]), params(typs[24]))
+ typs[130] = newSig(params(typs[20]), params(typs[22]))
+ typs[131] = newSig(params(typs[20]), params(typs[24]))
+ typs[132] = newSig(params(typs[20]), params(typs[60]))
+ typs[133] = newSig(params(typs[22]), params(typs[20]))
+ typs[134] = types.Types[types.TFLOAT32]
+ typs[135] = newSig(params(typs[22]), params(typs[134]))
+ typs[136] = newSig(params(typs[24]), params(typs[20]))
+ typs[137] = newSig(params(typs[24]), params(typs[134]))
+ typs[138] = newSig(params(typs[60]), params(typs[20]))
+ typs[139] = newSig(params(typs[26], typs[26]), params(typs[26]))
+ typs[140] = newSig(nil, params(typs[5]))
+ typs[141] = newSig(params(typs[5], typs[5]), nil)
+ typs[142] = newSig(params(typs[5], typs[5], typs[5]), nil)
+ typs[143] = newSig(params(typs[7], typs[1], typs[5]), nil)
+ typs[144] = types.NewSlice(typs[7])
+ typs[145] = newSig(params(typs[7], typs[144]), nil)
+ typs[146] = newSig(params(typs[64], typs[64], typs[17]), nil)
+ typs[147] = newSig(params(typs[58], typs[58], typs[17]), nil)
typs[148] = newSig(params(typs[60], typs[60], typs[17]), nil)
- typs[149] = newSig(params(typs[62], typs[62], typs[17]), nil)
- typs[150] = newSig(params(typs[24], typs[24], typs[17]), nil)
- typs[151] = newSig(params(typs[28], typs[28], typs[17]), nil)
- typs[152] = types.NewArray(typs[0], 16)
- typs[153] = newSig(params(typs[7], typs[62], typs[152], typs[28], typs[15], typs[66], typs[66]), params(typs[62]))
+ typs[149] = newSig(params(typs[24], typs[24], typs[17]), nil)
+ typs[150] = newSig(params(typs[28], typs[28], typs[17]), nil)
+ typs[151] = types.NewArray(typs[0], 16)
+ typs[152] = newSig(params(typs[7], typs[60], typs[151], typs[28], typs[15], typs[64], typs[64]), params(typs[60]))
return typs[:]
}
diff --git a/src/cmd/compile/internal/typecheck/const.go b/src/cmd/compile/internal/typecheck/const.go
index f4fb614e63..e7f9ec5cd8 100644
--- a/src/cmd/compile/internal/typecheck/const.go
+++ b/src/cmd/compile/internal/typecheck/const.go
@@ -8,7 +8,6 @@ import (
"fmt"
"go/constant"
"go/token"
- "internal/types/errors"
"math"
"math/big"
"unicode"
@@ -113,7 +112,7 @@ func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir
base.Fatalf("unexpected untyped expression: %v", n)
case ir.OLITERAL:
- v := convertVal(n.Val(), t, explicit)
+ v := ConvertVal(n.Val(), t, explicit)
if v.Kind() == constant.Unknown {
n = ir.NewConstExpr(n.Val(), n)
break
@@ -219,12 +218,13 @@ func operandType(op ir.Op, t *types.Type) *types.Type {
return nil
}
-// convertVal converts v into a representation appropriate for t. If
-// no such representation exists, it returns Val{} instead.
+// ConvertVal converts v into a representation appropriate for t. If
+// no such representation exists, it returns constant.MakeUnknown()
+// instead.
//
// If explicit is true, then conversions from integer to string are
// also allowed.
-func convertVal(v constant.Value, t *types.Type, explicit bool) constant.Value {
+func ConvertVal(v constant.Value, t *types.Type, explicit bool) constant.Value {
switch ct := v.Kind(); ct {
case constant.Bool:
if t.IsBoolean() {
@@ -304,8 +304,7 @@ func toint(v constant.Value) constant.Value {
}
// Prevent follow-on errors.
- // TODO(mdempsky): Use constant.MakeUnknown() instead.
- return constant.MakeInt64(1)
+ return constant.MakeUnknown()
}
func tostr(v constant.Value) constant.Value {
@@ -319,35 +318,6 @@ func tostr(v constant.Value) constant.Value {
return v
}
-var tokenForOp = [...]token.Token{
- ir.OPLUS: token.ADD,
- ir.ONEG: token.SUB,
- ir.ONOT: token.NOT,
- ir.OBITNOT: token.XOR,
-
- ir.OADD: token.ADD,
- ir.OSUB: token.SUB,
- ir.OMUL: token.MUL,
- ir.ODIV: token.QUO,
- ir.OMOD: token.REM,
- ir.OOR: token.OR,
- ir.OXOR: token.XOR,
- ir.OAND: token.AND,
- ir.OANDNOT: token.AND_NOT,
- ir.OOROR: token.LOR,
- ir.OANDAND: token.LAND,
-
- ir.OEQ: token.EQL,
- ir.ONE: token.NEQ,
- ir.OLT: token.LSS,
- ir.OLE: token.LEQ,
- ir.OGT: token.GTR,
- ir.OGE: token.GEQ,
-
- ir.OLSH: token.SHL,
- ir.ORSH: token.SHR,
-}
-
func makeFloat64(f float64) constant.Value {
if math.IsInf(f, 0) {
base.Fatalf("infinity is not a valid constant")
@@ -359,50 +329,6 @@ func makeComplex(real, imag constant.Value) constant.Value {
return constant.BinaryOp(constant.ToFloat(real), token.ADD, constant.MakeImag(constant.ToFloat(imag)))
}
-// For matching historical "constant OP overflow" error messages.
-// TODO(mdempsky): Replace with error messages like go/types uses.
-var overflowNames = [...]string{
- ir.OADD: "addition",
- ir.OSUB: "subtraction",
- ir.OMUL: "multiplication",
- ir.OLSH: "shift",
- ir.OXOR: "bitwise XOR",
- ir.OBITNOT: "bitwise complement",
-}
-
-// OrigConst returns an OLITERAL with orig n and value v.
-func OrigConst(n ir.Node, v constant.Value) ir.Node {
- lno := ir.SetPos(n)
- v = convertVal(v, n.Type(), false)
- base.Pos = lno
-
- switch v.Kind() {
- case constant.Int:
- if constant.BitLen(v) <= ir.ConstPrec {
- break
- }
- fallthrough
- case constant.Unknown:
- what := overflowNames[n.Op()]
- if what == "" {
- base.Fatalf("unexpected overflow: %v", n.Op())
- }
- base.ErrorfAt(n.Pos(), errors.NumericOverflow, "constant %v overflow", what)
- n.SetType(nil)
- return n
- }
-
- return ir.NewConstExpr(v, n)
-}
-
-func OrigBool(n ir.Node, v bool) ir.Node {
- return OrigConst(n, constant.MakeBool(v))
-}
-
-func OrigInt(n ir.Node, v int64) ir.Node {
- return OrigConst(n, constant.MakeInt64(v))
-}
-
// DefaultLit on both nodes simultaneously;
// if they're both ideal going in they better
// get the same type going out.
@@ -544,9 +470,10 @@ func callOrChan(n ir.Node) bool {
ir.ONEW,
ir.OPANIC,
ir.OPRINT,
- ir.OPRINTN,
+ ir.OPRINTLN,
ir.OREAL,
ir.ORECOVER,
+ ir.ORECOVERFP,
ir.ORECV,
ir.OUNSAFEADD,
ir.OUNSAFESLICE,
@@ -557,96 +484,3 @@ func callOrChan(n ir.Node) bool {
}
return false
}
-
-// evalunsafe evaluates a package unsafe operation and returns the result.
-func evalunsafe(n ir.Node) int64 {
- switch n.Op() {
- case ir.OALIGNOF, ir.OSIZEOF:
- n := n.(*ir.UnaryExpr)
- n.X = Expr(n.X)
- n.X = DefaultLit(n.X, nil)
- tr := n.X.Type()
- if tr == nil {
- return 0
- }
- types.CalcSize(tr)
- if n.Op() == ir.OALIGNOF {
- return tr.Alignment()
- }
- return tr.Size()
-
- case ir.OOFFSETOF:
- // must be a selector.
- n := n.(*ir.UnaryExpr)
- // ODOT and ODOTPTR are allowed in case the OXDOT transformation has
- // already happened (e.g. during -G=3 stenciling).
- if n.X.Op() != ir.OXDOT && n.X.Op() != ir.ODOT && n.X.Op() != ir.ODOTPTR {
- base.Errorf("invalid expression %v", n)
- return 0
- }
- sel := n.X.(*ir.SelectorExpr)
-
- // Remember base of selector to find it back after dot insertion.
- // Since r->left may be mutated by typechecking, check it explicitly
- // first to track it correctly.
- sel.X = Expr(sel.X)
- sbase := sel.X
-
- // Implicit dot may already be resolved for instantiating generic function. So we
- // need to remove any implicit dot until we reach the first non-implicit one, it's
- // the right base selector. See issue #53137.
- var clobberBase func(n ir.Node) ir.Node
- clobberBase = func(n ir.Node) ir.Node {
- if sel, ok := n.(*ir.SelectorExpr); ok && sel.Implicit() {
- return clobberBase(sel.X)
- }
- return n
- }
- sbase = clobberBase(sbase)
-
- tsel := Expr(sel)
- n.X = tsel
- if tsel.Type() == nil {
- return 0
- }
- switch tsel.Op() {
- case ir.ODOT, ir.ODOTPTR:
- break
- case ir.OMETHVALUE:
- base.Errorf("invalid expression %v: argument is a method value", n)
- return 0
- default:
- base.Errorf("invalid expression %v", n)
- return 0
- }
-
- // Sum offsets for dots until we reach sbase.
- var v int64
- var next ir.Node
- for r := tsel; r != sbase; r = next {
- switch r.Op() {
- case ir.ODOTPTR:
- // For Offsetof(s.f), s may itself be a pointer,
- // but accessing f must not otherwise involve
- // indirection via embedded pointer types.
- r := r.(*ir.SelectorExpr)
- if r.X != sbase {
- base.Errorf("invalid expression %v: selector implies indirection of embedded %v", n, r.X)
- return 0
- }
- fallthrough
- case ir.ODOT:
- r := r.(*ir.SelectorExpr)
- v += r.Offset()
- next = r.X
- default:
- ir.Dump("unsafenmagic", tsel)
- base.Fatalf("impossible %v node after dot insertion", r.Op())
- }
- }
- return v
- }
-
- base.Fatalf("unexpected op %v", n.Op())
- return 0
-}
diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go
index 029c14f819..4a847e8558 100644
--- a/src/cmd/compile/internal/typecheck/dcl.go
+++ b/src/cmd/compile/internal/typecheck/dcl.go
@@ -6,7 +6,6 @@ package typecheck
import (
"fmt"
- "internal/types/errors"
"sync"
"cmd/compile/internal/base"
@@ -15,108 +14,26 @@ import (
"cmd/internal/src"
)
-var DeclContext ir.Class = ir.PEXTERN // PEXTERN/PAUTO
+var funcStack []*ir.Func // stack of previous values of ir.CurFunc
-func DeclFunc(sym *types.Sym, recv *ir.Field, params, results []*ir.Field) *ir.Func {
- fn := ir.NewFunc(base.Pos)
- fn.Nname = ir.NewNameAt(base.Pos, sym)
- fn.Nname.Func = fn
+// DeclFunc declares the parameters for fn and adds it to
+// Target.Funcs.
+//
+// Before returning, it sets CurFunc to fn. When the caller is done
+// constructing fn, it must call FinishFuncBody to restore CurFunc.
+func DeclFunc(fn *ir.Func) {
+ fn.DeclareParams(true)
fn.Nname.Defn = fn
- ir.MarkFunc(fn.Nname)
- StartFuncBody(fn)
+ Target.Funcs = append(Target.Funcs, fn)
- var recv1 *types.Field
- if recv != nil {
- recv1 = declareParam(ir.PPARAM, -1, recv)
- }
-
- typ := types.NewSignature(recv1, declareParams(ir.PPARAM, params), declareParams(ir.PPARAMOUT, results))
- checkdupfields("argument", typ.Recvs().FieldSlice(), typ.Params().FieldSlice(), typ.Results().FieldSlice())
- fn.Nname.SetType(typ)
- fn.Nname.SetTypecheck(1)
-
- return fn
-}
-
-// Declare records that Node n declares symbol n.Sym in the specified
-// declaration context.
-func Declare(n *ir.Name, ctxt ir.Class) {
- if ir.IsBlank(n) {
- return
- }
-
- s := n.Sym()
-
- // kludgy: TypecheckAllowed means we're past parsing. Eg reflectdata.methodWrapper may declare out of package names later.
- if !inimport && !TypecheckAllowed && s.Pkg != types.LocalPkg {
- base.ErrorfAt(n.Pos(), 0, "cannot declare name %v", s)
- }
-
- if ctxt == ir.PEXTERN {
- if s.Name == "init" {
- base.ErrorfAt(n.Pos(), errors.InvalidInitDecl, "cannot declare init - must be func")
- }
- if s.Name == "main" && s.Pkg.Name == "main" {
- base.ErrorfAt(n.Pos(), errors.InvalidMainDecl, "cannot declare main - must be func")
- }
- Target.Externs = append(Target.Externs, n)
- s.Def = n
- } else {
- if ir.CurFunc == nil && ctxt == ir.PAUTO {
- base.Pos = n.Pos()
- base.Fatalf("automatic outside function")
- }
- if ir.CurFunc != nil && ctxt != ir.PFUNC && n.Op() == ir.ONAME {
- ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
- }
- n.Curfn = ir.CurFunc
- }
-
- if ctxt == ir.PAUTO {
- n.SetFrameOffset(0)
- }
-
- n.Class = ctxt
- if ctxt == ir.PFUNC {
- n.Sym().SetFunc(true)
- }
-
- autoexport(n, ctxt)
-}
-
-// Export marks n for export (or reexport).
-func Export(n *ir.Name) {
- if n.Sym().OnExportList() {
- return
- }
- n.Sym().SetOnExportList(true)
-
- if base.Flag.E != 0 {
- fmt.Printf("export symbol %v\n", n.Sym())
- }
-
- Target.Exports = append(Target.Exports, n)
-}
-
-// declare the function proper
-// and declare the arguments.
-// called in extern-declaration context
-// returns in auto-declaration context.
-func StartFuncBody(fn *ir.Func) {
- // change the declaration context from extern to auto
- funcStack = append(funcStack, funcStackEnt{ir.CurFunc, DeclContext})
+ funcStack = append(funcStack, ir.CurFunc)
ir.CurFunc = fn
- DeclContext = ir.PAUTO
}
-// finish the body.
-// called in auto-declaration context.
-// returns in extern-declaration context.
+// FinishFuncBody restores ir.CurFunc to its state before the last
+// call to DeclFunc.
func FinishFuncBody() {
- // change the declaration context from auto to previous context
- var e funcStackEnt
- funcStack, e = funcStack[:len(funcStack)-1], funcStack[len(funcStack)-1]
- ir.CurFunc, DeclContext = e.curfn, e.dclcontext
+ funcStack, ir.CurFunc = funcStack[:len(funcStack)-1], funcStack[len(funcStack)-1]
}
func CheckFuncStack() {
@@ -125,150 +42,29 @@ func CheckFuncStack() {
}
}
-func autoexport(n *ir.Name, ctxt ir.Class) {
- if n.Sym().Pkg != types.LocalPkg {
- return
- }
- if (ctxt != ir.PEXTERN && ctxt != ir.PFUNC) || DeclContext != ir.PEXTERN {
- return
- }
- if n.Type() != nil && n.Type().IsKind(types.TFUNC) && ir.IsMethod(n) {
- return
- }
-
- if types.IsExported(n.Sym().Name) || n.Sym().Name == "init" {
- Export(n)
- }
- if base.Flag.AsmHdr != "" && !n.Sym().Asm() {
- n.Sym().SetAsm(true)
- Target.Asms = append(Target.Asms, n)
- }
-}
-
-// checkdupfields emits errors for duplicately named fields or methods in
-// a list of struct or interface types.
-func checkdupfields(what string, fss ...[]*types.Field) {
- seen := make(map[*types.Sym]bool)
- for _, fs := range fss {
- for _, f := range fs {
- if f.Sym == nil || f.Sym.IsBlank() {
- continue
- }
- if seen[f.Sym] {
- base.ErrorfAt(f.Pos, errors.DuplicateFieldAndMethod, "duplicate %s %s", what, f.Sym.Name)
- continue
- }
- seen[f.Sym] = true
- }
- }
-}
-
-// structs, functions, and methods.
-// they don't belong here, but where do they belong?
-func checkembeddedtype(t *types.Type) {
- if t == nil {
- return
- }
-
- if t.Sym() == nil && t.IsPtr() {
- t = t.Elem()
- if t.IsInterface() {
- base.Errorf("embedded type cannot be a pointer to interface")
- }
- }
-
- if t.IsPtr() || t.IsUnsafePtr() {
- base.Errorf("embedded type cannot be a pointer")
- } else if t.Kind() == types.TFORW && !t.ForwardType().Embedlineno.IsKnown() {
- t.ForwardType().Embedlineno = base.Pos
- }
-}
-
-var funcStack []funcStackEnt // stack of previous values of ir.CurFunc/DeclContext
-
-type funcStackEnt struct {
- curfn *ir.Func
- dclcontext ir.Class
-}
-
-func declareParams(ctxt ir.Class, l []*ir.Field) []*types.Field {
- fields := make([]*types.Field, len(l))
- for i, n := range l {
- fields[i] = declareParam(ctxt, i, n)
- }
- return fields
-}
-
-func declareParam(ctxt ir.Class, i int, param *ir.Field) *types.Field {
- f := types.NewField(param.Pos, param.Sym, param.Type)
- f.SetIsDDD(param.IsDDD)
-
- sym := param.Sym
- if ctxt == ir.PPARAMOUT {
- if sym == nil {
- // Name so that escape analysis can track it. ~r stands for 'result'.
- sym = LookupNum("~r", i)
- } else if sym.IsBlank() {
- // Give it a name so we can assign to it during return. ~b stands for 'blank'.
- // The name must be different from ~r above because if you have
- // func f() (_ int)
- // func g() int
- // f is allowed to use a plain 'return' with no arguments, while g is not.
- // So the two cases must be distinguished.
- sym = LookupNum("~b", i)
- }
- }
-
- if sym != nil {
- name := ir.NewNameAt(param.Pos, sym)
- name.SetType(f.Type)
- name.SetTypecheck(1)
- Declare(name, ctxt)
-
- f.Nname = name
- }
-
- return f
-}
-
-func Temp(t *types.Type) *ir.Name {
- return TempAt(base.Pos, ir.CurFunc, t)
-}
-
// make a new Node off the books.
-func TempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name {
+func TempAt(pos src.XPos, curfn *ir.Func, typ *types.Type) *ir.Name {
if curfn == nil {
- base.Fatalf("no curfn for TempAt")
- }
- if curfn.Op() == ir.OCLOSURE {
- ir.Dump("TempAt", curfn)
- base.Fatalf("adding TempAt to wrong closure function")
+ base.FatalfAt(pos, "no curfn for TempAt")
}
- if t == nil {
- base.Fatalf("TempAt called with nil type")
+ if typ == nil {
+ base.FatalfAt(pos, "TempAt called with nil type")
}
- if t.Kind() == types.TFUNC && t.Recv() != nil {
- base.Fatalf("misuse of method type: %v", t)
+ if typ.Kind() == types.TFUNC && typ.Recv() != nil {
+ base.FatalfAt(pos, "misuse of method type: %v", typ)
}
+ types.CalcSize(typ)
- s := &types.Sym{
+ sym := &types.Sym{
Name: autotmpname(len(curfn.Dcl)),
Pkg: types.LocalPkg,
}
- n := ir.NewNameAt(pos, s)
- s.Def = n
- n.SetType(t)
- n.SetTypecheck(1)
- n.Class = ir.PAUTO
- n.SetEsc(ir.EscNever)
- n.Curfn = curfn
- n.SetUsed(true)
- n.SetAutoTemp(true)
- curfn.Dcl = append(curfn.Dcl, n)
-
- types.CalcSize(t)
+ name := curfn.NewLocal(pos, sym, typ)
+ name.SetEsc(ir.EscNever)
+ name.SetUsed(true)
+ name.SetAutoTemp(true)
- return n
+ return name
}
var (
@@ -310,18 +106,18 @@ func NewMethodType(sig *types.Type, recv *types.Type) *types.Type {
// TODO(mdempsky): Move this function to types.
// TODO(mdempsky): Preserve positions, names, and package from sig+recv.
- params := make([]*types.Field, nrecvs+sig.Params().Fields().Len())
+ params := make([]*types.Field, nrecvs+sig.NumParams())
if recv != nil {
params[0] = types.NewField(base.Pos, nil, recv)
}
- for i, param := range sig.Params().Fields().Slice() {
+ for i, param := range sig.Params() {
d := types.NewField(base.Pos, nil, param.Type)
d.SetIsDDD(param.IsDDD())
params[nrecvs+i] = d
}
- results := make([]*types.Field, sig.Results().Fields().Len())
- for i, t := range sig.Results().Fields().Slice() {
+ results := make([]*types.Field, sig.NumResults())
+ for i, t := range sig.Results() {
results[i] = types.NewField(base.Pos, nil, t.Type)
}
diff --git a/src/cmd/compile/internal/typecheck/export.go b/src/cmd/compile/internal/typecheck/export.go
index af56ea8d98..585c1b78c2 100644
--- a/src/cmd/compile/internal/typecheck/export.go
+++ b/src/cmd/compile/internal/typecheck/export.go
@@ -5,70 +5,29 @@
package typecheck
import (
- "go/constant"
-
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
)
-// importalias declares symbol s as an imported type alias with type t.
-// ipkg is the package being imported.
-func importalias(pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
- return importobj(pos, s, ir.OTYPE, ir.PEXTERN, t)
-}
-
-// importconst declares symbol s as an imported constant with type t and value val.
-// ipkg is the package being imported.
-func importconst(pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) *ir.Name {
- n := importobj(pos, s, ir.OLITERAL, ir.PEXTERN, t)
- n.SetVal(val)
- return n
-}
-
// importfunc declares symbol s as an imported function with type t.
-// ipkg is the package being imported.
-func importfunc(pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
- n := importobj(pos, s, ir.ONAME, ir.PFUNC, t)
- n.Func = ir.NewFunc(pos)
- n.Func.Nname = n
- return n
+func importfunc(s *types.Sym, t *types.Type) {
+ fn := ir.NewFunc(src.NoXPos, src.NoXPos, s, t)
+ importsym(fn.Nname)
}
-// importobj declares symbol s as an imported object representable by op.
-// ipkg is the package being imported.
-func importobj(pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Name {
- n := importsym(pos, s, op, ctxt)
- n.SetType(t)
- if ctxt == ir.PFUNC {
- n.Sym().SetFunc(true)
- }
- return n
+// importvar declares symbol s as an imported variable with type t.
+func importvar(s *types.Sym, t *types.Type) {
+ n := ir.NewNameAt(src.NoXPos, s, t)
+ n.Class = ir.PEXTERN
+ importsym(n)
}
-func importsym(pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class) *ir.Name {
- if n := s.PkgDef(); n != nil {
- base.Fatalf("importsym of symbol that already exists: %v", n)
+func importsym(name *ir.Name) {
+ sym := name.Sym()
+ if sym.Def != nil {
+ base.Fatalf("importsym of symbol that already exists: %v", sym.Def)
}
-
- n := ir.NewDeclNameAt(pos, op, s)
- n.Class = ctxt // TODO(mdempsky): Move this into NewDeclNameAt too?
- s.SetPkgDef(n)
- return n
-}
-
-// importtype returns the named type declared by symbol s.
-// If no such type has been declared yet, a forward declaration is returned.
-// ipkg is the package being imported.
-func importtype(pos src.XPos, s *types.Sym) *ir.Name {
- n := importsym(pos, s, ir.OTYPE, ir.PEXTERN)
- n.SetType(types.NewNamed(n))
- return n
-}
-
-// importvar declares symbol s as an imported variable with type t.
-// ipkg is the package being imported.
-func importvar(pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
- return importobj(pos, s, ir.ONAME, ir.PEXTERN, t)
+ sym.Def = name
}
diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go
index 2d25f80473..12d1743874 100644
--- a/src/cmd/compile/internal/typecheck/expr.go
+++ b/src/cmd/compile/internal/typecheck/expr.go
@@ -8,45 +8,15 @@ import (
"fmt"
"go/constant"
"go/token"
+ "internal/types/errors"
"strings"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
+ "cmd/internal/src"
)
-// tcAddr typechecks an OADDR node.
-func tcAddr(n *ir.AddrExpr) ir.Node {
- n.X = Expr(n.X)
- if n.X.Type() == nil {
- n.SetType(nil)
- return n
- }
-
- switch n.X.Op() {
- case ir.OARRAYLIT, ir.OMAPLIT, ir.OSLICELIT, ir.OSTRUCTLIT:
- n.SetOp(ir.OPTRLIT)
-
- default:
- checklvalue(n.X, "take the address of")
- r := ir.OuterValue(n.X)
- if r.Op() == ir.ONAME {
- r := r.(*ir.Name)
- if ir.Orig(r) != r {
- base.Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean?
- }
- }
- n.X = DefaultLit(n.X, nil)
- if n.X.Type() == nil {
- n.SetType(nil)
- return n
- }
- }
-
- n.SetType(types.NewPtr(n.X.Type()))
- return n
-}
-
func tcShift(n, l, r ir.Node) (ir.Node, ir.Node, *types.Type) {
if l.Type() == nil || r.Type() == nil {
return l, r, nil
@@ -99,7 +69,7 @@ func tcArith(n ir.Node, op ir.Op, l, r ir.Node) (ir.Node, ir.Node, *types.Type)
// The conversion allocates, so only do it if the concrete type is huge.
converted := false
if r.Type().Kind() != types.TBLANK {
- aop, _ = Assignop(l.Type(), r.Type())
+ aop, _ = assignOp(l.Type(), r.Type())
if aop != ir.OXXX {
if r.Type().IsInterface() && !l.Type().IsInterface() && !types.IsComparable(l.Type()) {
base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type()))
@@ -118,7 +88,7 @@ func tcArith(n ir.Node, op ir.Op, l, r ir.Node) (ir.Node, ir.Node, *types.Type)
}
if !converted && l.Type().Kind() != types.TBLANK {
- aop, _ = Assignop(r.Type(), l.Type())
+ aop, _ = assignOp(r.Type(), l.Type())
if aop != ir.OXXX {
if l.Type().IsInterface() && !r.Type().IsInterface() && !types.IsComparable(r.Type()) {
base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type()))
@@ -200,9 +170,6 @@ func tcCompLit(n *ir.CompLitExpr) (res ir.Node) {
base.Pos = lno
}()
- // Save original node (including n.Right)
- n.SetOrig(ir.Copy(n))
-
ir.SetPos(n)
t := n.Type()
@@ -274,7 +241,7 @@ func tcCompLit(n *ir.CompLitExpr) (res ir.Node) {
// walkClosure(), because the instantiated
// function is compiled as if in the source
// package of the generic function.
- if !(ir.CurFunc != nil && strings.Index(ir.CurFunc.Nname.Sym().Name, "[") >= 0) {
+ if !(ir.CurFunc != nil && strings.Contains(ir.CurFunc.Nname.Sym().Name, "[")) {
if s != nil && !types.IsExported(s.Name) && s.Pkg != types.LocalPkg {
base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
}
@@ -385,9 +352,12 @@ func tcConv(n *ir.ConvExpr) ir.Node {
n.SetType(nil)
return n
}
- op, why := Convertop(n.X.Op() == ir.OLITERAL, t, n.Type())
+ op, why := convertOp(n.X.Op() == ir.OLITERAL, t, n.Type())
if op == ir.OXXX {
- base.Fatalf("cannot convert %L to type %v%s", n.X, n.Type(), why)
+ // Due to //go:nointerface, we may be stricter than types2 here (#63333).
+ base.ErrorfAt(n.Pos(), errors.InvalidConversion, "cannot convert %L to type %v%s", n.X, n.Type(), why)
+ n.SetType(nil)
+ return n
}
n.SetOp(op)
@@ -436,6 +406,66 @@ func tcConv(n *ir.ConvExpr) ir.Node {
return n
}
+// DotField returns a field selector expression that selects the
+// index'th field of the given expression, which must be of struct or
+// pointer-to-struct type.
+func DotField(pos src.XPos, x ir.Node, index int) *ir.SelectorExpr {
+ op, typ := ir.ODOT, x.Type()
+ if typ.IsPtr() {
+ op, typ = ir.ODOTPTR, typ.Elem()
+ }
+ if !typ.IsStruct() {
+ base.FatalfAt(pos, "DotField of non-struct: %L", x)
+ }
+
+ // TODO(mdempsky): This is the backend's responsibility.
+ types.CalcSize(typ)
+
+ field := typ.Field(index)
+ return dot(pos, field.Type, op, x, field)
+}
+
+func dot(pos src.XPos, typ *types.Type, op ir.Op, x ir.Node, selection *types.Field) *ir.SelectorExpr {
+ n := ir.NewSelectorExpr(pos, op, x, selection.Sym)
+ n.Selection = selection
+ n.SetType(typ)
+ n.SetTypecheck(1)
+ return n
+}
+
+// XDotMethod returns an expression representing the field selection
+// x.sym. If any implicit field selection are necessary, those are
+// inserted too.
+func XDotField(pos src.XPos, x ir.Node, sym *types.Sym) *ir.SelectorExpr {
+ n := Expr(ir.NewSelectorExpr(pos, ir.OXDOT, x, sym)).(*ir.SelectorExpr)
+ if n.Op() != ir.ODOT && n.Op() != ir.ODOTPTR {
+ base.FatalfAt(pos, "unexpected result op: %v (%v)", n.Op(), n)
+ }
+ return n
+}
+
+// XDotMethod returns an expression representing the method value
+// x.sym (i.e., x is a value, not a type). If any implicit field
+// selection are necessary, those are inserted too.
+//
+// If callee is true, the result is an ODOTMETH/ODOTINTER, otherwise
+// an OMETHVALUE.
+func XDotMethod(pos src.XPos, x ir.Node, sym *types.Sym, callee bool) *ir.SelectorExpr {
+ n := ir.NewSelectorExpr(pos, ir.OXDOT, x, sym)
+ if callee {
+ n = Callee(n).(*ir.SelectorExpr)
+ if n.Op() != ir.ODOTMETH && n.Op() != ir.ODOTINTER {
+ base.FatalfAt(pos, "unexpected result op: %v (%v)", n.Op(), n)
+ }
+ } else {
+ n = Expr(n).(*ir.SelectorExpr)
+ if n.Op() != ir.OMETHVALUE {
+ base.FatalfAt(pos, "unexpected result op: %v (%v)", n.Op(), n)
+ }
+ }
+ return n
+}
+
// tcDot typechecks an OXDOT or ODOT node.
func tcDot(n *ir.SelectorExpr, top int) ir.Node {
if n.Op() == ir.OXDOT {
@@ -447,7 +477,7 @@ func tcDot(n *ir.SelectorExpr, top int) ir.Node {
}
}
- n.X = typecheck(n.X, ctxExpr|ctxType)
+ n.X = Expr(n.X)
n.X = DefaultLit(n.X, nil)
t := n.X.Type()
@@ -458,7 +488,7 @@ func tcDot(n *ir.SelectorExpr, top int) ir.Node {
}
if n.X.Op() == ir.OTYPE {
- return typecheckMethodExpr(n)
+ base.FatalfAt(n.Pos(), "use NewMethodExpr to construct OMETHEXPR")
}
if t.IsPtr() && !t.Elem().IsInterface() {
diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go
index 47d6c1e09a..5c54a5bd49 100644
--- a/src/cmd/compile/internal/typecheck/func.go
+++ b/src/cmd/compile/internal/typecheck/func.go
@@ -17,18 +17,15 @@ import (
// MakeDotArgs packages all the arguments that match a ... T parameter into a []T.
func MakeDotArgs(pos src.XPos, typ *types.Type, args []ir.Node) ir.Node {
- var n ir.Node
if len(args) == 0 {
- n = ir.NewNilExpr(pos)
- n.SetType(typ)
- } else {
- args = append([]ir.Node(nil), args...)
- lit := ir.NewCompLitExpr(pos, ir.OCOMPLIT, typ, args)
- lit.SetImplicit(true)
- n = lit
+ return ir.NewNilExpr(pos, typ)
}
- n = Expr(n)
+ args = append([]ir.Node(nil), args...)
+ lit := ir.NewCompLitExpr(pos, ir.OCOMPLIT, typ, args)
+ lit.SetImplicit(true)
+
+ n := Expr(lit)
if n.Type() == nil {
base.FatalfAt(pos, "mkdotargslice: typecheck failed")
}
@@ -38,13 +35,13 @@ func MakeDotArgs(pos src.XPos, typ *types.Type, args []ir.Node) ir.Node {
// FixVariadicCall rewrites calls to variadic functions to use an
// explicit ... argument if one is not already present.
func FixVariadicCall(call *ir.CallExpr) {
- fntype := call.X.Type()
+ fntype := call.Fun.Type()
if !fntype.IsVariadic() || call.IsDDD {
return
}
vi := fntype.NumParams() - 1
- vt := fntype.Params().Field(vi).Type
+ vt := fntype.Param(vi).Type
args := call.Args
extra := args[vi:]
@@ -59,25 +56,25 @@ func FixVariadicCall(call *ir.CallExpr) {
// FixMethodCall rewrites a method call t.M(...) into a function call T.M(t, ...).
func FixMethodCall(call *ir.CallExpr) {
- if call.X.Op() != ir.ODOTMETH {
+ if call.Fun.Op() != ir.ODOTMETH {
return
}
- dot := call.X.(*ir.SelectorExpr)
+ dot := call.Fun.(*ir.SelectorExpr)
- fn := Expr(ir.NewSelectorExpr(dot.Pos(), ir.OXDOT, ir.TypeNode(dot.X.Type()), dot.Selection.Sym))
+ fn := NewMethodExpr(dot.Pos(), dot.X.Type(), dot.Selection.Sym)
args := make([]ir.Node, 1+len(call.Args))
args[0] = dot.X
copy(args[1:], call.Args)
call.SetOp(ir.OCALLFUNC)
- call.X = fn
+ call.Fun = fn
call.Args = args
}
func AssertFixedCall(call *ir.CallExpr) {
- if call.X.Type().IsVariadic() && !call.IsDDD {
+ if call.Fun.Type().IsVariadic() && !call.IsDDD {
base.FatalfAt(call.Pos(), "missed FixVariadicCall")
}
if call.Op() == ir.OCALLMETH {
@@ -130,82 +127,6 @@ func MethodValueType(n *ir.SelectorExpr) *types.Type {
return t
}
-// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
-// the ->sym can be re-used in the local package, so peel it off the receiver's type.
-func fnpkg(fn *ir.Name) *types.Pkg {
- if ir.IsMethod(fn) {
- // method
- rcvr := fn.Type().Recv().Type
-
- if rcvr.IsPtr() {
- rcvr = rcvr.Elem()
- }
- if rcvr.Sym() == nil {
- base.Fatalf("receiver with no sym: [%v] %L (%v)", fn.Sym(), fn, rcvr)
- }
- return rcvr.Sym().Pkg
- }
-
- // non-method
- return fn.Sym().Pkg
-}
-
-// tcClosure typechecks an OCLOSURE node. It also creates the named
-// function associated with the closure.
-// TODO: This creation of the named function should probably really be done in a
-// separate pass from type-checking.
-func tcClosure(clo *ir.ClosureExpr, top int) ir.Node {
- fn := clo.Func
-
- // We used to allow IR builders to typecheck the underlying Func
- // themselves, but that led to too much variety and inconsistency
- // around who's responsible for naming the function, typechecking
- // it, or adding it to Target.Decls.
- //
- // It's now all or nothing. Callers are still allowed to do these
- // themselves, but then they assume responsibility for all of them.
- if fn.Typecheck() == 1 {
- base.FatalfAt(fn.Pos(), "underlying closure func already typechecked: %v", fn)
- }
-
- ir.NameClosure(clo, ir.CurFunc)
- Func(fn)
-
- // Type check the body now, but only if we're inside a function.
- // At top level (in a variable initialization: curfn==nil) we're not
- // ready to type check code yet; we'll check it later, because the
- // underlying closure function we create is added to Target.Decls.
- if ir.CurFunc != nil {
- oldfn := ir.CurFunc
- ir.CurFunc = fn
- Stmts(fn.Body)
- ir.CurFunc = oldfn
- }
-
- out := 0
- for _, v := range fn.ClosureVars {
- if v.Type() == nil {
- // If v.Type is nil, it means v looked like it was going to be
- // used in the closure, but isn't. This happens in struct
- // literals like s{f: x} where we can't distinguish whether f is
- // a field identifier or expression until resolving s.
- continue
- }
-
- // type check closed variables outside the closure, so that the
- // outer frame also captures them.
- Expr(v.Outer)
-
- fn.ClosureVars[out] = v
- out++
- }
- fn.ClosureVars = fn.ClosureVars[:out]
-
- clo.SetType(fn.Type())
-
- return ir.UseClosure(clo, Target)
-}
-
// type check function definition
// To be called by typecheck, not directly.
// (Call typecheck.Func instead.)
@@ -223,9 +144,9 @@ func tcFunc(n *ir.Func) {
// tcCall typechecks an OCALL node.
func tcCall(n *ir.CallExpr, top int) ir.Node {
Stmts(n.Init()) // imported rewritten f(g()) calls (#30907)
- n.X = typecheck(n.X, ctxExpr|ctxType|ctxCallee)
+ n.Fun = typecheck(n.Fun, ctxExpr|ctxType|ctxCallee)
- l := n.X
+ l := n.Fun
if l.Op() == ir.ONAME && l.(*ir.Name).BuiltinOp != 0 {
l := l.(*ir.Name)
@@ -238,16 +159,16 @@ func tcCall(n *ir.CallExpr, top int) ir.Node {
default:
base.Fatalf("unknown builtin %v", l)
- case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OMAX, ir.OMIN, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+ case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OMAX, ir.OMIN, ir.OPRINT, ir.OPRINTLN, ir.ORECOVER:
n.SetOp(l.BuiltinOp)
- n.X = nil
+ n.Fun = nil
n.SetTypecheck(0) // re-typechecking new op is OK, not a loop
return typecheck(n, top)
case ir.OCAP, ir.OCLEAR, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.OPANIC, ir.OREAL, ir.OUNSAFESTRINGDATA, ir.OUNSAFESLICEDATA:
typecheckargs(n)
fallthrough
- case ir.ONEW, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+ case ir.ONEW:
arg, ok := needOneArg(n, "%v", n.Op())
if !ok {
n.SetType(nil)
@@ -269,8 +190,8 @@ func tcCall(n *ir.CallExpr, top int) ir.Node {
panic("unreachable")
}
- n.X = DefaultLit(n.X, nil)
- l = n.X
+ n.Fun = DefaultLit(n.Fun, nil)
+ l = n.Fun
if l.Op() == ir.OTYPE {
if n.IsDDD {
base.Fatalf("invalid use of ... in type conversion to %v", l.Type())
@@ -318,7 +239,7 @@ func tcCall(n *ir.CallExpr, top int) ir.Node {
default:
n.SetOp(ir.OCALLFUNC)
if t.Kind() != types.TFUNC {
- if o := ir.Orig(l); o.Name() != nil && types.BuiltinPkg.Lookup(o.Sym().Name).Def != nil {
+ if o := l; o.Name() != nil && types.BuiltinPkg.Lookup(o.Sym().Name).Def != nil {
// be more specific when the non-function
// name matches a predeclared function
base.Errorf("cannot call non-function %L, declared at %s",
@@ -331,17 +252,17 @@ func tcCall(n *ir.CallExpr, top int) ir.Node {
}
}
- typecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args, func() string { return fmt.Sprintf("argument to %v", n.X) })
+ typecheckaste(ir.OCALL, n.Fun, n.IsDDD, t.Params(), n.Args, func() string { return fmt.Sprintf("argument to %v", n.Fun) })
FixVariadicCall(n)
FixMethodCall(n)
if t.NumResults() == 0 {
return n
}
if t.NumResults() == 1 {
- n.SetType(l.Type().Results().Field(0).Type)
+ n.SetType(l.Type().Result(0).Type)
- if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME {
- if sym := n.X.(*ir.Name).Sym(); types.IsRuntimePkg(sym.Pkg) && sym.Name == "getg" {
+ if n.Op() == ir.OCALLFUNC && n.Fun.Op() == ir.ONAME {
+ if sym := n.Fun.(*ir.Name).Sym(); types.RuntimeSymName(sym) == "getg" {
// Emit code for runtime.getg() directly instead of calling function.
// Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
// so that the ordering pass can make sure to preserve the semantics of the original code
@@ -360,7 +281,7 @@ func tcCall(n *ir.CallExpr, top int) ir.Node {
return n
}
- n.SetType(l.Type().Results())
+ n.SetType(l.Type().ResultsTuple())
return n
}
@@ -834,22 +755,17 @@ func tcRecover(n *ir.CallExpr) ir.Node {
return n
}
- n.SetType(types.Types[types.TINTER])
- return n
-}
-
-// tcRecoverFP typechecks an ORECOVERFP node.
-func tcRecoverFP(n *ir.CallExpr) ir.Node {
- if len(n.Args) != 1 {
- base.FatalfAt(n.Pos(), "wrong number of arguments: %v", n)
- }
-
- n.Args[0] = Expr(n.Args[0])
- if !n.Args[0].Type().IsPtrShaped() {
- base.FatalfAt(n.Pos(), "%L is not pointer shaped", n.Args[0])
+ // FP is equal to caller's SP plus FixedFrameSize.
+ var fp ir.Node = ir.NewCallExpr(n.Pos(), ir.OGETCALLERSP, nil, nil)
+ if off := base.Ctxt.Arch.FixedFrameSize; off != 0 {
+ fp = ir.NewBinaryExpr(n.Pos(), ir.OADD, fp, ir.NewInt(base.Pos, off))
}
+ // TODO(mdempsky): Replace *int32 with unsafe.Pointer, without upsetting checkptr.
+ fp = ir.NewConvExpr(n.Pos(), ir.OCONVNOP, types.NewPtr(types.Types[types.TINT32]), fp)
+ n.SetOp(ir.ORECOVERFP)
n.SetType(types.Types[types.TINTER])
+ n.Args = []ir.Node{Expr(fp)}
return n
}
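For orientation, the ORECOVERFP rewrite above implements ordinary recover semantics; the typechecker just threads the frame pointer (caller's SP plus FixedFrameSize, per the comment) through to the runtime. A minimal runnable example of the behavior being preserved:

package main

import "fmt"

func main() {
	defer func() {
		// Lowered to an ORECOVERFP call carrying this frame's FP,
		// so recover only has an effect when called directly from
		// a deferred function.
		if r := recover(); r != nil {
			fmt.Println("recovered:", r)
		}
	}()
	panic("boom")
}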
diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go
index df579b7166..83d35b365f 100644
--- a/src/cmd/compile/internal/typecheck/iexport.go
+++ b/src/cmd/compile/internal/typecheck/iexport.go
@@ -235,69 +235,11 @@
package typecheck
import (
- "go/constant"
- "strconv"
"strings"
-
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/types"
-)
-
-// predeclReserved is the number of type offsets reserved for types
-// implicitly declared in the universe block.
-const predeclReserved = 32
-
-// An itag distinguishes the kind of type that was written into the
-// indexed export format.
-type itag uint64
-
-const (
- // Types
- definedType itag = iota
- pointerType
- sliceType
- arrayType
- chanType
- mapType
- signatureType
- structType
- interfaceType
- typeParamType
- instanceType // Instantiation of a generic type
- unionType
)
-const (
- debug = false
- magic = 0x6742937dc293105
-)
-
-// exportPath returns the path for pkg as it appears in the iexport
-// file format. For historical reasons (before cmd/compile required
-// the -p flag), the local package is represented as the empty string,
-// instead of its actual path.
-func exportPath(pkg *types.Pkg) string {
- if pkg == types.LocalPkg {
- return ""
- }
- return pkg.Path
-}
-
const blankMarker = "$"
-// TparamExportName creates a unique name for type param in a method or a generic
-// type, using the specified unique prefix and the index of the type param. The index
-// is only used if the type param is blank, in which case the blank is replace by
-// "$<index>". A unique name is needed for later substitution in the compiler and
-// export/import that keeps blank type params associated with the correct constraint.
-func TparamExportName(prefix string, name string, index int) string {
- if name == "_" {
- name = blankMarker + strconv.Itoa(index)
- }
- return prefix + "." + name
-}
-
// TparamName returns the real name of a type parameter, after stripping its
// qualifying prefix and reverting blank-name encoding. See TparamExportName
// for details.
@@ -314,83 +256,5 @@ func TparamName(exportName string) string {
return name
}
-func constTypeOf(typ *types.Type) constant.Kind {
- switch typ {
- case types.UntypedInt, types.UntypedRune:
- return constant.Int
- case types.UntypedFloat:
- return constant.Float
- case types.UntypedComplex:
- return constant.Complex
- }
-
- switch typ.Kind() {
- case types.TBOOL:
- return constant.Bool
- case types.TSTRING:
- return constant.String
- case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64,
- types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
- return constant.Int
- case types.TFLOAT32, types.TFLOAT64:
- return constant.Float
- case types.TCOMPLEX64, types.TCOMPLEX128:
- return constant.Complex
- }
-
- base.Fatalf("unexpected constant type: %v", typ)
- return 0
-}
-
-func intSize(typ *types.Type) (signed bool, maxBytes uint) {
- if typ.IsUntyped() {
- return true, ir.ConstPrec / 8
- }
-
- switch typ.Kind() {
- case types.TFLOAT32, types.TCOMPLEX64:
- return true, 3
- case types.TFLOAT64, types.TCOMPLEX128:
- return true, 7
- }
-
- signed = typ.IsSigned()
- maxBytes = uint(typ.Size())
-
- // The go/types API doesn't expose sizes to importers, so they
- // don't know how big these types are.
- switch typ.Kind() {
- case types.TINT, types.TUINT, types.TUINTPTR:
- maxBytes = 8
- }
-
- return
-}
-
-func isNonEmptyAssign(n ir.Node) bool {
- switch n.Op() {
- case ir.OAS:
- if n.(*ir.AssignStmt).Y != nil {
- return true
- }
- case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
- return true
- }
- return false
-}
-func isNamedTypeSwitch(x ir.Node) bool {
- guard, ok := x.(*ir.TypeSwitchGuard)
- return ok && guard.Tag != nil
-}
-
-func simplifyForExport(n ir.Node) ir.Node {
- switch n.Op() {
- case ir.OPAREN:
- n := n.(*ir.ParenExpr)
- return simplifyForExport(n.X)
- }
- return n
-}
-
// The name used for dictionary parameters or local variables.
const LocalDictName = ".dict"
diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go
index 9dea261bb9..8d792485d8 100644
--- a/src/cmd/compile/internal/typecheck/stmt.go
+++ b/src/cmd/compile/internal/typecheck/stmt.go
@@ -20,70 +20,6 @@ func RangeExprType(t *types.Type) *types.Type {
}
func typecheckrangeExpr(n *ir.RangeStmt) {
- n.X = Expr(n.X)
- if n.X.Type() == nil {
- return
- }
-
- t := RangeExprType(n.X.Type())
- // delicate little dance. see tcAssignList
- if n.Key != nil && !ir.DeclaredBy(n.Key, n) {
- n.Key = AssignExpr(n.Key)
- }
- if n.Value != nil && !ir.DeclaredBy(n.Value, n) {
- n.Value = AssignExpr(n.Value)
- }
-
- var tk, tv *types.Type
- toomany := false
- switch t.Kind() {
- default:
- base.ErrorfAt(n.Pos(), errors.InvalidRangeExpr, "cannot range over %L", n.X)
- return
-
- case types.TARRAY, types.TSLICE:
- tk = types.Types[types.TINT]
- tv = t.Elem()
-
- case types.TMAP:
- tk = t.Key()
- tv = t.Elem()
-
- case types.TCHAN:
- if !t.ChanDir().CanRecv() {
- base.ErrorfAt(n.Pos(), errors.InvalidRangeExpr, "invalid operation: range %v (receive from send-only type %v)", n.X, n.X.Type())
- return
- }
-
- tk = t.Elem()
- tv = nil
- if n.Value != nil {
- toomany = true
- }
-
- case types.TSTRING:
- tk = types.Types[types.TINT]
- tv = types.RuneType
- }
-
- if toomany {
- base.ErrorfAt(n.Pos(), errors.InvalidIterVar, "too many variables in range")
- }
-
- do := func(nn ir.Node, t *types.Type) {
- if nn != nil {
- if ir.DeclaredBy(nn, n) && nn.Type() == nil {
- nn.SetType(t)
- } else if nn.Type() != nil {
- if op, why := Assignop(t, nn.Type()); op == ir.OXXX {
- base.ErrorfAt(n.Pos(), errors.InvalidIterVar, "cannot assign type %v to %L in range%s", t, nn, why)
- }
- }
- checkassign(nn)
- }
- }
- do(n.Key, tk)
- do(n.Value, tv)
}
// type check assignment.
@@ -127,7 +63,6 @@ func assign(stmt ir.Node, lhs, rhs []ir.Node) {
// so that the conversion below happens).
checkLHS := func(i int, typ *types.Type) {
- lhs[i] = Resolve(lhs[i])
if n := lhs[i]; typ != nil && ir.DeclaredBy(n, stmt) && n.Type() == nil {
base.Assertf(typ.Kind() == types.TNIL, "unexpected untyped nil")
n.SetType(defaultType(typ))
@@ -186,7 +121,7 @@ assignOK:
if len(lhs) != cr {
if r, ok := rhs[0].(*ir.CallExpr); ok && len(rhs) == 1 {
if r.Type() != nil {
- base.ErrorfAt(stmt.Pos(), errors.WrongAssignCount, "assignment mismatch: %d variable%s but %v returns %d value%s", len(lhs), plural(len(lhs)), r.X, cr, plural(cr))
+ base.ErrorfAt(stmt.Pos(), errors.WrongAssignCount, "assignment mismatch: %d variable%s but %v returns %d value%s", len(lhs), plural(len(lhs)), r.Fun, cr, plural(cr))
}
} else {
base.ErrorfAt(stmt.Pos(), errors.WrongAssignCount, "assignment mismatch: %d variable%s but %v value%s", len(lhs), plural(len(lhs)), len(rhs), plural(len(rhs)))
@@ -263,57 +198,186 @@ func tcFor(n *ir.ForStmt) ir.Node {
return n
}
+// tcGoDefer typechecks (normalizes) an OGO/ODEFER statement.
func tcGoDefer(n *ir.GoDeferStmt) {
- what := "defer"
- if n.Op() == ir.OGO {
- what = "go"
- }
-
- switch n.Call.Op() {
- // ok
- case ir.OCALLINTER,
- ir.OCALLMETH,
- ir.OCALLFUNC,
- ir.OCLEAR,
- ir.OCLOSE,
- ir.OCOPY,
- ir.ODELETE,
- ir.OMAX,
- ir.OMIN,
- ir.OPANIC,
- ir.OPRINT,
- ir.OPRINTN,
- ir.ORECOVER:
- return
+ call := normalizeGoDeferCall(n.Pos(), n.Op(), n.Call, n.PtrInit())
+ call.GoDefer = true
+ n.Call = call
+}
- case ir.OAPPEND,
- ir.OCAP,
- ir.OCOMPLEX,
- ir.OIMAG,
- ir.OLEN,
- ir.OMAKE,
- ir.OMAKESLICE,
- ir.OMAKECHAN,
- ir.OMAKEMAP,
- ir.ONEW,
- ir.OREAL,
- ir.OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof
- if orig := ir.Orig(n.Call); orig.Op() == ir.OCONV {
- break
+// normalizeGoDeferCall normalizes call into a normal function call
+// with no arguments and no results, suitable for use in an OGO/ODEFER
+// statement.
+//
+// For example, it normalizes:
+//
+// f(x, y)
+//
+// into:
+//
+// x1, y1 := x, y // added to init
+// func() { f(x1, y1) }() // result
+func normalizeGoDeferCall(pos src.XPos, op ir.Op, call ir.Node, init *ir.Nodes) *ir.CallExpr {
+ init.Append(ir.TakeInit(call)...)
+
+ if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC {
+ if sig := call.Fun.Type(); sig.NumParams()+sig.NumResults() == 0 {
+ return call // already in normal form
}
- base.ErrorfAt(n.Pos(), errors.UnusedResults, "%s discards result of %v", what, n.Call)
- return
}
- // type is broken or missing, most likely a method call on a broken type
- // we will warn about the broken type elsewhere. no need to emit a potentially confusing error
- if n.Call.Type() == nil {
- return
+ // Create a new wrapper function without parameters or results.
+ wrapperFn := ir.NewClosureFunc(pos, pos, op, types.NewSignature(nil, nil, nil), ir.CurFunc, Target)
+ wrapperFn.DeclareParams(true)
+ wrapperFn.SetWrapper(true)
+
+ // argps collects the list of operands within the call expression
+ // that must be evaluated at the go/defer statement.
+ var argps []*ir.Node
+
+ var visit func(argp *ir.Node)
+ visit = func(argp *ir.Node) {
+ arg := *argp
+ if arg == nil {
+ return
+ }
+
+ // Recognize a few common expressions that can be evaluated within
+ // the wrapper, so we don't need to allocate space for them within
+ // the closure.
+ switch arg.Op() {
+ case ir.OLITERAL, ir.ONIL, ir.OMETHEXPR, ir.ONEW:
+ return
+ case ir.ONAME:
+ arg := arg.(*ir.Name)
+ if arg.Class == ir.PFUNC {
+ return // reference to global function
+ }
+ case ir.OADDR:
+ arg := arg.(*ir.AddrExpr)
+ if arg.X.Op() == ir.OLINKSYMOFFSET {
+ return // address of global symbol
+ }
+
+ case ir.OCONVNOP:
+ arg := arg.(*ir.ConvExpr)
+
+ // For unsafe.Pointer->uintptr conversion arguments, save the
+ // unsafe.Pointer argument. This is necessary to handle cases
+ // like fixedbugs/issue24491a.go correctly.
+ //
+ // TODO(mdempsky): Limit to static callees with
+ // //go:uintptr{escapes,keepalive}?
+ if arg.Type().IsUintptr() && arg.X.Type().IsUnsafePtr() {
+ visit(&arg.X)
+ return
+ }
+
+ case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT:
+ // TODO(mdempsky): For very large slices, it may be preferable
+ // to construct them at the go/defer statement instead.
+ list := arg.(*ir.CompLitExpr).List
+ for i, el := range list {
+ switch el := el.(type) {
+ case *ir.KeyExpr:
+ visit(&el.Value)
+ case *ir.StructKeyExpr:
+ visit(&el.Value)
+ default:
+ visit(&list[i])
+ }
+ }
+ return
+ }
+
+ argps = append(argps, argp)
+ }
+
+ visitList := func(list []ir.Node) {
+ for i := range list {
+ visit(&list[i])
+ }
+ }
+
+ switch call.Op() {
+ default:
+ base.Fatalf("unexpected call op: %v", call.Op())
+
+ case ir.OCALLFUNC:
+ call := call.(*ir.CallExpr)
+
+ // If the callee is a named function, link to the original callee.
+ if wrapped := ir.StaticCalleeName(call.Fun); wrapped != nil {
+ wrapperFn.WrappedFunc = wrapped.Func
+ }
+
+ visit(&call.Fun)
+ visitList(call.Args)
+
+ case ir.OCALLINTER:
+ call := call.(*ir.CallExpr)
+ argps = append(argps, &call.Fun.(*ir.SelectorExpr).X) // must be first for OCHECKNIL; see below
+ visitList(call.Args)
+
+ case ir.OAPPEND, ir.ODELETE, ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP:
+ call := call.(*ir.CallExpr)
+ visitList(call.Args)
+ visit(&call.RType)
+
+ case ir.OCOPY:
+ call := call.(*ir.BinaryExpr)
+ visit(&call.X)
+ visit(&call.Y)
+ visit(&call.RType)
+
+ case ir.OCLEAR, ir.OCLOSE, ir.OPANIC:
+ call := call.(*ir.UnaryExpr)
+ visit(&call.X)
}
- // The syntax made sure it was a call, so this must be
- // a conversion.
- base.FatalfAt(n.Pos(), "%s requires function call, not conversion", what)
+ if len(argps) != 0 {
+ // Found one or more operands that need to be evaluated upfront
+ // and spilled to temporary variables, which can be captured by
+ // the wrapper function.
+
+ stmtPos := base.Pos
+ callPos := base.Pos
+
+ as := ir.NewAssignListStmt(callPos, ir.OAS2, make([]ir.Node, len(argps)), make([]ir.Node, len(argps)))
+ for i, argp := range argps {
+ arg := *argp
+
+ pos := callPos
+ if ir.HasUniquePos(arg) {
+ pos = arg.Pos()
+ }
+
+ // tmp := arg
+ tmp := TempAt(pos, ir.CurFunc, arg.Type())
+ init.Append(Stmt(ir.NewDecl(pos, ir.ODCL, tmp)))
+ tmp.Defn = as
+ as.Lhs[i] = tmp
+ as.Rhs[i] = arg
+
+ // Rewrite original expression to use/capture tmp.
+ *argp = ir.NewClosureVar(pos, wrapperFn, tmp)
+ }
+ init.Append(Stmt(as))
+
+ // For "go/defer iface.M()", if iface is nil, we need to panic at
+ // the point of the go/defer statement.
+ if call.Op() == ir.OCALLINTER {
+ iface := as.Lhs[0]
+ init.Append(Stmt(ir.NewUnaryExpr(stmtPos, ir.OCHECKNIL, ir.NewUnaryExpr(iface.Pos(), ir.OITAB, iface))))
+ }
+ }
+
+ // Move call into the wrapper function, now that it's safe to
+ // evaluate there.
+ wrapperFn.Body = []ir.Node{call}
+
+ // Finally, construct a call to the wrapper.
+ return Call(call.Pos(), wrapperFn.OClosure, nil, false).(*ir.CallExpr)
}
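A source-level sketch of the rewrite normalizeGoDeferCall performs (plain Go rather than IR; x1 and y1 stand in for the compiler-generated temporaries):

package main

import "fmt"

func f(a, b int) { fmt.Println(a, b) }

func main() {
	x, y := 1, 2
	// Original statement:
	//	defer f(x, y)
	// After normalization: arguments are evaluated here and captured
	// by a parameterless, resultless wrapper closure.
	x1, y1 := x, y
	defer func() { f(x1, y1) }()

	x, y = 3, 4 // later writes do not affect the deferred call
	_, _ = x, y
}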
// tcIf typechecks an OIF node.
@@ -334,18 +398,23 @@ func tcIf(n *ir.IfStmt) ir.Node {
// range
func tcRange(n *ir.RangeStmt) {
- // Typechecking order is important here:
- // 0. first typecheck range expression (slice/map/chan),
- // it is evaluated only once and so logically it is not part of the loop.
- // 1. typecheck produced values,
- // this part can declare new vars and so it must be typechecked before body,
- // because body can contain a closure that captures the vars.
- // 2. decldepth++ to denote loop body.
- // 3. typecheck body.
- // 4. decldepth--.
- typecheckrangeExpr(n)
-
- // second half of dance, the first half being typecheckrangeExpr
+ n.X = Expr(n.X)
+
+ // delicate little dance. see tcAssignList
+ if n.Key != nil {
+ if !ir.DeclaredBy(n.Key, n) {
+ n.Key = AssignExpr(n.Key)
+ }
+ checkassign(n.Key)
+ }
+ if n.Value != nil {
+ if !ir.DeclaredBy(n.Value, n) {
+ n.Value = AssignExpr(n.Value)
+ }
+ checkassign(n.Value)
+ }
+
+ // second half of dance
n.SetTypecheck(1)
if n.Key != nil && n.Key.Typecheck() == 0 {
n.Key = AssignExpr(n.Key)
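The distinction tcRange draws above is visible in ordinary Go: iteration variables are either declared by the range statement or must be assignable existing variables. A small example:

package main

import "fmt"

func main() {
	s := []string{"a", "b"}

	for i, v := range s { // key and value declared by the statement
		fmt.Println(i, v)
	}

	var i int
	for i = range s { // key is an existing variable, checked as an assignment target
	}
	fmt.Println(i)
}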
@@ -359,17 +428,14 @@ func tcRange(n *ir.RangeStmt) {
// tcReturn typechecks an ORETURN node.
func tcReturn(n *ir.ReturnStmt) ir.Node {
- typecheckargs(n)
if ir.CurFunc == nil {
- base.Errorf("return outside function")
- n.SetType(nil)
- return n
+ base.FatalfAt(n.Pos(), "return outside function")
}
- if ir.HasNamedResults(ir.CurFunc) && len(n.Results) == 0 {
- return n
+ typecheckargs(n)
+ if len(n.Results) != 0 {
+ typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), n.Results, func() string { return "return argument" })
}
- typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), n.Results, func() string { return "return argument" })
return n
}
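The case the rewritten tcReturn handles implicitly is a bare return with named results, where n.Results is empty and there is nothing to check against the signature:

package main

import "fmt"

func divmod(n, d int) (q, r int) {
	q, r = n/d, n%d
	return // bare return: the named results q and r are returned
}

func main() {
	fmt.Println(divmod(10, 3)) // 3 1
}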
@@ -538,8 +604,8 @@ func tcSwitchExpr(n *ir.SwitchStmt) {
} else if t.IsInterface() && !n1.Type().IsInterface() && !types.IsComparable(n1.Type()) {
base.ErrorfAt(ncase.Pos(), errors.UndefinedOp, "invalid case %L in switch (incomparable type)", n1)
} else {
- op1, _ := Assignop(n1.Type(), t)
- op2, _ := Assignop(t, n1.Type())
+ op1, _ := assignOp(n1.Type(), t)
+ op2, _ := assignOp(t, n1.Type())
if op1 == ir.OXXX && op2 == ir.OXXX {
if n.Tag != nil {
base.ErrorfAt(ncase.Pos(), errors.MismatchedTypes, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Tag, n1.Type(), t)
diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go
index 2bb978a0fa..d64b0f0e22 100644
--- a/src/cmd/compile/internal/typecheck/subr.go
+++ b/src/cmd/compile/internal/typecheck/subr.go
@@ -26,33 +26,14 @@ func LookupNum(prefix string, n int) *types.Sym {
}
// Given funarg struct list, return list of fn args.
-func NewFuncParams(tl *types.Type, mustname bool) []*ir.Field {
- var args []*ir.Field
- gen := 0
- for _, t := range tl.Fields().Slice() {
- s := t.Sym
- if mustname && (s == nil || s.Name == "_") {
- // invent a name so that we can refer to it in the trampoline
- s = LookupNum(".anon", gen)
- gen++
- } else if s != nil && s.Pkg != types.LocalPkg {
- // TODO(mdempsky): Preserve original position, name, and package.
- s = Lookup(s.Name)
- }
- a := ir.NewField(base.Pos, s, t.Type)
- a.Pos = t.Pos
- a.IsDDD = t.IsDDD()
- args = append(args, a)
- }
-
- return args
-}
-
-// NewName returns a new ONAME Node associated with symbol s.
-func NewName(s *types.Sym) *ir.Name {
- n := ir.NewNameAt(base.Pos, s)
- n.Curfn = ir.CurFunc
- return n
+func NewFuncParams(origs []*types.Field) []*types.Field {
+ res := make([]*types.Field, len(origs))
+ for i, orig := range origs {
+ p := types.NewField(orig.Pos, orig.Sym, orig.Type)
+ p.SetIsDDD(orig.IsDDD())
+ res[i] = p
+ }
+ return res
}
// NodAddr returns a node representing &n at base.Pos.
@@ -62,60 +43,7 @@ func NodAddr(n ir.Node) *ir.AddrExpr {
// NodAddrAt returns a node representing &n at position pos.
func NodAddrAt(pos src.XPos, n ir.Node) *ir.AddrExpr {
- n = markAddrOf(n)
- return ir.NewAddrExpr(pos, n)
-}
-
-func markAddrOf(n ir.Node) ir.Node {
- if IncrementalAddrtaken {
- // We can only do incremental addrtaken computation when it is ok
- // to typecheck the argument of the OADDR. That's only safe after the
- // main typecheck has completed, and not loading the inlined body.
- // The argument to OADDR needs to be typechecked because &x[i] takes
- // the address of x if x is an array, but not if x is a slice.
- // Note: OuterValue doesn't work correctly until n is typechecked.
- n = typecheck(n, ctxExpr)
- if x := ir.OuterValue(n); x.Op() == ir.ONAME {
- x.Name().SetAddrtaken(true)
- }
- } else {
- // Remember that we built an OADDR without computing the Addrtaken bit for
- // its argument. We'll do that later in bulk using computeAddrtaken.
- DirtyAddrtaken = true
- }
- return n
-}
-
-// If IncrementalAddrtaken is false, we do not compute Addrtaken for an OADDR Node
-// when it is built. The Addrtaken bits are set in bulk by computeAddrtaken.
-// If IncrementalAddrtaken is true, then when an OADDR Node is built the Addrtaken
-// field of its argument is updated immediately.
-var IncrementalAddrtaken = false
-
-// If DirtyAddrtaken is true, then there are OADDR whose corresponding arguments
-// have not yet been marked as Addrtaken.
-var DirtyAddrtaken = false
-
-func ComputeAddrtaken(top []ir.Node) {
- for _, n := range top {
- var doVisit func(n ir.Node)
- doVisit = func(n ir.Node) {
- if n.Op() == ir.OADDR {
- if x := ir.OuterValue(n.(*ir.AddrExpr).X); x.Op() == ir.ONAME {
- x.Name().SetAddrtaken(true)
- if x.Name().IsClosureVar() {
- // Mark the original variable as Addrtaken so that capturevars
- // knows not to pass it by value.
- x.Name().Defn.Name().SetAddrtaken(true)
- }
- }
- }
- if n.Op() == ir.OCLOSURE {
- ir.VisitList(n.(*ir.ClosureExpr).Func.Body, doVisit)
- }
- }
- ir.Visit(n, doVisit)
- }
+ return ir.NewAddrExpr(pos, Expr(n))
}
// LinksymAddr returns a new expression that evaluates to the address
@@ -126,9 +54,7 @@ func LinksymAddr(pos src.XPos, lsym *obj.LSym, typ *types.Type) *ir.AddrExpr {
}
func NodNil() ir.Node {
- n := ir.NewNilExpr(base.Pos)
- n.SetType(types.Types[types.TNIL])
- return n
+ return ir.NewNilExpr(base.Pos, types.Types[types.TNIL])
}
// AddImplicitDots finds missing fields in obj.field that
@@ -170,13 +96,13 @@ func AddImplicitDots(n *ir.SelectorExpr) *ir.SelectorExpr {
// CalcMethods calculates all the methods (including embedding) of a non-interface
// type t.
func CalcMethods(t *types.Type) {
- if t == nil || t.AllMethods().Len() != 0 {
+ if t == nil || len(t.AllMethods()) != 0 {
return
}
// mark top-level method symbols
// so that expand1 doesn't consider them.
- for _, f := range t.Methods().Slice() {
+ for _, f := range t.Methods() {
f.Sym.SetUniq(true)
}
@@ -213,11 +139,11 @@ func CalcMethods(t *types.Type) {
ms = append(ms, f)
}
- for _, f := range t.Methods().Slice() {
+ for _, f := range t.Methods() {
f.Sym.SetUniq(false)
}
- ms = append(ms, t.Methods().Slice()...)
+ ms = append(ms, t.Methods()...)
sort.Sort(types.MethodsByName(ms))
t.SetAllMethods(ms)
}
@@ -255,13 +181,13 @@ func adddot1(s *types.Sym, t *types.Type, d int, save **types.Field, ignorecase
return c, false
}
- var fields *types.Fields
+ var fields []*types.Field
if u.IsStruct() {
fields = u.Fields()
} else {
fields = u.AllMethods()
}
- for _, f := range fields.Slice() {
+ for _, f := range fields {
if f.Embedded == 0 || f.Sym == nil {
continue
}
@@ -311,7 +237,7 @@ func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node {
return n
}
- op, why := Assignop(n.Type(), t)
+ op, why := assignOp(n.Type(), t)
if op == ir.OXXX {
base.Errorf("cannot use %L as type %v in %s%s", n, t, context(), why)
op = ir.OCONV
@@ -327,7 +253,7 @@ func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node {
// If so, return op code to use in conversion.
// If not, return OXXX. In this case, the string return parameter may
// hold a reason why. In all other cases, it'll be the empty string.
-func Assignop(src, dst *types.Type) (ir.Op, string) {
+func assignOp(src, dst *types.Type) (ir.Op, string) {
if src == dst {
return ir.OCONVNOP, ""
}
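One of the assignability cases assignOp answers, shown at the source level (identical underlying types with one side unnamed, which yields OCONVNOP):

package main

import "fmt"

type MyInts []int

func main() {
	s := []int{1, 2, 3}
	var m MyInts = s // assignable without an explicit conversion
	fmt.Println(m)
}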
@@ -339,10 +265,7 @@ func Assignop(src, dst *types.Type) (ir.Op, string) {
if types.Identical(src, dst) {
return ir.OCONVNOP, ""
}
- return Assignop1(src, dst)
-}
-func Assignop1(src, dst *types.Type) (ir.Op, string) {
// 2. src and dst have identical underlying types and
// a. either src or dst is not a named type, or
// b. both are empty interface types, or
@@ -441,7 +364,7 @@ func Assignop1(src, dst *types.Type) (ir.Op, string) {
// If not, return OXXX. In this case, the string return parameter may
// hold a reason why. In all other cases, it'll be the empty string.
// srcConstant indicates whether the value of type src is a constant.
-func Convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) {
+func convertOp(srcConstant bool, src, dst *types.Type) (ir.Op, string) {
if src == dst {
return ir.OCONVNOP, ""
}
@@ -464,7 +387,7 @@ func Convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) {
}
// 1. src can be assigned to dst.
- op, why := Assignop(src, dst)
+ op, why := assignOp(src, dst)
if op != ir.OXXX {
return op, why
}
@@ -549,15 +472,7 @@ func Convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) {
return ir.OCONVNOP, ""
}
- // 10. src is map and dst is a pointer to corresponding hmap.
- // This rule is needed for the implementation detail that
- // go gc maps are implemented as a pointer to a hmap struct.
- if src.Kind() == types.TMAP && dst.IsPtr() &&
- src.MapType().Hmap == dst.Elem() {
- return ir.OCONVNOP, ""
- }
-
- // 11. src is a slice and dst is an array or pointer-to-array.
+ // 10. src is a slice and dst is an array or pointer-to-array.
// They must have same element type.
if src.IsSlice() {
if dst.IsArray() && types.Identical(src.Elem(), dst.Elem()) {
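Rule 10 at the source level; slice-to-array-pointer conversions need Go 1.17 and slice-to-array conversions need Go 1.20:

package main

import "fmt"

func main() {
	s := []int{1, 2, 3, 4}
	a := [4]int(s)    // panics at run time if len(s) < 4
	p := (*[2]int)(s) // pointer to the slice's first two elements
	fmt.Println(a, *p)
}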
@@ -612,7 +527,7 @@ func expand0(t *types.Type) {
}
if u.IsInterface() {
- for _, f := range u.AllMethods().Slice() {
+ for _, f := range u.AllMethods() {
if f.Sym.Uniq() {
continue
}
@@ -625,7 +540,7 @@ func expand0(t *types.Type) {
u = types.ReceiverBaseType(t)
if u != nil {
- for _, f := range u.Methods().Slice() {
+ for _, f := range u.Methods() {
if f.Sym.Uniq() {
continue
}
@@ -651,13 +566,13 @@ func expand1(t *types.Type, top bool) {
}
if u.IsStruct() || u.IsInterface() {
- var fields *types.Fields
+ var fields []*types.Field
if u.IsStruct() {
fields = u.Fields()
} else {
fields = u.AllMethods()
}
- for _, f := range fields.Slice() {
+ for _, f := range fields {
if f.Embedded == 0 {
continue
}
@@ -736,8 +651,8 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool
if t.IsInterface() {
i := 0
- tms := t.AllMethods().Slice()
- for _, im := range iface.AllMethods().Slice() {
+ tms := t.AllMethods()
+ for _, im := range iface.AllMethods() {
for i < len(tms) && tms[i].Sym != im.Sym {
i++
}
@@ -763,10 +678,10 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool
var tms []*types.Field
if t != nil {
CalcMethods(t)
- tms = t.AllMethods().Slice()
+ tms = t.AllMethods()
}
i := 0
- for _, im := range iface.AllMethods().Slice() {
+ for _, im := range iface.AllMethods() {
for i < len(tms) && tms[i].Sym != im.Sym {
i++
}
@@ -783,12 +698,10 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool
*ptr = 0
return false
}
- followptr := tm.Embedded == 2
// if pointer receiver in method,
// the method does not exist for value types.
- rcvr := tm.Type.Recv().Type
- if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !types.IsInterfaceMethod(tm.Type) {
+ if !types.IsMethodApplicable(t0, tm) {
if false && base.Flag.LowerR != 0 {
base.Errorf("interface pointer mismatch")
}
@@ -831,13 +744,13 @@ func lookdot0(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool)
c := 0
if u.IsStruct() || u.IsInterface() {
- var fields *types.Fields
+ var fields []*types.Field
if u.IsStruct() {
fields = u.Fields()
} else {
fields = u.AllMethods()
}
- for _, f := range fields.Slice() {
+ for _, f := range fields {
if f.Sym == s || (ignorecase && f.IsMethod() && strings.EqualFold(f.Sym.Name, s.Name)) {
if save != nil {
*save = f
@@ -854,7 +767,7 @@ func lookdot0(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool)
}
u = types.ReceiverBaseType(u)
if u != nil {
- for _, f := range u.Methods().Slice() {
+ for _, f := range u.Methods() {
if f.Embedded == 0 && (f.Sym == s || (ignorecase && strings.EqualFold(f.Sym.Name, s.Name))) {
if save != nil {
*save = f
@@ -877,7 +790,3 @@ var slist []symlink
type symlink struct {
field *types.Field
}
-
-func assert(p bool) {
- base.Assert(p)
-}
diff --git a/src/cmd/compile/internal/typecheck/syms.go b/src/cmd/compile/internal/typecheck/syms.go
index 7fe649faaa..a977b5e110 100644
--- a/src/cmd/compile/internal/typecheck/syms.go
+++ b/src/cmd/compile/internal/typecheck/syms.go
@@ -9,30 +9,32 @@ import (
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
- "cmd/internal/src"
)
-func LookupRuntime(name string) *ir.Name {
+// LookupRuntime returns a function or variable declared in
+// _builtin/runtime.go. If types_ is non-empty, successive occurrences
+// of the "any" placeholder type will be substituted.
+func LookupRuntime(name string, types_ ...*types.Type) *ir.Name {
s := ir.Pkgs.Runtime.Lookup(name)
if s == nil || s.Def == nil {
base.Fatalf("LookupRuntime: can't find runtime.%s", name)
}
- return ir.AsNode(s.Def).(*ir.Name)
+ n := s.Def.(*ir.Name)
+ if len(types_) != 0 {
+ n = substArgTypes(n, types_...)
+ }
+ return n
}
// SubstArgTypes substitutes the given list of types for
// successive occurrences of the "any" placeholder in the
// type syntax expression n.Type.
-// The result of SubstArgTypes MUST be assigned back to old, e.g.
-//
-// n.Left = SubstArgTypes(n.Left, t1, t2)
-func SubstArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name {
+func substArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name {
for _, t := range types_ {
types.CalcSize(t)
}
- n := ir.NewNameAt(old.Pos(), old.Sym())
+ n := ir.NewNameAt(old.Pos(), old.Sym(), types.SubstAny(old.Type(), &types_))
n.Class = old.Class
- n.SetType(types.SubstAny(old.Type(), &types_))
n.Func = old.Func
if len(types_) > 0 {
base.Fatalf("SubstArgTypes: too many argument types")
@@ -75,9 +77,9 @@ func InitRuntime() {
typ := typs[d.typ]
switch d.tag {
case funcTag:
- importfunc(src.NoXPos, sym, typ)
+ importfunc(sym, typ)
case varTag:
- importvar(src.NoXPos, sym, typ)
+ importvar(sym, typ)
default:
base.Fatalf("unhandled declaration tag %v", d.tag)
}
@@ -111,9 +113,9 @@ func InitCoverage() {
typ := typs[d.typ]
switch d.tag {
case funcTag:
- importfunc(src.NoXPos, sym, typ)
+ importfunc(sym, typ)
case varTag:
- importvar(src.NoXPos, sym, typ)
+ importvar(sym, typ)
default:
base.Fatalf("unhandled declaration tag %v", d.tag)
}
@@ -128,5 +130,5 @@ func LookupCoverage(name string) *ir.Name {
if sym == nil {
base.Fatalf("LookupCoverage: can't find runtime/coverage.%s", name)
}
- return ir.AsNode(sym.Def).(*ir.Name)
+ return sym.Def.(*ir.Name)
}
diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go
index 6e4feeccd9..b22e45358e 100644
--- a/src/cmd/compile/internal/typecheck/typecheck.go
+++ b/src/cmd/compile/internal/typecheck/typecheck.go
@@ -8,7 +8,6 @@ import (
"fmt"
"go/constant"
"go/token"
- "internal/types/errors"
"strings"
"cmd/compile/internal/base"
@@ -17,18 +16,6 @@ import (
"cmd/internal/src"
)
-// Function collecting autotmps generated during typechecking,
-// to be included in the package-level init function.
-var InitTodoFunc = ir.NewFunc(base.Pos)
-
-var inimport bool // set during import
-
-var TypecheckAllowed bool
-
-var (
- NeedRuntimeType = func(*types.Type) {}
-)
-
func AssignExpr(n ir.Node) ir.Node { return typecheck(n, ctxExpr|ctxAssign) }
func Expr(n ir.Node) ir.Node { return typecheck(n, ctxExpr) }
func Stmt(n ir.Node) ir.Node { return typecheck(n, ctxStmt) }
@@ -105,20 +92,6 @@ const (
// marks variables that escape the local frame.
// rewrites n.Op to be more specific in some cases.
-// Resolve resolves an ONONAME node to a definition, if any. If n is not an ONONAME node,
-// Resolve returns n unchanged. If n is an ONONAME node and not in the same package,
-// then n.Sym() is resolved using import data. Otherwise, Resolve returns
-// n.Sym().Def. An ONONAME node can be created using ir.NewIdent(), so an imported
-// symbol can be resolved via Resolve(ir.NewIdent(src.NoXPos, sym)).
-func Resolve(n ir.Node) (res ir.Node) {
- if n == nil || n.Op() != ir.ONONAME {
- return n
- }
-
- base.Fatalf("unexpected NONAME node: %+v", n)
- panic("unreachable")
-}
-
func typecheckslice(l []ir.Node, top int) {
for i := range l {
l[i] = typecheck(l[i], top)
@@ -170,56 +143,11 @@ func typekind(t *types.Type) string {
return fmt.Sprintf("etype=%d", et)
}
-func cycleFor(start ir.Node) []ir.Node {
- // Find the start node in typecheck_tcstack.
- // We know that it must exist because each time we mark
- // a node with n.SetTypecheck(2) we push it on the stack,
- // and each time we mark a node with n.SetTypecheck(2) we
- // pop it from the stack. We hit a cycle when we encounter
- // a node marked 2 in which case it must be on the stack.
- i := len(typecheck_tcstack) - 1
- for i > 0 && typecheck_tcstack[i] != start {
- i--
- }
-
- // collect all nodes with same Op
- var cycle []ir.Node
- for _, n := range typecheck_tcstack[i:] {
- if n.Op() == start.Op() {
- cycle = append(cycle, n)
- }
- }
-
- return cycle
-}
-
-func cycleTrace(cycle []ir.Node) string {
- var s string
- for i, n := range cycle {
- s += fmt.Sprintf("\n\t%v: %v uses %v", ir.Line(n), n, cycle[(i+1)%len(cycle)])
- }
- return s
-}
-
-var typecheck_tcstack []ir.Node
-
-func Func(fn *ir.Func) {
- new := Stmt(fn)
- if new != fn {
- base.Fatalf("typecheck changed func")
- }
-}
-
// typecheck type checks node n.
// The result of typecheck MUST be assigned back to n, e.g.
//
// n.Left = typecheck(n.Left, top)
func typecheck(n ir.Node, top int) (res ir.Node) {
- // cannot type check until all the source has been parsed
- if !TypecheckAllowed {
- base.Fatalf("early typecheck")
- }
-
if n == nil {
return nil
}
@@ -230,120 +158,33 @@ func typecheck(n ir.Node, top int) (res ir.Node) {
}
lno := ir.SetPos(n)
+ defer func() { base.Pos = lno }()
// Skip over parens.
for n.Op() == ir.OPAREN {
n = n.(*ir.ParenExpr).X
}
- // Resolve definition of name and value of iota lazily.
- n = Resolve(n)
-
// Skip typecheck if already done.
// But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed.
if n.Typecheck() == 1 || n.Typecheck() == 3 {
switch n.Op() {
- case ir.ONAME, ir.OTYPE, ir.OLITERAL:
+ case ir.ONAME:
break
default:
- base.Pos = lno
return n
}
}
if n.Typecheck() == 2 {
- // Typechecking loop. Try printing a meaningful message,
- // otherwise a stack trace of typechecking.
- switch n.Op() {
- // We can already diagnose variables used as types.
- case ir.ONAME:
- n := n.(*ir.Name)
- if top&(ctxExpr|ctxType) == ctxType {
- base.Errorf("%v is not a type", n)
- }
-
- case ir.OTYPE:
- // Only report a type cycle if we are expecting a type.
- // Otherwise let other code report an error.
- if top&ctxType == ctxType {
- // A cycle containing only alias types is an error
- // since it would expand indefinitely when aliases
- // are substituted.
- cycle := cycleFor(n)
- for _, n1 := range cycle {
- if n1.Name() != nil && !n1.Name().Alias() {
- // Cycle is ok. But if n is an alias type and doesn't
- // have a type yet, we have a recursive type declaration
- // with aliases that we can't handle properly yet.
- // Report an error rather than crashing later.
- if n.Name() != nil && n.Name().Alias() && n.Type() == nil {
- base.Pos = n.Pos()
- base.Fatalf("cannot handle alias type declaration (issue #25838): %v", n)
- }
- base.Pos = lno
- return n
- }
- }
- base.ErrorfAt(n.Pos(), errors.InvalidDeclCycle, "invalid recursive type alias %v%s", n, cycleTrace(cycle))
- }
-
- case ir.OLITERAL:
- if top&(ctxExpr|ctxType) == ctxType {
- base.Errorf("%v is not a type", n)
- break
- }
- base.ErrorfAt(n.Pos(), errors.InvalidInitCycle, "constant definition loop%s", cycleTrace(cycleFor(n)))
- }
-
- if base.Errors() == 0 {
- var trace string
- for i := len(typecheck_tcstack) - 1; i >= 0; i-- {
- x := typecheck_tcstack[i]
- trace += fmt.Sprintf("\n\t%v %v", ir.Line(x), x)
- }
- base.Errorf("typechecking loop involving %v%s", n, trace)
- }
-
- base.Pos = lno
- return n
+ base.FatalfAt(n.Pos(), "typechecking loop")
}
- typecheck_tcstack = append(typecheck_tcstack, n)
-
n.SetTypecheck(2)
n = typecheck1(n, top)
n.SetTypecheck(1)
- last := len(typecheck_tcstack) - 1
- typecheck_tcstack[last] = nil
- typecheck_tcstack = typecheck_tcstack[:last]
-
- _, isExpr := n.(ir.Expr)
- _, isStmt := n.(ir.Stmt)
- isMulti := false
- switch n.Op() {
- case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
- n := n.(*ir.CallExpr)
- if t := n.X.Type(); t != nil && t.Kind() == types.TFUNC {
- nr := t.NumResults()
- isMulti = nr > 1
- if nr == 0 {
- isExpr = false
- }
- }
- case ir.OAPPEND, ir.OMIN, ir.OMAX:
- // Must be used.
- isStmt = false
- case ir.OCLEAR, ir.OCLOSE, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN:
- // Must not be used.
- isExpr = false
- isStmt = true
- case ir.OCOPY, ir.ORECOVER, ir.ORECV:
- // Can be used or not.
- isStmt = true
- }
-
t := n.Type()
if t != nil && !t.IsFuncArgStruct() && n.Op() != ir.OTYPE {
switch t.Kind() {
@@ -356,25 +197,6 @@ func typecheck(n ir.Node, top int) (res ir.Node) {
}
}
- // TODO(rsc): Lots of the complexity here is because typecheck can
- // see OTYPE, ONAME, and OLITERAL nodes multiple times.
- // Once we make the IR a proper tree, we should be able to simplify
- // this code a bit, especially the final case.
- switch {
- case top&(ctxStmt|ctxExpr) == ctxExpr && !isExpr && n.Op() != ir.OTYPE && !isMulti:
- base.Fatalf("%v used as value", n)
-
- case top&ctxType == 0 && n.Op() == ir.OTYPE && t != nil:
- base.Fatalf("type %v is not an expression", n.Type())
-
- case top&(ctxStmt|ctxExpr) == ctxStmt && !isStmt && t != nil:
- base.Fatalf("%v evaluated but not used", n)
-
- case top&(ctxType|ctxExpr) == ctxType && n.Op() != ir.OTYPE && n.Op() != ir.ONONAME && (t != nil || n.Op() == ir.ONAME):
- base.Fatalf("%v is not a type", n)
- }
-
- base.Pos = lno
return n
}
@@ -400,22 +222,6 @@ func typecheck1(n ir.Node, top int) ir.Node {
base.Fatalf("typecheck %v", n.Op())
panic("unreachable")
- case ir.OLITERAL:
- if n.Sym() == nil && n.Type() == nil {
- base.Fatalf("literal missing type: %v", n)
- }
- return n
-
- case ir.ONIL:
- return n
-
- // names
- case ir.ONONAME:
- // Note: adderrorname looks for this string and
- // adds context about the outer expression
- base.FatalfAt(n.Pos(), "undefined: %v", n.Sym())
- panic("unreachable")
-
case ir.ONAME:
n := n.(*ir.Name)
if n.BuiltinOp != 0 {
@@ -437,14 +243,6 @@ func typecheck1(n ir.Node, top int) ir.Node {
}
return n
- case ir.OLINKSYMOFFSET:
- // type already set
- return n
-
- // types (ODEREF is with exprs)
- case ir.OTYPE:
- return n
-
// type or expr
case ir.ODEREF:
n := n.(*ir.StarExpr)
@@ -548,10 +346,6 @@ func typecheck1(n ir.Node, top int) ir.Node {
return tcUnaryArith(n)
// exprs
- case ir.OADDR:
- n := n.(*ir.AddrExpr)
- return tcAddr(n)
-
case ir.OCOMPLIT:
return tcCompLit(n.(*ir.CompLitExpr))
@@ -596,11 +390,6 @@ func typecheck1(n ir.Node, top int) ir.Node {
n := n.(*ir.CallExpr)
return tcCall(n, top)
- case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
- n := n.(*ir.UnaryExpr)
- n.SetType(types.Types[types.TUINTPTR])
- return OrigInt(n, evalunsafe(n))
-
case ir.OCAP, ir.OLEN:
n := n.(*ir.UnaryExpr)
return tcLenCap(n)
@@ -649,7 +438,7 @@ func typecheck1(n ir.Node, top int) ir.Node {
n := n.(*ir.UnaryExpr)
return tcNew(n)
- case ir.OPRINT, ir.OPRINTN:
+ case ir.OPRINT, ir.OPRINTLN:
n := n.(*ir.CallExpr)
return tcPrint(n)
@@ -661,10 +450,6 @@ func typecheck1(n ir.Node, top int) ir.Node {
n := n.(*ir.CallExpr)
return tcRecover(n)
- case ir.ORECOVERFP:
- n := n.(*ir.CallExpr)
- return tcRecoverFP(n)
-
case ir.OUNSAFEADD:
n := n.(*ir.BinaryExpr)
return tcUnsafeAdd(n)
@@ -685,10 +470,6 @@ func typecheck1(n ir.Node, top int) ir.Node {
n := n.(*ir.UnaryExpr)
return tcUnsafeData(n)
- case ir.OCLOSURE:
- n := n.(*ir.ClosureExpr)
- return tcClosure(n, top)
-
case ir.OITAB:
n := n.(*ir.UnaryExpr)
return tcITab(n)
@@ -806,17 +587,6 @@ func typecheck1(n ir.Node, top int) ir.Node {
case ir.ODCLFUNC:
tcFunc(n.(*ir.Func))
return n
-
- case ir.ODCLCONST:
- n := n.(*ir.Decl)
- n.X = Expr(n.X).(*ir.Name)
- return n
-
- case ir.ODCLTYPE:
- n := n.(*ir.Decl)
- n.X = typecheck(n.X, ctxType).(*ir.Name)
- types.CheckSize(n.X.Type())
- return n
}
// No return n here!
@@ -849,11 +619,6 @@ func typecheckargs(n ir.InitNode) {
return
}
- // Save n as n.Orig for fmt.go.
- if ir.Orig(n) == n {
- n.(ir.OrigNode).SetOrig(ir.SepCopy(n))
- }
-
// Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
RewriteMultiValueCall(n, list[0])
}
@@ -861,10 +626,7 @@ func typecheckargs(n ir.InitNode) {
// RewriteNonNameCall replaces non-Name call expressions with temps,
// rewriting f()(...) to t0 := f(); t0(...).
func RewriteNonNameCall(n *ir.CallExpr) {
- np := &n.X
- if inst, ok := (*np).(*ir.InstExpr); ok && inst.Op() == ir.OFUNCINST {
- np = &inst.X
- }
+ np := &n.Fun
if dot, ok := (*np).(*ir.SelectorExpr); ok && (dot.Op() == ir.ODOTMETH || dot.Op() == ir.ODOTINTER || dot.Op() == ir.OMETHVALUE) {
np = &dot.X // peel away method selector
}
@@ -876,49 +638,26 @@ func RewriteNonNameCall(n *ir.CallExpr) {
return
}
- // See comment (1) in RewriteMultiValueCall.
- static := ir.CurFunc == nil
- if static {
- ir.CurFunc = InitTodoFunc
- }
-
- tmp := Temp((*np).Type())
+ tmp := TempAt(base.Pos, ir.CurFunc, (*np).Type())
as := ir.NewAssignStmt(base.Pos, tmp, *np)
as.PtrInit().Append(Stmt(ir.NewDecl(n.Pos(), ir.ODCL, tmp)))
*np = tmp
- if static {
- ir.CurFunc = nil
- }
-
n.PtrInit().Append(Stmt(as))
}
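What the rewrite means for a program like the following (the temporary t0 in the comment is illustrative):

package main

import "fmt"

func adder(base int) func(int) int {
	return func(x int) int { return base + x }
}

func main() {
	// adder(10) is not a Name, so it is spilled first:
	// roughly t0 := adder(10); t0(5).
	fmt.Println(adder(10)(5)) // 15
}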
// RewriteMultiValueCall rewrites multi-valued f() to use temporaries,
// so the backend wouldn't need to worry about tuple-valued expressions.
func RewriteMultiValueCall(n ir.InitNode, call ir.Node) {
- // If we're outside of function context, then this call will
- // be executed during the generated init function. However,
- // init.go hasn't yet created it. Instead, associate the
- // temporary variables with InitTodoFunc for now, and init.go
- // will reassociate them later when it's appropriate. (1)
- static := ir.CurFunc == nil
- if static {
- ir.CurFunc = InitTodoFunc
- }
-
as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, []ir.Node{call})
- results := call.Type().FieldSlice()
+ results := call.Type().Fields()
list := make([]ir.Node, len(results))
for i, result := range results {
- tmp := Temp(result.Type)
+ tmp := TempAt(base.Pos, ir.CurFunc, result.Type)
as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, tmp))
as.Lhs.Append(tmp)
list[i] = tmp
}
- if static {
- ir.CurFunc = nil
- }
n.PtrInit().Append(Stmt(as))
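The corresponding source-level picture for multi-valued calls (t1 and t2 are illustrative names for the generated temporaries):

package main

import "fmt"

func pair() (int, string) { return 1, "one" }

func use(n int, s string) { fmt.Println(n, s) }

func main() {
	// use(pair()) becomes roughly: t1, t2 := pair(); use(t1, t2).
	use(pair())
}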
@@ -1034,9 +773,9 @@ func needTwoArgs(n *ir.CallExpr) (ir.Node, ir.Node, bool) {
// the matching field or nil. If dostrcmp is 0, it matches the symbols. If
// dostrcmp is 1, it matches by name exactly. If dostrcmp is 2, it matches names
// with case folding.
-func Lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field {
+func Lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs []*types.Field, dostrcmp int) *types.Field {
var r *types.Field
- for _, f := range fs.Slice() {
+ for _, f := range fs {
if dostrcmp != 0 && f.Sym.Name == s.Name {
return f
}
@@ -1063,64 +802,35 @@ func Lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, do
return r
}
-// typecheckMethodExpr checks selector expressions (ODOT) where the
-// base expression is a type expression (OTYPE).
-func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) {
- if base.EnableTrace && base.Flag.LowerT {
- defer tracePrint("typecheckMethodExpr", n)(&res)
- }
-
- t := n.X.Type()
-
- // Compute the method set for t.
- var ms *types.Fields
- if t.IsInterface() {
- ms = t.AllMethods()
+// NewMethodExpr returns an OMETHEXPR node representing method
+// expression "recv.sym".
+func NewMethodExpr(pos src.XPos, recv *types.Type, sym *types.Sym) *ir.SelectorExpr {
+ // Compute the method set for recv.
+ var ms []*types.Field
+ if recv.IsInterface() {
+ ms = recv.AllMethods()
} else {
- mt := types.ReceiverBaseType(t)
+ mt := types.ReceiverBaseType(recv)
if mt == nil {
- base.Errorf("%v undefined (type %v has no method %v)", n, t, n.Sel)
- n.SetType(nil)
- return n
+ base.FatalfAt(pos, "type %v has no receiver base type", recv)
}
CalcMethods(mt)
ms = mt.AllMethods()
-
- // The method expression T.m requires a wrapper when T
- // is different from m's declared receiver type. We
- // normally generate these wrappers while writing out
- // runtime type descriptors, which is always done for
- // types declared at package scope. However, we need
- // to make sure to generate wrappers for anonymous
- // receiver types too.
- if mt.Sym() == nil {
- NeedRuntimeType(t)
- }
}
- s := n.Sel
- m := Lookdot1(n, s, t, ms, 0)
+ m := Lookdot1(nil, sym, recv, ms, 0)
if m == nil {
- if Lookdot1(n, s, t, ms, 1) != nil {
- base.Errorf("%v undefined (cannot refer to unexported method %v)", n, s)
- } else if _, ambig := dotpath(s, t, nil, false); ambig {
- base.Errorf("%v undefined (ambiguous selector)", n) // method or field
- } else {
- base.Errorf("%v undefined (type %v has no method %v)", n, t, s)
- }
- n.SetType(nil)
- return n
+ base.FatalfAt(pos, "type %v has no method %v", recv, sym)
}
- if !types.IsMethodApplicable(t, m) {
- base.Errorf("invalid method expression %v (needs pointer receiver: (*%v).%S)", n, t, s)
- n.SetType(nil)
- return n
+ if !types.IsMethodApplicable(recv, m) {
+ base.FatalfAt(pos, "invalid method expression %v.%v (needs pointer receiver)", recv, sym)
}
- n.SetOp(ir.OMETHEXPR)
+ n := ir.NewSelectorExpr(pos, ir.OMETHEXPR, ir.TypeNode(recv), sym)
n.Selection = m
- n.SetType(NewMethodType(m.Type, n.X.Type()))
+ n.SetType(NewMethodType(m.Type, recv))
+ n.SetTypecheck(1)
return n
}
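The construct an OMETHEXPR node denotes, in ordinary Go:

package main

import "fmt"

type Counter struct{ n int }

func (c Counter) Value() int { return c.n }

func main() {
	get := Counter.Value            // method expression: func(Counter) int
	fmt.Println(get(Counter{n: 7})) // 7
}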
@@ -1260,8 +970,9 @@ func nokeys(l ir.Nodes) bool {
return true
}
-func hasddd(t *types.Type) bool {
- for _, tl := range t.Fields().Slice() {
+func hasddd(params []*types.Field) bool {
+ // TODO(mdempsky): Simply check the last param.
+ for _, tl := range params {
if tl.IsDDD() {
return true
}
@@ -1271,7 +982,7 @@ func hasddd(t *types.Type) bool {
}
// typecheck assignment: type list = expression list
-func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes, desc func() string) {
+func typecheckaste(op ir.Op, call ir.Node, isddd bool, params []*types.Field, nl ir.Nodes, desc func() string) {
var t *types.Type
var i int
@@ -1283,9 +994,9 @@ func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl i
n = nl[0]
}
- n1 := tstruct.NumFields()
+ n1 := len(params)
n2 := len(nl)
- if !hasddd(tstruct) {
+ if !hasddd(params) {
if isddd {
goto invalidddd
}
@@ -1311,7 +1022,7 @@ func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl i
}
i = 0
- for _, tl := range tstruct.Fields().Slice() {
+ for _, tl := range params {
t = tl.Type
if tl.IsDDD() {
if isddd {
@@ -1367,98 +1078,12 @@ invalidddd:
notenough:
if n == nil || n.Type() != nil {
- details := errorDetails(nl, tstruct, isddd)
- if call != nil {
- // call is the expression being called, not the overall call.
- // Method expressions have the form T.M, and the compiler has
- // rewritten those to ONAME nodes but left T in Left.
- if call.Op() == ir.OMETHEXPR {
- call := call.(*ir.SelectorExpr)
- base.Errorf("not enough arguments in call to method expression %v%s", call, details)
- } else {
- base.Errorf("not enough arguments in call to %v%s", call, details)
- }
- } else {
- base.Errorf("not enough arguments to %v%s", op, details)
- }
- if n != nil {
- base.Fatalf("invalid call")
- }
+ base.Fatalf("not enough arguments to %v", op)
}
return
toomany:
- details := errorDetails(nl, tstruct, isddd)
- if call != nil {
- base.Errorf("too many arguments in call to %v%s", call, details)
- } else {
- base.Errorf("too many arguments to %v%s", op, details)
- }
-}
-
-func errorDetails(nl ir.Nodes, tstruct *types.Type, isddd bool) string {
- // Suppress any return message signatures if:
- //
- // (1) We don't know any type at a call site (see #19012).
- // (2) Any node has an unknown type.
- // (3) Invalid type for variadic parameter (see #46957).
- if tstruct == nil {
- return "" // case 1
- }
-
- if isddd && !nl[len(nl)-1].Type().IsSlice() {
- return "" // case 3
- }
-
- for _, n := range nl {
- if n.Type() == nil {
- return "" // case 2
- }
- }
- return fmt.Sprintf("\n\thave %s\n\twant %v", fmtSignature(nl, isddd), tstruct)
-}
-
-// sigrepr is a type's representation to the outside world,
-// in string representations of return signatures
-// e.g in error messages about wrong arguments to return.
-func sigrepr(t *types.Type, isddd bool) string {
- switch t {
- case types.UntypedString:
- return "string"
- case types.UntypedBool:
- return "bool"
- }
-
- if t.Kind() == types.TIDEAL {
- // "untyped number" is not commonly used
- // outside of the compiler, so let's use "number".
- // TODO(mdempsky): Revisit this.
- return "number"
- }
-
- // Turn []T... argument to ...T for clearer error message.
- if isddd {
- if !t.IsSlice() {
- base.Fatalf("bad type for ... argument: %v", t)
- }
- return "..." + t.Elem().String()
- }
- return t.String()
-}
-
-// fmtSignature returns the signature of the types at the call or return.
-func fmtSignature(nl ir.Nodes, isddd bool) string {
- if len(nl) < 1 {
- return "()"
- }
-
- var typeStrings []string
- for i, n := range nl {
- isdddArg := isddd && i == len(nl)-1
- typeStrings = append(typeStrings, sigrepr(n.Type(), isdddArg))
- }
-
- return fmt.Sprintf("(%s)", strings.Join(typeStrings, ", "))
+ base.Fatalf("too many arguments to %v", op)
}
// type check composite.
@@ -1584,7 +1209,7 @@ func checkassignto(src *types.Type, dst ir.Node) {
return
}
- if op, why := Assignop(src, dst.Type()); op == ir.OXXX {
+ if op, why := assignOp(src, dst.Type()); op == ir.OXXX {
base.Errorf("cannot assign %v to %L in multiple assignment%s", src, dst, why)
return
}
diff --git a/src/cmd/compile/internal/typecheck/universe.go b/src/cmd/compile/internal/typecheck/universe.go
index e43bede4ce..4c4487c649 100644
--- a/src/cmd/compile/internal/typecheck/universe.go
+++ b/src/cmd/compile/internal/typecheck/universe.go
@@ -14,7 +14,6 @@ import (
var (
okfor [ir.OEND][]bool
- iscmp [ir.OEND]bool
)
var (
@@ -47,7 +46,7 @@ var builtinFuncs = [...]struct {
{"new", ir.ONEW},
{"panic", ir.OPANIC},
{"print", ir.OPRINT},
- {"println", ir.OPRINTN},
+ {"println", ir.OPRINTLN},
{"real", ir.OREAL},
{"recover", ir.ORECOVER},
}
@@ -57,9 +56,6 @@ var unsafeFuncs = [...]struct {
op ir.Op
}{
{"Add", ir.OUNSAFEADD},
- {"Alignof", ir.OALIGNOF},
- {"Offsetof", ir.OOFFSETOF},
- {"Sizeof", ir.OSIZEOF},
{"Slice", ir.OUNSAFESLICE},
{"SliceData", ir.OUNSAFESLICEDATA},
{"String", ir.OUNSAFESTRING},
@@ -71,22 +67,17 @@ func InitUniverse() {
types.InitTypes(func(sym *types.Sym, typ *types.Type) types.Object {
n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, sym)
n.SetType(typ)
+ n.SetTypecheck(1)
sym.Def = n
return n
})
for _, s := range &builtinFuncs {
- s2 := types.BuiltinPkg.Lookup(s.name)
- def := NewName(s2)
- def.BuiltinOp = s.op
- s2.Def = def
+ ir.NewBuiltin(types.BuiltinPkg.Lookup(s.name), s.op)
}
for _, s := range &unsafeFuncs {
- s2 := types.UnsafePkg.Lookup(s.name)
- def := NewName(s2)
- def.BuiltinOp = s.op
- s2.Def = def
+ ir.NewBuiltin(types.UnsafePkg.Lookup(s.name), s.op)
}
s := types.BuiltinPkg.Lookup("true")
@@ -97,14 +88,11 @@ func InitUniverse() {
s = Lookup("_")
types.BlankSym = s
- s.Def = NewName(s)
- ir.BlankNode = ir.AsNode(s.Def)
- ir.BlankNode.SetType(types.Types[types.TBLANK])
- ir.BlankNode.SetTypecheck(1)
+ ir.BlankNode = ir.NewNameAt(src.NoXPos, s, types.Types[types.TBLANK])
+ s.Def = ir.BlankNode
s = types.BuiltinPkg.Lookup("_")
- s.Def = NewName(s)
- ir.AsNode(s.Def).SetType(types.Types[types.TBLANK])
+ s.Def = ir.NewNameAt(src.NoXPos, s, types.Types[types.TBLANK])
s = types.BuiltinPkg.Lookup("nil")
s.Def = NodNil()
@@ -207,22 +195,3 @@ func InitUniverse() {
okfor[ir.OCAP] = okforcap[:]
okfor[ir.OLEN] = okforlen[:]
}
-
-// DeclareUniverse makes the universe block visible within the current package.
-func DeclareUniverse() {
- // Operationally, this is similar to a dot import of builtinpkg, except
- // that we silently skip symbols that are already declared in the
- // package block rather than emitting a redeclared symbol error.
-
- for _, s := range types.BuiltinPkg.Syms {
- if s.Def == nil {
- continue
- }
- s1 := Lookup(s.Name)
- if s1.Def != nil {
- continue
- }
-
- s1.Def = s.Def
- }
-}
diff --git a/src/cmd/compile/internal/types/alg.go b/src/cmd/compile/internal/types/alg.go
index 8d56dec824..d3b446213e 100644
--- a/src/cmd/compile/internal/types/alg.go
+++ b/src/cmd/compile/internal/types/alg.go
@@ -103,7 +103,7 @@ func AlgType(t *Type) (AlgKind, *Type) {
return ASPECIAL, nil
case TSTRUCT:
- fields := t.FieldSlice()
+ fields := t.Fields()
// One-field struct is same as that one field alone.
if len(fields) == 1 && !fields[0].Sym.IsBlank() {
@@ -147,7 +147,7 @@ func IsComparable(t *Type) bool {
// IncomparableField returns an incomparable Field of struct Type t, if any.
func IncomparableField(t *Type) *Field {
- for _, f := range t.FieldSlice() {
+ for _, f := range t.Fields() {
if !IsComparable(f.Type) {
return f
}
diff --git a/src/cmd/compile/internal/types/fmt.go b/src/cmd/compile/internal/types/fmt.go
index c5d9941cfd..c9b9853f78 100644
--- a/src/cmd/compile/internal/types/fmt.go
+++ b/src/cmd/compile/internal/types/fmt.go
@@ -8,9 +8,7 @@ import (
"bytes"
"encoding/binary"
"fmt"
- "go/constant"
"strconv"
- "strings"
"sync"
"cmd/compile/internal/base"
@@ -29,31 +27,6 @@ var UnsafePkg *Pkg
// BlankSym is the blank (_) symbol.
var BlankSym *Sym
-// OrigSym returns the original symbol written by the user.
-func OrigSym(s *Sym) *Sym {
- if s == nil {
- return nil
- }
-
- if len(s.Name) > 1 && s.Name[0] == '~' {
- switch s.Name[1] {
- case 'r': // originally an unnamed result
- return nil
- case 'b': // originally the blank identifier _
- // TODO(mdempsky): Does s.Pkg matter here?
- return BlankSym
- }
- return s
- }
-
- if strings.HasPrefix(s.Name, ".anon") {
- // originally an unnamed or _ name (see subr.go: NewFuncParams)
- return nil
- }
-
- return s
-}
-
// numImport tracks how often a package with a given name is imported.
// It is used to provide a better error message (by using the package
// path to disambiguate) if a package that appears multiple times with
@@ -346,14 +319,6 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
}
}
sconv2(b, sym, verb, mode)
-
- // TODO(mdempsky): Investigate including Vargen in fmtTypeIDName
- // output too. It seems like it should, but that mode is currently
- // used in string representation used by reflection, which is
- // user-visible and doesn't expect this.
- if mode == fmtTypeID && t.vargen != 0 {
- fmt.Fprintf(b, "·%d", t.vargen)
- }
return
}
@@ -452,7 +417,7 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
break
}
b.WriteString("interface {")
- for i, f := range t.AllMethods().Slice() {
+ for i, f := range t.AllMethods() {
if i != 0 {
b.WriteByte(';')
}
@@ -472,7 +437,7 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
}
tconv2(b, f.Type, 'S', mode, visited)
}
- if t.AllMethods().Len() != 0 {
+ if len(t.AllMethods()) != 0 {
b.WriteByte(' ')
}
b.WriteByte('}')
@@ -483,12 +448,12 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
} else {
if t.Recv() != nil {
b.WriteString("method")
- tconv2(b, t.Recvs(), 0, mode, visited)
+ formatParams(b, t.Recvs(), mode, visited)
b.WriteByte(' ')
}
b.WriteString("func")
}
- tconv2(b, t.Params(), 0, mode, visited)
+ formatParams(b, t.Params(), mode, visited)
switch t.NumResults() {
case 0:
@@ -496,11 +461,11 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
case 1:
b.WriteByte(' ')
- tconv2(b, t.Results().Field(0).Type, 0, mode, visited) // struct->field->field's type
+ tconv2(b, t.Result(0).Type, 0, mode, visited) // struct->field->field's type
default:
b.WriteByte(' ')
- tconv2(b, t.Results(), 0, mode, visited)
+ formatParams(b, t.Results(), mode, visited)
}
case TSTRUCT:
@@ -511,10 +476,6 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
switch t {
case mt.Bucket:
b.WriteString("map.bucket[")
- case mt.Hmap:
- b.WriteString("map.hdr[")
- case mt.Hiter:
- b.WriteString("map.iter[")
default:
base.Fatalf("unknown internal map type")
}
@@ -524,39 +485,18 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
break
}
- if funarg := t.StructType().Funarg; funarg != FunargNone {
- open, close := '(', ')'
- if funarg == FunargTparams {
- open, close = '[', ']'
- }
- b.WriteByte(byte(open))
- fieldVerb := 'v'
- switch mode {
- case fmtTypeID, fmtTypeIDName, fmtGo:
- // no argument names on function signature, and no "noescape"/"nosplit" tags
- fieldVerb = 'S'
- }
- for i, f := range t.Fields().Slice() {
- if i != 0 {
- b.WriteString(", ")
- }
- fldconv(b, f, fieldVerb, mode, visited, funarg)
- }
- b.WriteByte(byte(close))
- } else {
- b.WriteString("struct {")
- for i, f := range t.Fields().Slice() {
- if i != 0 {
- b.WriteByte(';')
- }
- b.WriteByte(' ')
- fldconv(b, f, 'L', mode, visited, funarg)
- }
- if t.NumFields() != 0 {
- b.WriteByte(' ')
+ b.WriteString("struct {")
+ for i, f := range t.Fields() {
+ if i != 0 {
+ b.WriteByte(';')
}
- b.WriteByte('}')
+ b.WriteByte(' ')
+ fldconv(b, f, 'L', mode, visited, false)
+ }
+ if t.NumFields() != 0 {
+ b.WriteByte(' ')
}
+ b.WriteByte('}')
case TFORW:
b.WriteString("undefined")
@@ -581,7 +521,24 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
}
}
-func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Type]int, funarg Funarg) {
+func formatParams(b *bytes.Buffer, params []*Field, mode fmtMode, visited map[*Type]int) {
+ b.WriteByte('(')
+ fieldVerb := 'v'
+ switch mode {
+ case fmtTypeID, fmtTypeIDName, fmtGo:
+ // no argument names on function signature, and no "noescape"/"nosplit" tags
+ fieldVerb = 'S'
+ }
+ for i, param := range params {
+ if i != 0 {
+ b.WriteString(", ")
+ }
+ fldconv(b, param, fieldVerb, mode, visited, true)
+ }
+ b.WriteByte(')')
+}
+
+func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Type]int, isParam bool) {
if f == nil {
b.WriteString("<T>")
return
@@ -592,11 +549,6 @@ func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Ty
if verb != 'S' {
s := f.Sym
- // Take the name from the original.
- if mode == fmtGo {
- s = OrigSym(s)
- }
-
// Using type aliases and embedded fields, it's possible to
// construct types that can't be directly represented as a
// type literal. For example, given "type Int = int" (#50190),
@@ -638,7 +590,7 @@ func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Ty
}
if s != nil {
- if funarg != FunargNone {
+ if isParam {
name = fmt.Sprint(f.Nname)
} else if verb == 'L' {
name = s.Name
@@ -667,7 +619,7 @@ func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Ty
tconv2(b, f.Type, 0, mode, visited)
}
- if verb != 'S' && funarg == FunargNone && f.Note != "" {
+ if verb != 'S' && !isParam && f.Note != "" {
b.WriteString(" ")
b.WriteString(strconv.Quote(f.Note))
}
@@ -688,41 +640,6 @@ func SplitVargenSuffix(name string) (base, suffix string) {
return name, ""
}
-// Val
-
-func FmtConst(v constant.Value, sharp bool) string {
- if !sharp && v.Kind() == constant.Complex {
- real, imag := constant.Real(v), constant.Imag(v)
-
- var re string
- sre := constant.Sign(real)
- if sre != 0 {
- re = real.String()
- }
-
- var im string
- sim := constant.Sign(imag)
- if sim != 0 {
- im = imag.String()
- }
-
- switch {
- case sre == 0 && sim == 0:
- return "0"
- case sre == 0:
- return im + "i"
- case sim == 0:
- return re
- case sim < 0:
- return fmt.Sprintf("(%s%si)", re, im)
- default:
- return fmt.Sprintf("(%s+%si)", re, im)
- }
- }
-
- return v.String()
-}
-
// TypeHash computes a hash value for type t to use in type switch statements.
func TypeHash(t *Type) uint32 {
p := t.LinkString()
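
For reference, the formatParams helper introduced above only parenthesizes and comma-separates the shared field formatting. A rough stand-alone sketch of that behavior, using plain strings in place of *Field and ignoring verbs and formatting modes, is:

package main

import (
    "bytes"
    "fmt"
)

// formatParams is a toy analogue of the helper above: it writes a
// comma-separated, parenthesized parameter list into a buffer.
func formatParams(b *bytes.Buffer, params []string) {
    b.WriteByte('(')
    for i, p := range params {
        if i != 0 {
            b.WriteString(", ")
        }
        b.WriteString(p)
    }
    b.WriteByte(')')
}

func main() {
    var b bytes.Buffer
    formatParams(&b, []string{"x int", "y ...string"})
    fmt.Println(b.String()) // (x int, y ...string)
}
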
diff --git a/src/cmd/compile/internal/types/goversion.go b/src/cmd/compile/internal/types/goversion.go
index c57493a5cb..ac08a49d0c 100644
--- a/src/cmd/compile/internal/types/goversion.go
+++ b/src/cmd/compile/internal/types/goversion.go
@@ -34,7 +34,7 @@ func AllowsGoVersion(major, minor int) bool {
}
// ParseLangFlag verifies that the -lang flag holds a valid value, and
-// exits if not. It initializes data used by langSupported.
+// exits if not. It initializes data used by AllowsGoVersion.
func ParseLangFlag() {
if base.Flag.Lang == "" {
return
@@ -59,6 +59,10 @@ func ParseLangFlag() {
// parseLang parses a -lang option into a langVer.
func parseLang(s string) (lang, error) {
+ if s == "go1" { // cmd/go's new spelling of "go1.0" (#65528)
+ s = "go1.0"
+ }
+
matches := goVersionRE.FindStringSubmatch(s)
if matches == nil {
return lang{}, fmt.Errorf(`should be something like "go1.12"`)
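
The "go1" special case above simply rewrites cmd/go's new spelling to "go1.0" before the existing regexp-based parse runs. A minimal stand-alone sketch of that flow, with an illustrative version regexp rather than the compiler's goVersionRE, looks like:

package main

import (
    "fmt"
    "regexp"
    "strconv"
)

var versionRE = regexp.MustCompile(`^go([1-9]\d*)\.(0|[1-9]\d*)$`)

// parseLangSketch mirrors the normalization added above: "go1" is
// treated as "go1.0" before the usual "go1.N" parse.
func parseLangSketch(s string) (major, minor int, err error) {
    if s == "go1" { // cmd/go's new spelling of "go1.0"
        s = "go1.0"
    }
    m := versionRE.FindStringSubmatch(s)
    if m == nil {
        return 0, 0, fmt.Errorf(`should be something like "go1.12"`)
    }
    major, _ = strconv.Atoi(m[1])
    minor, _ = strconv.Atoi(m[2])
    return major, minor, nil
}

func main() {
    fmt.Println(parseLangSketch("go1"))    // 1 0 <nil>
    fmt.Println(parseLangSketch("go1.21")) // 1 21 <nil>
}
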
diff --git a/src/cmd/compile/internal/types/identity.go b/src/cmd/compile/internal/types/identity.go
index 6b3bc2ded1..fa28c038bd 100644
--- a/src/cmd/compile/internal/types/identity.go
+++ b/src/cmd/compile/internal/types/identity.go
@@ -92,11 +92,11 @@ cont:
return true
case TINTER:
- if t1.AllMethods().Len() != t2.AllMethods().Len() {
+ if len(t1.AllMethods()) != len(t2.AllMethods()) {
return false
}
- for i, f1 := range t1.AllMethods().Slice() {
- f2 := t2.AllMethods().Index(i)
+ for i, f1 := range t1.AllMethods() {
+ f2 := t2.AllMethods()[i]
if f1.Sym != f2.Sym || !identical(f1.Type, f2.Type, flags, assumedEqual) {
return false
}
@@ -107,7 +107,7 @@ cont:
if t1.NumFields() != t2.NumFields() {
return false
}
- for i, f1 := range t1.FieldSlice() {
+ for i, f1 := range t1.Fields() {
f2 := t2.Field(i)
if f1.Sym != f2.Sym || f1.Embedded != f2.Embedded || !identical(f1.Type, f2.Type, flags, assumedEqual) {
return false
@@ -122,18 +122,18 @@ cont:
// Check parameters and result parameters for type equality.
// We intentionally ignore receiver parameters for type
// equality, because they're never relevant.
- for _, f := range ParamsResults {
- // Loop over fields in structs, ignoring argument names.
- fs1, fs2 := f(t1).FieldSlice(), f(t2).FieldSlice()
- if len(fs1) != len(fs2) {
+ if t1.NumParams() != t2.NumParams() ||
+ t1.NumResults() != t2.NumResults() ||
+ t1.IsVariadic() != t2.IsVariadic() {
+ return false
+ }
+
+ fs1 := t1.ParamsResults()
+ fs2 := t2.ParamsResults()
+ for i, f1 := range fs1 {
+ if !identical(f1.Type, fs2[i].Type, flags, assumedEqual) {
return false
}
- for i, f1 := range fs1 {
- f2 := fs2[i]
- if f1.IsDDD() != f2.IsDDD() || !identical(f1.Type, f2.Type, flags, assumedEqual) {
- return false
- }
- }
}
return true
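
The rewritten TFUNC case compares parameter and result counts (and variadicity) up front, then makes a single pass over the combined parameters-and-results slice. A self-contained sketch of that comparison shape, using toy string-typed signatures instead of the compiler's *Type, is:

package main

import "fmt"

// sig is a stand-in for a function type: element types by name only.
type sig struct {
    params, results []string
    variadic        bool
}

// identicalSig mirrors the new comparison order: cheap count/variadic
// checks first, then one pass over params followed by results.
func identicalSig(t1, t2 sig) bool {
    if len(t1.params) != len(t2.params) ||
        len(t1.results) != len(t2.results) ||
        t1.variadic != t2.variadic {
        return false
    }
    fs1 := append(append([]string{}, t1.params...), t1.results...)
    fs2 := append(append([]string{}, t2.params...), t2.results...)
    for i, f1 := range fs1 {
        if f1 != fs2[i] {
            return false
        }
    }
    return true
}

func main() {
    a := sig{params: []string{"int", "[]string"}, results: []string{"error"}, variadic: true}
    b := sig{params: []string{"int", "[]string"}, results: []string{"error"}, variadic: true}
    fmt.Println(identicalSig(a, b)) // true
}
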
diff --git a/src/cmd/compile/internal/types/pkg.go b/src/cmd/compile/internal/types/pkg.go
index 9a21494017..c6ce7889af 100644
--- a/src/cmd/compile/internal/types/pkg.go
+++ b/src/cmd/compile/internal/types/pkg.go
@@ -8,7 +8,6 @@ import (
"cmd/internal/obj"
"cmd/internal/objabi"
"fmt"
- "sort"
"strconv"
"sync"
)
@@ -55,25 +54,10 @@ func NewPkg(path, name string) *Pkg {
return p
}
-// ImportedPkgList returns the list of directly imported packages.
-// The list is sorted by package path.
-func ImportedPkgList() []*Pkg {
- var list []*Pkg
- for _, p := range pkgMap {
- if p.Direct {
- list = append(list, p)
- }
- }
- sort.Sort(byPath(list))
- return list
+func PkgMap() map[string]*Pkg {
+ return pkgMap
}
-type byPath []*Pkg
-
-func (a byPath) Len() int { return len(a) }
-func (a byPath) Less(i, j int) bool { return a[i].Path < a[j].Path }
-func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
var nopkg = &Pkg{
Syms: make(map[string]*Sym),
}
@@ -122,6 +106,14 @@ func (pkg *Pkg) LookupNum(prefix string, n int) *Sym {
return pkg.LookupBytes(b)
}
+// Selector looks up a selector identifier.
+func (pkg *Pkg) Selector(name string) *Sym {
+ if IsExported(name) {
+ pkg = LocalPkg
+ }
+ return pkg.Lookup(name)
+}
+
var (
internedStringsmu sync.Mutex // protects internedStrings
internedStrings = map[string]string{}
@@ -137,12 +129,3 @@ func InternString(b []byte) string {
internedStringsmu.Unlock()
return s
}
-
-// CleanroomDo invokes f in an environment with no preexisting packages.
-// For testing of import/export only.
-func CleanroomDo(f func()) {
- saved := pkgMap
- pkgMap = make(map[string]*Pkg)
- f()
- pkgMap = saved
-}
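
Pkg.Selector above encodes the selector-lookup rule that exported names are package-agnostic (interned in the local package) while unexported names stay keyed by their defining package. A toy model of that rule, using string keys instead of *Sym and a rough stand-in for types.IsExported:

package main

import (
    "fmt"
    "unicode"
    "unicode/utf8"
)

// isExported roughly mirrors types.IsExported for ordinary identifiers.
func isExported(name string) bool {
    r, _ := utf8.DecodeRuneInString(name)
    return unicode.IsUpper(r)
}

// selectorKey models Pkg.Selector: exported selectors resolve in the
// local package, unexported ones in their defining package.
func selectorKey(localPkg, defPkg, name string) string {
    pkg := defPkg
    if isExported(name) {
        pkg = localPkg
    }
    return pkg + "." + name
}

func main() {
    fmt.Println(selectorKey("main", "bytes", "Buffer")) // main.Buffer
    fmt.Println(selectorKey("main", "bytes", "buffer")) // bytes.buffer
}
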
diff --git a/src/cmd/compile/internal/types/scope.go b/src/cmd/compile/internal/types/scope.go
deleted file mode 100644
index 438a3f9a47..0000000000
--- a/src/cmd/compile/internal/types/scope.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package types
-
-// PkgDef returns the definition associated with s at package scope.
-func (s *Sym) PkgDef() Object { return s.Def }
-
-// SetPkgDef sets the definition associated with s at package scope.
-func (s *Sym) SetPkgDef(n Object) { s.Def = n }
diff --git a/src/cmd/compile/internal/types/size.go b/src/cmd/compile/internal/types/size.go
index 1c20350128..6ba2b9153b 100644
--- a/src/cmd/compile/internal/types/size.go
+++ b/src/cmd/compile/internal/types/size.go
@@ -5,6 +5,7 @@
package types
import (
+ "math"
"sort"
"cmd/compile/internal/base"
@@ -91,7 +92,7 @@ func expandiface(t *Type) {
}
{
- methods := t.Methods().Slice()
+ methods := t.Methods()
sort.SliceStable(methods, func(i, j int) bool {
mi, mj := methods[i], methods[j]
@@ -110,7 +111,7 @@ func expandiface(t *Type) {
})
}
- for _, m := range t.Methods().Slice() {
+ for _, m := range t.Methods() {
if m.Sym == nil {
continue
}
@@ -119,7 +120,7 @@ func expandiface(t *Type) {
addMethod(m, true)
}
- for _, m := range t.Methods().Slice() {
+ for _, m := range t.Methods() {
if m.Sym != nil || m.Type == nil {
continue
}
@@ -133,7 +134,7 @@ func expandiface(t *Type) {
// Embedded interface: duplicate all methods
// and add to t's method set.
- for _, t1 := range m.Type.AllMethods().Slice() {
+ for _, t1 := range m.Type.AllMethods() {
f := NewField(m.Pos, t1.Sym, t1.Type)
addMethod(f, false)
@@ -157,90 +158,48 @@ func expandiface(t *Type) {
t.SetAllMethods(methods)
}
-func calcStructOffset(errtype *Type, t *Type, o int64, flag int) int64 {
- // flag is 0 (receiver), 1 (actual struct), or RegSize (in/out parameters)
- isStruct := flag == 1
- starto := o
- maxalign := int32(flag)
- if maxalign < 1 {
- maxalign = 1
- }
- // Special case: sync/atomic.align64 is an empty struct we recognize
- // as a signal that the struct it contains must be 64-bit-aligned.
- //
- // This logic is duplicated in go/types and cmd/compile/internal/types2.
- if isStruct && t.NumFields() == 0 && t.Sym() != nil && t.Sym().Name == "align64" && isAtomicStdPkg(t.Sym().Pkg) {
- maxalign = 8
- }
- lastzero := int64(0)
- for _, f := range t.Fields().Slice() {
- if f.Type == nil {
- // broken field, just skip it so that other valid fields
- // get a width.
- continue
- }
-
+// calcStructOffset computes the offsets of a sequence of fields,
+// starting at the given offset. It returns the resulting offset,
+// just past the last laid-out field.
+func calcStructOffset(t *Type, fields []*Field, offset int64) int64 {
+ for _, f := range fields {
CalcSize(f.Type)
- // If type T contains a field F marked as not-in-heap,
- // then T must also be a not-in-heap type. Otherwise,
- // you could heap allocate T and then get a pointer F,
- // which would be a heap pointer to a not-in-heap type.
- if f.Type.NotInHeap() {
- t.SetNotInHeap(true)
- }
- if int32(f.Type.align) > maxalign {
- maxalign = int32(f.Type.align)
- }
- if f.Type.align > 0 {
- o = RoundUp(o, int64(f.Type.align))
- }
- if isStruct { // For receiver/args/results, do not set, it depends on ABI
- f.Offset = o
- }
+ offset = RoundUp(offset, int64(f.Type.align))
- w := f.Type.width
- if w < 0 {
- base.Fatalf("invalid width %d", f.Type.width)
- }
- if w == 0 {
- lastzero = o
+ if t.IsStruct() { // param offsets depend on ABI
+ f.Offset = offset
+
+ // If type T contains a field F marked as not-in-heap,
+ // then T must also be a not-in-heap type. Otherwise,
+ // you could heap allocate T and then get a pointer F,
+ // which would be a heap pointer to a not-in-heap type.
+ if f.Type.NotInHeap() {
+ t.SetNotInHeap(true)
+ }
}
- o += w
+
+ offset += f.Type.width
+
maxwidth := MaxWidth
// On 32-bit systems, reflect tables impose an additional constraint
// that each field start offset must fit in 31 bits.
if maxwidth < 1<<32 {
maxwidth = 1<<31 - 1
}
- if o >= maxwidth {
- base.ErrorfAt(typePos(errtype), 0, "type %L too large", errtype)
- o = 8 // small but nonzero
+ if offset >= maxwidth {
+ base.ErrorfAt(typePos(t), 0, "type %L too large", t)
+ offset = 8 // small but nonzero
}
}
- // For nonzero-sized structs which end in a zero-sized thing, we add
- // an extra byte of padding to the type. This padding ensures that
- // taking the address of the zero-sized thing can't manufacture a
- // pointer to the next object in the heap. See issue 9401.
- if flag == 1 && o > starto && o == lastzero {
- o++
- }
-
- // final width is rounded
- if flag != 0 {
- o = RoundUp(o, int64(maxalign))
- }
- t.align = uint8(maxalign)
-
- // type width only includes back to first field's offset
- t.width = o - starto
-
- return o
+ return offset
}
func isAtomicStdPkg(p *Pkg) bool {
- return (p.Prefix == "sync/atomic" || p.Prefix == `""` && base.Ctxt.Pkgpath == "sync/atomic") ||
- (p.Prefix == "runtime/internal/atomic" || p.Prefix == `""` && base.Ctxt.Pkgpath == "runtime/internal/atomic")
+ if p.Prefix == `""` {
+ panic("bad package prefix")
+ }
+ return p.Prefix == "sync/atomic" || p.Prefix == "runtime/internal/atomic"
}
// CalcSize calculates and stores the size and alignment for t.
@@ -309,39 +268,58 @@ func CalcSize(t *Type) {
case TINT8, TUINT8, TBOOL:
// bool is int8
w = 1
+ t.intRegs = 1
case TINT16, TUINT16:
w = 2
+ t.intRegs = 1
+
+ case TINT32, TUINT32:
+ w = 4
+ t.intRegs = 1
+
+ case TINT64, TUINT64:
+ w = 8
+ t.align = uint8(RegSize)
+ t.intRegs = uint8(8 / RegSize)
- case TINT32, TUINT32, TFLOAT32:
+ case TFLOAT32:
w = 4
+ t.floatRegs = 1
- case TINT64, TUINT64, TFLOAT64:
+ case TFLOAT64:
w = 8
t.align = uint8(RegSize)
+ t.floatRegs = 1
case TCOMPLEX64:
w = 8
t.align = 4
+ t.floatRegs = 2
case TCOMPLEX128:
w = 16
t.align = uint8(RegSize)
+ t.floatRegs = 2
case TPTR:
w = int64(PtrSize)
+ t.intRegs = 1
CheckSize(t.Elem())
case TUNSAFEPTR:
w = int64(PtrSize)
+ t.intRegs = 1
case TINTER: // implemented as 2 pointers
w = 2 * int64(PtrSize)
t.align = uint8(PtrSize)
+ t.intRegs = 2
expandiface(t)
case TCHAN: // implemented as pointer
w = int64(PtrSize)
+ t.intRegs = 1
CheckSize(t.Elem())
@@ -365,15 +343,14 @@ func CalcSize(t *Type) {
case TMAP: // implemented as pointer
w = int64(PtrSize)
+ t.intRegs = 1
CheckSize(t.Elem())
CheckSize(t.Key())
case TFORW: // should have been filled in
base.Fatalf("invalid recursive type %v", t)
- w = 1 // anything will do
- case TANY:
- // not a real type; should be replaced before use.
+ case TANY: // not a real type; should be replaced before use.
base.Fatalf("CalcSize any")
case TSTRING:
@@ -382,6 +359,7 @@ func CalcSize(t *Type) {
}
w = StringSize
t.align = uint8(PtrSize)
+ t.intRegs = 2
case TARRAY:
if t.Elem() == nil {
@@ -399,6 +377,20 @@ func CalcSize(t *Type) {
w = t.NumElem() * t.Elem().width
t.align = t.Elem().align
+ // ABIInternal only allows "trivial" arrays (i.e., length 0 or 1)
+ // to be passed by register.
+ switch t.NumElem() {
+ case 0:
+ t.intRegs = 0
+ t.floatRegs = 0
+ case 1:
+ t.intRegs = t.Elem().intRegs
+ t.floatRegs = t.Elem().floatRegs
+ default:
+ t.intRegs = math.MaxUint8
+ t.floatRegs = math.MaxUint8
+ }
+
case TSLICE:
if t.Elem() == nil {
break
@@ -406,16 +398,14 @@ func CalcSize(t *Type) {
w = SliceSize
CheckSize(t.Elem())
t.align = uint8(PtrSize)
+ t.intRegs = 3
case TSTRUCT:
if t.IsFuncArgStruct() {
base.Fatalf("CalcSize fn struct %v", t)
}
- // Recognize and mark runtime/internal/sys.nih as not-in-heap.
- if sym := t.Sym(); sym != nil && sym.Pkg.Path == "runtime/internal/sys" && sym.Name == "nih" {
- t.SetNotInHeap(true)
- }
- w = calcStructOffset(t, t, 0, 1)
+ CalcStructSize(t)
+ w = t.width
// make fake type to check later to
// trigger function argument computation.
@@ -423,18 +413,19 @@ func CalcSize(t *Type) {
t1 := NewFuncArgs(t)
CheckSize(t1)
w = int64(PtrSize) // width of func type is pointer
+ t.intRegs = 1
// function is 3 cated structures;
// compute their widths as side-effect.
case TFUNCARGS:
t1 := t.FuncArgs()
- w = calcStructOffset(t1, t1.Recvs(), 0, 0)
- w = calcStructOffset(t1, t1.Params(), w, RegSize)
- w = calcStructOffset(t1, t1.Results(), w, RegSize)
+ // TODO(mdempsky): Should package abi be responsible for computing argwid?
+ w = calcStructOffset(t1, t1.Recvs(), 0)
+ w = calcStructOffset(t1, t1.Params(), w)
+ w = RoundUp(w, int64(RegSize))
+ w = calcStructOffset(t1, t1.Results(), w)
+ w = RoundUp(w, int64(RegSize))
t1.extra.(*Func).Argwid = w
- if w%int64(RegSize) != 0 {
- base.Warn("bad type %v %d\n", t1, w)
- }
t.align = 1
}
@@ -455,19 +446,62 @@ func CalcSize(t *Type) {
ResumeCheckSize()
}
-// CalcStructSize calculates the size of s,
-// filling in s.Width and s.Align,
+// CalcStructSize calculates the size of t,
+// filling in t.width, t.align, t.intRegs, and t.floatRegs,
// even if size calculation is otherwise disabled.
-func CalcStructSize(s *Type) {
- s.width = calcStructOffset(s, s, 0, 1) // sets align
-}
+func CalcStructSize(t *Type) {
+ var maxAlign uint8 = 1
+
+ // Recognize special types. This logic is duplicated in go/types and
+ // cmd/compile/internal/types2.
+ if sym := t.Sym(); sym != nil {
+ switch {
+ case sym.Name == "align64" && isAtomicStdPkg(sym.Pkg):
+ maxAlign = 8
+ case sym.Pkg.Path == "runtime/internal/sys" && sym.Name == "nih":
+ t.SetNotInHeap(true)
+ }
+ }
+
+ fields := t.Fields()
+ size := calcStructOffset(t, fields, 0)
+
+ // For non-zero-sized structs which end in a zero-sized field, we
+ // add an extra byte of padding to the type. This padding ensures
+ // that taking the address of a zero-sized field can't manufacture a
+ // pointer to the next object in the heap. See issue 9401.
+ if size > 0 && fields[len(fields)-1].Type.width == 0 {
+ size++
+ }
+
+ var intRegs, floatRegs uint64
+ for _, field := range fields {
+ typ := field.Type
+
+ // The alignment of a struct type is the maximum alignment of its
+ // field types.
+ if align := typ.align; align > maxAlign {
+ maxAlign = align
+ }
+
+ // Each field needs its own registers.
+ // We sum in uint64 to avoid possible overflows.
+ intRegs += uint64(typ.intRegs)
+ floatRegs += uint64(typ.floatRegs)
+ }
+
+ // Final size includes trailing padding.
+ size = RoundUp(size, int64(maxAlign))
+
+ if intRegs > math.MaxUint8 || floatRegs > math.MaxUint8 {
+ intRegs = math.MaxUint8
+ floatRegs = math.MaxUint8
+ }
-// RecalcSize is like CalcSize, but recalculates t's size even if it
-// has already been calculated before. It does not recalculate other
-// types.
-func RecalcSize(t *Type) {
- t.align = 0
- CalcSize(t)
+ t.width = size
+ t.align = maxAlign
+ t.intRegs = uint8(intRegs)
+ t.floatRegs = uint8(floatRegs)
}
func (t *Type) widthCalculated() bool {
@@ -583,7 +617,7 @@ func PtrDataSize(t *Type) int64 {
case TSTRUCT:
// Find the last field that has pointers, if any.
- fs := t.Fields().Slice()
+ fs := t.Fields()
for i := len(fs) - 1; i >= 0; i-- {
if size := PtrDataSize(fs[i].Type); size > 0 {
return fs[i].Offset + size
diff --git a/src/cmd/compile/internal/types/sizeof_test.go b/src/cmd/compile/internal/types/sizeof_test.go
index 76ccbd54a5..8a6f24124a 100644
--- a/src/cmd/compile/internal/types/sizeof_test.go
+++ b/src/cmd/compile/internal/types/sizeof_test.go
@@ -22,9 +22,9 @@ func TestSizeof(t *testing.T) {
}{
{Sym{}, 32, 64},
{Type{}, 56, 96},
- {Map{}, 20, 40},
+ {Map{}, 12, 24},
{Forward{}, 20, 32},
- {Func{}, 20, 32},
+ {Func{}, 32, 56},
{Struct{}, 12, 24},
{Interface{}, 0, 0},
{Chan{}, 8, 16},
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index c390b8194b..2777b4f007 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -6,8 +6,10 @@ package types
import (
"cmd/compile/internal/base"
+ "cmd/internal/objabi"
"cmd/internal/src"
"fmt"
+ "go/constant"
"internal/types/errors"
"sync"
)
@@ -127,6 +129,25 @@ var (
UntypedComplex = newType(TIDEAL)
)
+// UntypedTypes maps from a constant.Kind to its untyped Type
+// representation.
+var UntypedTypes = [...]*Type{
+ constant.Bool: UntypedBool,
+ constant.String: UntypedString,
+ constant.Int: UntypedInt,
+ constant.Float: UntypedFloat,
+ constant.Complex: UntypedComplex,
+}
+
+// DefaultKinds maps from a constant.Kind to its default Kind.
+var DefaultKinds = [...]Kind{
+ constant.Bool: TBOOL,
+ constant.String: TSTRING,
+ constant.Int: TINT,
+ constant.Float: TFLOAT64,
+ constant.Complex: TCOMPLEX128,
+}
+
// A Type represents a Go type.
//
// There may be multiple unnamed types with identical structure. However, there must
@@ -135,7 +156,7 @@ var (
// package.Lookup(name)) and checking sym.Def. If sym.Def is non-nil, the type
// already exists at package scope and is available at sym.Def.(*ir.Name).Type().
// Local types (which may have the same name as a package-level type) are
-// distinguished by the value of vargen.
+// distinguished by their vargen, which is embedded in their symbol name.
type Type struct {
// extra contains extra etype-specific fields.
// As an optimization, those etype-specific structs which contain exactly
@@ -159,9 +180,9 @@ type Type struct {
width int64 // valid if Align > 0
// list of base methods (excluding embedding)
- methods Fields
+ methods fields
// list of all methods (including embedding)
- allMethods Fields
+ allMethods fields
// canonical OTYPE node for a named type (should be an ir.Name node with same sym)
obj Object
@@ -174,11 +195,11 @@ type Type struct {
slice *Type // []T, or nil
}
- vargen int32 // unique name for OTYPE/ONAME
-
kind Kind // kind of type
align uint8 // the required alignment of this type, in bytes (0 means Width and Align have not yet been computed)
+ intRegs, floatRegs uint8 // registers needed for ABIInternal
+
flags bitset8
// For defined (named) generic types, a pointer to the list of type params
@@ -192,6 +213,17 @@ type Type struct {
rparams *[]*Type
}
+// Registers returns the number of integer and floating-point
+// registers required to represent a parameter of this type under the
+// ABIInternal calling conventions.
+//
+// If t must be passed by memory, Registers returns (math.MaxUint8,
+// math.MaxUint8).
+func (t *Type) Registers() (uint8, uint8) {
+ CalcSize(t)
+ return t.intRegs, t.floatRegs
+}
+
func (*Type) CanBeAnSSAAux() {}
const (
@@ -276,8 +308,6 @@ type Map struct {
Elem *Type // Val (elem) type
Bucket *Type // internal struct type representing a hash bucket
- Hmap *Type // internal struct type representing the Hmap (map header object)
- Hiter *Type // internal struct type representing hash iterator state
}
// MapType returns t's extra map-specific fields.
@@ -292,17 +322,20 @@ type Forward struct {
Embedlineno src.XPos // first use of this type as an embedded type
}
-// ForwardType returns t's extra forward-type-specific fields.
-func (t *Type) ForwardType() *Forward {
+// forwardType returns t's extra forward-type-specific fields.
+func (t *Type) forwardType() *Forward {
t.wantEtype(TFORW)
return t.extra.(*Forward)
}
// Func contains Type fields specific to func types.
type Func struct {
- Receiver *Type // function receiver
- Results *Type // function results
- Params *Type // function params
+ allParams []*Field // slice of all parameters, in receiver/params/results order
+
+ startParams int // index of the start of the (regular) parameters section
+ startResults int // index of the start of the results section
+
+ resultsTuple *Type // struct-like type representing multi-value results
// Argwid is the total width of the function receiver, params, and results.
// It gets calculated via a temporary TFUNCARGS type.
@@ -310,34 +343,29 @@ type Func struct {
Argwid int64
}
-// FuncType returns t's extra func-specific fields.
-func (t *Type) FuncType() *Func {
+func (ft *Func) recvs() []*Field { return ft.allParams[:ft.startParams] }
+func (ft *Func) params() []*Field { return ft.allParams[ft.startParams:ft.startResults] }
+func (ft *Func) results() []*Field { return ft.allParams[ft.startResults:] }
+func (ft *Func) recvParams() []*Field { return ft.allParams[:ft.startResults] }
+func (ft *Func) paramsResults() []*Field { return ft.allParams[ft.startParams:] }
+
+// funcType returns t's extra func-specific fields.
+func (t *Type) funcType() *Func {
t.wantEtype(TFUNC)
return t.extra.(*Func)
}
// StructType contains Type fields specific to struct types.
type Struct struct {
- fields Fields
+ fields fields
// Maps have three associated internal structs (see struct MapType).
// Map links such structs back to their map type.
Map *Type
- Funarg Funarg // type of function arguments for arg struct
+ ParamTuple bool // whether this struct is actually a tuple of signature parameters
}
-// Funarg records the kind of function argument
-type Funarg uint8
-
-const (
- FunargNone Funarg = iota
- FunargRcvr // receiver
- FunargParams // input parameters
- FunargResults // output results
- FunargTparams // type params
-)
-
// StructType returns t's extra struct-specific fields.
func (t *Type) StructType() *Struct {
t.wantEtype(TSTRUCT)
@@ -369,8 +397,8 @@ type Chan struct {
Dir ChanDir // channel direction
}
-// ChanType returns t's extra channel-specific fields.
-func (t *Type) ChanType() *Chan {
+// chanType returns t's extra channel-specific fields.
+func (t *Type) chanType() *Chan {
t.wantEtype(TCHAN)
return t.extra.(*Chan)
}
@@ -421,8 +449,7 @@ type Field struct {
Nname Object
// Offset in bytes of this field or method within its enclosing struct
- // or interface Type. Exception: if field is function receiver, arg or
- // result, then this is BOGUS_FUNARG_OFFSET; types does not know the Abi.
+ // or interface Type. For parameters, this is BADWIDTH.
Offset int64
}
@@ -447,39 +474,25 @@ func (f *Field) IsMethod() bool {
return f.Type.kind == TFUNC && f.Type.Recv() != nil
}
-// Fields is a pointer to a slice of *Field.
+// fields is a pointer to a slice of *Field.
// This saves space in Types that do not have fields or methods
// compared to a simple slice of *Field.
-type Fields struct {
+type fields struct {
s *[]*Field
}
-// Len returns the number of entries in f.
-func (f *Fields) Len() int {
- if f.s == nil {
- return 0
- }
- return len(*f.s)
-}
-
// Slice returns the entries in f as a slice.
// Changes to the slice entries will be reflected in f.
-func (f *Fields) Slice() []*Field {
+func (f *fields) Slice() []*Field {
if f.s == nil {
return nil
}
return *f.s
}
-// Index returns the i'th element of Fields.
-// It panics if f does not have at least i+1 elements.
-func (f *Fields) Index(i int) *Field {
- return (*f.s)[i]
-}
-
// Set sets f to a slice.
// This takes ownership of the slice.
-func (f *Fields) Set(s []*Field) {
+func (f *fields) Set(s []*Field) {
if len(s) == 0 {
f.s = nil
} else {
@@ -490,14 +503,6 @@ func (f *Fields) Set(s []*Field) {
}
}
-// Append appends entries to f.
-func (f *Fields) Append(s ...*Field) {
- if f.s == nil {
- f.s = new([]*Field)
- }
- *f.s = append(*f.s, s...)
-}
-
// newType returns a new Type of the specified kind.
func newType(et Kind) *Type {
t := &Type{
@@ -570,7 +575,7 @@ func NewSlice(elem *Type) *Type {
// NewChan returns a new chan Type with direction dir.
func NewChan(elem *Type, dir ChanDir) *Type {
t := newType(TCHAN)
- ct := t.ChanType()
+ ct := t.chanType()
ct.Elem = elem
ct.Dir = dir
if elem.HasShape() {
@@ -645,6 +650,7 @@ func NewPtr(elem *Type) *Type {
t.extra = Ptr{Elem: elem}
t.width = int64(PtrSize)
t.align = uint8(PtrSize)
+ t.intRegs = 1
if NewPtrCacheEnabled {
elem.cache.ptr = t
}
@@ -737,34 +743,40 @@ func SubstAny(t *Type, types *[]*Type) *Type {
}
case TFUNC:
- recvs := SubstAny(t.Recvs(), types)
- params := SubstAny(t.Params(), types)
- results := SubstAny(t.Results(), types)
- if recvs != t.Recvs() || params != t.Params() || results != t.Results() {
- t = t.copy()
- t.FuncType().Receiver = recvs
- t.FuncType().Results = results
- t.FuncType().Params = params
- }
+ ft := t.funcType()
+ allParams := substFields(ft.allParams, types)
+
+ t = t.copy()
+ ft = t.funcType()
+ ft.allParams = allParams
+
+ rt := ft.resultsTuple
+ rt = rt.copy()
+ ft.resultsTuple = rt
+ rt.setFields(t.Results())
case TSTRUCT:
// Make a copy of all fields, including ones whose type does not change.
// This prevents aliasing across functions, which can lead to later
// fields getting their Offset incorrectly overwritten.
- fields := t.FieldSlice()
- nfs := make([]*Field, len(fields))
- for i, f := range fields {
- nft := SubstAny(f.Type, types)
- nfs[i] = f.Copy()
- nfs[i].Type = nft
- }
+ nfs := substFields(t.Fields(), types)
t = t.copy()
- t.SetFields(nfs)
+ t.setFields(nfs)
}
return t
}
+func substFields(fields []*Field, types *[]*Type) []*Field {
+ nfs := make([]*Field, len(fields))
+ for i, f := range fields {
+ nft := SubstAny(f.Type, types)
+ nfs[i] = f.Copy()
+ nfs[i].Type = nft
+ }
+ return nfs
+}
+
// copy returns a shallow copy of the Type.
func (t *Type) copy() *Type {
if t == nil {
@@ -815,45 +827,56 @@ func (t *Type) wantEtype(et Kind) {
}
}
-func (t *Type) Recvs() *Type { return t.FuncType().Receiver }
-func (t *Type) Params() *Type { return t.FuncType().Params }
-func (t *Type) Results() *Type { return t.FuncType().Results }
+// ResultsTuple returns the result type of signature type t as a tuple.
+// This can be used as the type of multi-valued call expressions.
+func (t *Type) ResultsTuple() *Type { return t.funcType().resultsTuple }
+
+// Recvs returns a slice of receiver parameters of signature type t.
+// The returned slice always has length 0 or 1.
+func (t *Type) Recvs() []*Field { return t.funcType().recvs() }
+
+// Params returns a slice of regular parameters of signature type t.
+func (t *Type) Params() []*Field { return t.funcType().params() }
+
+// Results returns a slice of result parameters of signature type t.
+func (t *Type) Results() []*Field { return t.funcType().results() }
+
+// RecvParamsResults returns a slice containing all of the
+// signature's parameters in receiver (if any), (normal) parameters,
+// and then results.
+func (t *Type) RecvParamsResults() []*Field { return t.funcType().allParams }
+
+// RecvParams returns a slice containing the signature's receiver (if
+// any) followed by its (normal) parameters.
+func (t *Type) RecvParams() []*Field { return t.funcType().recvParams() }
-func (t *Type) NumRecvs() int { return t.FuncType().Receiver.NumFields() }
-func (t *Type) NumParams() int { return t.FuncType().Params.NumFields() }
-func (t *Type) NumResults() int { return t.FuncType().Results.NumFields() }
+// ParamsResults returns a slice containing the signature's (normal)
+// parameters followed by its results.
+func (t *Type) ParamsResults() []*Field { return t.funcType().paramsResults() }
+
+func (t *Type) NumRecvs() int { return len(t.Recvs()) }
+func (t *Type) NumParams() int { return len(t.Params()) }
+func (t *Type) NumResults() int { return len(t.Results()) }
// IsVariadic reports whether function type t is variadic.
func (t *Type) IsVariadic() bool {
n := t.NumParams()
- return n > 0 && t.Params().Field(n-1).IsDDD()
+ return n > 0 && t.Param(n-1).IsDDD()
}
// Recv returns the receiver of function type t, if any.
func (t *Type) Recv() *Field {
- s := t.Recvs()
- if s.NumFields() == 0 {
- return nil
+ if s := t.Recvs(); len(s) == 1 {
+ return s[0]
}
- return s.Field(0)
+ return nil
}
-// RecvsParamsResults stores the accessor functions for a function Type's
-// receiver, parameters, and result parameters, in that order.
-// It can be used to iterate over all of a function's parameter lists.
-var RecvsParamsResults = [3]func(*Type) *Type{
- (*Type).Recvs, (*Type).Params, (*Type).Results,
-}
+// Param returns the i'th parameter of signature type t.
+func (t *Type) Param(i int) *Field { return t.Params()[i] }
-// RecvsParams is like RecvsParamsResults, but omits result parameters.
-var RecvsParams = [2]func(*Type) *Type{
- (*Type).Recvs, (*Type).Params,
-}
-
-// ParamsResults is like RecvsParamsResults, but omits receiver parameters.
-var ParamsResults = [2]func(*Type) *Type{
- (*Type).Params, (*Type).Results,
-}
+// Result returns the i'th result of signature type t.
+func (t *Type) Result(i int) *Field { return t.Results()[i] }
// Key returns the key type of map type t.
func (t *Type) Key() *Type {
@@ -894,55 +917,56 @@ func (t *Type) FuncArgs() *Type {
// IsFuncArgStruct reports whether t is a struct representing function parameters or results.
func (t *Type) IsFuncArgStruct() bool {
- return t.kind == TSTRUCT && t.extra.(*Struct).Funarg != FunargNone
+ return t.kind == TSTRUCT && t.extra.(*Struct).ParamTuple
}
// Methods returns a pointer to the base methods (excluding embedding) for type t.
// These can either be concrete methods (for non-interface types) or interface
// methods (for interface types).
-func (t *Type) Methods() *Fields {
- return &t.methods
+func (t *Type) Methods() []*Field {
+ return t.methods.Slice()
}
// AllMethods returns a pointer to all the methods (including embedding) for type t.
// For an interface type, this is the set of methods that are typically iterated
// over. For non-interface types, AllMethods() only returns a valid result after
// CalcMethods() has been called at least once.
-func (t *Type) AllMethods() *Fields {
+func (t *Type) AllMethods() []*Field {
if t.kind == TINTER {
// Calculate the full method set of an interface type on the fly
// now, if not done yet.
CalcSize(t)
}
- return &t.allMethods
+ return t.allMethods.Slice()
+}
+
+// SetMethods sets the direct method set for type t (i.e., *not*
+// including promoted methods from embedded types).
+func (t *Type) SetMethods(fs []*Field) {
+ t.methods.Set(fs)
}
-// SetAllMethods sets the set of all methods (including embedding) for type t.
-// Use this method instead of t.AllMethods().Set(), which might call CalcSize() on
-// an uninitialized interface type.
+// SetAllMethods sets the set of all methods for type t (i.e.,
+// including promoted methods from embedded types).
func (t *Type) SetAllMethods(fs []*Field) {
t.allMethods.Set(fs)
}
-// Fields returns the fields of struct type t.
-func (t *Type) Fields() *Fields {
+// fields returns the fields of struct type t.
+func (t *Type) fields() *fields {
t.wantEtype(TSTRUCT)
return &t.extra.(*Struct).fields
}
// Field returns the i'th field of struct type t.
-func (t *Type) Field(i int) *Field {
- return t.Fields().Slice()[i]
-}
+func (t *Type) Field(i int) *Field { return t.Fields()[i] }
-// FieldSlice returns a slice of containing all fields of
+// Fields returns a slice containing all the fields of
// a struct type t.
-func (t *Type) FieldSlice() []*Field {
- return t.Fields().Slice()
-}
+func (t *Type) Fields() []*Field { return t.fields().Slice() }
-// SetFields sets struct type t's fields to fields.
-func (t *Type) SetFields(fields []*Field) {
+// setFields sets struct type t's fields to fields.
+func (t *Type) setFields(fields []*Field) {
// If we've calculated the width of t before,
// then some other type such as a function signature
// might now have the wrong type.
@@ -953,13 +977,13 @@ func (t *Type) SetFields(fields []*Field) {
base.Fatalf("SetFields of %v: width previously calculated", t)
}
t.wantEtype(TSTRUCT)
- t.Fields().Set(fields)
+ t.fields().Set(fields)
}
// SetInterface sets the base methods of an interface type t.
func (t *Type) SetInterface(methods []*Field) {
t.wantEtype(TINTER)
- t.Methods().Set(methods)
+ t.methods.Set(methods)
}
// ArgWidth returns the total aligned argument size for a function.
@@ -1102,10 +1126,6 @@ func (t *Type) cmp(x *Type) Cmp {
}
if x.obj != nil {
- // Syms non-nil, if vargens match then equal.
- if t.vargen != x.vargen {
- return cmpForNe(t.vargen < x.vargen)
- }
return CMPeq
}
// both syms nil, look at structure below.
@@ -1187,8 +1207,8 @@ func (t *Type) cmp(x *Type) Cmp {
return CMPgt // bucket maps are least
} // If t != t.Map.Bucket, fall through to general case
- tfs := t.FieldSlice()
- xfs := x.FieldSlice()
+ tfs := t.Fields()
+ xfs := x.Fields()
for i := 0; i < len(tfs) && i < len(xfs); i++ {
t1, x1 := tfs[i], xfs[i]
if t1.Embedded != x1.Embedded {
@@ -1210,8 +1230,8 @@ func (t *Type) cmp(x *Type) Cmp {
return CMPeq
case TINTER:
- tfs := t.AllMethods().Slice()
- xfs := x.AllMethods().Slice()
+ tfs := t.AllMethods()
+ xfs := x.AllMethods()
for i := 0; i < len(tfs) && i < len(xfs); i++ {
t1, x1 := tfs[i], xfs[i]
if c := t1.Sym.cmpsym(x1.Sym); c != CMPeq {
@@ -1227,22 +1247,24 @@ func (t *Type) cmp(x *Type) Cmp {
return CMPeq
case TFUNC:
- for _, f := range RecvsParamsResults {
- // Loop over fields in structs, ignoring argument names.
- tfs := f(t).FieldSlice()
- xfs := f(x).FieldSlice()
- for i := 0; i < len(tfs) && i < len(xfs); i++ {
- ta := tfs[i]
- tb := xfs[i]
- if ta.IsDDD() != tb.IsDDD() {
- return cmpForNe(!ta.IsDDD())
- }
- if c := ta.Type.cmp(tb.Type); c != CMPeq {
- return c
- }
- }
- if len(tfs) != len(xfs) {
- return cmpForNe(len(tfs) < len(xfs))
+ if tn, xn := t.NumRecvs(), x.NumRecvs(); tn != xn {
+ return cmpForNe(tn < xn)
+ }
+ if tn, xn := t.NumParams(), x.NumParams(); tn != xn {
+ return cmpForNe(tn < xn)
+ }
+ if tn, xn := t.NumResults(), x.NumResults(); tn != xn {
+ return cmpForNe(tn < xn)
+ }
+ if tv, xv := t.IsVariadic(), x.IsVariadic(); tv != xv {
+ return cmpForNe(!tv)
+ }
+
+ tfs := t.RecvParamsResults()
+ xfs := x.RecvParamsResults()
+ for i, tf := range tfs {
+ if c := tf.Type.cmp(xfs[i].Type); c != CMPeq {
+ return c
}
}
return CMPeq
@@ -1399,7 +1421,7 @@ func (t *Type) IsInterface() bool {
// IsEmptyInterface reports whether t is an empty interface type.
func (t *Type) IsEmptyInterface() bool {
- return t.IsInterface() && t.AllMethods().Len() == 0
+ return t.IsInterface() && len(t.AllMethods()) == 0
}
// IsScalar reports whether 't' is a scalar Go type, e.g.
@@ -1424,7 +1446,7 @@ func (t *Type) NumFields() int {
if t.kind == TRESULTS {
return len(t.extra.(*Results).Types)
}
- return t.Fields().Len()
+ return len(t.Fields())
}
func (t *Type) FieldType(i int) *Type {
if t.kind == TTUPLE {
@@ -1449,6 +1471,21 @@ func (t *Type) FieldName(i int) string {
return t.Field(i).Sym.Name
}
+// OffsetOf reports the offset of the field of a struct.
+// The field is looked up by name.
+func (t *Type) OffsetOf(name string) int64 {
+ if t.kind != TSTRUCT {
+ base.Fatalf("can't call OffsetOf on non-struct %v", t)
+ }
+ for _, f := range t.Fields() {
+ if f.Sym.Name == name {
+ return f.Offset
+ }
+ }
+ base.Fatalf("couldn't find field %s in %v", name, t)
+ return -1
+}
+
func (t *Type) NumElem() int64 {
t.wantEtype(TARRAY)
return t.extra.(*Array).Bound
@@ -1474,7 +1511,7 @@ func (t *Type) NumComponents(countBlank componentsIncludeBlankFields) int64 {
base.Fatalf("NumComponents func arg struct")
}
var n int64
- for _, f := range t.FieldSlice() {
+ for _, f := range t.Fields() {
if countBlank == IgnoreBlankFields && f.Sym.IsBlank() {
continue
}
@@ -1603,42 +1640,25 @@ func (t *Type) Obj() Object {
return t.obj
}
-// typeGen tracks the number of function-scoped defined types that
-// have been declared. It's used to generate unique linker symbols for
-// their runtime type descriptors.
-var typeGen int32
-
-// SetVargen assigns a unique generation number to type t, which must
-// be a defined type declared within function scope. The generation
-// number is used to distinguish it from other similarly spelled
-// defined types from the same package.
-//
-// TODO(mdempsky): Come up with a better solution.
-func (t *Type) SetVargen() {
- base.Assertf(t.Sym() != nil, "SetVargen on anonymous type %v", t)
- base.Assertf(t.vargen == 0, "type %v already has Vargen %v", t, t.vargen)
-
- typeGen++
- t.vargen = typeGen
-}
-
// SetUnderlying sets the underlying type of an incomplete type (i.e. type whose kind
// is currently TFORW). SetUnderlying automatically updates any types that were waiting
// for this type to be completed.
func (t *Type) SetUnderlying(underlying *Type) {
if underlying.kind == TFORW {
// This type isn't computed yet; when it is, update n.
- underlying.ForwardType().Copyto = append(underlying.ForwardType().Copyto, t)
+ underlying.forwardType().Copyto = append(underlying.forwardType().Copyto, t)
return
}
- ft := t.ForwardType()
+ ft := t.forwardType()
// TODO(mdempsky): Fix Type rekinding.
t.kind = underlying.kind
t.extra = underlying.extra
t.width = underlying.width
t.align = underlying.align
+ t.intRegs = underlying.intRegs
+ t.floatRegs = underlying.floatRegs
t.underlying = underlying.underlying
if underlying.NotInHeap() {
@@ -1700,40 +1720,38 @@ func NewInterface(methods []*Field) *Type {
return t
}
-const BOGUS_FUNARG_OFFSET = -1000000000
-
-func unzeroFieldOffsets(f []*Field) {
- for i := range f {
- f[i].Offset = BOGUS_FUNARG_OFFSET // This will cause an explosion if it is not corrected
- }
-}
-
// NewSignature returns a new function type for the given receiver,
// parameters, and results, any of which may be nil.
func NewSignature(recv *Field, params, results []*Field) *Type {
- var recvs []*Field
+ startParams := 0
+ if recv != nil {
+ startParams = 1
+ }
+ startResults := startParams + len(params)
+
+ allParams := make([]*Field, startResults+len(results))
if recv != nil {
- recvs = []*Field{recv}
+ allParams[0] = recv
}
+ copy(allParams[startParams:], params)
+ copy(allParams[startResults:], results)
t := newType(TFUNC)
- ft := t.FuncType()
+ ft := t.funcType()
- funargs := func(fields []*Field, funarg Funarg) *Type {
+ funargs := func(fields []*Field) *Type {
s := NewStruct(fields)
- s.StructType().Funarg = funarg
+ s.StructType().ParamTuple = true
return s
}
- if recv != nil {
- recv.Offset = BOGUS_FUNARG_OFFSET
- }
- unzeroFieldOffsets(params)
- unzeroFieldOffsets(results)
- ft.Receiver = funargs(recvs, FunargRcvr)
- ft.Params = funargs(params, FunargParams)
- ft.Results = funargs(results, FunargResults)
- if fieldsHasShape(recvs) || fieldsHasShape(params) || fieldsHasShape(results) {
+ ft.allParams = allParams
+ ft.startParams = startParams
+ ft.startResults = startResults
+
+ ft.resultsTuple = funargs(allParams[startResults:])
+
+ if fieldsHasShape(allParams) {
t.SetHasShape(true)
}
@@ -1743,7 +1761,7 @@ func NewSignature(recv *Field, params, results []*Field) *Type {
// NewStruct returns a new struct with the given fields.
func NewStruct(fields []*Field) *Type {
t := newType(TSTRUCT)
- t.SetFields(fields)
+ t.setFields(fields)
if fieldsHasShape(fields) {
t.SetHasShape(true)
}
@@ -1792,7 +1810,7 @@ func IsReflexive(t *Type) bool {
return IsReflexive(t.Elem())
case TSTRUCT:
- for _, t1 := range t.Fields().Slice() {
+ for _, t1 := range t.Fields() {
if !IsReflexive(t1.Type) {
return false
}
@@ -1845,44 +1863,34 @@ func IsMethodApplicable(t *Type, m *Field) bool {
return t.IsPtr() || !m.Type.Recv().Type.IsPtr() || IsInterfaceMethod(m.Type) || m.Embedded == 2
}
-// IsRuntimePkg reports whether p is package runtime.
-func IsRuntimePkg(p *Pkg) bool {
- if base.Flag.CompilingRuntime && p == LocalPkg {
- return true
+// RuntimeSymName returns the name of s if it's in package "runtime"; otherwise
+// it returns "".
+func RuntimeSymName(s *Sym) string {
+ if s.Pkg.Path == "runtime" {
+ return s.Name
}
- return p.Path == "runtime"
+ return ""
}
-// IsReflectPkg reports whether p is package reflect.
-func IsReflectPkg(p *Pkg) bool {
- return p.Path == "reflect"
-}
-
-// IsTypePkg reports whether p is pesudo package type.
-func IsTypePkg(p *Pkg) bool {
- return p == typepkg
+// ReflectSymName returns the name of s if it's in package "reflect"; otherwise
+// it returns "".
+func ReflectSymName(s *Sym) string {
+ if s.Pkg.Path == "reflect" {
+ return s.Name
+ }
+ return ""
}
// IsNoInstrumentPkg reports whether p is a package that
// should not be instrumented.
func IsNoInstrumentPkg(p *Pkg) bool {
- for _, np := range base.NoInstrumentPkgs {
- if p.Path == np {
- return true
- }
- }
- return false
+ return objabi.LookupPkgSpecial(p.Path).NoInstrument
}
// IsNoRacePkg reports whether p is a package that
// should not be race instrumented.
func IsNoRacePkg(p *Pkg) bool {
- for _, np := range base.NoRacePkgs {
- if p.Path == np {
- return true
- }
- }
- return false
+ return objabi.LookupPkgSpecial(p.Path).NoRaceFunc
}
// ReceiverBaseType returns the underlying type, if any,
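
The new Func representation stores receiver, parameters, and results in one backing slice with two cut points, so each accessor above is just a re-slice. A stand-alone toy version of that layout (string element types instead of *Field) shows how the accessors line up:

package main

import "fmt"

// funcType is a toy version of types.Func: one backing slice plus the
// indices where the regular parameters and the results begin.
type funcType struct {
    allParams    []string
    startParams  int // 0 or 1, depending on whether there is a receiver
    startResults int
}

func newSignature(recv string, params, results []string) funcType {
    start := 0
    if recv != "" {
        start = 1
    }
    all := make([]string, 0, start+len(params)+len(results))
    if recv != "" {
        all = append(all, recv)
    }
    all = append(all, params...)
    all = append(all, results...)
    return funcType{allParams: all, startParams: start, startResults: start + len(params)}
}

func (ft funcType) recvs() []string         { return ft.allParams[:ft.startParams] }
func (ft funcType) params() []string        { return ft.allParams[ft.startParams:ft.startResults] }
func (ft funcType) results() []string       { return ft.allParams[ft.startResults:] }
func (ft funcType) paramsResults() []string { return ft.allParams[ft.startParams:] }

func main() {
    ft := newSignature("*T", []string{"int", "string"}, []string{"error"})
    fmt.Println(ft.recvs())         // [*T]
    fmt.Println(ft.params())        // [int string]
    fmt.Println(ft.results())       // [error]
    fmt.Println(ft.paramsResults()) // [int string error]
}
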
diff --git a/src/cmd/compile/internal/types2/alias.go b/src/cmd/compile/internal/types2/alias.go
new file mode 100644
index 0000000000..06dfba1697
--- /dev/null
+++ b/src/cmd/compile/internal/types2/alias.go
@@ -0,0 +1,88 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "fmt"
+
+// An Alias represents an alias type.
+// Whether or not Alias types are created is controlled by the
+// gotypesalias setting with the GODEBUG environment variable.
+// For gotypesalias=1, alias declarations produce an Alias type.
+// Otherwise, the alias information is only in the type name,
+// which points directly to the actual (aliased) type.
+type Alias struct {
+ obj *TypeName // corresponding declared alias object
+ fromRHS Type // RHS of type alias declaration; may be an alias
+ actual Type // actual (aliased) type; never an alias
+}
+
+// NewAlias creates a new Alias type with the given type name and rhs.
+// rhs must not be nil.
+func NewAlias(obj *TypeName, rhs Type) *Alias {
+ alias := (*Checker)(nil).newAlias(obj, rhs)
+ // Ensure that alias.actual is set (#65455).
+ unalias(alias)
+ return alias
+}
+
+func (a *Alias) Obj() *TypeName { return a.obj }
+func (a *Alias) Underlying() Type { return unalias(a).Underlying() }
+func (a *Alias) String() string { return TypeString(a, nil) }
+
+// Type accessors
+
+// Unalias returns t if it is not an alias type;
+// otherwise it follows t's alias chain until it
+// reaches a non-alias type which is then returned.
+// Consequently, the result is never an alias type.
+func Unalias(t Type) Type {
+ if a0, _ := t.(*Alias); a0 != nil {
+ return unalias(a0)
+ }
+ return t
+}
+
+func unalias(a0 *Alias) Type {
+ if a0.actual != nil {
+ return a0.actual
+ }
+ var t Type
+ for a := a0; a != nil; a, _ = t.(*Alias) {
+ t = a.fromRHS
+ }
+ if t == nil {
+ panic(fmt.Sprintf("non-terminated alias %s", a0.obj.name))
+ }
+ a0.actual = t
+ return t
+}
+
+// asNamed returns t as *Named if that is t's
+// actual type. It returns nil otherwise.
+func asNamed(t Type) *Named {
+ n, _ := Unalias(t).(*Named)
+ return n
+}
+
+// newAlias creates a new Alias type with the given type name and rhs.
+// rhs must not be nil.
+func (check *Checker) newAlias(obj *TypeName, rhs Type) *Alias {
+ assert(rhs != nil)
+ a := &Alias{obj, rhs, nil}
+ if obj.typ == nil {
+ obj.typ = a
+ }
+
+ // Ensure that a.actual is set at the end of type checking.
+ if check != nil {
+ check.needsCleanup(a)
+ }
+
+ return a
+}
+
+func (a *Alias) cleanup() {
+ Unalias(a)
+}
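
Unalias memoizes the resolved type in a.actual, so repeated calls stay cheap even for chains of aliases. A minimal stand-alone model of the chain-following logic, with toy types rather than types2's and without the non-terminated-chain panic:

package main

import "fmt"

type typ interface{ String() string }

type basic string

func (b basic) String() string { return string(b) }

// alias is a toy version of types2.Alias: a right-hand side that may
// itself be an alias, plus a memoized non-alias result.
type alias struct {
    name   string
    rhs    typ
    actual typ
}

func (a *alias) String() string { return a.name }

// unalias follows the alias chain to the first non-alias type and caches it.
func unalias(a0 *alias) typ {
    if a0.actual != nil {
        return a0.actual
    }
    var t typ = a0
    for {
        a, ok := t.(*alias)
        if !ok {
            break
        }
        t = a.rhs
    }
    a0.actual = t
    return t
}

func main() {
    // type A = B; type B = int
    b := &alias{name: "B", rhs: basic("int")}
    a := &alias{name: "A", rhs: b}
    fmt.Println(unalias(a)) // int
    fmt.Println(a.actual)   // int (memoized)
}
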
diff --git a/src/cmd/compile/internal/types2/api.go b/src/cmd/compile/internal/types2/api.go
index d0c0cdb8f9..bb02d9198e 100644
--- a/src/cmd/compile/internal/types2/api.go
+++ b/src/cmd/compile/internal/types2/api.go
@@ -268,6 +268,15 @@ type Info struct {
// scope, the function scopes are embedded in the file scope of the file
// containing the function declaration.
//
+ // The Scope of a function contains the declarations of any
+ // type parameters, parameters, and named results, plus any
+ // local declarations in the body block.
+ // It is coextensive with the complete extent of the
+ // function's syntax ([*syntax.FuncDecl] or [*syntax.FuncLit]).
+ // The Scopes mapping does not contain an entry for the
+ // function body ([*syntax.BlockStmt]); the function's scope is
+ // associated with the [*syntax.FuncType].
+ //
// The following node types may appear in Scopes:
//
// *syntax.File
@@ -288,6 +297,13 @@ type Info struct {
// in source order. Variables without an initialization expression do not
// appear in this list.
InitOrder []*Initializer
+
+ // FileVersions maps a file to its Go version string.
+ // If the file doesn't specify a version, the reported
+ // string is Config.GoVersion.
+ // Version strings begin with “go”, like “go1.21”, and
+ // are suitable for use with the [go/version] package.
+ FileVersions map[*syntax.PosBase]string
}
func (info *Info) recordTypes() bool {
@@ -330,6 +346,23 @@ func (info *Info) ObjectOf(id *syntax.Name) Object {
return info.Uses[id]
}
+// PkgNameOf returns the local package name defined by the import,
+// or nil if not found.
+//
+// For dot-imports, the package name is ".".
+//
+// Precondition: the Defs and Implicits maps are populated.
+func (info *Info) PkgNameOf(imp *syntax.ImportDecl) *PkgName {
+ var obj Object
+ if imp.LocalPkgName != nil {
+ obj = info.Defs[imp.LocalPkgName]
+ } else {
+ obj = info.Implicits[imp]
+ }
+ pkgname, _ := obj.(*PkgName)
+ return pkgname
+}
+
// TypeAndValue reports the type and value (for constants)
// of the corresponding expression.
type TypeAndValue struct {
@@ -436,80 +469,3 @@ func (conf *Config) Check(path string, files []*syntax.File, info *Info) (*Packa
pkg := NewPackage(path, "")
return pkg, NewChecker(conf, pkg, info).Files(files)
}
-
-// AssertableTo reports whether a value of type V can be asserted to have type T.
-//
-// The behavior of AssertableTo is unspecified in three cases:
-// - if T is Typ[Invalid]
-// - if V is a generalized interface; i.e., an interface that may only be used
-// as a type constraint in Go code
-// - if T is an uninstantiated generic type
-func AssertableTo(V *Interface, T Type) bool {
- // Checker.newAssertableTo suppresses errors for invalid types, so we need special
- // handling here.
- if T.Underlying() == Typ[Invalid] {
- return false
- }
- return (*Checker)(nil).newAssertableTo(nopos, V, T, nil)
-}
-
-// AssignableTo reports whether a value of type V is assignable to a variable
-// of type T.
-//
-// The behavior of AssignableTo is unspecified if V or T is Typ[Invalid] or an
-// uninstantiated generic type.
-func AssignableTo(V, T Type) bool {
- x := operand{mode: value, typ: V}
- ok, _ := x.assignableTo(nil, T, nil) // check not needed for non-constant x
- return ok
-}
-
-// ConvertibleTo reports whether a value of type V is convertible to a value of
-// type T.
-//
-// The behavior of ConvertibleTo is unspecified if V or T is Typ[Invalid] or an
-// uninstantiated generic type.
-func ConvertibleTo(V, T Type) bool {
- x := operand{mode: value, typ: V}
- return x.convertibleTo(nil, T, nil) // check not needed for non-constant x
-}
-
-// Implements reports whether type V implements interface T.
-//
-// The behavior of Implements is unspecified if V is Typ[Invalid] or an uninstantiated
-// generic type.
-func Implements(V Type, T *Interface) bool {
- if T.Empty() {
- // All types (even Typ[Invalid]) implement the empty interface.
- return true
- }
- // Checker.implements suppresses errors for invalid types, so we need special
- // handling here.
- if V.Underlying() == Typ[Invalid] {
- return false
- }
- return (*Checker)(nil).implements(nopos, V, T, false, nil)
-}
-
-// Satisfies reports whether type V satisfies the constraint T.
-//
-// The behavior of Satisfies is unspecified if V is Typ[Invalid] or an uninstantiated
-// generic type.
-func Satisfies(V Type, T *Interface) bool {
- return (*Checker)(nil).implements(nopos, V, T, true, nil)
-}
-
-// Identical reports whether x and y are identical types.
-// Receivers of Signature types are ignored.
-func Identical(x, y Type) bool {
- var c comparer
- return c.identical(x, y, nil)
-}
-
-// IdenticalIgnoreTags reports whether x and y are identical types if tags are ignored.
-// Receivers of Signature types are ignored.
-func IdenticalIgnoreTags(x, y Type) bool {
- var c comparer
- c.ignoreTags = true
- return c.identical(x, y, nil)
-}
diff --git a/src/cmd/compile/internal/types2/api_predicates.go b/src/cmd/compile/internal/types2/api_predicates.go
new file mode 100644
index 0000000000..480f71144e
--- /dev/null
+++ b/src/cmd/compile/internal/types2/api_predicates.go
@@ -0,0 +1,84 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements exported type predicates.
+
+package types2
+
+// AssertableTo reports whether a value of type V can be asserted to have type T.
+//
+// The behavior of AssertableTo is unspecified in three cases:
+// - if T is Typ[Invalid]
+// - if V is a generalized interface; i.e., an interface that may only be used
+// as a type constraint in Go code
+// - if T is an uninstantiated generic type
+func AssertableTo(V *Interface, T Type) bool {
+ // Checker.newAssertableTo suppresses errors for invalid types, so we need special
+ // handling here.
+ if !isValid(T.Underlying()) {
+ return false
+ }
+ return (*Checker)(nil).newAssertableTo(nopos, V, T, nil)
+}
+
+// AssignableTo reports whether a value of type V is assignable to a variable
+// of type T.
+//
+// The behavior of AssignableTo is unspecified if V or T is Typ[Invalid] or an
+// uninstantiated generic type.
+func AssignableTo(V, T Type) bool {
+ x := operand{mode: value, typ: V}
+ ok, _ := x.assignableTo(nil, T, nil) // check not needed for non-constant x
+ return ok
+}
+
+// ConvertibleTo reports whether a value of type V is convertible to a value of
+// type T.
+//
+// The behavior of ConvertibleTo is unspecified if V or T is Typ[Invalid] or an
+// uninstantiated generic type.
+func ConvertibleTo(V, T Type) bool {
+ x := operand{mode: value, typ: V}
+ return x.convertibleTo(nil, T, nil) // check not needed for non-constant x
+}
+
+// Implements reports whether type V implements interface T.
+//
+// The behavior of Implements is unspecified if V is Typ[Invalid] or an uninstantiated
+// generic type.
+func Implements(V Type, T *Interface) bool {
+ if T.Empty() {
+ // All types (even Typ[Invalid]) implement the empty interface.
+ return true
+ }
+ // Checker.implements suppresses errors for invalid types, so we need special
+ // handling here.
+ if !isValid(V.Underlying()) {
+ return false
+ }
+ return (*Checker)(nil).implements(nopos, V, T, false, nil)
+}
+
+// Satisfies reports whether type V satisfies the constraint T.
+//
+// The behavior of Satisfies is unspecified if V is Typ[Invalid] or an uninstantiated
+// generic type.
+func Satisfies(V Type, T *Interface) bool {
+ return (*Checker)(nil).implements(nopos, V, T, true, nil)
+}
+
+// Identical reports whether x and y are identical types.
+// Receivers of [Signature] types are ignored.
+func Identical(x, y Type) bool {
+ var c comparer
+ return c.identical(x, y, nil)
+}
+
+// IdenticalIgnoreTags reports whether x and y are identical types if tags are ignored.
+// Receivers of [Signature] types are ignored.
+func IdenticalIgnoreTags(x, y Type) bool {
+ var c comparer
+ c.ignoreTags = true
+ return c.identical(x, y, nil)
+}
diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go
index 0f50650b04..bacba71955 100644
--- a/src/cmd/compile/internal/types2/api_test.go
+++ b/src/cmd/compile/internal/types2/api_test.go
@@ -8,11 +8,13 @@ import (
"cmd/compile/internal/syntax"
"errors"
"fmt"
+ "internal/goversion"
"internal/testenv"
"reflect"
"regexp"
"sort"
"strings"
+ "sync"
"testing"
. "cmd/compile/internal/types2"
@@ -957,6 +959,80 @@ func TestImplicitsInfo(t *testing.T) {
}
}
+func TestPkgNameOf(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ const src = `
+package p
+
+import (
+ . "os"
+ _ "io"
+ "math"
+ "path/filepath"
+ snort "sort"
+)
+
+// avoid imported and not used errors
+var (
+ _ = Open // os.Open
+ _ = math.Sin
+ _ = filepath.Abs
+ _ = snort.Ints
+)
+`
+
+ var tests = []struct {
+ path string // path string enclosed in "'s
+ want string
+ }{
+ {`"os"`, "."},
+ {`"io"`, "_"},
+ {`"math"`, "math"},
+ {`"path/filepath"`, "filepath"},
+ {`"sort"`, "snort"},
+ }
+
+ f := mustParse(src)
+ info := Info{
+ Defs: make(map[*syntax.Name]Object),
+ Implicits: make(map[syntax.Node]Object),
+ }
+ var conf Config
+ conf.Importer = defaultImporter()
+ _, err := conf.Check("p", []*syntax.File{f}, &info)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // map import paths to importDecl
+ imports := make(map[string]*syntax.ImportDecl)
+ for _, d := range f.DeclList {
+ if imp, _ := d.(*syntax.ImportDecl); imp != nil {
+ imports[imp.Path.Value] = imp
+ }
+ }
+
+ for _, test := range tests {
+ imp := imports[test.path]
+ if imp == nil {
+ t.Fatalf("invalid test case: import path %s not found", test.path)
+ }
+ got := info.PkgNameOf(imp)
+ if got == nil {
+ t.Fatalf("import %s: package name not found", test.path)
+ }
+ if got.Name() != test.want {
+ t.Errorf("import %s: got %s; want %s", test.path, got.Name(), test.want)
+ }
+ }
+
+ // test non-existing importDecl
+ if got := info.PkgNameOf(new(syntax.ImportDecl)); got != nil {
+ t.Errorf("got %s for non-existing import declaration", got.Name())
+ }
+}
+
func predString(tv TypeAndValue) string {
var buf strings.Builder
pred := func(b bool, s string) {
@@ -1816,12 +1892,12 @@ const Pi = 3.1415
type T struct{}
var Y, _ = lib.X, X
-func F(){
+func F[T *U, U any](param1, param2 int) /*param1=undef*/ (res1 /*res1=undef*/, res2 int) /*param1=var:12*/ /*res1=var:12*/ /*U=typename:12*/ {
const pi, e = 3.1415, /*pi=undef*/ 2.71828 /*pi=const:13*/ /*e=const:13*/
type /*t=undef*/ t /*t=typename:14*/ *t
print(Y) /*Y=var:10*/
x, Y := Y, /*x=undef*/ /*Y=var:10*/ Pi /*x=var:16*/ /*Y=var:16*/ ; _ = x; _ = Y
- var F = /*F=func:12*/ F /*F=var:17*/ ; _ = F
+ var F = /*F=func:12*/ F[*int, int] /*F=var:17*/ ; _ = F
var a []int
for i, x := range a /*i=undef*/ /*x=var:16*/ { _ = i; _ = x }
@@ -1840,6 +1916,10 @@ func F(){
println(int)
default /*int=var:31*/ :
}
+
+ _ = param1
+ _ = res1
+ return
}
/*main=undef*/
`
@@ -1905,7 +1985,29 @@ func F(){
_, gotObj := inner.LookupParent(id.Value, id.Pos())
if gotObj != wantObj {
- t.Errorf("%s: got %v, want %v", id.Pos(), gotObj, wantObj)
+ // Print the scope tree of mainScope in case of error.
+ var printScopeTree func(indent string, s *Scope)
+ printScopeTree = func(indent string, s *Scope) {
+ t.Logf("%sscope %s %v-%v = %v",
+ indent,
+ ScopeComment(s),
+ s.Pos(),
+ s.End(),
+ s.Names())
+ for i := range s.NumChildren() {
+ printScopeTree(indent+" ", s.Child(i))
+ }
+ }
+ printScopeTree("", mainScope)
+
+ t.Errorf("%s: Scope(%s).LookupParent(%s@%v) got %v, want %v [scopePos=%v]",
+ id.Pos(),
+ ScopeComment(inner),
+ id.Value,
+ id.Pos(),
+ gotObj,
+ wantObj,
+ ObjectScopePos(wantObj))
continue
}
}
@@ -2093,6 +2195,12 @@ func TestIssue61737(t *testing.T) {
iface.NumMethods() // unlike go/types, there is no Complete() method, so we complete implicitly
}
+func TestNewAlias_Issue65455(t *testing.T) {
+ obj := NewTypeName(nopos, nil, "A", nil)
+ alias := NewAlias(obj, Typ[Int])
+ alias.Underlying() // must not panic
+}
+
func TestIssue15305(t *testing.T) {
const src = "package p; func f() int16; var _ = f(undef)"
f := mustParse(src)
@@ -2318,6 +2426,60 @@ func TestInstantiate(t *testing.T) {
}
}
+func TestInstantiateConcurrent(t *testing.T) {
+ const src = `package p
+
+type I[P any] interface {
+ m(P)
+ n() P
+}
+
+type J = I[int]
+
+type Nested[P any] *interface{b(P)}
+
+type K = Nested[string]
+`
+ pkg := mustTypecheck(src, nil, nil)
+
+ insts := []*Interface{
+ pkg.Scope().Lookup("J").Type().Underlying().(*Interface),
+ pkg.Scope().Lookup("K").Type().Underlying().(*Pointer).Elem().(*Interface),
+ }
+
+ // Use the interface instances concurrently.
+ for _, inst := range insts {
+ var (
+ counts [2]int // method counts
+ methods [2][]string // method strings
+ )
+ var wg sync.WaitGroup
+ for i := 0; i < 2; i++ {
+ i := i
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ counts[i] = inst.NumMethods()
+ for mi := 0; mi < counts[i]; mi++ {
+ methods[i] = append(methods[i], inst.Method(mi).String())
+ }
+ }()
+ }
+ wg.Wait()
+
+ if counts[0] != counts[1] {
+ t.Errorf("mismatching method counts for %s: %d vs %d", inst, counts[0], counts[1])
+ continue
+ }
+ for i := 0; i < counts[0]; i++ {
+ if m0, m1 := methods[0][i], methods[1][i]; m0 != m1 {
+ t.Errorf("mismatching methods for %s: %s vs %s", inst, m0, m1)
+ }
+ }
+ }
+}
+
func TestInstantiateErrors(t *testing.T) {
tests := []struct {
src string // by convention, T must be the type being instantiated
@@ -2709,3 +2871,69 @@ var _ = f(1, 2)
t.Errorf("src1: unexpected error: got %v", err)
}
}
+
+func TestModuleVersion(t *testing.T) {
+ // version go1.dd must be able to typecheck go1.dd.0, go1.dd.1, etc.
+ goversion := fmt.Sprintf("go1.%d", goversion.Version)
+ for _, v := range []string{
+ goversion,
+ goversion + ".0",
+ goversion + ".1",
+ goversion + ".rc",
+ } {
+ conf := Config{GoVersion: v}
+ pkg := mustTypecheck("package p", &conf, nil)
+ if pkg.GoVersion() != conf.GoVersion {
+ t.Errorf("got %s; want %s", pkg.GoVersion(), conf.GoVersion)
+ }
+ }
+}
+
+func TestFileVersions(t *testing.T) {
+ for _, test := range []struct {
+ goVersion string
+ fileVersion string
+ wantVersion string
+ }{
+ {"", "", ""}, // no versions specified
+ {"go1.19", "", "go1.19"}, // module version specified
+ {"", "go1.20", ""}, // file upgrade ignored
+ {"go1.19", "go1.20", "go1.20"}, // file upgrade permitted
+ {"go1.20", "go1.19", "go1.20"}, // file downgrade not permitted
+ {"go1.21", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21)
+
+ // versions containing release numbers
+ // (file versions containing release numbers are considered invalid)
+ {"go1.19.0", "", "go1.19.0"}, // no file version specified
+ {"go1.20", "go1.20.1", "go1.20"}, // file upgrade ignored
+ {"go1.20.1", "go1.20", "go1.20.1"}, // file upgrade ignored
+ {"go1.20.1", "go1.21", "go1.21"}, // file upgrade permitted
+ {"go1.20.1", "go1.19", "go1.20.1"}, // file downgrade not permitted
+ {"go1.21.1", "go1.19.1", "go1.21.1"}, // file downgrade not permitted (invalid file version)
+ {"go1.21.1", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21)
+ } {
+ var src string
+ if test.fileVersion != "" {
+ src = "//go:build " + test.fileVersion + "\n"
+ }
+ src += "package p"
+
+ conf := Config{GoVersion: test.goVersion}
+ versions := make(map[*syntax.PosBase]string)
+ var info Info
+ info.FileVersions = versions
+ mustTypecheck(src, &conf, &info)
+
+ n := 0
+ for _, v := range info.FileVersions {
+ want := test.wantVersion
+ if v != want {
+ t.Errorf("%q: unexpected file version: got %v, want %v", src, v, want)
+ }
+ n++
+ }
+ if n != 1 {
+ t.Errorf("%q: incorrect number of map entries: got %d", src, n)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/assignments.go b/src/cmd/compile/internal/types2/assignments.go
index cba102e4f4..8abafdba1b 100644
--- a/src/cmd/compile/internal/types2/assignments.go
+++ b/src/cmd/compile/internal/types2/assignments.go
@@ -102,7 +102,7 @@ func (check *Checker) assignment(x *operand, T Type, context string) {
}
func (check *Checker) initConst(lhs *Const, x *operand) {
- if x.mode == invalid || x.typ == Typ[Invalid] || lhs.typ == Typ[Invalid] {
+ if x.mode == invalid || !isValid(x.typ) || !isValid(lhs.typ) {
if lhs.typ == nil {
lhs.typ = Typ[Invalid]
}
@@ -137,7 +137,7 @@ func (check *Checker) initConst(lhs *Const, x *operand) {
// or Typ[Invalid] in case of an error.
// If the initialization check fails, x.mode is set to invalid.
func (check *Checker) initVar(lhs *Var, x *operand, context string) {
- if x.mode == invalid || x.typ == Typ[Invalid] || lhs.typ == Typ[Invalid] {
+ if x.mode == invalid || !isValid(x.typ) || !isValid(lhs.typ) {
if lhs.typ == nil {
lhs.typ = Typ[Invalid]
}
@@ -170,7 +170,7 @@ func (check *Checker) initVar(lhs *Var, x *operand, context string) {
// and Typ[Invalid] if it is an invalid lhs expression.
func (check *Checker) lhsVar(lhs syntax.Expr) Type {
// Determine if the lhs is a (possibly parenthesized) identifier.
- ident, _ := unparen(lhs).(*syntax.Name)
+ ident, _ := syntax.Unparen(lhs).(*syntax.Name)
// Don't evaluate lhs if it is the blank identifier.
if ident != nil && ident.Value == "_" {
@@ -202,7 +202,7 @@ func (check *Checker) lhsVar(lhs syntax.Expr) Type {
v.used = v_used // restore v.used
}
- if x.mode == invalid || x.typ == Typ[Invalid] {
+ if x.mode == invalid || !isValid(x.typ) {
return Typ[Invalid]
}
@@ -232,9 +232,9 @@ func (check *Checker) lhsVar(lhs syntax.Expr) Type {
// assignVar checks the assignment lhs = rhs (if x == nil), or lhs = x (if x != nil).
// If x != nil, it must be the evaluation of rhs (and rhs will be ignored).
// If the assignment check fails and x != nil, x.mode is set to invalid.
-func (check *Checker) assignVar(lhs, rhs syntax.Expr, x *operand) {
+func (check *Checker) assignVar(lhs, rhs syntax.Expr, x *operand, context string) {
T := check.lhsVar(lhs) // nil if lhs is _
- if T == Typ[Invalid] {
+ if !isValid(T) {
if x != nil {
x.mode = invalid
} else {
@@ -244,12 +244,18 @@ func (check *Checker) assignVar(lhs, rhs syntax.Expr, x *operand) {
}
if x == nil {
+ var target *target
+ // avoid calling syntax.String if not needed
+ if T != nil {
+ if _, ok := under(T).(*Signature); ok {
+ target = newTarget(T, syntax.String(lhs))
+ }
+ }
x = new(operand)
- check.expr(T, x, rhs)
+ check.expr(target, x, rhs)
}
- context := "assignment"
- if T == nil {
+ if T == nil && context == "assignment" {
context = "assignment to _ identifier"
}
check.assignment(x, T, context)
@@ -282,7 +288,7 @@ func (check *Checker) typesSummary(list []Type, variadic bool) string {
switch {
case t == nil:
fallthrough // should not happen but be cautious
- case t == Typ[Invalid]:
+ case !isValid(t):
s = "unknown type"
case isUntyped(t):
if isNumeric(t) {
@@ -320,7 +326,7 @@ func (check *Checker) assignError(rhs []syntax.Expr, l, r int) {
rhs0 := rhs[0]
if len(rhs) == 1 {
- if call, _ := unparen(rhs0).(*syntax.CallExpr); call != nil {
+ if call, _ := syntax.Unparen(rhs0).(*syntax.CallExpr); call != nil {
check.errorf(rhs0, WrongAssignCount, "assignment mismatch: %s but %s returns %s", vars, call.Fun, vals)
return
}
@@ -361,7 +367,7 @@ func (check *Checker) initVars(lhs []*Var, orig_rhs []syntax.Expr, returnStmt sy
// error message don't handle it as n:n mapping below.
isCall := false
if r == 1 {
- _, isCall = unparen(orig_rhs[0]).(*syntax.CallExpr)
+ _, isCall = syntax.Unparen(orig_rhs[0]).(*syntax.CallExpr)
}
// If we have a n:n mapping from lhs variable to rhs expression,
@@ -369,7 +375,11 @@ func (check *Checker) initVars(lhs []*Var, orig_rhs []syntax.Expr, returnStmt sy
if l == r && !isCall {
var x operand
for i, lhs := range lhs {
- check.expr(lhs.typ, &x, orig_rhs[i])
+ desc := lhs.name
+ if returnStmt != nil && desc == "" {
+ desc = "result variable"
+ }
+ check.expr(newTarget(lhs.typ, desc), &x, orig_rhs[i])
check.initVar(lhs, &x, context)
}
return
@@ -436,14 +446,14 @@ func (check *Checker) assignVars(lhs, orig_rhs []syntax.Expr) {
// error message don't handle it as n:n mapping below.
isCall := false
if r == 1 {
- _, isCall = unparen(orig_rhs[0]).(*syntax.CallExpr)
+ _, isCall = syntax.Unparen(orig_rhs[0]).(*syntax.CallExpr)
}
// If we have a n:n mapping from lhs variable to rhs expression,
// each value can be assigned to its corresponding variable.
if l == r && !isCall {
for i, lhs := range lhs {
- check.assignVar(lhs, orig_rhs[i], nil)
+ check.assignVar(lhs, orig_rhs[i], nil, "assignment")
}
return
}
@@ -464,7 +474,7 @@ func (check *Checker) assignVars(lhs, orig_rhs []syntax.Expr) {
r = len(rhs)
if l == r {
for i, lhs := range lhs {
- check.assignVar(lhs, nil, rhs[i])
+ check.assignVar(lhs, nil, rhs[i], "assignment")
}
// Only record comma-ok expression if both assignments succeeded
// (go.dev/issue/59371).
@@ -483,21 +493,6 @@ func (check *Checker) assignVars(lhs, orig_rhs []syntax.Expr) {
// orig_rhs[0] was already evaluated
}
-// unpackExpr unpacks a *syntax.ListExpr into a list of syntax.Expr.
-// Helper introduced for the go/types -> types2 port.
-// TODO(gri) Should find a more efficient solution that doesn't
-// require introduction of a new slice for simple
-// expressions.
-func unpackExpr(x syntax.Expr) []syntax.Expr {
- if x, _ := x.(*syntax.ListExpr); x != nil {
- return x.ElemList
- }
- if x != nil {
- return []syntax.Expr{x}
- }
- return nil
-}
-
func (check *Checker) shortVarDecl(pos syntax.Pos, lhs, rhs []syntax.Expr) {
top := len(check.delayed)
scope := check.scope
diff --git a/src/cmd/compile/internal/types2/builtins.go b/src/cmd/compile/internal/types2/builtins.go
index 7a209e7a97..60f6d7f415 100644
--- a/src/cmd/compile/internal/types2/builtins.go
+++ b/src/cmd/compile/internal/types2/builtins.go
@@ -206,7 +206,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
if mode == invalid {
// avoid error if underlying type is invalid
- if under(x.typ) != Typ[Invalid] {
+ if isValid(under(x.typ)) {
code := InvalidCap
if id == _Len {
code = InvalidLen
@@ -490,7 +490,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
// (no argument evaluated yet)
arg0 := argList[0]
T := check.varType(arg0)
- if T == Typ[Invalid] {
+ if !isValid(T) {
return
}
@@ -600,7 +600,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
// new(T)
// (no argument evaluated yet)
T := check.varType(argList[0])
- if T == Typ[Invalid] {
+ if !isValid(T) {
return
}
@@ -706,7 +706,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
// unsafe.Offsetof(x T) uintptr, where x must be a selector
// (no argument evaluated yet)
arg0 := argList[0]
- selx, _ := unparen(arg0).(*syntax.SelectorExpr)
+ selx, _ := syntax.Unparen(arg0).(*syntax.SelectorExpr)
if selx == nil {
check.errorf(arg0, BadOffsetofSyntax, invalidArg+"%s is not a selector expression", arg0)
check.use(arg0)
@@ -799,7 +799,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
// unsafe.Slice(ptr *T, len IntegerType) []T
check.verifyVersionf(call.Fun, go1_17, "unsafe.Slice")
- ptr, _ := under(x.typ).(*Pointer) // TODO(gri) should this be coreType rather than under?
+ ptr, _ := coreType(x.typ).(*Pointer)
if ptr == nil {
check.errorf(x, InvalidUnsafeSlice, invalidArg+"%s is not a pointer", x)
return
@@ -820,7 +820,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
// unsafe.SliceData(slice []T) *T
check.verifyVersionf(call.Fun, go1_20, "unsafe.SliceData")
- slice, _ := under(x.typ).(*Slice) // TODO(gri) should this be coreType rather than under?
+ slice, _ := coreType(x.typ).(*Slice)
if slice == nil {
check.errorf(x, InvalidUnsafeSliceData, invalidArg+"%s is not a slice", x)
return
@@ -923,7 +923,7 @@ func hasVarSize(t Type, seen map[*Named]bool) (varSized bool) {
// Cycles are only possible through *Named types.
// The seen map is used to detect cycles and track
// the results of previously seen types.
- if named, _ := t.(*Named); named != nil {
+ if named := asNamed(t); named != nil {
if v, ok := seen[named]; ok {
return v
}
@@ -954,7 +954,7 @@ func hasVarSize(t Type, seen map[*Named]bool) (varSized bool) {
}
// applyTypeFunc applies f to x. If x is a type parameter,
-// the result is a type parameter constrained by an new
+// the result is a type parameter constrained by a new
// interface bound. The type bounds for that interface
// are computed by applying f to each of the type bounds
// of x. If any of these applications of f return nil,
diff --git a/src/cmd/compile/internal/types2/call.go b/src/cmd/compile/internal/types2/call.go
index 24f54c36cf..db7d86e3d3 100644
--- a/src/cmd/compile/internal/types2/call.go
+++ b/src/cmd/compile/internal/types2/call.go
@@ -16,8 +16,8 @@ import (
// funcInst type-checks a function instantiation.
// The incoming x must be a generic function.
// If inst != nil, it provides some or all of the type arguments (inst.Index).
-// If target type tsig != nil, the signature may be used to infer missing type
-// arguments of x, if any. At least one of tsig or inst must be provided.
+// If target != nil, it may be used to infer missing type arguments of x, if any.
+// At least one of T or inst must be provided.
//
// There are two modes of operation:
//
@@ -32,8 +32,8 @@ import (
//
// If an error (other than a version error) occurs in any case, it is reported
// and x.mode is set to invalid.
-func (check *Checker) funcInst(tsig *Signature, pos syntax.Pos, x *operand, inst *syntax.IndexExpr, infer bool) ([]Type, []syntax.Expr) {
- assert(tsig != nil || inst != nil)
+func (check *Checker) funcInst(T *target, pos syntax.Pos, x *operand, inst *syntax.IndexExpr, infer bool) ([]Type, []syntax.Expr) {
+ assert(T != nil || inst != nil)
var instErrPos poser
if inst != nil {
@@ -47,7 +47,7 @@ func (check *Checker) funcInst(tsig *Signature, pos syntax.Pos, x *operand, inst
var targs []Type
var xlist []syntax.Expr
if inst != nil {
- xlist = unpackExpr(inst.Index)
+ xlist = syntax.UnpackListExpr(inst.Index)
targs = check.typeList(xlist)
if targs == nil {
x.mode = invalid
@@ -87,7 +87,8 @@ func (check *Checker) funcInst(tsig *Signature, pos syntax.Pos, x *operand, inst
//
var args []*operand
var params []*Var
- if tsig != nil && sig.tparams != nil {
+ var reverse bool
+ if T != nil && sig.tparams != nil {
if !versionErr && !check.allowVersion(check.pkg, instErrPos, go1_21) {
if inst != nil {
check.versionErrorf(instErrPos, go1_21, "partially instantiated function in assignment")
@@ -100,15 +101,16 @@ func (check *Checker) funcInst(tsig *Signature, pos syntax.Pos, x *operand, inst
// The type of the argument operand is tsig, which is the type of the LHS in an assignment
// or the result type in a return statement. Create a pseudo-expression for that operand
// that makes sense when reported in error messages from infer, below.
- expr := syntax.NewName(x.Pos(), "variable in assignment")
- args = []*operand{{mode: value, expr: expr, typ: tsig}}
+ expr := syntax.NewName(x.Pos(), T.desc)
+ args = []*operand{{mode: value, expr: expr, typ: T.sig}}
+ reverse = true
}
// Rename type parameters to avoid problems with recursive instantiations.
// Note that NewTuple(params...) below is (*Tuple)(nil) if len(params) == 0, as desired.
tparams, params2 := check.renameTParams(pos, sig.TypeParams().list(), NewTuple(params...))
- targs = check.infer(pos, tparams, targs, params2.(*Tuple), args)
+ targs = check.infer(pos, tparams, targs, params2.(*Tuple), args, reverse)
if targs == nil {
// error was already reported
x.mode = invalid
@@ -258,7 +260,7 @@ func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind {
var xlist []syntax.Expr
var targs []Type
if inst != nil {
- xlist = unpackExpr(inst.Index)
+ xlist = syntax.UnpackListExpr(inst.Index)
targs = check.typeList(xlist)
if targs == nil {
check.use(call.ArgList...)
@@ -575,8 +577,7 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []T
// Before we change the type (type parameter renaming, below), make
// a clone of it as otherwise we implicitly modify the object's type
// (go.dev/issues/63260).
- clone := *asig
- asig = &clone
+ asig = clone(asig)
// Rename type parameters for cases like f(g, g); this gives each
// generic function argument a unique type identity (go.dev/issues/59956).
// TODO(gri) Consider only doing this if a function argument appears
@@ -609,7 +610,7 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []T
// infer missing type arguments of callee and function arguments
if len(tparams) > 0 {
- targs = check.infer(call.Pos(), tparams, targs, sigParams, args)
+ targs = check.infer(call.Pos(), tparams, targs, sigParams, args, false)
if targs == nil {
// TODO(gri) If infer inferred the first targs[:n], consider instantiating
// the call signature for better error messages/gopls behavior.
@@ -666,7 +667,7 @@ var cgoPrefixes = [...]string{
"_Cmacro_", // function to evaluate the expanded expression
}
-func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *Named, wantType bool) {
+func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *TypeName, wantType bool) {
// these must be declared before the "goto Error" statements
var (
obj Object
@@ -767,8 +768,8 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *Named, w
switch x.mode {
case typexpr:
// don't crash for "type T T.x" (was go.dev/issue/51509)
- if def != nil && x.typ == def {
- check.cycleError([]Object{def.obj})
+ if def != nil && def.typ == x.typ {
+ check.cycleError([]Object{def})
goto Error
}
case builtin:
@@ -800,7 +801,7 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *Named, w
obj, index, indirect = LookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel)
if obj == nil {
// Don't report another error if the underlying type was invalid (go.dev/issue/49541).
- if under(x.typ) == Typ[Invalid] {
+ if !isValid(under(x.typ)) {
goto Error
}
@@ -961,7 +962,7 @@ func (check *Checker) useN(args []syntax.Expr, lhs bool) bool {
func (check *Checker) use1(e syntax.Expr, lhs bool) bool {
var x operand
x.mode = value // anything but invalid
- switch n := unparen(e).(type) {
+ switch n := syntax.Unparen(e).(type) {
case nil:
// nothing to do
case *syntax.Name:
diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go
index 0a2a49062b..0582367083 100644
--- a/src/cmd/compile/internal/types2/check.go
+++ b/src/cmd/compile/internal/types2/check.go
@@ -11,7 +11,7 @@ import (
"errors"
"fmt"
"go/constant"
- "internal/goversion"
+ "internal/godebug"
. "internal/types/errors"
)
@@ -21,6 +21,9 @@ var nopos syntax.Pos
// debugging/development support
const debug = false // leave on during development
+// gotypesalias controls the use of Alias types.
+var gotypesalias = godebug.New("#gotypesalias")
+
// exprInfo stores information about an untyped expression.
type exprInfo struct {
isLhs bool // expression is lhs operand of a shift with delayed type-check
@@ -93,11 +96,17 @@ type actionDesc struct {
type Checker struct {
// package information
// (initialized by NewChecker, valid for the life-time of checker)
+
+ // If enableAlias is set, alias declarations produce an Alias type.
+ // Otherwise the alias information is only in the type name, which
+ // points directly to the actual (aliased) type.
+ enableAlias bool
+
conf *Config
ctxt *Context // context for de-duplicating instances
pkg *Package
*Info
- version version // accepted language version
+ version goVersion // accepted language version
nextID uint64 // unique Id for type parameters (first valid Id is 1)
objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info
impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package
@@ -117,7 +126,7 @@ type Checker struct {
// (initialized by Files, valid only for the duration of check.Files;
// maps and lists are allocated on demand)
files []*syntax.File // list of package files
- posVers map[*syntax.PosBase]version // Pos -> Go version mapping
+ versions map[*syntax.PosBase]string // maps file bases to version strings (each file has an entry)
imports []*PkgName // list of imported packages
dotImportMap map[dotImportKey]*PkgName // maps dot-imported objects to the package they were dot-imported through
recvTParamMap map[*syntax.Name]*TypeParam // maps blank receiver type parameters to their type
@@ -152,9 +161,14 @@ func (check *Checker) addDeclDep(to Object) {
from.addDep(to)
}
+// Note: The following three alias-related functions are only used
+// when Alias types are not enabled.
+
// brokenAlias records that alias doesn't have a determined type yet.
// It also sets alias.typ to Typ[Invalid].
+// Not used if check.enableAlias is set.
func (check *Checker) brokenAlias(alias *TypeName) {
+ assert(!check.enableAlias)
if check.brokenAliases == nil {
check.brokenAliases = make(map[*TypeName]bool)
}
@@ -164,13 +178,15 @@ func (check *Checker) brokenAlias(alias *TypeName) {
// validAlias records that alias has the valid type typ (possibly Typ[Invalid]).
func (check *Checker) validAlias(alias *TypeName, typ Type) {
+ assert(!check.enableAlias)
delete(check.brokenAliases, alias)
alias.typ = typ
}
// isBrokenAlias reports whether alias doesn't have a determined type yet.
func (check *Checker) isBrokenAlias(alias *TypeName) bool {
- return alias.typ == Typ[Invalid] && check.brokenAliases[alias]
+ assert(!check.enableAlias)
+ return check.brokenAliases[alias]
}
func (check *Checker) rememberUntyped(e syntax.Expr, lhs bool, mode operandMode, typ *Basic, val constant.Value) {
@@ -239,12 +255,14 @@ func NewChecker(conf *Config, pkg *Package, info *Info) *Checker {
// (previously, pkg.goVersion was mutated here: go.dev/issue/61212)
return &Checker{
- conf: conf,
- ctxt: conf.Context,
- pkg: pkg,
- Info: info,
- objMap: make(map[Object]*declInfo),
- impMap: make(map[importKey]*Package),
+ enableAlias: gotypesalias.Value() == "1",
+ conf: conf,
+ ctxt: conf.Context,
+ pkg: pkg,
+ Info: info,
+ version: asGoVersion(conf.GoVersion),
+ objMap: make(map[Object]*declInfo),
+ impMap: make(map[importKey]*Package),
}
}
@@ -284,33 +302,51 @@ func (check *Checker) initFiles(files []*syntax.File) {
}
}
+ // reuse Info.FileVersions if provided
+ versions := check.Info.FileVersions
+ if versions == nil {
+ versions = make(map[*syntax.PosBase]string)
+ }
+ check.versions = versions
+
+ pkgVersionOk := check.version.isValid()
+ downgradeOk := check.version.cmp(go1_21) >= 0
+
+ // determine Go version for each file
for _, file := range check.files {
- v, _ := parseGoVersion(file.GoVersion)
- if v.major > 0 {
- if v.equal(check.version) {
- continue
- }
- // Go 1.21 introduced the feature of setting the go.mod
- // go line to an early version of Go and allowing //go:build lines
- // to “upgrade” the Go version in a given file.
- // We can do that backwards compatibly.
- // Go 1.21 also introduced the feature of allowing //go:build lines
- // to “downgrade” the Go version in a given file.
- // That can't be done compatibly in general, since before the
- // build lines were ignored and code got the module's Go version.
- // To work around this, downgrades are only allowed when the
- // module's Go version is Go 1.21 or later.
- // If there is no check.version, then we don't really know what Go version to apply.
- // Legacy tools may do this, and they historically have accepted everything.
- // Preserve that behavior by ignoring //go:build constraints entirely in that case.
- if (v.before(check.version) && check.version.before(version{1, 21})) || check.version.equal(version{0, 0}) {
- continue
+ // use unaltered Config.GoVersion by default
+ // (This version string may contain dot-release numbers as in go1.20.1,
+ // unlike file versions which are Go language versions only, if valid.)
+ v := check.conf.GoVersion
+ // use the file version, if applicable
+ // (file versions are either the empty string or of the form go1.dd)
+ if pkgVersionOk {
+ fileVersion := asGoVersion(file.GoVersion)
+ if fileVersion.isValid() {
+ cmp := fileVersion.cmp(check.version)
+ // Go 1.21 introduced the feature of setting the go.mod
+ // go line to an early version of Go and allowing //go:build lines
+ // to “upgrade” (cmp > 0) the Go version in a given file.
+ // We can do that backwards compatibly.
+ //
+ // Go 1.21 also introduced the feature of allowing //go:build lines
+ // to “downgrade” (cmp < 0) the Go version in a given file.
+ // That can't be done compatibly in general, since before the
+ // build lines were ignored and code got the module's Go version.
+ // To work around this, downgrades are only allowed when the
+ // module's Go version is Go 1.21 or later.
+ //
+ // If there is no valid check.version, then we don't really know what
+ // Go version to apply.
+ // Legacy tools may do this, and they historically have accepted everything.
+ // Preserve that behavior by ignoring //go:build constraints entirely in that
+ // case (!pkgVersionOk).
+ if cmp > 0 || cmp < 0 && downgradeOk {
+ v = file.GoVersion
+ }
}
- if check.posVers == nil {
- check.posVers = make(map[*syntax.PosBase]version)
- }
- check.posVers[base(file.Pos())] = v
}
+ versions[base(file.Pos())] = v // base(file.Pos()) may be nil for tests
}
}
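
The upgrade/downgrade policy above can be observed through Info.FileVersions. A minimal sketch using the exported go/types mirror (which keys FileVersions by *ast.File rather than *syntax.PosBase), assuming a Go 1.22 toolchain:

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
		"go/types"
	)

	func main() {
		// A file asking for an older language version via //go:build.
		const src = "//go:build go1.19\n\npackage p\n"
		fset := token.NewFileSet()
		f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
		if err != nil {
			panic(err)
		}
		for _, modVersion := range []string{"go1.20", "go1.21"} {
			info := &types.Info{FileVersions: make(map[*ast.File]string)}
			conf := types.Config{GoVersion: modVersion}
			if _, err := conf.Check("p", fset, []*ast.File{f}, info); err != nil {
				panic(err)
			}
			// go1.20 module: the downgrade is ignored, the file stays at go1.20.
			// go1.21 module: downgrades are permitted, the file becomes go1.19.
			fmt.Println(modVersion, "->", info.FileVersions[f])
		}
	}
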
@@ -341,11 +377,8 @@ func (check *Checker) checkFiles(files []*syntax.File) (err error) {
return nil
}
- check.version, err = parseGoVersion(check.conf.GoVersion)
- if err != nil {
- return err
- }
- if check.version.after(version{1, goversion.Version}) {
+ // Note: NewChecker doesn't return an error, so we need to check the version here.
+ if check.version.cmp(go_current) > 0 {
return fmt.Errorf("package requires newer Go version %v", check.version)
}
if check.conf.FakeImportC && check.conf.go115UsesCgo {
@@ -496,7 +529,7 @@ func (check *Checker) recordTypeAndValue(x syntax.Expr, mode operandMode, typ Ty
assert(val != nil)
// We check allBasic(typ, IsConstType) here as constant expressions may be
// recorded as type parameters.
- assert(typ == Typ[Invalid] || allBasic(typ, IsConstType))
+ assert(!isValid(typ) || allBasic(typ, IsConstType))
}
if m := check.Types; m != nil {
m[x] = TypeAndValue{mode, typ, val}
diff --git a/src/cmd/compile/internal/types2/check_test.go b/src/cmd/compile/internal/types2/check_test.go
index 8cb3000501..a9d6202a33 100644
--- a/src/cmd/compile/internal/types2/check_test.go
+++ b/src/cmd/compile/internal/types2/check_test.go
@@ -34,11 +34,13 @@ import (
"cmd/compile/internal/syntax"
"flag"
"fmt"
+ "internal/buildcfg"
"internal/testenv"
"os"
"path/filepath"
"reflect"
"regexp"
+ "runtime"
"strconv"
"strings"
"testing"
@@ -110,6 +112,8 @@ func parseFlags(src []byte, flags *flag.FlagSet) error {
// testFiles type-checks the package consisting of the given files, and
// compares the resulting errors with the ERROR annotations in the source.
+// Except for manual tests, each package is type-checked twice, once without
+// use of Alias types, and once with Alias types.
//
// The srcs slice contains the file content for the files named in the
// filenames slice. The colDelta parameter specifies the tolerance for position
@@ -118,25 +122,25 @@ func parseFlags(src []byte, flags *flag.FlagSet) error {
//
// If provided, opts may be used to mutate the Config before type-checking.
func testFiles(t *testing.T, filenames []string, srcs [][]byte, colDelta uint, manual bool, opts ...func(*Config)) {
- if len(filenames) == 0 {
- t.Fatal("no source files")
+ // Alias types are disabled by default
+ testFilesImpl(t, filenames, srcs, colDelta, manual, opts...)
+ if !manual {
+ t.Setenv("GODEBUG", "gotypesalias=1")
+ testFilesImpl(t, filenames, srcs, colDelta, manual, opts...)
}
+}
- var conf Config
- flags := flag.NewFlagSet("", flag.PanicOnError)
- flags.StringVar(&conf.GoVersion, "lang", "", "")
- flags.BoolVar(&conf.FakeImportC, "fakeImportC", false, "")
- if err := parseFlags(srcs[0], flags); err != nil {
- t.Fatal(err)
+func testFilesImpl(t *testing.T, filenames []string, srcs [][]byte, colDelta uint, manual bool, opts ...func(*Config)) {
+ if len(filenames) == 0 {
+ t.Fatal("no source files")
}
+ // parse files
files, errlist := parseFiles(t, filenames, srcs, 0)
-
pkgName := "<no package>"
if len(files) > 0 {
pkgName = files[0].PkgName.Value
}
-
listErrors := manual && !*verifyErrors
if listErrors && len(errlist) > 0 {
t.Errorf("--- %s:", pkgName)
@@ -145,7 +149,8 @@ func testFiles(t *testing.T, filenames []string, srcs [][]byte, colDelta uint, m
}
}
- // typecheck and collect typechecker errors
+ // set up typechecker
+ var conf Config
conf.Trace = manual && testing.Verbose()
conf.Importer = defaultImporter()
conf.Error = func(err error) {
@@ -159,12 +164,51 @@ func testFiles(t *testing.T, filenames []string, srcs [][]byte, colDelta uint, m
errlist = append(errlist, err)
}
+ // apply custom configuration
for _, opt := range opts {
opt(&conf)
}
- conf.Check(pkgName, files, nil)
+ // apply flag setting (overrides custom configuration)
+ var goexperiment, gotypesalias string
+ flags := flag.NewFlagSet("", flag.PanicOnError)
+ flags.StringVar(&conf.GoVersion, "lang", "", "")
+ flags.StringVar(&goexperiment, "goexperiment", "", "")
+ flags.BoolVar(&conf.FakeImportC, "fakeImportC", false, "")
+ flags.StringVar(&gotypesalias, "gotypesalias", "", "")
+ if err := parseFlags(srcs[0], flags); err != nil {
+ t.Fatal(err)
+ }
+
+ exp, err := buildcfg.ParseGOEXPERIMENT(runtime.GOOS, runtime.GOARCH, goexperiment)
+ if err != nil {
+ t.Fatal(err)
+ }
+ old := buildcfg.Experiment
+ defer func() {
+ buildcfg.Experiment = old
+ }()
+ buildcfg.Experiment = *exp
+
+ // By default, gotypesalias is not set.
+ if gotypesalias != "" {
+ t.Setenv("GODEBUG", "gotypesalias="+gotypesalias)
+ }
+ // Provide Config.Info with all maps so that info recording is tested.
+ info := Info{
+ Types: make(map[syntax.Expr]TypeAndValue),
+ Instances: make(map[*syntax.Name]Instance),
+ Defs: make(map[*syntax.Name]Object),
+ Uses: make(map[*syntax.Name]Object),
+ Implicits: make(map[syntax.Node]Object),
+ Selections: make(map[*syntax.SelectorExpr]*Selection),
+ Scopes: make(map[syntax.Node]*Scope),
+ FileVersions: make(map[*syntax.PosBase]string),
+ }
+
+ // typecheck
+ conf.Check(pkgName, files, &info)
if listErrors {
return
}
@@ -345,6 +389,12 @@ func TestIssue47243_TypedRHS(t *testing.T) {
}
func TestCheck(t *testing.T) {
+ old := buildcfg.Experiment.RangeFunc
+ defer func() {
+ buildcfg.Experiment.RangeFunc = old
+ }()
+ buildcfg.Experiment.RangeFunc = true
+
DefPredeclaredTestFuncs()
testDirFiles(t, "../../../../internal/types/testdata/check", 50, false) // TODO(gri) narrow column tolerance
}
diff --git a/src/cmd/compile/internal/types2/context.go b/src/cmd/compile/internal/types2/context.go
index ae39c7b830..772312463e 100644
--- a/src/cmd/compile/internal/types2/context.go
+++ b/src/cmd/compile/internal/types2/context.go
@@ -79,7 +79,7 @@ func (ctxt *Context) instanceHash(orig Type, targs []Type) string {
h.typeList(targs)
}
- return strings.Replace(buf.String(), " ", "#", -1) // ReplaceAll is not available in Go1.4
+ return strings.ReplaceAll(buf.String(), " ", "#")
}
// lookup returns an existing instantiation of orig with targs, if it exists.
diff --git a/src/cmd/compile/internal/types2/conversions.go b/src/cmd/compile/internal/types2/conversions.go
index ef0094dc70..8027092c6c 100644
--- a/src/cmd/compile/internal/types2/conversions.go
+++ b/src/cmd/compile/internal/types2/conversions.go
@@ -42,6 +42,14 @@ func (check *Checker) conversion(x *operand, T Type) {
case constArg && isConstType(T):
// constant conversion
ok = constConvertibleTo(T, &x.val)
+ // A conversion from an integer constant to an integer type
+ // can only fail if there's overflow. Give a concise error.
+ // (go.dev/issue/63563)
+ if !ok && isInteger(x.typ) && isInteger(T) {
+ check.errorf(x, InvalidConversion, "constant %s overflows %s", x.val, T)
+ x.mode = invalid
+ return
+ }
case constArg && isTypeParam(T):
// x is convertible to T if it is convertible
// to each specific type in the type set of T.
@@ -58,7 +66,12 @@ func (check *Checker) conversion(x *operand, T Type) {
return true
}
if !constConvertibleTo(u, nil) {
- cause = check.sprintf("cannot convert %s to type %s (in %s)", x, u, T)
+ if isInteger(x.typ) && isInteger(u) {
+ // see comment above on constant conversion
+ cause = check.sprintf("constant %s overflows %s (in %s)", x.val, u, T)
+ } else {
+ cause = check.sprintf("cannot convert %s to type %s (in %s)", x, u, T)
+ }
return false
}
return true
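
A deliberately invalid snippet illustrating the more concise diagnostics produced by the new error paths; the quoted wording follows the errorf calls above, with the type-parameter case naming the overflowing type in the set:

	package p

	func _() {
		_ = uint8(255 + 1) // now: "constant 256 overflows uint8" (go.dev/issue/63563)
		_ = int16(1 << 20) // now: "constant 1048576 overflows int16"
	}

	func _[P uint8 | uint16](x P) {
		// For a type parameter, the cause names the overflowing type in P's
		// type set, roughly: "... constant 100000 overflows uint8 (in P)".
		_ = P(100000)
	}
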
diff --git a/src/cmd/compile/internal/types2/decl.go b/src/cmd/compile/internal/types2/decl.go
index 88864cb93e..f3e3418f4f 100644
--- a/src/cmd/compile/internal/types2/decl.go
+++ b/src/cmd/compile/internal/types2/decl.go
@@ -55,7 +55,7 @@ func pathString(path []Object) string {
// objDecl type-checks the declaration of obj in its respective (file) environment.
// For the meaning of def, see Checker.definedType, in typexpr.go.
-func (check *Checker) objDecl(obj Object, def *Named) {
+func (check *Checker) objDecl(obj Object, def *TypeName) {
if check.conf.Trace && obj.Type() == nil {
if check.indent == 0 {
fmt.Println() // empty line between top-level objects for readability
@@ -251,10 +251,14 @@ loop:
// the syntactic information. We should consider storing
// this information explicitly in the object.
var alias bool
- if d := check.objMap[obj]; d != nil {
- alias = d.tdecl.Alias // package-level object
+ if check.enableAlias {
+ alias = obj.IsAlias()
} else {
- alias = obj.IsAlias() // function local object
+ if d := check.objMap[obj]; d != nil {
+ alias = d.tdecl.Alias // package-level object
+ } else {
+ alias = obj.IsAlias() // function local object
+ }
}
if !alias {
ndef++
@@ -322,7 +326,11 @@ func (check *Checker) cycleError(cycle []Object) {
// If obj is a type alias, mark it as valid (not broken) in order to avoid follow-on errors.
tname, _ := obj.(*TypeName)
if tname != nil && tname.IsAlias() {
- check.validAlias(tname, Typ[Invalid])
+ // If we use Alias nodes, it is initialized with Typ[Invalid].
+ // TODO(gri) Adjust this code if we initialize with nil.
+ if !check.enableAlias {
+ check.validAlias(tname, Typ[Invalid])
+ }
}
// report a more concise error for self references
@@ -387,7 +395,7 @@ func (check *Checker) constDecl(obj *Const, typ, init syntax.Expr, inherited boo
if !isConstType(t) {
// don't report an error if the type is an invalid C (defined) type
// (go.dev/issue/22090)
- if under(t) != Typ[Invalid] {
+ if isValid(under(t)) {
check.errorf(typ, InvalidConstType, "invalid constant type %s", t)
}
obj.typ = Typ[Invalid]
@@ -441,7 +449,7 @@ func (check *Checker) varDecl(obj *Var, lhs []*Var, typ, init syntax.Expr) {
if lhs == nil || len(lhs) == 1 {
assert(lhs == nil || lhs[0] == obj)
var x operand
- check.expr(obj.typ, &x, init)
+ check.expr(newTarget(obj.typ, obj.name), &x, init)
check.initVar(obj, &x, "variable declaration")
return
}
@@ -475,7 +483,7 @@ func (check *Checker) varDecl(obj *Var, lhs []*Var, typ, init syntax.Expr) {
// isImportedConstraint reports whether typ is an imported type constraint.
func (check *Checker) isImportedConstraint(typ Type) bool {
- named, _ := typ.(*Named)
+ named := asNamed(typ)
if named == nil || named.obj.pkg == check.pkg || named.obj.pkg == nil {
return false
}
@@ -483,38 +491,50 @@ func (check *Checker) isImportedConstraint(typ Type) bool {
return u != nil && !u.IsMethodSet()
}
-func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *Named) {
+func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *TypeName) {
assert(obj.typ == nil)
var rhs Type
check.later(func() {
- if t, _ := obj.typ.(*Named); t != nil { // type may be invalid
+ if t := asNamed(obj.typ); t != nil { // type may be invalid
check.validType(t)
}
// If typ is local, an error was already reported where typ is specified/defined.
_ = check.isImportedConstraint(rhs) && check.verifyVersionf(tdecl.Type, go1_18, "using type constraint %s", rhs)
}).describef(obj, "validType(%s)", obj.Name())
- alias := tdecl.Alias
- if alias && tdecl.TParamList != nil {
+ aliasDecl := tdecl.Alias
+ if aliasDecl && tdecl.TParamList != nil {
// The parser will ensure this but we may still get an invalid AST.
// Complain and continue as regular type definition.
check.error(tdecl, BadDecl, "generic type cannot be alias")
- alias = false
+ aliasDecl = false
}
// alias declaration
- if alias {
+ if aliasDecl {
check.verifyVersionf(tdecl, go1_9, "type aliases")
- check.brokenAlias(obj)
- rhs = check.typ(tdecl.Type)
- check.validAlias(obj, rhs)
+ if check.enableAlias {
+ // TODO(gri) Should be able to use nil instead of Typ[Invalid] to mark
+ // the alias as incomplete. Currently this causes problems
+ // with certain cycles. Investigate.
+ alias := check.newAlias(obj, Typ[Invalid])
+ setDefType(def, alias)
+ rhs = check.definedType(tdecl.Type, obj)
+ assert(rhs != nil)
+ alias.fromRHS = rhs
+ Unalias(alias) // resolve alias.actual
+ } else {
+ check.brokenAlias(obj)
+ rhs = check.typ(tdecl.Type)
+ check.validAlias(obj, rhs)
+ }
return
}
// type definition or generic type declaration
named := check.newNamed(obj, nil, nil)
- def.setUnderlying(named)
+ setDefType(def, named)
if tdecl.TParamList != nil {
check.openScope(tdecl, "type parameters")
@@ -523,7 +543,7 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *Named
}
// determine underlying type of named
- rhs = check.definedType(tdecl.Type, named)
+ rhs = check.definedType(tdecl.Type, obj)
assert(rhs != nil)
named.fromRHS = rhs
@@ -550,8 +570,11 @@ func (check *Checker) collectTypeParams(dst **TypeParamList, list []*syntax.Fiel
// Declare type parameters up-front.
// The scope of type parameters starts at the beginning of the type parameter
// list (so we can have mutually recursive parameterized type bounds).
- for i, f := range list {
- tparams[i] = check.declareTypeParam(f.Name)
+ if len(list) > 0 {
+ scopePos := list[0].Pos()
+ for i, f := range list {
+ tparams[i] = check.declareTypeParam(f.Name, scopePos)
+ }
}
// Set the type parameters before collecting the type constraints because
@@ -608,7 +631,7 @@ func (check *Checker) bound(x syntax.Expr) Type {
return check.typ(x)
}
-func (check *Checker) declareTypeParam(name *syntax.Name) *TypeParam {
+func (check *Checker) declareTypeParam(name *syntax.Name, scopePos syntax.Pos) *TypeParam {
// Use Typ[Invalid] for the type constraint to ensure that a type
// is present even if the actual constraint has not been assigned
// yet.
@@ -616,8 +639,8 @@ func (check *Checker) declareTypeParam(name *syntax.Name) *TypeParam {
// constraints to make sure we don't rely on them if they
// are not properly set yet.
tname := NewTypeName(name.Pos(), check.pkg, name.Value, nil)
- tpar := check.newTypeParam(tname, Typ[Invalid]) // assigns type to tname as a side-effect
- check.declare(check.scope, name, tname, check.scope.pos) // TODO(gri) check scope position
+ tpar := check.newTypeParam(tname, Typ[Invalid]) // assigns type to tname as a side-effect
+ check.declare(check.scope, name, tname, scopePos)
return tpar
}
@@ -638,7 +661,7 @@ func (check *Checker) collectMethods(obj *TypeName) {
// spec: "If the base type is a struct type, the non-blank method
// and field names must be distinct."
- base, _ := obj.typ.(*Named) // shouldn't fail but be conservative
+ base := asNamed(obj.typ) // shouldn't fail but be conservative
if base != nil {
assert(base.TypeArgs().Len() == 0) // collectMethods should not be called on an instantiated type
@@ -730,6 +753,11 @@ func (check *Checker) funcDecl(obj *Func, decl *declInfo) {
check.funcType(sig, fdecl.Recv, fdecl.TParamList, fdecl.Type)
obj.color_ = saved
+ // Set the scope's extent to the complete "func (...) { ... }"
+ // so that Scope.Innermost works correctly.
+ sig.scope.pos = fdecl.Pos()
+ sig.scope.end = syntax.EndPos(fdecl)
+
if len(fdecl.TParamList) > 0 && fdecl.Body == nil {
check.softErrorf(fdecl, BadDecl, "generic function is missing function body")
}
@@ -777,7 +805,7 @@ func (check *Checker) declStmt(list []syntax.Decl) {
// declare all constants
lhs := make([]*Const, len(s.NameList))
- values := unpackExpr(last.Values)
+ values := syntax.UnpackListExpr(last.Values)
for i, name := range s.NameList {
obj := NewConst(name.Pos(), pkg, name.Value, nil, iota)
lhs[i] = obj
@@ -814,7 +842,7 @@ func (check *Checker) declStmt(list []syntax.Decl) {
}
// initialize all variables
- values := unpackExpr(s.Values)
+ values := syntax.UnpackListExpr(s.Values)
for i, obj := range lhs0 {
var lhs []*Var
var init syntax.Expr
diff --git a/src/cmd/compile/internal/types2/errorcalls_test.go b/src/cmd/compile/internal/types2/errorcalls_test.go
index 6153b42a34..ba4dc87b6a 100644
--- a/src/cmd/compile/internal/types2/errorcalls_test.go
+++ b/src/cmd/compile/internal/types2/errorcalls_test.go
@@ -1,6 +1,6 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE ast.
+// license that can be found in the LICENSE file.
package types2_test
diff --git a/src/cmd/compile/internal/types2/errors.go b/src/cmd/compile/internal/types2/errors.go
index 7db06d944d..b8414b4849 100644
--- a/src/cmd/compile/internal/types2/errors.go
+++ b/src/cmd/compile/internal/types2/errors.go
@@ -250,7 +250,7 @@ func (check *Checker) err(at poser, code Code, msg string, soft bool) {
pos = check.errpos
}
- // If we have an URL for error codes, add a link to the first line.
+ // If we have a URL for error codes, add a link to the first line.
if code != 0 && check.conf.ErrorURL != "" {
u := fmt.Sprintf(check.conf.ErrorURL, code)
if i := strings.Index(msg, "\n"); i >= 0 {
@@ -297,7 +297,7 @@ func (check *Checker) softErrorf(at poser, code Code, format string, args ...int
check.err(at, code, check.sprintf(format, args...), true)
}
-func (check *Checker) versionErrorf(at poser, v version, format string, args ...interface{}) {
+func (check *Checker) versionErrorf(at poser, v goVersion, format string, args ...interface{}) {
msg := check.sprintf(format, args...)
msg = fmt.Sprintf("%s requires %s or later", msg, v)
check.err(at, UnsupportedFeature, msg, true)
diff --git a/src/cmd/compile/internal/types2/expr.go b/src/cmd/compile/internal/types2/expr.go
index 67afbfb058..124d9701d6 100644
--- a/src/cmd/compile/internal/types2/expr.go
+++ b/src/cmd/compile/internal/types2/expr.go
@@ -147,7 +147,7 @@ func (check *Checker) unary(x *operand, e *syntax.Operation) {
case syntax.And:
// spec: "As an exception to the addressability
// requirement x may also be a composite literal."
- if _, ok := unparen(e.X).(*syntax.CompositeLit); !ok && x.mode != variable {
+ if _, ok := syntax.Unparen(e.X).(*syntax.CompositeLit); !ok && x.mode != variable {
check.errorf(x, UnaddressableOperand, invalidOp+"cannot take address of %s", x)
x.mode = invalid
return
@@ -392,7 +392,7 @@ func (check *Checker) updateExprVal(x syntax.Expr, val constant.Value) {
// If x is a constant operand, the returned constant.Value will be the
// representation of x in this context.
func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, constant.Value, Code) {
- if x.mode == invalid || isTyped(x.typ) || target == Typ[Invalid] {
+ if x.mode == invalid || isTyped(x.typ) || !isValid(target) {
return x.typ, nil, 0
}
// x is untyped
@@ -474,7 +474,7 @@ func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, const
// If switchCase is true, the operator op is ignored.
func (check *Checker) comparison(x, y *operand, op syntax.Operator, switchCase bool) {
// Avoid spurious errors if any of the operands has an invalid type (go.dev/issue/54405).
- if x.typ == Typ[Invalid] || y.typ == Typ[Invalid] {
+ if !isValid(x.typ) || !isValid(y.typ) {
x.mode = invalid
return
}
@@ -828,7 +828,7 @@ func (check *Checker) binary(x *operand, e syntax.Expr, lhs, rhs syntax.Expr, op
if !Identical(x.typ, y.typ) {
// only report an error if we have valid types
// (otherwise we had an error reported elsewhere already)
- if x.typ != Typ[Invalid] && y.typ != Typ[Invalid] {
+ if isValid(x.typ) && isValid(y.typ) {
if e != nil {
check.errorf(x, MismatchedTypes, invalidOp+"%s (mismatched types %s and %s)", e, x.typ, y.typ)
} else {
@@ -956,18 +956,32 @@ const (
statement
)
-// TODO(gri) In rawExpr below, consider using T instead of hint and
-// some sort of "operation mode" instead of allowGeneric.
-// May be clearer and less error-prone.
+// target represents the (signature) type and description of the LHS
+// variable of an assignment, or of a function result variable.
+type target struct {
+ sig *Signature
+ desc string
+}
+
+// newTarget creates a new target for the given type and description.
+// The result is nil if typ is not a signature.
+func newTarget(typ Type, desc string) *target {
+ if typ != nil {
+ if sig, _ := under(typ).(*Signature); sig != nil {
+ return &target{sig, desc}
+ }
+ }
+ return nil
+}
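
The target above drives reverse type inference: when an assignment LHS or a result variable has a function type, that signature is used to infer the type arguments of an uninstantiated generic function on the RHS, and desc labels the operand in inference errors. A minimal sketch of code that relies on this (valid since Go 1.21):

	package p

	func pick[T int | float64](x, y T) T {
		if x < y {
			return x
		}
		return y
	}

	// The assignment target's signature determines T.
	var minInt func(int, int) int = pick                // T = int
	var minFloat func(float64, float64) float64 = pick  // T = float64

	// A result variable's type is used the same way; with this change a
	// failed inference here would be reported against the "result variable"
	// rather than a generic "variable in assignment".
	func intPicker() func(int, int) int {
		return pick // T = int
	}
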
// rawExpr typechecks expression e and initializes x with the expression
// value or type. If an error occurred, x.mode is set to invalid.
-// If a non-nil target type T is given and e is a generic function
-// or function call, T is used to infer the type arguments for e.
+// If a non-nil target T is given and e is a generic function,
+// T is used to infer the type arguments for e.
// If hint != nil, it is the type of a composite literal element.
// If allowGeneric is set, the operand type may be an uninstantiated
// parameterized type or function value.
-func (check *Checker) rawExpr(T Type, x *operand, e syntax.Expr, hint Type, allowGeneric bool) exprKind {
+func (check *Checker) rawExpr(T *target, x *operand, e syntax.Expr, hint Type, allowGeneric bool) exprKind {
if check.conf.Trace {
check.trace(e.Pos(), "-- expr %s", e)
check.indent++
@@ -989,9 +1003,9 @@ func (check *Checker) rawExpr(T Type, x *operand, e syntax.Expr, hint Type, allo
}
// If x is a generic type, or a generic function whose type arguments cannot be inferred
-// from a non-nil target type T, nonGeneric reports an error and invalidates x.mode and x.typ.
+// from a non-nil target T, nonGeneric reports an error and invalidates x.mode and x.typ.
// Otherwise it leaves x alone.
-func (check *Checker) nonGeneric(T Type, x *operand) {
+func (check *Checker) nonGeneric(T *target, x *operand) {
if x.mode == invalid || x.mode == novalue {
return
}
@@ -1004,10 +1018,8 @@ func (check *Checker) nonGeneric(T Type, x *operand) {
case *Signature:
if t.tparams != nil {
if enableReverseTypeInference && T != nil {
- if tsig, _ := under(T).(*Signature); tsig != nil {
- check.funcInst(tsig, x.Pos(), x, nil, true)
- return
- }
+ check.funcInst(T, x.Pos(), x, nil, true)
+ return
}
what = "function"
}
@@ -1022,7 +1034,7 @@ func (check *Checker) nonGeneric(T Type, x *operand) {
// exprInternal contains the core of type checking of expressions.
// Must only be called by rawExpr.
// (See rawExpr for an explanation of the parameters.)
-func (check *Checker) exprInternal(T Type, x *operand, e syntax.Expr, hint Type) exprKind {
+func (check *Checker) exprInternal(T *target, x *operand, e syntax.Expr, hint Type) exprKind {
// make sure x has a valid state in case of bailout
// (was go.dev/issue/5770)
x.mode = invalid
@@ -1081,6 +1093,10 @@ func (check *Checker) exprInternal(T Type, x *operand, e syntax.Expr, hint Type)
case *syntax.FuncLit:
if sig, ok := check.typ(e.Type).(*Signature); ok {
+ // Set the Scope's extent to the complete "func (...) {...}"
+ // so that Scope.Innermost works correctly.
+ sig.scope.pos = e.Pos()
+ sig.scope.end = syntax.EndPos(e)
if !check.conf.IgnoreFuncBodies && e.Body != nil {
// Anonymous functions are considered part of the
// init expression/func declaration which contains
@@ -1308,7 +1324,7 @@ func (check *Checker) exprInternal(T Type, x *operand, e syntax.Expr, hint Type)
check.use(e)
}
// if utyp is invalid, an error was reported before
- if utyp != Typ[Invalid] {
+ if isValid(utyp) {
check.errorf(e, InvalidLit, "invalid composite literal type %s", typ)
goto Error
}
@@ -1328,11 +1344,10 @@ func (check *Checker) exprInternal(T Type, x *operand, e syntax.Expr, hint Type)
case *syntax.IndexExpr:
if check.indexExpr(x, e) {
- var tsig *Signature
- if enableReverseTypeInference && T != nil {
- tsig, _ = under(T).(*Signature)
+ if !enableReverseTypeInference {
+ T = nil
}
- check.funcInst(tsig, e.Pos(), x, e, true)
+ check.funcInst(T, e.Pos(), x, e, true)
}
if x.mode == invalid {
goto Error
@@ -1363,7 +1378,7 @@ func (check *Checker) exprInternal(T Type, x *operand, e syntax.Expr, hint Type)
goto Error
}
T := check.varType(e.Type)
- if T == Typ[Invalid] {
+ if !isValid(T) {
goto Error
}
check.typeAssertion(e, x, T, false)
@@ -1543,11 +1558,11 @@ func (check *Checker) typeAssertion(e syntax.Expr, x *operand, T Type, typeSwitc
}
// expr typechecks expression e and initializes x with the expression value.
-// If a non-nil target type T is given and e is a generic function
-// or function call, T is used to infer the type arguments for e.
+// If a non-nil target T is given and e is a generic function or
+// a function call, T is used to infer the type arguments for e.
// The result must be a single value.
// If an error occurred, x.mode is set to invalid.
-func (check *Checker) expr(T Type, x *operand, e syntax.Expr) {
+func (check *Checker) expr(T *target, x *operand, e syntax.Expr) {
check.rawExpr(T, x, e, nil, false)
check.exclude(x, 1<<novalue|1<<builtin|1<<typexpr)
check.singleValue(x)
diff --git a/src/cmd/compile/internal/types2/gcsizes.go b/src/cmd/compile/internal/types2/gcsizes.go
new file mode 100644
index 0000000000..d204d9feef
--- /dev/null
+++ b/src/cmd/compile/internal/types2/gcsizes.go
@@ -0,0 +1,170 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+type gcSizes struct {
+ WordSize int64 // word size in bytes - must be >= 4 (32bits)
+ MaxAlign int64 // maximum alignment in bytes - must be >= 1
+}
+
+func (s *gcSizes) Alignof(T Type) (result int64) {
+ defer func() {
+ assert(result >= 1)
+ }()
+
+ // For arrays and structs, alignment is defined in terms
+ // of alignment of the elements and fields, respectively.
+ switch t := under(T).(type) {
+ case *Array:
+ // spec: "For a variable x of array type: unsafe.Alignof(x)
+ // is the same as unsafe.Alignof(x[0]), but at least 1."
+ return s.Alignof(t.elem)
+ case *Struct:
+ if len(t.fields) == 0 && IsSyncAtomicAlign64(T) {
+ // Special case: sync/atomic.align64 is an
+ // empty struct we recognize as a signal that
+ // the struct it contains must be
+ // 64-bit-aligned.
+ //
+ // This logic is equivalent to the logic in
+ // cmd/compile/internal/types/size.go:calcStructOffset
+ return 8
+ }
+
+ // spec: "For a variable x of struct type: unsafe.Alignof(x)
+ // is the largest of the values unsafe.Alignof(x.f) for each
+ // field f of x, but at least 1."
+ max := int64(1)
+ for _, f := range t.fields {
+ if a := s.Alignof(f.typ); a > max {
+ max = a
+ }
+ }
+ return max
+ case *Slice, *Interface:
+ // Multiword data structures are effectively structs
+ // in which each element has size WordSize.
+ // Type parameters lead to variable sizes/alignments;
+ // StdSizes.Alignof won't be called for them.
+ assert(!isTypeParam(T))
+ return s.WordSize
+ case *Basic:
+ // Strings are like slices and interfaces.
+ if t.Info()&IsString != 0 {
+ return s.WordSize
+ }
+ case *TypeParam, *Union:
+ unreachable()
+ }
+ a := s.Sizeof(T) // may be 0 or negative
+ // spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1."
+ if a < 1 {
+ return 1
+ }
+ // complex{64,128} are aligned like [2]float{32,64}.
+ if isComplex(T) {
+ a /= 2
+ }
+ if a > s.MaxAlign {
+ return s.MaxAlign
+ }
+ return a
+}
+
+func (s *gcSizes) Offsetsof(fields []*Var) []int64 {
+ offsets := make([]int64, len(fields))
+ var offs int64
+ for i, f := range fields {
+ if offs < 0 {
+ // all remaining offsets are too large
+ offsets[i] = -1
+ continue
+ }
+ // offs >= 0
+ a := s.Alignof(f.typ)
+ offs = align(offs, a) // possibly < 0 if align overflows
+ offsets[i] = offs
+ if d := s.Sizeof(f.typ); d >= 0 && offs >= 0 {
+ offs += d // ok to overflow to < 0
+ } else {
+ offs = -1 // f.typ or offs is too large
+ }
+ }
+ return offsets
+}
+
+func (s *gcSizes) Sizeof(T Type) int64 {
+ switch t := under(T).(type) {
+ case *Basic:
+ assert(isTyped(T))
+ k := t.kind
+ if int(k) < len(basicSizes) {
+ if s := basicSizes[k]; s > 0 {
+ return int64(s)
+ }
+ }
+ if k == String {
+ return s.WordSize * 2
+ }
+ case *Array:
+ n := t.len
+ if n <= 0 {
+ return 0
+ }
+ // n > 0
+ esize := s.Sizeof(t.elem)
+ if esize < 0 {
+ return -1 // element too large
+ }
+ if esize == 0 {
+ return 0 // 0-size element
+ }
+ // esize > 0
+ // Final size is esize * n; and size must be <= maxInt64.
+ const maxInt64 = 1<<63 - 1
+ if esize > maxInt64/n {
+ return -1 // esize * n overflows
+ }
+ return esize * n
+ case *Slice:
+ return s.WordSize * 3
+ case *Struct:
+ n := t.NumFields()
+ if n == 0 {
+ return 0
+ }
+ offsets := s.Offsetsof(t.fields)
+ offs := offsets[n-1]
+ size := s.Sizeof(t.fields[n-1].typ)
+ if offs < 0 || size < 0 {
+ return -1 // type too large
+ }
+ // gc: The last field of a non-zero-sized struct is not allowed to
+ // have size 0.
+ if offs > 0 && size == 0 {
+ size = 1
+ }
+ // gc: Size includes alignment padding.
+ return align(offs+size, s.Alignof(t)) // may overflow to < 0 which is ok
+ case *Interface:
+ // Type parameters lead to variable sizes/alignments;
+ // StdSizes.Sizeof won't be called for them.
+ assert(!isTypeParam(T))
+ return s.WordSize * 2
+ case *TypeParam, *Union:
+ unreachable()
+ }
+ return s.WordSize // catch-all
+}
+
+// gcSizesFor returns the Sizes used by gc for an architecture.
+// The result is a nil *gcSizes pointer (which is not a valid types.Sizes)
+// if a compiler/architecture pair is not known.
+func gcSizesFor(compiler, arch string) *gcSizes {
+ if compiler != "gc" {
+ return nil
+ }
+ return gcArchSizes[arch]
+}
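
A worked example of the rules encoded above: the trailing zero-size field of a non-zero-sized struct is padded to one byte, and the total size is rounded up to the struct's alignment. The second half assumes types.SizesFor("gc", "amd64") applies these gc rules, as it does once this file is wired in:

	package main

	import (
		"fmt"
		"go/token"
		"go/types"
		"unsafe"
	)

	type S struct {
		A int32
		B int64
		C struct{}
	}

	func main() {
		// By hand, following Offsetsof/Sizeof above on a 64-bit gc target:
		// A at offset 0 (size 4); B aligned to 8, so offsets 8..16; C is
		// zero-sized, but as the trailing field of a non-zero-sized struct
		// it is padded to size 1; align(16+1, 8) = 24.
		fmt.Println(unsafe.Sizeof(S{}), unsafe.Alignof(S{})) // 24 8

		// The same numbers through the exported sizes API (assumed to use
		// the gc rules for the "gc"/"amd64" pair, as after this change).
		sizes := types.SizesFor("gc", "amd64")
		st := types.NewStruct([]*types.Var{
			types.NewField(token.NoPos, nil, "A", types.Typ[types.Int32], false),
			types.NewField(token.NoPos, nil, "B", types.Typ[types.Int64], false),
			types.NewField(token.NoPos, nil, "C", types.NewStruct(nil, nil), false),
		}, nil)
		fmt.Println(sizes.Sizeof(st), sizes.Alignof(st)) // 24 8
	}
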
diff --git a/src/cmd/compile/internal/types2/index.go b/src/cmd/compile/internal/types2/index.go
index 3ebe851355..4db2213086 100644
--- a/src/cmd/compile/internal/types2/index.go
+++ b/src/cmd/compile/internal/types2/index.go
@@ -29,7 +29,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
x.mode = invalid
// TODO(gri) here we re-evaluate e.X - try to avoid this
x.typ = check.varType(e)
- if x.typ != Typ[Invalid] {
+ if isValid(x.typ) {
x.mode = typexpr
}
return false
@@ -428,7 +428,7 @@ func (check *Checker) indexedElts(elts []syntax.Expr, typ Type, length int64) in
validIndex := false
eval := e
if kv, _ := e.(*syntax.KeyValueExpr); kv != nil {
- if typ, i := check.index(kv.Key, length); typ != Typ[Invalid] {
+ if typ, i := check.index(kv.Key, length); isValid(typ) {
if i >= 0 {
index = i
validIndex = true
diff --git a/src/cmd/compile/internal/types2/infer.go b/src/cmd/compile/internal/types2/infer.go
index 5eb916c528..a520f70253 100644
--- a/src/cmd/compile/internal/types2/infer.go
+++ b/src/cmd/compile/internal/types2/infer.go
@@ -24,10 +24,16 @@ const enableReverseTypeInference = true // disable for debugging
// based on the given type parameters tparams, type arguments targs, function parameters params, and
// function arguments args, if any. There must be at least one type parameter, no more type arguments
// than type parameters, and params and args must match in number (incl. zero).
+// If reverse is set, the contents of some error messages are reversed to read better
+// for errors related to reverse type inference (where the function call is synthetic).
// If successful, infer returns the complete list of given and inferred type arguments, one for each
// type parameter. Otherwise the result is nil and appropriate errors will be reported.
-func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, params *Tuple, args []*operand) (inferred []Type) {
- if debug {
+func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, params *Tuple, args []*operand, reverse bool) (inferred []Type) {
+ // Don't verify result conditions if there's no error handler installed:
+ // in that case, an error leads to an exit panic and the result value may
+ // be incorrect. But in that case it doesn't matter because callers won't
+ // be able to use it either.
+ if check.conf.Error != nil {
defer func() {
assert(inferred == nil || len(inferred) == len(tparams) && !containsNil(inferred))
}()
@@ -52,6 +58,14 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type,
return targs
}
+ // If we have invalid (ordinary) arguments, an error was reported before.
+ // Avoid additional inference errors and exit early (go.dev/issue/60434).
+ for _, arg := range args {
+ if arg.mode == invalid {
+ return nil
+ }
+ }
+
// Make sure we have a "full" list of type arguments, some of which may
// be nil (unknown). Make a copy so as to not clobber the incoming slice.
if len(targs) < n {
@@ -98,7 +112,7 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type,
// Terminology: generic parameter = function parameter with a type-parameterized type
u := newUnifier(tparams, targs, check.allowVersion(check.pkg, pos, go1_21))
- errorf := func(kind string, tpar, targ Type, arg *operand) {
+ errorf := func(tpar, targ Type, arg *operand) {
// provide a better error message if we can
targs := u.inferred(tparams)
if targs[0] == nil {
@@ -113,7 +127,7 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type,
}
}
if allFailed {
- check.errorf(arg, CannotInferTypeArgs, "%s %s of %s does not match %s (cannot infer %s)", kind, targ, arg.expr, tpar, typeParamsString(tparams))
+ check.errorf(arg, CannotInferTypeArgs, "type %s of %s does not match %s (cannot infer %s)", targ, arg.expr, tpar, typeParamsString(tparams))
return
}
}
@@ -125,9 +139,13 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type,
// InvalidTypeArg). We can't differentiate these cases, so fall back on
// the more general CannotInferTypeArgs.
if inferred != tpar {
- check.errorf(arg, CannotInferTypeArgs, "%s %s of %s does not match inferred type %s for %s", kind, targ, arg.expr, inferred, tpar)
+ if reverse {
+ check.errorf(arg, CannotInferTypeArgs, "inferred type %s for %s does not match type %s of %s", inferred, tpar, targ, arg.expr)
+ } else {
+ check.errorf(arg, CannotInferTypeArgs, "type %s of %s does not match inferred type %s for %s", targ, arg.expr, inferred, tpar)
+ }
} else {
- check.errorf(arg, CannotInferTypeArgs, "%s %s of %s does not match %s", kind, targ, arg.expr, tpar)
+ check.errorf(arg, CannotInferTypeArgs, "type %s of %s does not match %s", targ, arg.expr, tpar)
}
}
@@ -156,7 +174,7 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type,
// Collect the indices of untyped arguments and handle them later.
if isTyped(arg.typ) {
if !u.unify(par.typ, arg.typ, assign) {
- errorf("type", par.typ, arg.typ, arg)
+ errorf(par.typ, arg.typ, arg)
return nil
}
} else if _, ok := par.typ.(*TypeParam); ok && !arg.isNil() {
@@ -538,6 +556,9 @@ func (w *tpWalker) isParameterized(typ Type) (res bool) {
case *Basic:
// nothing to do
+ case *Alias:
+ return w.isParameterized(Unalias(t))
+
case *Array:
return w.isParameterized(t.elem)
@@ -689,6 +710,9 @@ func (w *cycleFinder) typ(typ Type) {
case *Basic:
// nothing to do
+ case *Alias:
+ w.typ(Unalias(t))
+
case *Array:
w.typ(t.elem)
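For illustration, a minimal sketch (not part of this change) of the reverse type inference situation the new reverse flag targets: the type argument is inferred from the assignment target through a synthetic call, so a mismatch is now phrased as "inferred type ... does not match type ..." instead of the other way around.

	package p

	func f[T any](x T) T { return x }

	// Reverse type inference: T is inferred from the target function type of
	// the assignment rather than from call arguments. If the two disagree,
	// the checker uses the reversed error form added above.
	var g func(int) int = f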
diff --git a/src/cmd/compile/internal/types2/instantiate.go b/src/cmd/compile/internal/types2/instantiate.go
index 6024035a38..e33d4b41c2 100644
--- a/src/cmd/compile/internal/types2/instantiate.go
+++ b/src/cmd/compile/internal/types2/instantiate.go
@@ -122,7 +122,8 @@ func (check *Checker) instance(pos syntax.Pos, orig Type, targs []Type, expandin
assert(expanding == nil) // function instances cannot be reached from Named types
tparams := orig.TypeParams()
- if !check.validateTArgLen(pos, tparams.Len(), len(targs)) {
+ // TODO(gri) investigate if this is needed (type argument and parameter count seem to be correct here)
+ if !check.validateTArgLen(pos, orig.String(), tparams.Len(), len(targs)) {
return Typ[Invalid]
}
if tparams.Len() == 0 {
@@ -150,19 +151,27 @@ func (check *Checker) instance(pos syntax.Pos, orig Type, targs []Type, expandin
return updateContexts(res)
}
-// validateTArgLen verifies that the length of targs and tparams matches,
-// reporting an error if not. If validation fails and check is nil,
-// validateTArgLen panics.
-func (check *Checker) validateTArgLen(pos syntax.Pos, ntparams, ntargs int) bool {
- if ntargs != ntparams {
- // TODO(gri) provide better error message
- if check != nil {
- check.errorf(pos, WrongTypeArgCount, "got %d arguments but %d type parameters", ntargs, ntparams)
- return false
- }
- panic(fmt.Sprintf("%v: got %d arguments but %d type parameters", pos, ntargs, ntparams))
+// validateTArgLen checks that the number of type arguments (got) matches the
+// number of type parameters (want); if they don't match an error is reported.
+// If validation fails and check is nil, validateTArgLen panics.
+func (check *Checker) validateTArgLen(pos syntax.Pos, name string, want, got int) bool {
+ var qual string
+ switch {
+ case got < want:
+ qual = "not enough"
+ case got > want:
+ qual = "too many"
+ default:
+ return true
}
- return true
+
+ msg := check.sprintf("%s type arguments for type %s: have %d, want %d", qual, name, got, want)
+ if check != nil {
+ check.error(atPos(pos), WrongTypeArgCount, msg)
+ return false
+ }
+
+ panic(fmt.Sprintf("%v: %s", pos, msg))
}
func (check *Checker) verify(pos syntax.Pos, tparams []*TypeParam, targs []Type, ctxt *Context) (int, error) {
@@ -192,10 +201,10 @@ func (check *Checker) verify(pos syntax.Pos, tparams []*TypeParam, targs []Type,
func (check *Checker) implements(pos syntax.Pos, V, T Type, constraint bool, cause *string) bool {
Vu := under(V)
Tu := under(T)
- if Vu == Typ[Invalid] || Tu == Typ[Invalid] {
+ if !isValid(Vu) || !isValid(Tu) {
return true // avoid follow-on errors
}
- if p, _ := Vu.(*Pointer); p != nil && under(p.base) == Typ[Invalid] {
+ if p, _ := Vu.(*Pointer); p != nil && !isValid(under(p.base)) {
return true // avoid follow-on errors (see go.dev/issue/49541 for an example)
}
diff --git a/src/cmd/compile/internal/types2/interface.go b/src/cmd/compile/internal/types2/interface.go
index 872a3217c2..4072098e05 100644
--- a/src/cmd/compile/internal/types2/interface.go
+++ b/src/cmd/compile/internal/types2/interface.go
@@ -112,11 +112,12 @@ func (t *Interface) String() string { return TypeString(t, nil) }
// Implementation
func (t *Interface) cleanup() {
+ t.typeSet() // any interface that escapes type checking must be safe for concurrent use
t.check = nil
t.embedPos = nil
}
-func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType, def *Named) {
+func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType, def *TypeName) {
addEmbedded := func(pos syntax.Pos, typ Type) {
ityp.embeddeds = append(ityp.embeddeds, typ)
if ityp.embedPos == nil {
@@ -142,7 +143,7 @@ func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType
typ := check.typ(f.Type)
sig, _ := typ.(*Signature)
if sig == nil {
- if typ != Typ[Invalid] {
+ if isValid(typ) {
check.errorf(f.Type, InvalidSyntaxTree, "%s is not a method signature", typ)
}
continue // ignore
@@ -151,7 +152,9 @@ func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType
// use named receiver type if available (for better error messages)
var recvTyp Type = ityp
if def != nil {
- recvTyp = def
+ if named := asNamed(def.typ); named != nil {
+ recvTyp = named
+ }
}
sig.recv = NewVar(f.Name.Pos(), check.pkg, "", recvTyp)
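The typeSet computation added to cleanup ensures that every interface escaping type checking is safe for concurrent reads. The exported API has the matching contract: hand-built interfaces must be completed before they are shared. A hedged sketch using go/types, which mirrors the types2 API (the method name M below is illustrative):

	package main

	import (
		"fmt"
		"go/token"
		"go/types"
		"sync"
	)

	func main() {
		// Build interface{ M() } by hand.
		sig := types.NewSignatureType(nil, nil, nil, nil, nil, false)
		m := types.NewFunc(token.NoPos, nil, "M", sig)
		iface := types.NewInterfaceType([]*types.Func{m}, nil)
		iface.Complete() // compute the type set once, before any concurrent use

		var wg sync.WaitGroup
		for i := 0; i < 4; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				_ = iface.NumMethods() // safe: the interface is complete
			}()
		}
		wg.Wait()
		fmt.Println(iface)
	}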
diff --git a/src/cmd/compile/internal/types2/issues_test.go b/src/cmd/compile/internal/types2/issues_test.go
index 3ac345729b..0117571f7b 100644
--- a/src/cmd/compile/internal/types2/issues_test.go
+++ b/src/cmd/compile/internal/types2/issues_test.go
@@ -921,6 +921,22 @@ func _() {
conf.Check(f.PkgName.Value, []*syntax.File{f}, nil) // must not panic
}
+func TestIssue61938(t *testing.T) {
+ const src = `
+package p
+
+func f[T any]() {}
+func _() { f() }
+`
+ // no error handler provided (this issue)
+ var conf Config
+ typecheck(src, &conf, nil) // must not panic
+
+ // with error handler (sanity check)
+ conf.Error = func(error) {}
+ typecheck(src, &conf, nil) // must not panic
+}
+
func TestIssue63260(t *testing.T) {
const src = `
package p
@@ -964,3 +980,116 @@ func f[I *T, T any]() {
t.Fatalf("types of v and T are not pointer-identical: %p != %p", v.Type().(*TypeParam), T)
}
}
+
+func TestIssue44410(t *testing.T) {
+ const src = `
+package p
+
+type A = []int
+type S struct{ A }
+`
+
+ t.Setenv("GODEBUG", "gotypesalias=1")
+ pkg := mustTypecheck(src, nil, nil)
+
+ S := pkg.Scope().Lookup("S")
+ if S == nil {
+ t.Fatal("object S not found")
+ }
+
+ got := S.String()
+ const want = "type p.S struct{p.A}"
+ if got != want {
+ t.Fatalf("got %q; want %q", got, want)
+ }
+}
+
+func TestIssue59831(t *testing.T) {
+ // Package a exports a type S with an unexported method m;
+ // the tests check the error messages when m is not found.
+ const asrc = `package a; type S struct{}; func (S) m() {}`
+ apkg := mustTypecheck(asrc, nil, nil)
+
+	// Package b exports a type S with an exported method M;
+	// the tests check the error messages when m is not found.
+ const bsrc = `package b; type S struct{}; func (S) M() {}`
+ bpkg := mustTypecheck(bsrc, nil, nil)
+
+ tests := []struct {
+ imported *Package
+ src, err string
+ }{
+ // tests importing a (or nothing)
+ {apkg, `package a1; import "a"; var _ interface { M() } = a.S{}`,
+ "a.S does not implement interface{M()} (missing method M) have m() want M()"},
+
+ {apkg, `package a2; import "a"; var _ interface { m() } = a.S{}`,
+ "a.S does not implement interface{m()} (unexported method m)"}, // test for issue
+
+ {nil, `package a3; type S struct{}; func (S) m(); var _ interface { M() } = S{}`,
+ "S does not implement interface{M()} (missing method M) have m() want M()"},
+
+ {nil, `package a4; type S struct{}; func (S) m(); var _ interface { m() } = S{}`,
+ ""}, // no error expected
+
+ {nil, `package a5; type S struct{}; func (S) m(); var _ interface { n() } = S{}`,
+ "S does not implement interface{n()} (missing method n)"},
+
+ // tests importing b (or nothing)
+ {bpkg, `package b1; import "b"; var _ interface { m() } = b.S{}`,
+ "b.S does not implement interface{m()} (missing method m) have M() want m()"},
+
+ {bpkg, `package b2; import "b"; var _ interface { M() } = b.S{}`,
+ ""}, // no error expected
+
+ {nil, `package b3; type S struct{}; func (S) M(); var _ interface { M() } = S{}`,
+ ""}, // no error expected
+
+ {nil, `package b4; type S struct{}; func (S) M(); var _ interface { m() } = S{}`,
+ "S does not implement interface{m()} (missing method m) have M() want m()"},
+
+ {nil, `package b5; type S struct{}; func (S) M(); var _ interface { n() } = S{}`,
+ "S does not implement interface{n()} (missing method n)"},
+ }
+
+ for _, test := range tests {
+ // typecheck test source
+ conf := Config{Importer: importHelper{pkg: test.imported}}
+ pkg, err := typecheck(test.src, &conf, nil)
+ if err == nil {
+ if test.err != "" {
+ t.Errorf("package %s: got no error, want %q", pkg.Name(), test.err)
+ }
+ continue
+ }
+ if test.err == "" {
+ t.Errorf("package %s: got %q, want not error", pkg.Name(), err.Error())
+ }
+
+ // flatten reported error message
+ errmsg := strings.ReplaceAll(err.Error(), "\n", " ")
+ errmsg = strings.ReplaceAll(errmsg, "\t", "")
+
+ // verify error message
+ if !strings.Contains(errmsg, test.err) {
+ t.Errorf("package %s: got %q, want %q", pkg.Name(), errmsg, test.err)
+ }
+ }
+}
+
+func TestIssue64759(t *testing.T) {
+ const src = `
+//go:build go1.18
+package p
+
+func f[S ~[]E, E any](S) {}
+
+func _() {
+ f([]string{})
+}
+`
+ // Per the go:build directive, the source must typecheck
+ // even though the (module) Go version is set to go1.17.
+ conf := Config{GoVersion: "go1.17"}
+ mustTypecheck(src, &conf, nil)
+}
diff --git a/src/cmd/compile/internal/types2/lookup.go b/src/cmd/compile/internal/types2/lookup.go
index b7370ca38d..bc47c15060 100644
--- a/src/cmd/compile/internal/types2/lookup.go
+++ b/src/cmd/compile/internal/types2/lookup.go
@@ -54,7 +54,7 @@ func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o
// Thus, if we have a named pointer type, proceed with the underlying
// pointer type but discard the result if it is a method since we would
// not have found it for T (see also go.dev/issue/8590).
- if t, _ := T.(*Named); t != nil {
+ if t := asNamed(T); t != nil {
if p, _ := t.Underlying().(*Pointer); p != nil {
obj, index, indirect = lookupFieldOrMethodImpl(p, false, pkg, name, false)
if _, ok := obj.(*Func); ok {
@@ -96,7 +96,7 @@ func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o
// and missingMethod (the latter doesn't care about struct fields).
//
// If foldCase is true, method names are considered equal if they are equal
-// with case folding.
+// with case folding, irrespective of which package they are in.
//
// The resulting object may not be fully type-checked.
func lookupFieldOrMethodImpl(T Type, addressable bool, pkg *Package, name string, foldCase bool) (obj Object, index []int, indirect bool) {
@@ -138,7 +138,7 @@ func lookupFieldOrMethodImpl(T Type, addressable bool, pkg *Package, name string
// If we have a named type, we may have associated methods.
// Look for those first.
- if named, _ := typ.(*Named); named != nil {
+ if named := asNamed(typ); named != nil {
if alt := seen.lookup(named); alt != nil {
// We have seen this type before, at a more shallow depth
// (note that multiples of this type at the current depth
@@ -343,6 +343,7 @@ func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y
ok = iota
notFound
wrongName
+ unexported
wrongSig
ambigSel
ptrRecv
@@ -388,6 +389,11 @@ func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y
f, _ = obj.(*Func)
if f != nil {
state = wrongName
+ if f.name == m.name {
+ // If the names are equal, f must be unexported
+ // (otherwise the package wouldn't matter).
+ state = unexported
+ }
}
}
break
@@ -436,8 +442,9 @@ func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y
}
case wrongName:
fs, ms := check.funcString(f, false), check.funcString(m, false)
- *cause = check.sprintf("(missing method %s)\n\t\thave %s\n\t\twant %s",
- m.Name(), fs, ms)
+ *cause = check.sprintf("(missing method %s)\n\t\thave %s\n\t\twant %s", m.Name(), fs, ms)
+ case unexported:
+ *cause = check.sprintf("(unexported method %s)", m.Name())
case wrongSig:
fs, ms := check.funcString(f, false), check.funcString(m, false)
if fs == ms {
@@ -445,8 +452,18 @@ func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y
// Add package information to disambiguate (go.dev/issue/54258).
fs, ms = check.funcString(f, true), check.funcString(m, true)
}
- *cause = check.sprintf("(wrong type for method %s)\n\t\thave %s\n\t\twant %s",
- m.Name(), fs, ms)
+ if fs == ms {
+ // We still have "want Foo, have Foo".
+ // This is most likely due to different type parameters with
+ // the same name appearing in the instantiated signatures
+ // (go.dev/issue/61685).
+ // Rather than reporting this misleading error cause, for now
+ // just point out that the method signature is incorrect.
+ // TODO(gri) should find a good way to report the root cause
+ *cause = check.sprintf("(wrong type for method %s)", m.Name())
+ break
+ }
+ *cause = check.sprintf("(wrong type for method %s)\n\t\thave %s\n\t\twant %s", m.Name(), fs, ms)
case ambigSel:
*cause = check.sprintf("(ambiguous selector %s.%s)", V, m.Name())
case ptrRecv:
@@ -527,7 +544,7 @@ func (check *Checker) newAssertableTo(pos syntax.Pos, V, T Type, cause *string)
// with an underlying pointer type!) and returns its base and true.
// Otherwise it returns (typ, false).
func deref(typ Type) (Type, bool) {
- if p, _ := typ.(*Pointer); p != nil {
+ if p, _ := Unalias(typ).(*Pointer); p != nil {
// p.base should never be nil, but be conservative
if p.base == nil {
if debug {
@@ -572,11 +589,12 @@ func fieldIndex(fields []*Var, pkg *Package, name string) int {
}
// lookupMethod returns the index of and method with matching package and name, or (-1, nil).
-// If foldCase is true, method names are considered equal if they are equal with case folding.
+// If foldCase is true, method names are considered equal if they are equal with case folding
+// and their packages are ignored (e.g., pkg1.m, pkg1.M, pkg2.m, and pkg2.M are all equal).
func lookupMethod(methods []*Func, pkg *Package, name string, foldCase bool) (int, *Func) {
if name != "_" {
for i, m := range methods {
- if (m.name == name || foldCase && strings.EqualFold(m.name, name)) && m.sameId(pkg, m.name) {
+ if m.sameId(pkg, name) || foldCase && strings.EqualFold(m.name, name) {
return i, m
}
}
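A sketch (illustrative import path, not from this change) of the source shape that reaches the new unexported state and produces the "(unexported method m)" cause exercised by TestIssue59831 above:

	// package a: defines S with an unexported method m
	package a

	type S struct{}

	func (S) m() {}

	// package main, in a different package (import path is illustrative)
	package main

	import "example.com/a"

	// a.S has a method named m, but it is unexported, so instead of the
	// misleading "missing method m / have m() want m()" pair the checker
	// now reports: (unexported method m).
	var _ interface{ m() } = a.S{}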
diff --git a/src/cmd/compile/internal/types2/mono.go b/src/cmd/compile/internal/types2/mono.go
index 5b68f2aaa4..dae9230252 100644
--- a/src/cmd/compile/internal/types2/mono.go
+++ b/src/cmd/compile/internal/types2/mono.go
@@ -208,7 +208,7 @@ func (w *monoGraph) assign(pkg *Package, pos syntax.Pos, tpar *TypeParam, targ T
// type parameters.
var do func(typ Type)
do = func(typ Type) {
- switch typ := typ.(type) {
+ switch typ := Unalias(typ).(type) {
default:
panic("unexpected type")
diff --git a/src/cmd/compile/internal/types2/named.go b/src/cmd/compile/internal/types2/named.go
index 5408c7e77f..893247de35 100644
--- a/src/cmd/compile/internal/types2/named.go
+++ b/src/cmd/compile/internal/types2/named.go
@@ -141,7 +141,7 @@ const (
// If the given type name obj doesn't have a type yet, its type is set to the returned named type.
// The underlying type must not be a *Named.
func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
- if _, ok := underlying.(*Named); ok {
+ if asNamed(underlying) != nil {
panic("underlying type must not be *Named")
}
return (*Checker)(nil).newNamed(obj, underlying, methods)
@@ -224,7 +224,7 @@ func (n *Named) setState(state namedState) {
atomic.StoreUint32(&n.state_, uint32(state))
}
-// newNamed is like NewNamed but with a *Checker receiver and additional orig argument.
+// newNamed is like NewNamed but with a *Checker receiver.
func (check *Checker) newNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
typ := &Named{check: check, obj: obj, fromRHS: underlying, underlying: underlying, methods: methods}
if obj.typ == nil {
@@ -434,7 +434,7 @@ func (t *Named) SetUnderlying(underlying Type) {
if underlying == nil {
panic("underlying type must not be nil")
}
- if _, ok := underlying.(*Named); ok {
+ if asNamed(underlying) != nil {
panic("underlying type must not be *Named")
}
t.resolve().underlying = underlying
@@ -453,7 +453,8 @@ func (t *Named) AddMethod(m *Func) {
}
}
-func (t *Named) Underlying() Type { return t.resolve().underlying }
+// TODO(gri) Investigate if Unalias can be moved to where underlying is set.
+func (t *Named) Underlying() Type { return Unalias(t.resolve().underlying) }
func (t *Named) String() string { return TypeString(t, nil) }
// ----------------------------------------------------------------------------
@@ -550,12 +551,6 @@ loop:
return u
}
-func (n *Named) setUnderlying(typ Type) {
- if n != nil {
- n.underlying = typ
- }
-}
-
func (n *Named) lookupMethod(pkg *Package, name string, foldCase bool) (int, *Func) {
n.resolve()
// If n is an instance, we may not have yet instantiated all of its methods.
@@ -598,7 +593,7 @@ func (n *Named) expandUnderlying() Type {
orig := n.inst.orig
targs := n.inst.targs
- if _, unexpanded := orig.underlying.(*Named); unexpanded {
+ if asNamed(orig.underlying) != nil {
// We should only get a Named underlying type here during type checking
// (for example, in recursive type declarations).
assert(check != nil)
@@ -633,11 +628,18 @@ func (n *Named) expandUnderlying() Type {
old := iface
iface = check.newInterface()
iface.embeddeds = old.embeddeds
+ assert(old.complete) // otherwise we are copying incomplete data
iface.complete = old.complete
iface.implicit = old.implicit // should be false but be conservative
underlying = iface
}
iface.methods = methods
+ iface.tset = nil // recompute type set with new methods
+
+ // If check != nil, check.newInterface will have saved the interface for later completion.
+ if check == nil { // golang/go#61561: all newly created interfaces must be fully evaluated
+ iface.typeSet()
+ }
}
}
@@ -649,7 +651,7 @@ func (n *Named) expandUnderlying() Type {
//
// TODO(rfindley): eliminate this function or give it a better name.
func safeUnderlying(typ Type) Type {
- if t, _ := typ.(*Named); t != nil {
+ if t := asNamed(typ); t != nil {
return t.underlying
}
return typ.Underlying()
diff --git a/src/cmd/compile/internal/types2/object.go b/src/cmd/compile/internal/types2/object.go
index 5c0ea8ca16..251587224b 100644
--- a/src/cmd/compile/internal/types2/object.go
+++ b/src/cmd/compile/internal/types2/object.go
@@ -285,6 +285,8 @@ func (obj *TypeName) IsAlias() bool {
switch t := obj.typ.(type) {
case nil:
return false
+ // case *Alias:
+ // handled by default case
case *Basic:
// unsafe.Pointer is not an alias.
if obj.pkg == Unsafe {
@@ -406,6 +408,12 @@ func (obj *Func) Origin() *Func {
return obj
}
+// Pkg returns the package to which the function belongs.
+//
+// The result is nil for methods of types in the Universe scope,
+// like method Error of the error built-in interface type.
+func (obj *Func) Pkg() *Package { return obj.object.Pkg() }
+
// hasPtrRecv reports whether the receiver is of the form *T for the given method obj.
func (obj *Func) hasPtrRecv() bool {
// If a method's receiver type is set, use that as the source of truth for the receiver.
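The documented nil-package case for Func.Pkg can be observed through the mirrored go/types API; a small self-contained sketch:

	package main

	import (
		"fmt"
		"go/types"
	)

	func main() {
		errType := types.Universe.Lookup("error").Type()
		iface := errType.Underlying().(*types.Interface)
		errMethod := iface.Method(0) // Error() string
		// Error belongs to the predeclared error interface, so its package is nil.
		fmt.Println(errMethod.Name(), errMethod.Pkg() == nil) // Error true
	}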
diff --git a/src/cmd/compile/internal/types2/operand.go b/src/cmd/compile/internal/types2/operand.go
index 0469b000bb..3f151007e5 100644
--- a/src/cmd/compile/internal/types2/operand.go
+++ b/src/cmd/compile/internal/types2/operand.go
@@ -172,7 +172,7 @@ func operandString(x *operand, qf Qualifier) string {
// <typ>
if hasType {
- if x.typ != Typ[Invalid] {
+ if isValid(x.typ) {
var intro string
if isGeneric(x.typ) {
intro = " of generic type "
@@ -245,7 +245,7 @@ func (x *operand) isNil() bool { return x.mode == nilvalue }
// if assignableTo is invoked through an exported API call, i.e., when all
// methods have been type-checked.
func (x *operand) assignableTo(check *Checker, T Type, cause *string) (bool, Code) {
- if x.mode == invalid || T == Typ[Invalid] {
+ if x.mode == invalid || !isValid(T) {
return true, 0 // avoid spurious errors
}
diff --git a/src/cmd/compile/internal/types2/predicates.go b/src/cmd/compile/internal/types2/predicates.go
index 13a3bf8af5..7a096e3d97 100644
--- a/src/cmd/compile/internal/types2/predicates.go
+++ b/src/cmd/compile/internal/types2/predicates.go
@@ -6,6 +6,9 @@
package types2
+// isValid reports whether t is a valid type.
+func isValid(t Type) bool { return Unalias(t) != Typ[Invalid] }
+
// The isX predicates below report whether t is an X.
// If t is a type parameter the result is false; i.e.,
// these predicates don't look inside a type parameter.
@@ -47,7 +50,7 @@ func allNumericOrString(t Type) bool { return allBasic(t, IsNumeric|IsString) }
// for all specific types of the type parameter's type set.
// allBasic(t, info) is an optimized version of isBasic(coreType(t), info).
func allBasic(t Type, info BasicInfo) bool {
- if tpar, _ := t.(*TypeParam); tpar != nil {
+ if tpar, _ := Unalias(t).(*TypeParam); tpar != nil {
return tpar.is(func(t *term) bool { return t != nil && isBasic(t.typ, info) })
}
return isBasic(t, info)
@@ -57,7 +60,7 @@ func allBasic(t Type, info BasicInfo) bool {
// predeclared types, defined types, and type parameters.
// hasName may be called with types that are not fully set up.
func hasName(t Type) bool {
- switch t.(type) {
+ switch Unalias(t).(type) {
case *Basic, *Named, *TypeParam:
return true
}
@@ -68,7 +71,7 @@ func hasName(t Type) bool {
// This includes all non-defined types, but also basic types.
// isTypeLit may be called with types that are not fully set up.
func isTypeLit(t Type) bool {
- switch t.(type) {
+ switch Unalias(t).(type) {
case *Named, *TypeParam:
return false
}
@@ -79,8 +82,10 @@ func isTypeLit(t Type) bool {
// constant or boolean. isTyped may be called with types that
// are not fully set up.
func isTyped(t Type) bool {
- // isTyped is called with types that are not fully
- // set up. Must not call under()!
+ // Alias or Named types cannot denote untyped types,
+ // thus we don't need to call Unalias or under
+ // (which would be unsafe to do for types that are
+ // not fully set up).
b, _ := t.(*Basic)
return b == nil || b.info&IsUntyped == 0
}
@@ -103,7 +108,7 @@ func isNonTypeParamInterface(t Type) bool {
// isTypeParam reports whether t is a type parameter.
func isTypeParam(t Type) bool {
- _, ok := t.(*TypeParam)
+ _, ok := Unalias(t).(*TypeParam)
return ok
}
@@ -112,7 +117,7 @@ func isTypeParam(t Type) bool {
// use anywhere, but it may report a false negative if the type set has not been
// computed yet.
func hasEmptyTypeset(t Type) bool {
- if tpar, _ := t.(*TypeParam); tpar != nil && tpar.bound != nil {
+ if tpar, _ := Unalias(t).(*TypeParam); tpar != nil && tpar.bound != nil {
iface, _ := safeUnderlying(tpar.bound).(*Interface)
return iface != nil && iface.tset != nil && iface.tset.IsEmpty()
}
@@ -124,7 +129,7 @@ func hasEmptyTypeset(t Type) bool {
// TODO(gri) should we include signatures or assert that they are not present?
func isGeneric(t Type) bool {
// A parameterized type is only generic if it doesn't have an instantiation already.
- named, _ := t.(*Named)
+ named := asNamed(t)
return named != nil && named.obj != nil && named.inst == nil && named.TypeParams().Len() > 0
}
@@ -218,11 +223,14 @@ type comparer struct {
// For changes to this code the corresponding changes should be made to unifier.nify.
func (c *comparer) identical(x, y Type, p *ifacePair) bool {
+ x = Unalias(x)
+ y = Unalias(y)
+
if x == y {
return true
}
- if c.ignoreInvalids && (x == Typ[Invalid] || y == Typ[Invalid]) {
+ if c.ignoreInvalids && (!isValid(x) || !isValid(y)) {
return true
}
@@ -435,7 +443,7 @@ func (c *comparer) identical(x, y Type, p *ifacePair) bool {
// Two named types are identical if their type names originate
// in the same type declaration; if they are instantiated they
// must have identical type argument lists.
- if y, ok := y.(*Named); ok {
+ if y := asNamed(y); y != nil {
// check type arguments before origins to match unifier
// (for correct source code we need to do all checks so
// order doesn't matter)
@@ -449,7 +457,7 @@ func (c *comparer) identical(x, y Type, p *ifacePair) bool {
return false
}
}
- return indenticalOrigin(x, y)
+ return identicalOrigin(x, y)
}
case *TypeParam:
@@ -466,7 +474,7 @@ func (c *comparer) identical(x, y Type, p *ifacePair) bool {
}
// identicalOrigin reports whether x and y originated in the same declaration.
-func indenticalOrigin(x, y *Named) bool {
+func identicalOrigin(x, y *Named) bool {
// TODO(gri) is this correct?
return x.Origin().obj == y.Origin().obj
}
@@ -492,7 +500,7 @@ func identicalInstance(xorig Type, xargs []Type, yorig Type, yargs []Type) bool
// it returns the incoming type for all other types. The default type
// for untyped nil is untyped nil.
func Default(t Type) Type {
- if t, ok := t.(*Basic); ok {
+ if t, ok := Unalias(t).(*Basic); ok {
switch t.kind {
case UntypedBool:
return Typ[Bool]
@@ -530,3 +538,9 @@ func maxType(x, y Type) Type {
}
return nil
}
+
+// clone makes a "flat copy" of *p and returns a pointer to the copy.
+func clone[P *T, T any](p P) P {
+ c := *p
+ return &c
+}
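Alongside isValid, the new generic clone helper makes "flat" copies. A standalone sketch of its behavior (the sig type below is illustrative): fields of the copy can change independently, but slices and pointers still alias the original, which is exactly what a flat copy means.

	package main

	import "fmt"

	// clone makes a "flat copy" of *p and returns a pointer to the copy,
	// matching the helper added above.
	func clone[P *T, T any](p P) P {
		c := *p
		return &c
	}

	type sig struct {
		recv   string
		params []string
	}

	func main() {
		orig := &sig{recv: "r", params: []string{"x int"}}
		cp := clone(orig)
		cp.recv = "s"             // the copy's fields change independently...
		cp.params[0] = "y string" // ...but slice contents are shared (flat copy)
		fmt.Println(orig.recv, orig.params) // r [y string]
		fmt.Println(cp.recv, cp.params)     // s [y string]
	}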
diff --git a/src/cmd/compile/internal/types2/resolver.go b/src/cmd/compile/internal/types2/resolver.go
index d051fb50e1..0cf7c9142e 100644
--- a/src/cmd/compile/internal/types2/resolver.go
+++ b/src/cmd/compile/internal/types2/resolver.go
@@ -351,7 +351,7 @@ func (check *Checker) collectObjects() {
}
// declare all constants
- values := unpackExpr(last.Values)
+ values := syntax.UnpackListExpr(last.Values)
for i, name := range s.NameList {
obj := NewConst(name.Pos(), pkg, name.Value, nil, iota)
@@ -382,7 +382,7 @@ func (check *Checker) collectObjects() {
}
// declare all variables
- values := unpackExpr(s.Values)
+ values := syntax.UnpackListExpr(s.Values)
for i, name := range s.NameList {
obj := NewVar(name.Pos(), pkg, name.Value, nil)
lhs[i] = obj
@@ -538,7 +538,7 @@ L: // unpack receiver type
if ptyp, _ := rtyp.(*syntax.IndexExpr); ptyp != nil {
rtyp = ptyp.X
if unpackParams {
- for _, arg := range unpackExpr(ptyp.Index) {
+ for _, arg := range syntax.UnpackListExpr(ptyp.Index) {
var par *syntax.Name
switch arg := arg.(type) {
case *syntax.Name:
@@ -588,7 +588,7 @@ func (check *Checker) resolveBaseTypeName(seenPtr bool, typ syntax.Expr, fileSco
return false, nil
}
ptr = true
- typ = unparen(pexpr.X) // continue with pointer base type
+ typ = syntax.Unparen(pexpr.X) // continue with pointer base type
}
// typ must be a name, or a C.name cgo selector.
@@ -677,32 +677,39 @@ func (check *Checker) packageObjects() {
}
}
- // We process non-alias type declarations first, followed by alias declarations,
- // and then everything else. This appears to avoid most situations where the type
- // of an alias is needed before it is available.
- // There may still be cases where this is not good enough (see also go.dev/issue/25838).
- // In those cases Checker.ident will report an error ("invalid use of type alias").
- var aliasList []*TypeName
- var othersList []Object // everything that's not a type
- // phase 1: non-alias type declarations
- for _, obj := range objList {
- if tname, _ := obj.(*TypeName); tname != nil {
- if check.objMap[tname].tdecl.Alias {
- aliasList = append(aliasList, tname)
+ if check.enableAlias {
+ // With Alias nodes we can process declarations in any order.
+ for _, obj := range objList {
+ check.objDecl(obj, nil)
+ }
+ } else {
+ // Without Alias nodes, we process non-alias type declarations first, followed by
+ // alias declarations, and then everything else. This appears to avoid most situations
+ // where the type of an alias is needed before it is available.
+ // There may still be cases where this is not good enough (see also go.dev/issue/25838).
+ // In those cases Checker.ident will report an error ("invalid use of type alias").
+ var aliasList []*TypeName
+ var othersList []Object // everything that's not a type
+ // phase 1: non-alias type declarations
+ for _, obj := range objList {
+ if tname, _ := obj.(*TypeName); tname != nil {
+ if check.objMap[tname].tdecl.Alias {
+ aliasList = append(aliasList, tname)
+ } else {
+ check.objDecl(obj, nil)
+ }
} else {
- check.objDecl(obj, nil)
+ othersList = append(othersList, obj)
}
- } else {
- othersList = append(othersList, obj)
}
- }
- // phase 2: alias type declarations
- for _, obj := range aliasList {
- check.objDecl(obj, nil)
- }
- // phase 3: all other declarations
- for _, obj := range othersList {
- check.objDecl(obj, nil)
+ // phase 2: alias type declarations
+ for _, obj := range aliasList {
+ check.objDecl(obj, nil)
+ }
+ // phase 3: all other declarations
+ for _, obj := range othersList {
+ check.objDecl(obj, nil)
+ }
}
// At this point we may have a non-empty check.methods map; this means that not all
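A sketch (ordinary valid Go, not from this change) of the forward reference the phased ordering works around when Alias nodes are disabled; with Alias nodes, declaration order no longer matters.

	package p

	// The alias A refers to B, which is declared later in the package.
	// Processing non-alias types first (phase 1) and aliases second (phase 2)
	// ensures B's type exists by the time A is resolved.
	type A = B

	type B struct{ x int }

	var _ = A{x: 1}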
diff --git a/src/cmd/compile/internal/types2/return.go b/src/cmd/compile/internal/types2/return.go
index ab611ef9b2..01988b012e 100644
--- a/src/cmd/compile/internal/types2/return.go
+++ b/src/cmd/compile/internal/types2/return.go
@@ -27,7 +27,7 @@ func (check *Checker) isTerminating(s syntax.Stmt, label string) bool {
case *syntax.ExprStmt:
// calling the predeclared (possibly parenthesized) panic() function is terminating
- if call, ok := unparen(s.X).(*syntax.CallExpr); ok && check.isPanic[call] {
+ if call, ok := syntax.Unparen(s.X).(*syntax.CallExpr); ok && check.isPanic[call] {
return true
}
diff --git a/src/cmd/compile/internal/types2/selection.go b/src/cmd/compile/internal/types2/selection.go
index c820a29fad..dfbf3a0191 100644
--- a/src/cmd/compile/internal/types2/selection.go
+++ b/src/cmd/compile/internal/types2/selection.go
@@ -13,6 +13,39 @@ import (
// SelectionKind describes the kind of a selector expression x.f
// (excluding qualified identifiers).
+//
+// If x is a struct or *struct, a selector expression x.f may denote a
+// sequence of selection operations x.a.b.c.f. The SelectionKind
+// describes the kind of the final (explicit) operation; all the
+// previous (implicit) operations are always field selections.
+// Each element of Indices specifies an implicit field (a, b, c)
+// by its index in the struct type of the field selection operand.
+//
+// For a FieldVal operation, the final selection refers to the field
+// specified by Selection.Obj.
+//
+// For a MethodVal operation, the final selection refers to a method.
+// If the "pointerness" of the method's declared receiver does not
+// match that of the effective receiver after implicit field
+// selection, then an & or * operation is implicitly applied to the
+// receiver variable or value.
+// So, x.f denotes (&x.a.b.c).f when f requires a pointer receiver but
+// x.a.b.c is a non-pointer variable; and it denotes (*x.a.b.c).f when
+// f requires a non-pointer receiver but x.a.b.c is a pointer value.
+//
+// All pointer indirections, whether due to implicit or explicit field
+// selections or * operations inserted for "pointerness", panic if
+// applied to a nil pointer, so a method call x.f() may panic even
+// before the function call.
+//
+// By contrast, a MethodExpr operation T.f is essentially equivalent
+// to a function literal of the form:
+//
+// func(x T, args) (results) { return x.f(args) }
+//
+// Consequently, any implicit field selections and * operations
+// inserted for "pointerness" are not evaluated until the function is
+// called, so a T.f or (*T).f expression never panics.
type SelectionKind int
const (
@@ -102,6 +135,11 @@ func (s *Selection) Index() []int { return s.index }
// Indirect reports whether any pointer indirection was required to get from
// x to f in x.f.
+//
+// Beware: Indirect spuriously returns true (Go issue #8353) for a
+// MethodVal selection in which the receiver argument and parameter
+// both have type *T so there is no indirection.
+// Unfortunately, a fix is too risky.
func (s *Selection) Indirect() bool { return s.indirect }
func (s *Selection) String() string { return SelectionString(s, nil) }
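A runnable sketch (not part of this change) of the panic timing described in the new SelectionKind documentation: forming a method expression never panics; the implicit indirection only happens when the resulting function is called.

	package main

	import "fmt"

	type T struct{}

	func (T) f() {} // value receiver

	func main() {
		var p *T // nil

		g := (*T).f // MethodExpr: no indirection happens while forming the func value

		func() {
			defer func() { fmt.Println("call panicked:", recover() != nil) }()
			g(p) // the implicit *p is evaluated only now, so this call panics
		}()

		fmt.Println("still running")
	}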
diff --git a/src/cmd/compile/internal/types2/signature.go b/src/cmd/compile/internal/types2/signature.go
index 8e0dfe2881..18a64ec1a0 100644
--- a/src/cmd/compile/internal/types2/signature.go
+++ b/src/cmd/compile/internal/types2/signature.go
@@ -108,9 +108,12 @@ func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []
// - the receiver specification acts as local declaration for its type parameters, which may be blank
_, rname, rparams := check.unpackRecv(recvPar.Type, true)
if len(rparams) > 0 {
+ // The scope of the type parameter T in "func (r T[T]) f()"
+ // starts after f, not at "r"; see #52038.
+ scopePos := ftyp.Pos()
tparams := make([]*TypeParam, len(rparams))
for i, rparam := range rparams {
- tparams[i] = check.declareTypeParam(rparam)
+ tparams[i] = check.declareTypeParam(rparam, scopePos)
}
sig.rparams = bindTParams(tparams)
// Blank identifiers don't get declared, so naive type-checking of the
@@ -136,7 +139,7 @@ func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []
// Also: Don't report an error via genericType since it will be reported
// again when we type-check the signature.
// TODO(gri) maybe the receiver should be marked as invalid instead?
- if recv, _ := check.genericType(rname, nil).(*Named); recv != nil {
+ if recv := asNamed(check.genericType(rname, nil)); recv != nil {
recvTParams = recv.TypeParams().list()
}
}
@@ -167,16 +170,21 @@ func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []
check.collectTypeParams(&sig.tparams, tparams)
}
- // Value (non-type) parameters' scope starts in the function body. Use a temporary scope for their
- // declarations and then squash that scope into the parent scope (and report any redeclarations at
- // that time).
+ // Use a temporary scope for all parameter declarations and then
+ // squash that scope into the parent scope (and report any
+ // redeclarations at that time).
+ //
+ // TODO(adonovan): now that each declaration has the correct
+ // scopePos, there should be no need for scope squashing.
+ // Audit to ensure all lookups honor scopePos and simplify.
scope := NewScope(check.scope, nopos, nopos, "function body (temp. scope)")
- var recvList []*Var // TODO(gri) remove the need for making a list here
+ scopePos := syntax.EndPos(ftyp) // all parameters' scopes start after the signature
+ var recvList []*Var // TODO(gri) remove the need for making a list here
if recvPar != nil {
- recvList, _ = check.collectParams(scope, []*syntax.Field{recvPar}, false) // use rewritten receiver type, if any
+ recvList, _ = check.collectParams(scope, []*syntax.Field{recvPar}, false, scopePos) // use rewritten receiver type, if any
}
- params, variadic := check.collectParams(scope, ftyp.ParamList, true)
- results, _ := check.collectParams(scope, ftyp.ResultList, false)
+ params, variadic := check.collectParams(scope, ftyp.ParamList, true, scopePos)
+ results, _ := check.collectParams(scope, ftyp.ResultList, false, scopePos)
scope.Squash(func(obj, alt Object) {
var err error_
err.code = DuplicateDecl
@@ -208,13 +216,14 @@ func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []
check.later(func() {
// spec: "The receiver type must be of the form T or *T where T is a type name."
rtyp, _ := deref(recv.typ)
- if rtyp == Typ[Invalid] {
+ atyp := Unalias(rtyp)
+ if !isValid(atyp) {
return // error was reported before
}
// spec: "The type denoted by T is called the receiver base type; it must not
// be a pointer or interface type and it must be declared in the same package
// as the method."
- switch T := rtyp.(type) {
+ switch T := atyp.(type) {
case *Named:
// The receiver type may be an instantiated type referred to
// by an alias (which cannot have receiver parameters for now).
@@ -258,7 +267,7 @@ func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []
// collectParams declares the parameters of list in scope and returns the corresponding
// variable list.
-func (check *Checker) collectParams(scope *Scope, list []*syntax.Field, variadicOk bool) (params []*Var, variadic bool) {
+func (check *Checker) collectParams(scope *Scope, list []*syntax.Field, variadicOk bool, scopePos syntax.Pos) (params []*Var, variadic bool) {
if list == nil {
return
}
@@ -293,7 +302,7 @@ func (check *Checker) collectParams(scope *Scope, list []*syntax.Field, variadic
// ok to continue
}
par := NewParam(field.Name.Pos(), check.pkg, name, typ)
- check.declare(scope, field.Name, par, scope.pos)
+ check.declare(scope, field.Name, par, scopePos)
params = append(params, par)
named = true
} else {
diff --git a/src/cmd/compile/internal/types2/sizes.go b/src/cmd/compile/internal/types2/sizes.go
index 59f600a05b..486c05c61c 100644
--- a/src/cmd/compile/internal/types2/sizes.go
+++ b/src/cmd/compile/internal/types2/sizes.go
@@ -112,8 +112,8 @@ func (s *StdSizes) Alignof(T Type) (result int64) {
}
func IsSyncAtomicAlign64(T Type) bool {
- named, ok := T.(*Named)
- if !ok {
+ named := asNamed(T)
+ if named == nil {
return false
}
obj := named.Obj()
@@ -227,7 +227,7 @@ func (s *StdSizes) Sizeof(T Type) int64 {
}
// common architecture word sizes and alignments
-var gcArchSizes = map[string]*StdSizes{
+var gcArchSizes = map[string]*gcSizes{
"386": {4, 4},
"amd64": {8, 8},
"amd64p32": {4, 8},
@@ -255,20 +255,17 @@ var gcArchSizes = map[string]*StdSizes{
// "386", "amd64", "amd64p32", "arm", "arm64", "loong64", "mips", "mipsle",
// "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x", "sparc64", "wasm".
func SizesFor(compiler, arch string) Sizes {
- var m map[string]*StdSizes
switch compiler {
case "gc":
- m = gcArchSizes
+ if s := gcSizesFor(compiler, arch); s != nil {
+ return Sizes(s)
+ }
case "gccgo":
- m = gccgoArchSizes
- default:
- return nil
- }
- s, ok := m[arch]
- if !ok {
- return nil
+ if s, ok := gccgoArchSizes[arch]; ok {
+ return Sizes(s)
+ }
}
- return s
+ return nil
}
// stdSizes is used if Config.Sizes == nil.
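A hedged sketch via the exported go/types API (which gained the same gc-specific sizes): with SizesFor("gc", "amd64"), the struct from the issue60431 test below is padded out to 16 bytes. The expected output assumes a toolchain that already ships gcSizes (Go 1.22 or later).

	package main

	import (
		"fmt"
		"go/token"
		"go/types"
	)

	func main() {
		sizes := types.SizesFor("gc", "amd64")

		// struct{ a int64; b bool }: gc rounds the size up to 16 (8-byte alignment),
		// matching unsafe.Sizeof in the compiler.
		fields := []*types.Var{
			types.NewField(token.NoPos, nil, "a", types.Typ[types.Int64], false),
			types.NewField(token.NoPos, nil, "b", types.Typ[types.Bool], false),
		}
		s := types.NewStruct(fields, nil)
		fmt.Println(sizes.Sizeof(s)) // 16
	}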
diff --git a/src/cmd/compile/internal/types2/sizes_test.go b/src/cmd/compile/internal/types2/sizes_test.go
index 7af89583f2..9a772f4b15 100644
--- a/src/cmd/compile/internal/types2/sizes_test.go
+++ b/src/cmd/compile/internal/types2/sizes_test.go
@@ -133,3 +133,62 @@ var s struct {
})
}
}
+
+type gcSizeTest struct {
+ name string
+ src string
+}
+
+var gcSizesTests = []gcSizeTest{
+ {
+ "issue60431",
+ `
+package main
+
+import "unsafe"
+
+// The foo struct size is expected to be rounded up to 16 bytes.
+type foo struct {
+ a int64
+ b bool
+}
+
+func main() {
+ assert(unsafe.Sizeof(foo{}) == 16)
+}`,
+ },
+ {
+ "issue60734",
+ `
+package main
+
+import (
+ "unsafe"
+)
+
+// The Data struct size is expected to be rounded up to 16 bytes.
+type Data struct {
+ Value uint32 // 4 bytes
+ Label [10]byte // 10 bytes
+ Active bool // 1 byte
+ // padded with 1 byte to make it align
+}
+
+func main() {
+ assert(unsafe.Sizeof(Data{}) == 16)
+}
+`,
+ },
+}
+
+func TestGCSizes(t *testing.T) {
+ types2.DefPredeclaredTestFuncs()
+ for _, tc := range gcSizesTests {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ conf := types2.Config{Importer: defaultImporter(), Sizes: types2.SizesFor("gc", "amd64")}
+ mustTypecheck(tc.src, &conf, nil)
+ })
+ }
+}
diff --git a/src/cmd/compile/internal/types2/stdlib_test.go b/src/cmd/compile/internal/types2/stdlib_test.go
index ee852f5c4c..405af78572 100644
--- a/src/cmd/compile/internal/types2/stdlib_test.go
+++ b/src/cmd/compile/internal/types2/stdlib_test.go
@@ -191,7 +191,7 @@ func firstComment(filename string) (first string) {
}
text = strings.TrimSpace(text[2:])
- if strings.HasPrefix(text, "+build ") {
+ if strings.HasPrefix(text, "go:build ") {
panic("skip")
}
if first == "" {
@@ -233,6 +233,9 @@ func testTestDir(t *testing.T, path string, ignore ...string) {
filename := filepath.Join(path, f.Name())
goVersion := ""
if comment := firstComment(filename); comment != "" {
+ if strings.Contains(comment, "-goexperiment") {
+ continue // ignore this file
+ }
fields := strings.Fields(comment)
switch fields[0] {
case "skip", "compiledir":
@@ -308,6 +311,7 @@ func TestStdFixed(t *testing.T) {
testTestDir(t, filepath.Join(testenv.GOROOT(t), "test", "fixedbugs"),
"bug248.go", "bug302.go", "bug369.go", // complex test instructions - ignore
+ "bug398.go", // types2 doesn't check for anonymous interface cycles (go.dev/issue/56103)
"issue6889.go", // gc-specific test
"issue11362.go", // canonical import path check
"issue16369.go", // types2 handles this correctly - not an issue
@@ -325,6 +329,7 @@ func TestStdFixed(t *testing.T) {
"issue49767.go", // go/types does not have constraints on channel element size
"issue49814.go", // go/types does not have constraints on array size
"issue56103.go", // anonymous interface cycles; will be a type checker error in 1.22
+ "issue52697.go", // types2 does not have constraints on stack size
// These tests requires runtime/cgo.Incomplete, which is only available on some platforms.
// However, types2 does not know about build constraints.
diff --git a/src/cmd/compile/internal/types2/stmt.go b/src/cmd/compile/internal/types2/stmt.go
index f13ab69830..c9713dac6f 100644
--- a/src/cmd/compile/internal/types2/stmt.go
+++ b/src/cmd/compile/internal/types2/stmt.go
@@ -9,6 +9,7 @@ package types2
import (
"cmd/compile/internal/syntax"
"go/constant"
+ "internal/buildcfg"
. "internal/types/errors"
"sort"
)
@@ -22,10 +23,6 @@ func (check *Checker) funcBody(decl *declInfo, name string, sig *Signature, body
check.trace(body.Pos(), "-- %s: %s", name, sig)
}
- // set function scope extent
- sig.scope.pos = body.Pos()
- sig.scope.end = syntax.EndPos(body)
-
// save/restore current environment and set up function environment
// (and use 0 indentation at function start)
defer func(env environment, indent int) {
@@ -279,7 +276,7 @@ L:
// isNil reports whether the expression e denotes the predeclared value nil.
func (check *Checker) isNil(e syntax.Expr) bool {
// The only way to express the nil value is by literally writing nil (possibly in parentheses).
- if name, _ := unparen(e).(*syntax.Name); name != nil {
+ if name, _ := syntax.Unparen(e).(*syntax.Name); name != nil {
_, ok := check.lookup(name.Value).(*Nil)
return ok
}
@@ -297,7 +294,7 @@ L:
check.expr(nil, &dummy, e) // run e through expr so we get the usual Info recordings
} else {
T = check.varType(e)
- if T == Typ[Invalid] {
+ if !isValid(T) {
continue L
}
}
@@ -341,7 +338,7 @@ L:
// hash = "<nil>" // avoid collision with a type named nil
// } else {
// T = check.varType(e)
-// if T == Typ[Invalid] {
+// if !isValid(T) {
// continue L
// }
// hash = typeHash(T, nil)
@@ -458,12 +455,12 @@ func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) {
check.errorf(s.Lhs, NonNumericIncDec, invalidOp+"%s%s%s (non-numeric type %s)", s.Lhs, s.Op, s.Op, x.typ)
return
}
- check.assignVar(s.Lhs, nil, &x)
+ check.assignVar(s.Lhs, nil, &x, "assignment")
return
}
- lhs := unpackExpr(s.Lhs)
- rhs := unpackExpr(s.Rhs)
+ lhs := syntax.UnpackListExpr(s.Lhs)
+ rhs := syntax.UnpackListExpr(s.Rhs)
switch s.Op {
case 0:
check.assignVars(lhs, rhs)
@@ -481,7 +478,7 @@ func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) {
var x operand
check.binary(&x, nil, lhs[0], rhs[0], s.Op)
- check.assignVar(lhs[0], nil, &x)
+ check.assignVar(lhs[0], nil, &x, "assignment")
case *syntax.CallStmt:
kind := "go"
@@ -494,7 +491,7 @@ func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) {
res := check.sig.results
// Return with implicit results allowed for function with named results.
// (If one is named, all are named.)
- results := unpackExpr(s.Results)
+ results := syntax.UnpackListExpr(s.Results)
if len(results) == 0 && res.Len() > 0 && res.vars[0].name != "" {
// spec: "Implementation restriction: A compiler may disallow an empty expression
// list in a "return" statement if a different entity (constant, type, or variable)
@@ -621,7 +618,7 @@ func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) {
// if present, rhs must be a receive operation
if rhs != nil {
- if x, _ := unparen(rhs).(*syntax.Operation); x != nil && x.Y == nil && x.Op == syntax.Recv {
+ if x, _ := syntax.Unparen(rhs).(*syntax.Operation); x != nil && x.Y == nil && x.Op == syntax.Recv {
valid = true
}
}
@@ -718,7 +715,7 @@ func (check *Checker) switchStmt(inner stmtContext, s *syntax.SwitchStmt) {
} else {
inner |= finalSwitchCase
}
- check.caseValues(&x, unpackExpr(clause.Cases), seen)
+ check.caseValues(&x, syntax.UnpackListExpr(clause.Cases), seen)
check.openScopeUntil(clause, end, "case")
check.stmtList(inner, clause.Body)
check.closeScope()
@@ -778,7 +775,7 @@ func (check *Checker) typeSwitchStmt(inner stmtContext, s *syntax.SwitchStmt, gu
end = s.Body[i+1].Pos()
}
// Check each type in this type switch case.
- cases := unpackExpr(clause.Cases)
+ cases := syntax.UnpackListExpr(clause.Cases)
T := check.caseTypes(sx, cases, seen)
check.openScopeUntil(clause, end, "case")
// If lhs exists, declare a corresponding variable in the case-local scope.
@@ -828,7 +825,10 @@ func (check *Checker) typeSwitchStmt(inner stmtContext, s *syntax.SwitchStmt, gu
}
func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *syntax.RangeClause) {
- // determine lhs, if any
+ // Convert syntax form to local variables.
+ type Expr = syntax.Expr
+ type identType = syntax.Name
+ identName := func(n *identType) string { return n.Value }
sKey := rclause.Lhs // possibly nil
var sValue, sExtra syntax.Expr
if p, _ := sKey.(*syntax.ListExpr); p != nil {
@@ -844,43 +844,50 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s
sExtra = p.ElemList[2]
}
}
+ isDef := rclause.Def
+ rangeVar := rclause.X
+ noNewVarPos := s
+
+ // Do not use rclause anymore.
+ rclause = nil
+
+ // Everything from here on is shared between cmd/compile/internal/types2 and go/types.
// check expression to iterate over
var x operand
- check.expr(nil, &x, rclause.X)
+ check.expr(nil, &x, rangeVar)
// determine key/value types
var key, val Type
if x.mode != invalid {
// Ranging over a type parameter is permitted if it has a core type.
- var cause string
- u := coreType(x.typ)
- if t, _ := u.(*Chan); t != nil {
- if sValue != nil {
- check.softErrorf(sValue, InvalidIterVar, "range over %s permits only one iteration variable", &x)
- // ok to continue
- }
- if t.dir == SendOnly {
- cause = "receive from send-only channel"
- }
- } else {
- if sExtra != nil {
- check.softErrorf(sExtra, InvalidIterVar, "range clause permits at most two iteration variables")
- // ok to continue
- }
- if u == nil {
- cause = check.sprintf("%s has no core type", x.typ)
+ k, v, cause, isFunc, ok := rangeKeyVal(x.typ, func(v goVersion) bool {
+ return check.allowVersion(check.pkg, x.expr, v)
+ })
+ switch {
+ case !ok && cause != "":
+ check.softErrorf(&x, InvalidRangeExpr, "cannot range over %s: %s", &x, cause)
+ case !ok:
+ check.softErrorf(&x, InvalidRangeExpr, "cannot range over %s", &x)
+ case k == nil && sKey != nil:
+ check.softErrorf(sKey, InvalidIterVar, "range over %s permits no iteration variables", &x)
+ case v == nil && sValue != nil:
+ check.softErrorf(sValue, InvalidIterVar, "range over %s permits only one iteration variable", &x)
+ case sExtra != nil:
+ check.softErrorf(sExtra, InvalidIterVar, "range clause permits at most two iteration variables")
+ case isFunc && ((k == nil) != (sKey == nil) || (v == nil) != (sValue == nil)):
+ var count string
+ switch {
+ case k == nil:
+ count = "no iteration variables"
+ case v == nil:
+ count = "one iteration variable"
+ default:
+ count = "two iteration variables"
}
+ check.softErrorf(&x, InvalidIterVar, "range over %s must have %s", &x, count)
}
- key, val = rangeKeyVal(u)
- if key == nil || cause != "" {
- if cause == "" {
- check.softErrorf(&x, InvalidRangeExpr, "cannot range over %s", &x)
- } else {
- check.softErrorf(&x, InvalidRangeExpr, "cannot range over %s (%s)", &x, cause)
- }
- // ok to continue
- }
+ key, val = k, v
}
// Open the for-statement block scope now, after the range clause.
@@ -892,10 +899,12 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s
// (irregular assignment, cannot easily map to existing assignment checks)
// lhs expressions and initialization value (rhs) types
- lhs := [2]syntax.Expr{sKey, sValue}
- rhs := [2]Type{key, val} // key, val may be nil
+ lhs := [2]Expr{sKey, sValue} // sKey, sValue may be nil
+ rhs := [2]Type{key, val} // key, val may be nil
- if rclause.Def {
+ constIntRange := x.mode == constant_ && isInteger(x.typ)
+
+ if isDef {
// short variable declaration
var vars []*Var
for i, lhs := range lhs {
@@ -905,9 +914,9 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s
// determine lhs variable
var obj *Var
- if ident, _ := lhs.(*syntax.Name); ident != nil {
+ if ident, _ := lhs.(*identType); ident != nil {
// declare new variable
- name := ident.Value
+ name := identName(ident)
obj = NewVar(ident.Pos(), check.pkg, name, nil)
check.recordDef(ident, obj)
// _ variables don't count as new variables
@@ -920,11 +929,13 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s
}
// initialize lhs variable
- if typ := rhs[i]; typ != nil {
+ if constIntRange {
+ check.initVar(obj, &x, "range clause")
+ } else if typ := rhs[i]; typ != nil {
x.mode = value
x.expr = lhs // we don't have a better rhs expression to use here
x.typ = typ
- check.initVar(obj, &x, "range clause")
+ check.initVar(obj, &x, "assignment") // error is on variable, use "assignment" not "range clause"
} else {
obj.typ = Typ[Invalid]
obj.used = true // don't complain about unused variable
@@ -938,43 +949,111 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s
check.declare(check.scope, nil /* recordDef already called */, obj, scopePos)
}
} else {
- check.error(s, NoNewVar, "no new variables on left side of :=")
+ check.error(noNewVarPos, NoNewVar, "no new variables on left side of :=")
}
- } else {
+ } else if sKey != nil /* lhs[0] != nil */ {
// ordinary assignment
for i, lhs := range lhs {
if lhs == nil {
continue
}
- if typ := rhs[i]; typ != nil {
+
+ if constIntRange {
+ check.assignVar(lhs, nil, &x, "range clause")
+ } else if typ := rhs[i]; typ != nil {
x.mode = value
x.expr = lhs // we don't have a better rhs expression to use here
x.typ = typ
- check.assignVar(lhs, nil, &x)
+ check.assignVar(lhs, nil, &x, "assignment") // error is on variable, use "assignment" not "range clause"
}
}
+ } else if constIntRange {
+ // If we don't have any iteration variables, we still need to
+ // check that a (possibly untyped) integer range expression x
+ // is valid.
+ // We do this by checking the assignment _ = x. This ensures
+ // that an untyped x can be converted to a value of type int.
+ check.assignment(&x, nil, "range clause")
}
check.stmt(inner, s.Body)
}
+// RangeKeyVal returns the key and value types for a range over typ.
+// Exported for use by the compiler (does not exist in go/types).
+func RangeKeyVal(typ Type) (Type, Type) {
+ key, val, _, _, _ := rangeKeyVal(typ, nil)
+ return key, val
+}
+
// rangeKeyVal returns the key and value type produced by a range clause
-// over an expression of type typ. If the range clause is not permitted
-// the results are nil.
-func rangeKeyVal(typ Type) (key, val Type) {
- switch typ := arrayPtrDeref(typ).(type) {
+// over an expression of type typ.
+// If allowVersion != nil, it is used to check the required language version.
+// If the range clause is not permitted, rangeKeyVal returns ok = false.
+// When ok = false, rangeKeyVal may also return a reason in cause.
+func rangeKeyVal(typ Type, allowVersion func(goVersion) bool) (key, val Type, cause string, isFunc, ok bool) {
+ bad := func(cause string) (Type, Type, string, bool, bool) {
+ return Typ[Invalid], Typ[Invalid], cause, false, false
+ }
+ toSig := func(t Type) *Signature {
+ sig, _ := coreType(t).(*Signature)
+ return sig
+ }
+
+ orig := typ
+ switch typ := arrayPtrDeref(coreType(typ)).(type) {
+ case nil:
+ return bad("no core type")
case *Basic:
if isString(typ) {
- return Typ[Int], universeRune // use 'rune' name
+ return Typ[Int], universeRune, "", false, true // use 'rune' name
+ }
+ if isInteger(typ) {
+ if allowVersion != nil && !allowVersion(go1_22) {
+ return bad("requires go1.22 or later")
+ }
+ return orig, nil, "", false, true
}
case *Array:
- return Typ[Int], typ.elem
+ return Typ[Int], typ.elem, "", false, true
case *Slice:
- return Typ[Int], typ.elem
+ return Typ[Int], typ.elem, "", false, true
case *Map:
- return typ.key, typ.elem
+ return typ.key, typ.elem, "", false, true
case *Chan:
- return typ.elem, Typ[Invalid]
+ if typ.dir == SendOnly {
+ return bad("receive from send-only channel")
+ }
+ return typ.elem, nil, "", false, true
+ case *Signature:
+ // TODO(gri) when this becomes enabled permanently, add version check
+ if !buildcfg.Experiment.RangeFunc {
+ break
+ }
+ assert(typ.Recv() == nil)
+ switch {
+ case typ.Params().Len() != 1:
+ return bad("func must be func(yield func(...) bool): wrong argument count")
+ case toSig(typ.Params().At(0).Type()) == nil:
+ return bad("func must be func(yield func(...) bool): argument is not func")
+ case typ.Results().Len() != 0:
+ return bad("func must be func(yield func(...) bool): unexpected results")
+ }
+ cb := toSig(typ.Params().At(0).Type())
+ assert(cb.Recv() == nil)
+ switch {
+ case cb.Params().Len() > 2:
+ return bad("func must be func(yield func(...) bool): yield func has too many parameters")
+ case cb.Results().Len() != 1 || !isBoolean(cb.Results().At(0).Type()):
+ return bad("func must be func(yield func(...) bool): yield func does not return bool")
+ }
+ if cb.Params().Len() >= 1 {
+ key = cb.Params().At(0).Type()
+ }
+ if cb.Params().Len() >= 2 {
+ val = cb.Params().At(1).Type()
+ }
+ return key, val, "", true, true
}
return
}
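A sketch of the two new range forms rangeKeyVal now accepts. Range over an integer requires go1.22; range over a function was gated behind GOEXPERIMENT=rangefunc at the time of this change (enabled by default in later releases). The pairs iterator below is illustrative.

	package main

	import "fmt"

	// pairs is a range-over-func iterator: it calls yield for each element
	// and stops early when yield returns false.
	func pairs(yield func(int, string) bool) {
		for i, s := range []string{"a", "b", "c"} {
			if !yield(i, s) {
				return
			}
		}
	}

	func main() {
		// Range over an integer: yields 0, 1, 2 (go1.22 per the version check above).
		for i := range 3 {
			fmt.Println(i)
		}

		// Range over a function of the form func(yield func(...) bool).
		for i, s := range pairs {
			fmt.Println(i, s)
			if i == 1 {
				break // translated into yield returning false
			}
		}
	}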
diff --git a/src/cmd/compile/internal/types2/struct.go b/src/cmd/compile/internal/types2/struct.go
index 125e94647b..9e46b349a3 100644
--- a/src/cmd/compile/internal/types2/struct.go
+++ b/src/cmd/compile/internal/types2/struct.go
@@ -147,7 +147,7 @@ func (check *Checker) structType(styp *Struct, e *syntax.StructType) {
t, isPtr := deref(embeddedTyp)
switch u := under(t).(type) {
case *Basic:
- if t == Typ[Invalid] {
+ if !isValid(t) {
// error was reported before
return
}
diff --git a/src/cmd/compile/internal/types2/subst.go b/src/cmd/compile/internal/types2/subst.go
index 74d6294dff..09dc58527a 100644
--- a/src/cmd/compile/internal/types2/subst.go
+++ b/src/cmd/compile/internal/types2/subst.go
@@ -169,7 +169,9 @@ func (subst *subster) typ(typ Type) Type {
if mcopied || ecopied {
iface := subst.check.newInterface()
iface.embeddeds = embeddeds
+ iface.embedPos = t.embedPos
iface.implicit = t.implicit
+ assert(t.complete) // otherwise we are copying incomplete data
iface.complete = t.complete
// If we've changed the interface type, we may need to replace its
// receiver if the receiver type is the original interface. Receivers of
@@ -185,6 +187,11 @@ func (subst *subster) typ(typ Type) Type {
// need to create new interface methods to hold the instantiated
// receiver. This is handled by Named.expandUnderlying.
iface.methods, _ = replaceRecvType(methods, t, iface)
+
+ // If check != nil, check.newInterface will have saved the interface for later completion.
+ if subst.check == nil { // golang/go#61561: all newly created interfaces must be completed
+ iface.typeSet()
+ }
return iface
}
diff --git a/src/cmd/compile/internal/types2/typeparam.go b/src/cmd/compile/internal/types2/typeparam.go
index aebbec27a8..5c6030b3fb 100644
--- a/src/cmd/compile/internal/types2/typeparam.go
+++ b/src/cmd/compile/internal/types2/typeparam.go
@@ -9,11 +9,11 @@ import "sync/atomic"
// Note: This is a uint32 rather than a uint64 because the
// respective 64 bit atomic instructions are not available
// on all platforms.
-var lastID uint32
+var lastID atomic.Uint32
// nextID returns a value increasing monotonically by 1 with
// each call, starting with 1. It may be called concurrently.
-func nextID() uint64 { return uint64(atomic.AddUint32(&lastID, 1)) }
+func nextID() uint64 { return uint64(lastID.Add(1)) }
// A TypeParam represents a type parameter type.
type TypeParam struct {
@@ -108,7 +108,7 @@ func (t *TypeParam) iface() *Interface {
var ityp *Interface
switch u := under(bound).(type) {
case *Basic:
- if u == Typ[Invalid] {
+ if !isValid(u) {
// error is reported elsewhere
return &emptyInterface
}
@@ -132,7 +132,7 @@ func (t *TypeParam) iface() *Interface {
// pos is used for tracing output; start with the type parameter position.
pos := t.obj.pos
// use the (original or possibly instantiated) type bound position if we have one
- if n, _ := bound.(*Named); n != nil {
+ if n := asNamed(bound); n != nil {
pos = n.obj.pos
}
computeInterfaceTypeSet(t.check, pos, ityp)
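The lastID change above swaps the free-function atomic.AddUint32 pattern for the typed sync/atomic.Uint32 value introduced in Go 1.19; a small standalone sketch of the same pattern:

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	var lastID atomic.Uint32

	// nextID is safe for concurrent use and yields 1, 2, 3, ...
	func nextID() uint64 { return uint64(lastID.Add(1)) }

	func main() {
		var wg sync.WaitGroup
		for i := 0; i < 4; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				fmt.Println(nextID())
			}()
		}
		wg.Wait()
	}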
diff --git a/src/cmd/compile/internal/types2/typeset.go b/src/cmd/compile/internal/types2/typeset.go
index 70b9e36aef..a6ccfdb80c 100644
--- a/src/cmd/compile/internal/types2/typeset.go
+++ b/src/cmd/compile/internal/types2/typeset.go
@@ -290,7 +290,7 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
assert(len(tset.methods) == 0)
terms = tset.terms
default:
- if u == Typ[Invalid] {
+ if !isValid(u) {
continue
}
if check != nil && !check.verifyVersionf(pos, go1_18, "embedding non-interface type %s", typ) {
@@ -304,7 +304,6 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
// separately. Here we only need to intersect the term lists and comparable bits.
allTerms, allComparable = intersectTermLists(allTerms, allComparable, terms, comparable)
}
- ityp.embedPos = nil // not needed anymore (errors have been reported)
ityp.tset.comparable = allComparable
if len(allMethods) != 0 {
@@ -389,7 +388,7 @@ func computeUnionTypeSet(check *Checker, unionSets map[*Union]*_TypeSet, pos syn
// For now we don't permit type parameters as constraints.
assert(!isTypeParam(t.typ))
terms = computeInterfaceTypeSet(check, pos, ui).terms
- } else if u == Typ[Invalid] {
+ } else if !isValid(u) {
continue
} else {
if t.tilde && !Identical(t.typ, u) {
diff --git a/src/cmd/compile/internal/types2/typestring.go b/src/cmd/compile/internal/types2/typestring.go
index 2f4fb5220d..4b410af6b7 100644
--- a/src/cmd/compile/internal/types2/typestring.go
+++ b/src/cmd/compile/internal/types2/typestring.go
@@ -218,7 +218,7 @@ func (w *typeWriter) typ(typ Type) {
w.string("any")
break
}
- if t == universeComparable.Type().(*Named).underlying {
+ if t == asNamed(universeComparable.Type()).underlying {
w.string("interface{comparable}")
break
}
@@ -322,10 +322,17 @@ func (w *typeWriter) typ(typ Type) {
// error messages. This doesn't need to be super-elegant; we just
// need a clear indication that this is not a predeclared name.
if w.ctxt == nil && Universe.Lookup(t.obj.name) != nil {
- w.string(sprintf(nil, false, " /* with %s declared at %s */", t.obj.name, t.obj.Pos()))
+ w.string(fmt.Sprintf(" /* with %s declared at %s */", t.obj.name, t.obj.Pos()))
}
}
+ case *Alias:
+ w.typeName(t.obj)
+ if w.ctxt != nil {
+ // TODO(gri) do we need to print the alias type name, too?
+ w.typ(Unalias(t.obj.typ))
+ }
+
default:
// For externally defined implementations of Type.
// Note: In this case cycles won't be caught.
diff --git a/src/cmd/compile/internal/types2/typexpr.go b/src/cmd/compile/internal/types2/typexpr.go
index ca717fed8b..81adcbd9cf 100644
--- a/src/cmd/compile/internal/types2/typexpr.go
+++ b/src/cmd/compile/internal/types2/typexpr.go
@@ -18,7 +18,7 @@ import (
// If an error occurred, x.mode is set to invalid.
// For the meaning of def, see Checker.definedType, below.
// If wantType is set, the identifier e is expected to denote a type.
-func (check *Checker) ident(x *operand, e *syntax.Name, def *Named, wantType bool) {
+func (check *Checker) ident(x *operand, e *syntax.Name, def *TypeName, wantType bool) {
x.mode = invalid
x.expr = e
@@ -78,7 +78,7 @@ func (check *Checker) ident(x *operand, e *syntax.Name, def *Named, wantType boo
case *Const:
check.addDeclDep(obj)
- if typ == Typ[Invalid] {
+ if !isValid(typ) {
return
}
if obj == universeIota {
@@ -94,7 +94,7 @@ func (check *Checker) ident(x *operand, e *syntax.Name, def *Named, wantType boo
x.mode = constant_
case *TypeName:
- if check.isBrokenAlias(obj) {
+ if !check.enableAlias && check.isBrokenAlias(obj) {
check.errorf(e, InvalidDeclCycle, "invalid use of type alias %s in recursive type (see go.dev/issue/50729)", obj.name)
return
}
@@ -108,7 +108,7 @@ func (check *Checker) ident(x *operand, e *syntax.Name, def *Named, wantType boo
obj.used = true
}
check.addDeclDep(obj)
- if typ == Typ[Invalid] {
+ if !isValid(typ) {
return
}
x.mode = variable
@@ -173,10 +173,10 @@ func (check *Checker) validVarType(e syntax.Expr, typ Type) {
}
// definedType is like typ but also accepts a type name def.
-// If def != nil, e is the type specification for the defined type def, declared
-// in a type declaration, and def.underlying will be set to the type of e before
-// any components of e are type-checked.
-func (check *Checker) definedType(e syntax.Expr, def *Named) Type {
+// If def != nil, e is the type specification for the type named def, declared
+// in a type declaration, and def.typ.underlying will be set to the type of e
+// before any components of e are type-checked.
+func (check *Checker) definedType(e syntax.Expr, def *TypeName) Type {
typ := check.typInternal(e, def)
assert(isTyped(typ))
if isGeneric(typ) {
@@ -193,7 +193,7 @@ func (check *Checker) definedType(e syntax.Expr, def *Named) Type {
func (check *Checker) genericType(e syntax.Expr, cause *string) Type {
typ := check.typInternal(e, nil)
assert(isTyped(typ))
- if typ != Typ[Invalid] && !isGeneric(typ) {
+ if isValid(typ) && !isGeneric(typ) {
if cause != nil {
*cause = check.sprintf("%s is not a generic type", typ)
}
@@ -207,12 +207,12 @@ func (check *Checker) genericType(e syntax.Expr, cause *string) Type {
// goTypeName returns the Go type name for typ and
// removes any occurrences of "types2." from that name.
func goTypeName(typ Type) string {
- return strings.Replace(fmt.Sprintf("%T", typ), "types2.", "", -1) // strings.ReplaceAll is not available in Go 1.4
+ return strings.ReplaceAll(fmt.Sprintf("%T", typ), "types2.", "")
}
// typInternal drives type checking of types.
// Must only be called by definedType or genericType.
-func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) {
+func (check *Checker) typInternal(e0 syntax.Expr, def *TypeName) (T Type) {
if check.conf.Trace {
check.trace(e0.Pos(), "-- type %s", e0)
check.indent++
@@ -243,7 +243,7 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) {
switch x.mode {
case typexpr:
typ := x.typ
- def.setUnderlying(typ)
+ setDefType(def, typ)
return typ
case invalid:
// ignore - error reported before
@@ -260,7 +260,7 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) {
switch x.mode {
case typexpr:
typ := x.typ
- def.setUnderlying(typ)
+ setDefType(def, typ)
return typ
case invalid:
// ignore - error reported before
@@ -272,7 +272,7 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) {
case *syntax.IndexExpr:
check.verifyVersionf(e, go1_18, "type instantiation")
- return check.instantiatedType(e.X, unpackExpr(e.Index), def)
+ return check.instantiatedType(e.X, syntax.UnpackListExpr(e.Index), def)
case *syntax.ParenExpr:
// Generic types must be instantiated before they can be used in any form.
@@ -281,7 +281,7 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) {
case *syntax.ArrayType:
typ := new(Array)
- def.setUnderlying(typ)
+ setDefType(def, typ)
if e.Len != nil {
typ.len = check.arrayLength(e.Len)
} else {
@@ -297,7 +297,7 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) {
case *syntax.SliceType:
typ := new(Slice)
- def.setUnderlying(typ)
+ setDefType(def, typ)
typ.elem = check.varType(e.Elem)
return typ
@@ -309,7 +309,7 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) {
case *syntax.StructType:
typ := new(Struct)
- def.setUnderlying(typ)
+ setDefType(def, typ)
check.structType(typ, e)
return typ
@@ -317,13 +317,13 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) {
if e.Op == syntax.Mul && e.Y == nil {
typ := new(Pointer)
typ.base = Typ[Invalid] // avoid nil base in invalid recursive type declaration
- def.setUnderlying(typ)
+ setDefType(def, typ)
typ.base = check.varType(e.X)
// If typ.base is invalid, it's unlikely that *base is particularly
// useful - even a valid dereference will lead to an invalid
// type again, and in some cases we get unexpected follow-on errors
// (e.g., go.dev/issue/49005). Return an invalid type instead.
- if typ.base == Typ[Invalid] {
+ if !isValid(typ.base) {
return Typ[Invalid]
}
return typ
@@ -334,19 +334,19 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) {
case *syntax.FuncType:
typ := new(Signature)
- def.setUnderlying(typ)
+ setDefType(def, typ)
check.funcType(typ, nil, nil, e)
return typ
case *syntax.InterfaceType:
typ := check.newInterface()
- def.setUnderlying(typ)
+ setDefType(def, typ)
check.interfaceType(typ, e, def)
return typ
case *syntax.MapType:
typ := new(Map)
- def.setUnderlying(typ)
+ setDefType(def, typ)
typ.key = check.varType(e.Key)
typ.elem = check.varType(e.Value)
@@ -371,7 +371,7 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) {
case *syntax.ChanType:
typ := new(Chan)
- def.setUnderlying(typ)
+ setDefType(def, typ)
dir := SendRecv
switch e.Dir {
@@ -396,11 +396,31 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) {
}
typ := Typ[Invalid]
- def.setUnderlying(typ)
+ setDefType(def, typ)
return typ
}
-func (check *Checker) instantiatedType(x syntax.Expr, xlist []syntax.Expr, def *Named) (res Type) {
+func setDefType(def *TypeName, typ Type) {
+ if def != nil {
+ switch t := def.typ.(type) {
+ case *Alias:
+ // t.fromRHS should always be set, either to an invalid type
+ // in the beginning, or to typ in certain cyclic declarations.
+ if t.fromRHS != Typ[Invalid] && t.fromRHS != typ {
+ panic(sprintf(nil, true, "t.fromRHS = %s, typ = %s\n", t.fromRHS, typ))
+ }
+ t.fromRHS = typ
+ case *Basic:
+ assert(t == Typ[Invalid])
+ case *Named:
+ t.underlying = typ
+ default:
+ panic(fmt.Sprintf("unexpected type %T", t))
+ }
+ }
+}
+
+func (check *Checker) instantiatedType(x syntax.Expr, xlist []syntax.Expr, def *TypeName) (res Type) {
if check.conf.Trace {
check.trace(x.Pos(), "-- instantiating type %s with %s", x, xlist)
check.indent++
@@ -416,11 +436,11 @@ func (check *Checker) instantiatedType(x syntax.Expr, xlist []syntax.Expr, def *
if cause != "" {
check.errorf(x, NotAGenericType, invalidOp+"%s%s (%s)", x, xlist, cause)
}
- if gtyp == Typ[Invalid] {
+ if !isValid(gtyp) {
return gtyp // error already reported
}
- orig, _ := gtyp.(*Named)
+ orig := asNamed(gtyp)
if orig == nil {
panic(fmt.Sprintf("%v: cannot instantiate %v", x.Pos(), gtyp))
}
@@ -428,13 +448,13 @@ func (check *Checker) instantiatedType(x syntax.Expr, xlist []syntax.Expr, def *
// evaluate arguments
targs := check.typeList(xlist)
if targs == nil {
- def.setUnderlying(Typ[Invalid]) // avoid errors later due to lazy instantiation
+ setDefType(def, Typ[Invalid]) // avoid errors later due to lazy instantiation
return Typ[Invalid]
}
// create the instance
- inst := check.instance(x.Pos(), orig, targs, nil, check.context()).(*Named)
- def.setUnderlying(inst)
+ inst := asNamed(check.instance(x.Pos(), orig, targs, nil, check.context()))
+ setDefType(def, inst)
// orig.tparams may not be set up, so we need to do expansion later.
check.later(func() {
@@ -443,7 +463,7 @@ func (check *Checker) instantiatedType(x syntax.Expr, xlist []syntax.Expr, def *
// errors.
check.recordInstance(x, inst.TypeArgs().list(), inst)
- if check.validateTArgLen(x.Pos(), inst.TypeParams().Len(), inst.TypeArgs().Len()) {
+ if check.validateTArgLen(x.Pos(), inst.obj.name, inst.TypeParams().Len(), inst.TypeArgs().Len()) {
if i, err := check.verify(x.Pos(), inst.TypeParams().list(), inst.TypeArgs().list(), check.context()); err != nil {
// best position for error reporting
pos := x.Pos()
@@ -520,7 +540,7 @@ func (check *Checker) typeList(list []syntax.Expr) []Type {
res := make([]Type, len(list)) // res != nil even if len(list) == 0
for i, x := range list {
t := check.varType(x)
- if t == Typ[Invalid] {
+ if !isValid(t) {
res = nil
}
if res != nil {
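setDefType above distinguishes alias declarations (recorded via fromRHS on *Alias) from defined types (recorded as the *Named underlying type). In source terms, the two declaration forms those branches correspond to are roughly:

	package p

	// Defined type: setDefType stores float64 as the *Named underlying type.
	type Celsius float64

	// Alias (with materialized *Alias nodes enabled): setDefType records the
	// right-hand side in fromRHS rather than as an underlying type.
	type Temp = Celsius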
diff --git a/src/cmd/compile/internal/types2/under.go b/src/cmd/compile/internal/types2/under.go
index 887f7816ba..6b24399de4 100644
--- a/src/cmd/compile/internal/types2/under.go
+++ b/src/cmd/compile/internal/types2/under.go
@@ -9,7 +9,7 @@ package types2
// under must only be called when a type is known
// to be fully set up.
func under(t Type) Type {
- if t, _ := t.(*Named); t != nil {
+ if t := asNamed(t); t != nil {
return t.under()
}
return t.Underlying()
diff --git a/src/cmd/compile/internal/types2/unify.go b/src/cmd/compile/internal/types2/unify.go
index 4e9c771615..8218939b68 100644
--- a/src/cmd/compile/internal/types2/unify.go
+++ b/src/cmd/compile/internal/types2/unify.go
@@ -291,6 +291,9 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
u.depth--
}()
+ x = Unalias(x)
+ y = Unalias(y)
+
// nothing to do if x == y
if x == y {
return true
@@ -311,7 +314,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
// Ensure that if we have at least one
// - defined type, make sure one is in y
// - type parameter recorded with u, make sure one is in x
- if _, ok := x.(*Named); ok || u.asTypeParam(y) != nil {
+ if asNamed(x) != nil || u.asTypeParam(y) != nil {
if traceInference {
u.tracef("%s ≡ %s\t// swap", y, x)
}
@@ -335,7 +338,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
// we will fail at function instantiation or argument assignment time.
//
// If we have at least one defined type, there is one in y.
- if ny, _ := y.(*Named); mode&exact == 0 && ny != nil && isTypeLit(x) && !(u.enableInterfaceInference && IsInterface(x)) {
+ if ny := asNamed(y); mode&exact == 0 && ny != nil && isTypeLit(x) && !(u.enableInterfaceInference && IsInterface(x)) {
if traceInference {
u.tracef("%s ≡ under %s", x, ny)
}
@@ -372,8 +375,8 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
// We have a match, possibly through underlying types.
xi := asInterface(x)
yi := asInterface(y)
- _, xn := x.(*Named)
- _, yn := y.(*Named)
+ xn := asNamed(x) != nil
+ yn := asNamed(y) != nil
// If we have two interfaces, what to do depends on
// whether they are named and their method sets.
if xi != nil && yi != nil {
@@ -448,13 +451,6 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
// x != y if we get here
assert(x != y)
- // Type elements (array, slice, etc. elements) use emode for unification.
- // Element types must match exactly if the types are used in an assignment.
- emode := mode
- if mode&assign != 0 {
- emode |= exact
- }
-
// If u.EnableInterfaceInference is set and we don't require exact unification,
// if both types are interfaces, one interface must have a subset of the
// methods of the other and corresponding method signatures must unify.
@@ -570,6 +566,13 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
x, y = y, x
}
+ // Type elements (array, slice, etc. elements) use emode for unification.
+ // Element types must match exactly if the types are used in an assignment.
+ emode := mode
+ if mode&assign != 0 {
+ emode |= exact
+ }
+
switch x := x.(type) {
case *Basic:
// Basic types are singletons except for the rune and byte
@@ -728,7 +731,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
case *Named:
// Two named types unify if their type names originate in the same type declaration.
// If they are instantiated, their type argument lists must unify.
- if y, ok := y.(*Named); ok {
+ if y := asNamed(y); y != nil {
// Check type arguments before origins so they unify
// even if the origins don't match; for better error
// messages (see go.dev/issue/53692).
@@ -742,7 +745,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
return false
}
}
- return indenticalOrigin(x, y)
+ return identicalOrigin(x, y)
}
case *TypeParam:
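The Unalias calls added at the top of nify mean that unification, and hence type inference, looks through alias declarations. From the user's point of view, inference behaves as if the alias's right-hand side had been written directly:

	package main

	import "fmt"

	type Ints = []int // alias: unification sees straight through to []int

	func first[T any](s []T) T { return s[0] }

	func main() {
		var s Ints = []int{1, 2, 3}
		fmt.Println(first(s)) // T is inferred as int
	}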
diff --git a/src/cmd/compile/internal/types2/union.go b/src/cmd/compile/internal/types2/union.go
index 8f354a708f..1bf4353f26 100644
--- a/src/cmd/compile/internal/types2/union.go
+++ b/src/cmd/compile/internal/types2/union.go
@@ -66,7 +66,7 @@ func parseUnion(check *Checker, uexpr syntax.Expr) Type {
return term.typ // typ already recorded through check.typ in parseTilde
}
if len(terms) >= maxTermCount {
- if u != Typ[Invalid] {
+ if isValid(u) {
check.errorf(x, InvalidUnion, "cannot handle more than %d union terms (implementation limitation)", maxTermCount)
u = Typ[Invalid]
}
@@ -80,7 +80,7 @@ func parseUnion(check *Checker, uexpr syntax.Expr) Type {
}
}
- if u == Typ[Invalid] {
+ if !isValid(u) {
return u
}
@@ -89,7 +89,7 @@ func parseUnion(check *Checker, uexpr syntax.Expr) Type {
// Note: This is a quadratic algorithm, but unions tend to be short.
check.later(func() {
for i, t := range terms {
- if t.typ == Typ[Invalid] {
+ if !isValid(t.typ) {
continue
}
diff --git a/src/cmd/compile/internal/types2/universe.go b/src/cmd/compile/internal/types2/universe.go
index 79cd8cbf0a..c8be81b9eb 100644
--- a/src/cmd/compile/internal/types2/universe.go
+++ b/src/cmd/compile/internal/types2/universe.go
@@ -265,7 +265,7 @@ func def(obj Object) {
return // nothing to do
}
// fix Obj link for named types
- if typ, _ := obj.Type().(*Named); typ != nil {
+ if typ := asNamed(obj.Type()); typ != nil {
typ.obj = obj.(*TypeName)
}
// exported identifiers go into package unsafe
diff --git a/src/cmd/compile/internal/types2/util_test.go b/src/cmd/compile/internal/types2/util_test.go
index 4cbd002355..70058aad84 100644
--- a/src/cmd/compile/internal/types2/util_test.go
+++ b/src/cmd/compile/internal/types2/util_test.go
@@ -7,6 +7,11 @@
package types2
-import "cmd/compile/internal/syntax"
+import (
+ "cmd/compile/internal/syntax"
+)
func CmpPos(p, q syntax.Pos) int { return cmpPos(p, q) }
+
+func ScopeComment(s *Scope) string { return s.comment }
+func ObjectScopePos(obj Object) syntax.Pos { return obj.scopePos() }
diff --git a/src/cmd/compile/internal/types2/validtype.go b/src/cmd/compile/internal/types2/validtype.go
index dbe91dc08f..a880a3d933 100644
--- a/src/cmd/compile/internal/types2/validtype.go
+++ b/src/cmd/compile/internal/types2/validtype.go
@@ -23,7 +23,7 @@ func (check *Checker) validType(typ *Named) {
// (say S->F->S) we have an invalid recursive type. The path list is the full
// path of named types in a cycle, it is only needed for error reporting.
func (check *Checker) validType0(typ Type, nest, path []*Named) bool {
- switch t := typ.(type) {
+ switch t := Unalias(typ).(type) {
case nil:
// We should never see a nil type but be conservative and panic
// only in debug mode.
@@ -68,7 +68,7 @@ func (check *Checker) validType0(typ Type, nest, path []*Named) bool {
// Don't report a 2nd error if we already know the type is invalid
// (e.g., if a cycle was detected earlier, via under).
// Note: ensure that t.orig is fully resolved by calling Underlying().
- if t.Underlying() == Typ[Invalid] {
+ if !isValid(t.Underlying()) {
return false
}
diff --git a/src/cmd/compile/internal/types2/version.go b/src/cmd/compile/internal/types2/version.go
index e525f16470..5aa3c803b5 100644
--- a/src/cmd/compile/internal/types2/version.go
+++ b/src/cmd/compile/internal/types2/version.go
@@ -7,90 +7,47 @@ package types2
import (
"cmd/compile/internal/syntax"
"fmt"
+ "go/version"
+ "internal/goversion"
"strings"
)
-// A version represents a released Go version.
-type version struct {
- major, minor int
-}
-
-func (v version) String() string {
- return fmt.Sprintf("go%d.%d", v.major, v.minor)
-}
+// A goVersion is a Go language version string of the form "go1.%d"
+// where d is the minor version number. goVersion strings don't
+// contain release numbers ("go1.20.1" is not a valid goVersion).
+type goVersion string
-func (v version) equal(u version) bool {
- return v.major == u.major && v.minor == u.minor
+// asGoVersion returns v as a goVersion (e.g., "go1.20.1" becomes "go1.20").
+// If v is not a valid Go version, the result is the empty string.
+func asGoVersion(v string) goVersion {
+ return goVersion(version.Lang(v))
}
-func (v version) before(u version) bool {
- return v.major < u.major || v.major == u.major && v.minor < u.minor
+// isValid reports whether v is a valid Go version.
+func (v goVersion) isValid() bool {
+ return v != ""
}
-func (v version) after(u version) bool {
- return v.major > u.major || v.major == u.major && v.minor > u.minor
+// cmp returns -1, 0, or +1 depending on whether x < y, x == y, or x > y,
+// interpreted as Go versions.
+func (x goVersion) cmp(y goVersion) int {
+ return version.Compare(string(x), string(y))
}
-// Go versions that introduced language changes.
var (
- go0_0 = version{0, 0} // no version specified
- go1_9 = version{1, 9}
- go1_13 = version{1, 13}
- go1_14 = version{1, 14}
- go1_17 = version{1, 17}
- go1_18 = version{1, 18}
- go1_20 = version{1, 20}
- go1_21 = version{1, 21}
-)
+ // Go versions that introduced language changes
+ go1_9 = asGoVersion("go1.9")
+ go1_13 = asGoVersion("go1.13")
+ go1_14 = asGoVersion("go1.14")
+ go1_17 = asGoVersion("go1.17")
+ go1_18 = asGoVersion("go1.18")
+ go1_20 = asGoVersion("go1.20")
+ go1_21 = asGoVersion("go1.21")
+ go1_22 = asGoVersion("go1.22")
-// parseGoVersion parses a Go version string (such as "go1.12")
-// and returns the version, or an error. If s is the empty
-// string, the version is 0.0.
-func parseGoVersion(s string) (v version, err error) {
- bad := func() (version, error) {
- return version{}, fmt.Errorf("invalid Go version syntax %q", s)
- }
- if s == "" {
- return
- }
- if !strings.HasPrefix(s, "go") {
- return bad()
- }
- s = s[len("go"):]
- i := 0
- for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ {
- if i >= 10 || i == 0 && s[i] == '0' {
- return bad()
- }
- v.major = 10*v.major + int(s[i]) - '0'
- }
- if i > 0 && i == len(s) {
- return
- }
- if i == 0 || s[i] != '.' {
- return bad()
- }
- s = s[i+1:]
- if s == "0" {
- // We really should not accept "go1.0",
- // but we didn't reject it from the start
- // and there are now programs that use it.
- // So accept it.
- return
- }
- i = 0
- for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ {
- if i >= 10 || i == 0 && s[i] == '0' {
- return bad()
- }
- v.minor = 10*v.minor + int(s[i]) - '0'
- }
- // Accept any suffix after the minor number.
- // We are only looking for the language version (major.minor)
- // but want to accept any valid Go version, like go1.21.0
- // and go1.21rc2.
- return
-}
+ // current (deployed) Go version
+ go_current = asGoVersion(fmt.Sprintf("go1.%d", goversion.Version))
+)
// langCompat reports an error if the representation of a numeric
// literal is not compatible with the current language version.
@@ -121,30 +78,30 @@ func (check *Checker) langCompat(lit *syntax.BasicLit) {
}
}
-// allowVersion reports whether the given package
-// is allowed to use version major.minor.
-func (check *Checker) allowVersion(pkg *Package, at poser, v version) bool {
+// allowVersion reports whether the given package is allowed to use version v.
+func (check *Checker) allowVersion(pkg *Package, at poser, v goVersion) bool {
// We assume that imported packages have all been checked,
// so we only have to check for the local package.
if pkg != check.pkg {
return true
}
- // If the source file declares its Go version, use that to decide.
- if check.posVers != nil {
- if src, ok := check.posVers[base(at.Pos())]; ok && src.major >= 1 {
- return !src.before(v)
- }
- }
-
- // Otherwise fall back to the version in the checker.
- return check.version.equal(go0_0) || !check.version.before(v)
+ // If no explicit file version is specified,
+ // fileVersion corresponds to the module version.
+ var fileVersion goVersion
+ if pos := at.Pos(); pos.IsKnown() {
+ // We need version.Lang below because file versions
+ // can be (unaltered) Config.GoVersion strings that
+ // may contain dot-release information.
+ fileVersion = asGoVersion(check.versions[base(pos)])
+ }
+ return !fileVersion.isValid() || fileVersion.cmp(v) >= 0
}
// verifyVersionf is like allowVersion but also accepts a format string and arguments
// which are used to report a version error if allowVersion returns false. It uses the
// current package.
-func (check *Checker) verifyVersionf(at poser, v version, format string, args ...interface{}) bool {
+func (check *Checker) verifyVersionf(at poser, v goVersion, format string, args ...interface{}) bool {
if !check.allowVersion(check.pkg, at, v) {
check.versionErrorf(at, v, format, args...)
return false
@@ -154,7 +111,9 @@ func (check *Checker) verifyVersionf(at poser, v version, format string, args ..
// base finds the underlying PosBase of the source file containing pos,
// skipping over intermediate PosBase layers created by //line directives.
+// The positions must be known.
func base(pos syntax.Pos) *syntax.PosBase {
+ assert(pos.IsKnown())
b := pos.Base()
for {
bb := b.Pos().Base()
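The rewrite above delegates version parsing and comparison to the standard go/version package (new in Go 1.22); a quick illustration of the two calls it relies on, Lang and Compare:

	package main

	import (
		"fmt"
		"go/version"
	)

	func main() {
		// version.Lang strips release and pre-release suffixes, as asGoVersion does.
		fmt.Println(version.Lang("go1.21.3"))  // go1.21
		fmt.Println(version.Lang("go1.21rc2")) // go1.21
		fmt.Println(version.Lang("1.21"))      // "" (not a valid Go version string)

		// version.Compare orders language versions, as goVersion.cmp does.
		fmt.Println(version.Compare("go1.21", "go1.22")) // -1
		fmt.Println(version.Compare("go1.22", "go1.22")) // 0
	}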
diff --git a/src/cmd/compile/internal/types2/version_test.go b/src/cmd/compile/internal/types2/version_test.go
deleted file mode 100644
index 651758e1b0..0000000000
--- a/src/cmd/compile/internal/types2/version_test.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package types2
-
-import "testing"
-
-var parseGoVersionTests = []struct {
- in string
- out version
-}{
- {"go1.21", version{1, 21}},
- {"go1.21.0", version{1, 21}},
- {"go1.21rc2", version{1, 21}},
-}
-
-func TestParseGoVersion(t *testing.T) {
- for _, tt := range parseGoVersionTests {
- if out, err := parseGoVersion(tt.in); out != tt.out || err != nil {
- t.Errorf("parseGoVersion(%q) = %v, %v, want %v, nil", tt.in, out, err, tt.out)
- }
- }
-}
diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go
index 4207b487a7..fc3b858a80 100644
--- a/src/cmd/compile/internal/walk/assign.go
+++ b/src/cmd/compile/internal/walk/assign.go
@@ -6,6 +6,7 @@ package walk
import (
"go/constant"
+ "internal/abi"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
@@ -103,7 +104,7 @@ func walkAssign(init *ir.Nodes, n ir.Node) ir.Node {
// Left in place for back end.
// Do not add a new write barrier.
// Set up address of type for back end.
- r.X = reflectdata.AppendElemRType(base.Pos, r)
+ r.Fun = reflectdata.AppendElemRType(base.Pos, r)
return as
}
// Otherwise, lowered for race detector.
@@ -168,13 +169,13 @@ func walkAssignMapRead(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
a := n.Lhs[0]
var call *ir.CallExpr
- if w := t.Elem().Size(); w <= zeroValSize {
+ if w := t.Elem().Size(); w <= abi.ZeroValSize {
fn := mapfn(mapaccess2[fast], t, false)
- call = mkcall1(fn, fn.Type().Results(), init, reflectdata.IndexMapRType(base.Pos, r), r.X, key)
+ call = mkcall1(fn, fn.Type().ResultsTuple(), init, reflectdata.IndexMapRType(base.Pos, r), r.X, key)
} else {
fn := mapfn("mapaccess2_fat", t, true)
z := reflectdata.ZeroAddr(w)
- call = mkcall1(fn, fn.Type().Results(), init, reflectdata.IndexMapRType(base.Pos, r), r.X, key, z)
+ call = mkcall1(fn, fn.Type().ResultsTuple(), init, reflectdata.IndexMapRType(base.Pos, r), r.X, key, z)
}
// mapaccess2* returns a typed bool, but due to spec changes,
@@ -191,7 +192,7 @@ func walkAssignMapRead(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
return walkExpr(typecheck.Stmt(n), init)
}
- var_ := typecheck.Temp(types.NewPtr(t.Elem()))
+ var_ := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewPtr(t.Elem()))
var_.SetTypecheck(1)
var_.MarkNonNil() // mapaccess always returns a non-nil pointer
@@ -230,7 +231,7 @@ func walkReturn(n *ir.ReturnStmt) ir.Node {
return n
}
- results := fn.Type().Results().FieldSlice()
+ results := fn.Type().Results()
dsts := make([]ir.Node, len(results))
for i, v := range results {
// TODO(mdempsky): typecheck should have already checked the result variables.
@@ -392,7 +393,7 @@ func ascompatee(op ir.Op, nl, nr []ir.Node) []ir.Node {
continue
}
- if sym := types.OrigSym(name.Sym()); sym == nil || sym.IsBlank() {
+ if ir.IsBlank(name) {
// We can ignore assignments to blank or anonymous result parameters.
// These can't appear in expressions anyway.
continue
@@ -432,7 +433,6 @@ func readsMemory(n ir.Node) bool {
ir.OBITNOT,
ir.OCONV,
ir.OCONVIFACE,
- ir.OCONVIDATA,
ir.OCONVNOP,
ir.ODIV,
ir.ODOT,
@@ -484,7 +484,7 @@ func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
var nodes ir.Nodes
// var s []T
- s := typecheck.Temp(l1.Type())
+ s := typecheck.TempAt(base.Pos, ir.CurFunc, l1.Type())
nodes.Append(ir.NewAssignStmt(base.Pos, s, l1)) // s = l1
elemtype := s.Type().Elem()
@@ -498,7 +498,7 @@ func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
num := ir.NewUnaryExpr(base.Pos, ir.OLEN, l2)
// newLen := oldLen + num
- newLen := typecheck.Temp(types.Types[types.TINT])
+ newLen := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
nodes.Append(ir.NewAssignStmt(base.Pos, newLen, ir.NewBinaryExpr(base.Pos, ir.OADD, oldLen, num)))
// if uint(newLen) <= uint(oldCap)
@@ -513,12 +513,8 @@ func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
slice.SetBounded(true)
nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, slice)}
- // func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) []T
- fn := typecheck.LookupRuntime("growslice")
- fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
-
// else { s = growslice(oldPtr, newLen, oldCap, num, T) }
- call := mkcall1(fn, s.Type(), nif.PtrInit(), oldPtr, newLen, oldCap, num, reflectdata.TypePtr(elemtype))
+ call := walkGrowslice(s, nif.PtrInit(), oldPtr, newLen, oldCap, num)
nif.Else = []ir.Node{ir.NewAssignStmt(base.Pos, s, call)}
nodes.Append(nif)
@@ -541,8 +537,7 @@ func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
ir.CurFunc.SetWBPos(n.Pos())
// instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
- fn := typecheck.LookupRuntime("typedslicecopy")
- fn = typecheck.SubstArgTypes(fn, l1.Type().Elem(), l2.Type().Elem())
+ fn := typecheck.LookupRuntime("typedslicecopy", l1.Type().Elem(), l2.Type().Elem())
ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes))
ptr2, len2 := backingArrayPtrLen(l2)
ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, reflectdata.AppendElemRType(base.Pos, n), ptr1, len1, ptr2, len2)
@@ -557,8 +552,7 @@ func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes))
ptr2, len2 := backingArrayPtrLen(l2)
- fn := typecheck.LookupRuntime("slicecopy")
- fn = typecheck.SubstArgTypes(fn, ptr1.Type().Elem(), ptr2.Type().Elem())
+ fn := typecheck.LookupRuntime("slicecopy", ptr1.Type().Elem(), ptr2.Type().Elem())
ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, ir.NewInt(base.Pos, elemtype.Size()))
} else {
// memmove(&s[idx], &l2[0], len(l2)*sizeof(T))
@@ -572,8 +566,7 @@ func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(base.Pos, elemtype.Size()))
// instantiate func memmove(to *any, frm *any, length uintptr)
- fn := typecheck.LookupRuntime("memmove")
- fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
+ fn := typecheck.LookupRuntime("memmove", elemtype, elemtype)
ncopy = mkcall1(fn, nil, &nodes, addr, sptr, nwid)
}
ln := append(nodes, ncopy)
@@ -675,13 +668,13 @@ func extendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
nodes = append(nodes, nifneg)
// s := l1
- s := typecheck.Temp(l1.Type())
+ s := typecheck.TempAt(base.Pos, ir.CurFunc, l1.Type())
nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, l1))
elemtype := s.Type().Elem()
// n := s.len + l2
- nn := typecheck.Temp(types.Types[types.TINT])
+ nn := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
nodes = append(nodes, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), l2)))
// if uint(n) <= uint(s.cap)
@@ -695,18 +688,13 @@ func extendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
nt.SetBounded(true)
nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, nt)}
- // instantiate growslice(oldPtr *any, newLen, oldCap, num int, typ *type) []any
- fn := typecheck.LookupRuntime("growslice")
- fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
-
// else { s = growslice(s.ptr, n, s.cap, l2, T) }
nif.Else = []ir.Node{
- ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(),
+ ir.NewAssignStmt(base.Pos, s, walkGrowslice(s, nif.PtrInit(),
ir.NewUnaryExpr(base.Pos, ir.OSPTR, s),
nn,
ir.NewUnaryExpr(base.Pos, ir.OCAP, s),
- l2,
- reflectdata.TypePtr(elemtype))),
+ l2)),
}
nodes = append(nodes, nif)
diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go
index 0bb5018250..37143baa28 100644
--- a/src/cmd/compile/internal/walk/builtin.go
+++ b/src/cmd/compile/internal/walk/builtin.go
@@ -79,14 +79,14 @@ func walkAppend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
var l []ir.Node
// s = slice to append to
- s := typecheck.Temp(nsrc.Type())
+ s := typecheck.TempAt(base.Pos, ir.CurFunc, nsrc.Type())
l = append(l, ir.NewAssignStmt(base.Pos, s, nsrc))
// num = number of things to append
num := ir.NewInt(base.Pos, int64(argc))
// newLen := s.len + num
- newLen := typecheck.Temp(types.Types[types.TINT])
+ newLen := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
l = append(l, ir.NewAssignStmt(base.Pos, newLen, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), num)))
// if uint(newLen) <= uint(s.cap)
@@ -101,17 +101,13 @@ func walkAppend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
ir.NewAssignStmt(base.Pos, s, slice),
}
- fn := typecheck.LookupRuntime("growslice") // growslice(ptr *T, newLen, oldCap, num int, <type>) (ret []T)
- fn = typecheck.SubstArgTypes(fn, s.Type().Elem(), s.Type().Elem())
-
// else { s = growslice(s.ptr, n, s.cap, a, T) }
nif.Else = []ir.Node{
- ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(),
+ ir.NewAssignStmt(base.Pos, s, walkGrowslice(s, nif.PtrInit(),
ir.NewUnaryExpr(base.Pos, ir.OSPTR, s),
newLen,
ir.NewUnaryExpr(base.Pos, ir.OCAP, s),
- num,
- reflectdata.TypePtr(s.Type().Elem()))),
+ num)),
}
l = append(l, nif)
@@ -130,6 +126,14 @@ func walkAppend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
return s
}
+// growslice(ptr *T, newLen, oldCap, num int, <type>) (ret []T)
+func walkGrowslice(slice *ir.Name, init *ir.Nodes, oldPtr, newLen, oldCap, num ir.Node) *ir.CallExpr {
+ elemtype := slice.Type().Elem()
+ fn := typecheck.LookupRuntime("growslice", elemtype, elemtype)
+ elemtypeptr := reflectdata.TypePtrAt(base.Pos, elemtype)
+ return mkcall1(fn, slice.Type(), init, oldPtr, newLen, oldCap, num, elemtypeptr)
+}
+
// walkClear walks an OCLEAR node.
func walkClear(n *ir.UnaryExpr) ir.Node {
typ := n.X.Type()
@@ -149,8 +153,7 @@ func walkClear(n *ir.UnaryExpr) ir.Node {
// walkClose walks an OCLOSE node.
func walkClose(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
// cannot use chanfn - closechan takes any, not chan any
- fn := typecheck.LookupRuntime("closechan")
- fn = typecheck.SubstArgTypes(fn, n.X.Type())
+ fn := typecheck.LookupRuntime("closechan", n.X.Type())
return mkcall1(fn, nil, init, n.X)
}
@@ -185,16 +188,15 @@ func walkCopy(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node {
n.Y = cheapExpr(n.Y, init)
ptrR, lenR := backingArrayPtrLen(n.Y)
- fn := typecheck.LookupRuntime("slicecopy")
- fn = typecheck.SubstArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem())
+ fn := typecheck.LookupRuntime("slicecopy", ptrL.Type().Elem(), ptrR.Type().Elem())
return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, ir.NewInt(base.Pos, n.X.Type().Elem().Size()))
}
n.X = walkExpr(n.X, init)
n.Y = walkExpr(n.Y, init)
- nl := typecheck.Temp(n.X.Type())
- nr := typecheck.Temp(n.Y.Type())
+ nl := typecheck.TempAt(base.Pos, ir.CurFunc, n.X.Type())
+ nr := typecheck.TempAt(base.Pos, ir.CurFunc, n.Y.Type())
var l []ir.Node
l = append(l, ir.NewAssignStmt(base.Pos, nl, n.X))
l = append(l, ir.NewAssignStmt(base.Pos, nr, n.Y))
@@ -202,7 +204,7 @@ func walkCopy(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node {
nfrm := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nr)
nto := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nl)
- nlen := typecheck.Temp(types.Types[types.TINT])
+ nlen := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
// n = len(to)
l = append(l, ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nl)))
@@ -219,9 +221,8 @@ func walkCopy(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node {
ne.Likely = true
l = append(l, ne)
- fn := typecheck.LookupRuntime("memmove")
- fn = typecheck.SubstArgTypes(fn, nl.Type().Elem(), nl.Type().Elem())
- nwid := ir.Node(typecheck.Temp(types.Types[types.TUINTPTR]))
+ fn := typecheck.LookupRuntime("memmove", nl.Type().Elem(), nl.Type().Elem())
+ nwid := ir.Node(typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR]))
setwid := ir.NewAssignStmt(base.Pos, nwid, typecheck.Conv(nlen, types.Types[types.TUINTPTR]))
ne.Body.Append(setwid)
nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(base.Pos, nl.Type().Elem().Size()))
@@ -273,7 +274,7 @@ func walkLenCap(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
}
if t.IsArray() {
safeExpr(n.X, init)
- con := typecheck.OrigInt(n, t.NumElem())
+ con := ir.NewConstExpr(constant.MakeInt64(t.NumElem()), n)
con.SetTypecheck(1)
return con
}
@@ -302,7 +303,7 @@ func walkMakeChan(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
// walkMakeMap walks an OMAKEMAP node.
func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
t := n.Type()
- hmapType := reflectdata.MapType(t)
+ hmapType := reflectdata.MapType()
hint := n.Len
// var h *hmap
@@ -340,7 +341,7 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
// h.buckets = b
bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
- na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), b)
+ na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), typecheck.ConvNop(b, types.Types[types.TUNSAFEPTR]))
nif.Body.Append(na)
appendWalkStmt(init, nif)
}
@@ -357,16 +358,15 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
if n.Esc() == ir.EscNone {
// Only need to initialize h.hash0 since
// hmap h has been allocated on the stack already.
- // h.hash0 = fastrand()
- rand := mkcall("fastrand", types.Types[types.TUINT32], init)
+ // h.hash0 = rand32()
+ rand := mkcall("rand32", types.Types[types.TUINT32], init)
hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand))
return typecheck.ConvNop(h, t)
}
// Call runtime.makehmap to allocate an
// hmap on the heap and initialize hmap's hash0 field.
- fn := typecheck.LookupRuntime("makemap_small")
- fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
+ fn := typecheck.LookupRuntime("makemap_small", t.Key(), t.Elem())
return mkcall1(fn, n.Type(), init)
}
@@ -392,8 +392,7 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
argtype = types.Types[types.TINT]
}
- fn := typecheck.LookupRuntime(fnname)
- fn = typecheck.SubstArgTypes(fn, hmapType, t.Key(), t.Elem())
+ fn := typecheck.LookupRuntime(fnname, hmapType, t.Key(), t.Elem())
return mkcall1(fn, n.Type(), init, reflectdata.MakeMapRType(base.Pos, n), typecheck.Conv(hint, argtype), h)
}
@@ -434,7 +433,7 @@ func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
init.Append(typecheck.Stmt(nif))
t = types.NewArray(t.Elem(), i) // [r]T
- var_ := typecheck.Temp(t)
+ var_ := typecheck.TempAt(base.Pos, ir.CurFunc, t)
appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil)) // zero temp
r := ir.NewSliceExpr(base.Pos, ir.OSLICE, var_, nil, l, nil) // arr[:l]
// The conv is necessary in case n.Type is named.
@@ -497,14 +496,13 @@ func walkMakeSliceCopy(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
ptr.MarkNonNil()
sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, length, length)
- s := typecheck.Temp(t)
+ s := typecheck.TempAt(base.Pos, ir.CurFunc, t)
r := typecheck.Stmt(ir.NewAssignStmt(base.Pos, s, sh))
r = walkExpr(r, init)
init.Append(r)
// instantiate memmove(to *any, frm *any, size uintptr)
- fn = typecheck.LookupRuntime("memmove")
- fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem())
+ fn = typecheck.LookupRuntime("memmove", t.Elem(), t.Elem())
ncopy := mkcall1(fn, nil, init, ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), copyptr, size)
init.Append(walkExpr(typecheck.Stmt(ncopy), init))
@@ -548,7 +546,7 @@ func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
walkExprListCheap(nn.Args, init)
// For println, add " " between elements and "\n" at the end.
- if nn.Op() == ir.OPRINTN {
+ if nn.Op() == ir.OPRINTLN {
s := nn.Args
t := make([]ir.Node, 0, len(s)*2)
for i, n := range s {
@@ -609,11 +607,10 @@ func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
switch n.Type().Kind() {
case types.TINTER:
if n.Type().IsEmptyInterface() {
- on = typecheck.LookupRuntime("printeface")
+ on = typecheck.LookupRuntime("printeface", n.Type())
} else {
- on = typecheck.LookupRuntime("printiface")
+ on = typecheck.LookupRuntime("printiface", n.Type())
}
- on = typecheck.SubstArgTypes(on, n.Type()) // any-1
case types.TPTR:
if n.Type().Elem().NotInHeap() {
on = typecheck.LookupRuntime("printuintptr")
@@ -625,13 +622,11 @@ func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
}
fallthrough
case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR:
- on = typecheck.LookupRuntime("printpointer")
- on = typecheck.SubstArgTypes(on, n.Type()) // any-1
+ on = typecheck.LookupRuntime("printpointer", n.Type())
case types.TSLICE:
- on = typecheck.LookupRuntime("printslice")
- on = typecheck.SubstArgTypes(on, n.Type()) // any-1
+ on = typecheck.LookupRuntime("printslice", n.Type())
case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
- if types.IsRuntimePkg(n.Type().Sym().Pkg) && n.Type().Sym().Name == "hex" {
+ if types.RuntimeSymName(n.Type().Sym()) == "hex" {
on = typecheck.LookupRuntime("printhex")
} else {
on = typecheck.LookupRuntime("printuint")
@@ -663,7 +658,7 @@ func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
}
r := ir.NewCallExpr(base.Pos, ir.OCALL, on, nil)
- if params := on.Type().Params().FieldSlice(); len(params) > 0 {
+ if params := on.Type().Params(); len(params) > 0 {
t := params[0].Type
n = typecheck.Conv(n, t)
r.Args.Append(n)
@@ -753,11 +748,23 @@ func walkUnsafeSlice(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
return walkExpr(typecheck.Expr(h), init)
}
- // mem, overflow := runtime.mulUintptr(et.size, len)
- mem := typecheck.Temp(types.Types[types.TUINTPTR])
- overflow := typecheck.Temp(types.Types[types.TBOOL])
- fn := typecheck.LookupRuntime("mulUintptr")
- call := mkcall1(fn, fn.Type().Results(), init, ir.NewInt(base.Pos, sliceType.Elem().Size()), typecheck.Conv(typecheck.Conv(len, lenType), types.Types[types.TUINTPTR]))
+ // mem, overflow := math.mulUintptr(et.size, len)
+ mem := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR])
+ overflow := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL])
+
+ decl := types.NewSignature(nil,
+ []*types.Field{
+ types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+ types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+ },
+ []*types.Field{
+ types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+ types.NewField(base.Pos, nil, types.Types[types.TBOOL]),
+ })
+
+ fn := ir.NewFunc(n.Pos(), n.Pos(), math_MulUintptr, decl)
+
+ call := mkcall1(fn.Nname, fn.Type().ResultsTuple(), init, ir.NewInt(base.Pos, sliceType.Elem().Size()), typecheck.Conv(typecheck.Conv(len, lenType), types.Types[types.TUINTPTR]))
appendWalkStmt(init, ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{mem, overflow}, []ir.Node{call}))
// if overflow || mem > -uintptr(ptr) {
@@ -783,6 +790,8 @@ func walkUnsafeSlice(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
return walkExpr(typecheck.Expr(h), init)
}
+var math_MulUintptr = &types.Sym{Pkg: types.NewPkg("runtime/internal/math", "math"), Name: "MulUintptr"}
+
func walkUnsafeString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
ptr := safeExpr(n.X, init)
len := safeExpr(n.Y, init)
@@ -863,9 +872,7 @@ func badtype(op ir.Op, tl, tr *types.Type) {
}
func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node {
- fn := typecheck.LookupRuntime(name)
- fn = typecheck.SubstArgTypes(fn, l, r)
- return fn
+ return typecheck.LookupRuntime(name, l, r)
}
// isRuneCount reports whether n is of the form len([]rune(string)).
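walkAppend and walkGrowslice above emit the uint(newLen) <= uint(cap) fast path and fall back to a runtime.growslice call otherwise. That lowering is only observable indirectly from user code, e.g. by watching capacity jumps as append outgrows the backing array (the exact growth factors are runtime internals):

	package main

	import "fmt"

	func main() {
		s := make([]int, 0)
		prevCap := cap(s)
		for i := 0; i < 2000; i++ {
			s = append(s, i) // growslice runs only when cap is exceeded
			if cap(s) != prevCap {
				fmt.Printf("len=%d: cap %d -> %d\n", len(s), prevCap, cap(s))
				prevCap = cap(s)
			}
		}
	}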
diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go
index 1fa3ac0f18..38c6c03dc4 100644
--- a/src/cmd/compile/internal/walk/closure.go
+++ b/src/cmd/compile/internal/walk/closure.go
@@ -30,7 +30,7 @@ import (
// (*&byref)++
// }(byval, &byref, 42)
func directClosureCall(n *ir.CallExpr) {
- clo := n.X.(*ir.ClosureExpr)
+ clo := n.Fun.(*ir.ClosureExpr)
clofn := clo.Func
if ir.IsTrivialClosure(clo) {
@@ -47,9 +47,8 @@ func directClosureCall(n *ir.CallExpr) {
// and v remains PAUTOHEAP with &v heapaddr
// (accesses will implicitly deref &v).
- addr := ir.NewNameAt(clofn.Pos(), typecheck.Lookup("&"+v.Sym().Name))
+ addr := ir.NewNameAt(clofn.Pos(), typecheck.Lookup("&"+v.Sym().Name), types.NewPtr(v.Type()))
addr.Curfn = clofn
- addr.SetType(types.NewPtr(v.Type()))
v.Heapaddr = addr
v = addr
}
@@ -68,12 +67,12 @@ func directClosureCall(n *ir.CallExpr) {
// Create new function type with parameters prepended, and
// then update type and declarations.
- typ = types.NewSignature(nil, append(params, typ.Params().FieldSlice()...), typ.Results().FieldSlice())
+ typ = types.NewSignature(nil, append(params, typ.Params()...), typ.Results())
f.SetType(typ)
clofn.Dcl = append(decls, clofn.Dcl...)
// Rewrite call.
- n.X = f
+ n.Fun = f
n.Args.Prepend(closureArgs(clo)...)
// Update the call expression's type. We need to do this
@@ -81,9 +80,9 @@ func directClosureCall(n *ir.CallExpr) {
// node, but we only rewrote the ONAME node's type. Logically,
// they're the same, but the stack offsets probably changed.
if typ.NumResults() == 1 {
- n.SetType(typ.Results().Field(0).Type)
+ n.SetType(typ.Result(0).Type)
} else {
- n.SetType(typ.Results())
+ n.SetType(typ.ResultsTuple())
}
// Add to Closures for enqueueFunc. It's no longer a proper
@@ -145,7 +144,7 @@ func walkClosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
return walkExpr(cfn, init)
}
-// closureArgs returns a slice of expressions that an be used to
+// closureArgs returns a slice of expressions that can be used to
// initialize the given closure's free variables. These correspond
// one-to-one with the variables in clo.Func.ClosureVars, and will be
// either an ONAME node (if the variable is captured by value) or an
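directClosureCall above applies when a closure literal is called at the point where it is written, so captured variables can be passed as extra arguments instead of materializing a closure value. The pattern it targets looks like this:

	package main

	import "fmt"

	func main() {
		base := 40
		// A closure called immediately where it is defined: the compiler can
		// rewrite this into a plain function call with base passed explicitly.
		sum := func(x, y int) int {
			return base + x + y
		}(1, 1)
		fmt.Println(sum) // 42
	}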
diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go
index 6330530aa4..adc44ca49d 100644
--- a/src/cmd/compile/internal/walk/complit.go
+++ b/src/cmd/compile/internal/walk/complit.go
@@ -7,7 +7,7 @@ package walk
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
- "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/ssa"
"cmd/compile/internal/staticdata"
"cmd/compile/internal/staticinit"
"cmd/compile/internal/typecheck"
@@ -18,7 +18,7 @@ import (
// walkCompLit walks a composite literal node:
// OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT (all CompLitExpr), or OPTRLIT (AddrExpr).
func walkCompLit(n ir.Node, init *ir.Nodes) ir.Node {
- if isStaticCompositeLiteral(n) && !ssagen.TypeOK(n.Type()) {
+ if isStaticCompositeLiteral(n) && !ssa.CanSSA(n.Type()) {
n := n.(*ir.CompLitExpr) // not OPTRLIT
// n can be directly represented in the read-only data section.
// Make direct reference to the static data. See issue 12841.
@@ -26,7 +26,7 @@ func walkCompLit(n ir.Node, init *ir.Nodes) ir.Node {
fixedlit(inInitFunction, initKindStatic, n, vstat, init)
return typecheck.Expr(vstat)
}
- var_ := typecheck.Temp(n.Type())
+ var_ := typecheck.TempAt(base.Pos, ir.CurFunc, n.Type())
anylit(n, var_, init)
return var_
}
@@ -341,7 +341,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes)
}
// make new auto *array (3 declare)
- vauto := typecheck.Temp(types.NewPtr(t))
+ vauto := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewPtr(t))
// set auto to point at new temp or heap (3 assign)
var a ir.Node
@@ -352,7 +352,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes)
}
a = initStackTemp(init, x, vstat)
} else if n.Esc() == ir.EscNone {
- a = initStackTemp(init, typecheck.Temp(t), vstat)
+ a = initStackTemp(init, typecheck.TempAt(base.Pos, ir.CurFunc, t), vstat)
} else {
a = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(t))
}
@@ -464,7 +464,7 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) {
// for i = 0; i < len(vstatk); i++ {
// map[vstatk[i]] = vstate[i]
// }
- i := typecheck.Temp(types.Types[types.TINT])
+ i := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
rhs := ir.NewIndexExpr(base.Pos, vstate, i)
rhs.SetBounded(true)
@@ -497,8 +497,8 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) {
// Use temporaries so that mapassign1 can have addressable key, elem.
// TODO(josharian): avoid map key temporaries for mapfast_* assignments with literal keys.
// TODO(khr): assign these temps in order phase so we can reuse them across multiple maplits?
- tmpkey := typecheck.Temp(m.Type().Key())
- tmpelem := typecheck.Temp(m.Type().Elem())
+ tmpkey := typecheck.TempAt(base.Pos, ir.CurFunc, m.Type().Key())
+ tmpelem := typecheck.TempAt(base.Pos, ir.CurFunc, m.Type().Elem())
for _, r := range entries {
r := r.(*ir.KeyExpr)
diff --git a/src/cmd/compile/internal/walk/convert.go b/src/cmd/compile/internal/walk/convert.go
index bfa0c5480f..280b3b65e8 100644
--- a/src/cmd/compile/internal/walk/convert.go
+++ b/src/cmd/compile/internal/walk/convert.go
@@ -57,7 +57,7 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
if !fromType.IsInterface() {
typeWord := reflectdata.ConvIfaceTypeWord(base.Pos, n)
- l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeWord, dataWord(n, init))
+ l := ir.NewBinaryExpr(base.Pos, ir.OMAKEFACE, typeWord, dataWord(n, init))
l.SetType(toType)
l.SetTypecheck(n.Typecheck())
return l
@@ -67,18 +67,9 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
}
// Evaluate the input interface.
- c := typecheck.Temp(fromType)
+ c := typecheck.TempAt(base.Pos, ir.CurFunc, fromType)
init.Append(ir.NewAssignStmt(base.Pos, c, n.X))
- // Grab its parts.
- itab := ir.NewUnaryExpr(base.Pos, ir.OITAB, c)
- itab.SetType(types.Types[types.TUINTPTR].PtrTo())
- itab.SetTypecheck(1)
- data := ir.NewUnaryExpr(n.Pos(), ir.OIDATA, c)
- data.SetType(types.Types[types.TUINT8].PtrTo()) // Type is generic pointer - we're just passing it through.
- data.SetTypecheck(1)
-
- var typeWord ir.Node
if toType.IsEmptyInterface() {
// Implement interface to empty interface conversion:
//
@@ -87,27 +78,50 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
// if res != nil {
// res = res.type
// }
- typeWord = typecheck.Temp(types.NewPtr(types.Types[types.TUINT8]))
+
+ // Grab its parts.
+ itab := ir.NewUnaryExpr(base.Pos, ir.OITAB, c)
+ itab.SetType(types.Types[types.TUINTPTR].PtrTo())
+ itab.SetTypecheck(1)
+ data := ir.NewUnaryExpr(n.Pos(), ir.OIDATA, c)
+ data.SetType(types.Types[types.TUINT8].PtrTo()) // Type is generic pointer - we're just passing it through.
+ data.SetTypecheck(1)
+
+ typeWord := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewPtr(types.Types[types.TUINT8]))
init.Append(ir.NewAssignStmt(base.Pos, typeWord, typecheck.Conv(typecheck.Conv(itab, types.Types[types.TUNSAFEPTR]), typeWord.Type())))
nif := ir.NewIfStmt(base.Pos, typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.ONE, typeWord, typecheck.NodNil())), nil, nil)
nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, typeWord, itabType(typeWord))}
init.Append(nif)
+
+ // Build the result.
+ // e = iface{typeWord, data}
+ e := ir.NewBinaryExpr(base.Pos, ir.OMAKEFACE, typeWord, data)
+ e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE.
+ e.SetTypecheck(1)
+ return e
+ }
+
+ // Must be converting I2I (more specific to less specific interface).
+ // Use the same code as e, _ = c.(T).
+ var rhs ir.Node
+ if n.TypeWord == nil || n.TypeWord.Op() == ir.OADDR && n.TypeWord.(*ir.AddrExpr).X.Op() == ir.OLINKSYMOFFSET {
+ // Fixed (not loaded from a dictionary) type.
+ ta := ir.NewTypeAssertExpr(base.Pos, c, toType)
+ ta.SetOp(ir.ODOTTYPE2)
+ // Allocate a descriptor for this conversion to pass to the runtime.
+ ta.Descriptor = makeTypeAssertDescriptor(toType, true)
+ rhs = ta
} else {
- // Must be converting I2I (more specific to less specific interface).
- // res = convI2I(toType, itab)
- fn := typecheck.LookupRuntime("convI2I")
- types.CalcSize(fn.Type())
- call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
- call.Args = []ir.Node{reflectdata.ConvIfaceTypeWord(base.Pos, n), itab}
- typeWord = walkExpr(typecheck.Expr(call), init)
- }
-
- // Build the result.
- // e = iface{typeWord, data}
- e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeWord, data)
- e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE.
- e.SetTypecheck(1)
- return e
+ ta := ir.NewDynamicTypeAssertExpr(base.Pos, ir.ODYNAMICDOTTYPE2, c, n.TypeWord)
+ rhs = ta
+ }
+ rhs.SetType(toType)
+ rhs.SetTypecheck(1)
+
+ res := typecheck.TempAt(base.Pos, ir.CurFunc, toType)
+ as := ir.NewAssignListStmt(base.Pos, ir.OAS2DOTTYPE, []ir.Node{res, ir.BlankNode}, []ir.Node{rhs})
+ init.Append(as)
+ return res
}
// Returns the data word (the second word) used to represent conv.X in
@@ -155,7 +169,7 @@ func dataWord(conv *ir.ConvExpr, init *ir.Nodes) ir.Node {
value = n
case conv.Esc() == ir.EscNone && fromType.Size() <= 1024:
// n does not escape. Use a stack temporary initialized to n.
- value = typecheck.Temp(fromType)
+ value = typecheck.TempAt(base.Pos, ir.CurFunc, fromType)
init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, value, n)))
}
if value != nil {
@@ -165,7 +179,7 @@ func dataWord(conv *ir.ConvExpr, init *ir.Nodes) ir.Node {
// Time to do an allocation. We'll call into the runtime for that.
fnname, argType, needsaddr := dataWordFuncName(fromType)
- fn := typecheck.LookupRuntime(fnname)
+ var fn *ir.Name
var args []ir.Node
if needsaddr {
@@ -178,11 +192,12 @@ func dataWord(conv *ir.ConvExpr, init *ir.Nodes) ir.Node {
if !ir.IsAddressable(n) {
n = copyExpr(n, fromType, init)
}
- fn = typecheck.SubstArgTypes(fn, fromType)
+ fn = typecheck.LookupRuntime(fnname, fromType)
args = []ir.Node{reflectdata.ConvIfaceSrcRType(base.Pos, conv), typecheck.NodAddr(n)}
} else {
// Use a specialized conversion routine that takes the type being
// converted by value, not by pointer.
+ fn = typecheck.LookupRuntime(fnname)
var arg ir.Node
switch {
case fromType == argType:
@@ -211,12 +226,6 @@ func dataWord(conv *ir.ConvExpr, init *ir.Nodes) ir.Node {
return safeExpr(walkExpr(typecheck.Expr(call), init), init)
}
-// walkConvIData walks an OCONVIDATA node.
-func walkConvIData(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
- n.X = walkExpr(n.X, init)
- return dataWord(n, init)
-}
-
// walkBytesRunesToString walks an OBYTES2STR or ORUNES2STR node.
func walkBytesRunesToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
a := typecheck.NodNil()
@@ -276,7 +285,7 @@ func walkStringToBytes(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
a.SetTypecheck(1)
a.MarkNonNil()
}
- p := typecheck.Temp(t.PtrTo()) // *[n]byte
+ p := typecheck.TempAt(base.Pos, ir.CurFunc, t.PtrTo()) // *[n]byte
init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, p, a)))
// Copy from the static string data to the [n]byte.
@@ -414,11 +423,11 @@ func soleComponent(init *ir.Nodes, n ir.Node) ir.Node {
case n.Type().IsStruct():
if n.Type().Field(0).Sym.IsBlank() {
// Treat blank fields as the zero value as the Go language requires.
- n = typecheck.Temp(n.Type().Field(0).Type)
+ n = typecheck.TempAt(base.Pos, ir.CurFunc, n.Type().Field(0).Type)
appendWalkStmt(init, ir.NewAssignStmt(base.Pos, n, nil))
continue
}
- n = typecheck.Expr(ir.NewSelectorExpr(n.Pos(), ir.OXDOT, n, n.Type().Field(0).Sym))
+ n = typecheck.DotField(n.Pos(), n, 0)
case n.Type().IsArray():
n = typecheck.Expr(ir.NewIndexExpr(n.Pos(), n, ir.NewInt(base.Pos, 0)))
default:
diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go
index 909e7d624e..268f793dc9 100644
--- a/src/cmd/compile/internal/walk/expr.go
+++ b/src/cmd/compile/internal/walk/expr.go
@@ -7,16 +7,20 @@ package walk
import (
"fmt"
"go/constant"
+ "internal/abi"
"internal/buildcfg"
"strings"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/rttype"
"cmd/compile/internal/staticdata"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
+ "cmd/internal/objabi"
)
// The result of walkExpr MUST be assigned back to n, e.g.
@@ -122,7 +126,7 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node {
n.X = walkExpr(n.X, init)
return n
- case ir.OEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH,
+ case ir.OMAKEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH,
ir.OUNSAFEADD:
n := n.(*ir.BinaryExpr)
n.X = walkExpr(n.X, init)
@@ -171,7 +175,7 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node {
n := n.(*ir.LogicalExpr)
return walkLogical(n, init)
- case ir.OPRINT, ir.OPRINTN:
+ case ir.OPRINT, ir.OPRINTLN:
return walkPrint(n.(*ir.CallExpr), init)
case ir.OPANIC:
@@ -223,10 +227,6 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node {
n := n.(*ir.ConvExpr)
return walkConvInterface(n, init)
- case ir.OCONVIDATA:
- n := n.(*ir.ConvExpr)
- return walkConvIData(n, init)
-
case ir.OCONV, ir.OCONVNOP:
n := n.(*ir.ConvExpr)
return walkConv(n, init)
@@ -459,7 +459,7 @@ func safeExpr(n ir.Node, init *ir.Nodes) ir.Node {
}
func copyExpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
- l := typecheck.Temp(t)
+ l := typecheck.TempAt(base.Pos, ir.CurFunc, t)
appendWalkStmt(init, ir.NewAssignStmt(base.Pos, l, n))
return l
}
@@ -535,7 +535,7 @@ func walkCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
if n.Op() == ir.OCALLMETH {
base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
}
- if n.Op() == ir.OCALLINTER || n.X.Op() == ir.OMETHEXPR {
+ if n.Op() == ir.OCALLINTER || n.Fun.Op() == ir.OMETHEXPR {
// We expect both interface call reflect.Type.Method and concrete
// call reflect.(*rtype).Method.
usemethod(n)
@@ -544,14 +544,14 @@ func walkCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
reflectdata.MarkUsedIfaceMethod(n)
}
- if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.OCLOSURE {
+ if n.Op() == ir.OCALLFUNC && n.Fun.Op() == ir.OCLOSURE {
directClosureCall(n)
}
- if isFuncPCIntrinsic(n) {
+ if ir.IsFuncPCIntrinsic(n) {
// For internal/abi.FuncPCABIxxx(fn), if fn is a defined function, rewrite
// it to the address of the function, for the ABI under which fn is defined.
- name := n.X.(*ir.Name).Sym().Name
+ name := n.Fun.(*ir.Name).Sym().Name
arg := n.Args[0]
var wantABI obj.ABI
switch name {
@@ -560,30 +560,22 @@ func walkCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
case "FuncPCABIInternal":
wantABI = obj.ABIInternal
}
- if isIfaceOfFunc(arg) {
- fn := arg.(*ir.ConvExpr).X.(*ir.Name)
- abi := fn.Func.ABI
- if abi != wantABI {
- base.ErrorfAt(n.Pos(), 0, "internal/abi.%s expects an %v function, %s is defined as %v", name, wantABI, fn.Sym().Name, abi)
- }
- var e ir.Node = ir.NewLinksymExpr(n.Pos(), fn.Sym().LinksymABI(abi), types.Types[types.TUINTPTR])
- e = ir.NewAddrExpr(n.Pos(), e)
- e.SetType(types.Types[types.TUINTPTR].PtrTo())
- return typecheck.Expr(ir.NewConvExpr(n.Pos(), ir.OCONVNOP, n.Type(), e))
+ if n.Type() != types.Types[types.TUINTPTR] {
+ base.FatalfAt(n.Pos(), "FuncPC intrinsic should return uintptr, got %v", n.Type()) // as expected by typecheck.FuncPC.
}
- // fn is not a defined function. It must be ABIInternal.
- // Read the address from func value, i.e. *(*uintptr)(idata(fn)).
- if wantABI != obj.ABIInternal {
- base.ErrorfAt(n.Pos(), 0, "internal/abi.%s does not accept func expression, which is ABIInternal", name)
+ n := ir.FuncPC(n.Pos(), arg, wantABI)
+ return walkExpr(n, init)
+ }
+
+ if name, ok := n.Fun.(*ir.Name); ok {
+ sym := name.Sym()
+ if sym.Pkg.Path == "go.runtime" && sym.Name == "deferrangefunc" {
+ // Call to runtime.deferrangefunc is being shared with a range-over-func
+ // body that might add defers to this frame, so we cannot use open-coded defers
+ // and we need to call deferreturn even if we don't see any other explicit defers.
+ ir.CurFunc.SetHasDefer(true)
+ ir.CurFunc.SetOpenCodedDeferDisallowed(true)
}
- arg = walkExpr(arg, init)
- var e ir.Node = ir.NewUnaryExpr(n.Pos(), ir.OIDATA, arg)
- e.SetType(n.Type().PtrTo())
- e.SetTypecheck(1)
- e = ir.NewStarExpr(n.Pos(), e)
- e.SetType(n.Type())
- e.SetTypecheck(1)
- return e
}
walkCall1(n, init)
@@ -601,14 +593,14 @@ func walkCall1(n *ir.CallExpr, init *ir.Nodes) {
}
args := n.Args
- params := n.X.Type().Params()
+ params := n.Fun.Type().Params()
- n.X = walkExpr(n.X, init)
+ n.Fun = walkExpr(n.Fun, init)
walkExprList(args, init)
for i, arg := range args {
// Validate argument and parameter types match.
- param := params.Field(i)
+ param := params[i]
if !types.Identical(arg.Type(), param.Type) {
base.FatalfAt(n.Pos(), "assigning %L to parameter %v (type %v)", arg, param.Sym, param.Type)
}
@@ -618,14 +610,14 @@ func walkCall1(n *ir.CallExpr, init *ir.Nodes) {
// to prevent such calls from clobbering arguments already on the stack.
if mayCall(arg) {
// assignment of arg to Temp
- tmp := typecheck.Temp(param.Type)
+ tmp := typecheck.TempAt(base.Pos, ir.CurFunc, param.Type)
init.Append(convas(typecheck.Stmt(ir.NewAssignStmt(base.Pos, tmp, arg)).(*ir.AssignStmt), init))
// replace arg with temp
args[i] = tmp
}
}
- funSym := n.X.Sym()
+ funSym := n.Fun.Sym()
if base.Debug.Libfuzzer != 0 && funSym != nil {
if hook, found := hooks[funSym.Pkg.Path+"."+funSym.Name]; found {
if len(args) != hook.argsNum {
@@ -714,16 +706,50 @@ func walkDotType(n *ir.TypeAssertExpr, init *ir.Nodes) ir.Node {
n.X = walkExpr(n.X, init)
// Set up interface type addresses for back end.
if !n.Type().IsInterface() && !n.X.Type().IsEmptyInterface() {
- n.ITab = reflectdata.ITabAddr(n.Type(), n.X.Type())
+ n.ITab = reflectdata.ITabAddrAt(base.Pos, n.Type(), n.X.Type())
+ }
+ if n.X.Type().IsInterface() && n.Type().IsInterface() && !n.Type().IsEmptyInterface() {
+ // This kind of conversion needs a runtime call. Allocate
+ // a descriptor for that call.
+ n.Descriptor = makeTypeAssertDescriptor(n.Type(), n.Op() == ir.ODOTTYPE2)
}
return n
}
+func makeTypeAssertDescriptor(target *types.Type, canFail bool) *obj.LSym {
+ // Conversions from an interface to a non-empty interface need a runtime call.
+ // Allocate an internal/abi.TypeAssert descriptor for that call.
+ lsym := types.LocalPkg.Lookup(fmt.Sprintf(".typeAssert.%d", typeAssertGen)).LinksymABI(obj.ABI0)
+ typeAssertGen++
+ c := rttype.NewCursor(lsym, 0, rttype.TypeAssert)
+ c.Field("Cache").WritePtr(typecheck.LookupRuntimeVar("emptyTypeAssertCache"))
+ c.Field("Inter").WritePtr(reflectdata.TypeSym(target).Linksym())
+ c.Field("CanFail").WriteBool(canFail)
+ objw.Global(lsym, int32(rttype.TypeAssert.Size()), obj.LOCAL)
+ lsym.Gotype = reflectdata.TypeLinksym(rttype.TypeAssert)
+ return lsym
+}
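A rough sketch of the descriptor being laid out here, with field names taken from the Field(...) writes above; the authoritative layout lives in internal/abi and the pointer types shown are placeholders:

    package sketch

    import "unsafe"

    type TypeAssert struct {
        Cache   unsafe.Pointer // starts out pointing at runtime's emptyTypeAssertCache
        Inter   unsafe.Pointer // descriptor of the target interface type
        CanFail bool           // true for the two-result (",ok") assertion form
    }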
+
+var typeAssertGen int
+
// walkDynamicDotType walks an ODYNAMICDOTTYPE or ODYNAMICDOTTYPE2 node.
func walkDynamicDotType(n *ir.DynamicTypeAssertExpr, init *ir.Nodes) ir.Node {
n.X = walkExpr(n.X, init)
n.RType = walkExpr(n.RType, init)
n.ITab = walkExpr(n.ITab, init)
+ // Convert to non-dynamic if we can.
+ if n.RType != nil && n.RType.Op() == ir.OADDR {
+ addr := n.RType.(*ir.AddrExpr)
+ if addr.X.Op() == ir.OLINKSYMOFFSET {
+ r := ir.NewTypeAssertExpr(n.Pos(), n.X, n.Type())
+ if n.Op() == ir.ODYNAMICDOTTYPE2 {
+ r.SetOp(ir.ODOTTYPE2)
+ }
+ r.SetType(n.Type())
+ r.SetTypecheck(1)
+ return walkExpr(r, init)
+ }
+ }
return n
}
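One situation in which the dynamic assertion can collapse back to the static form, sketched below under the assumption that instantiation makes T's type descriptor a static symbol (the function name is illustrative):

    func as[T any](x any) (T, bool) {
        v, ok := x.(T) // ODYNAMICDOTTYPE2 in shape code; rewritten to ODOTTYPE2 when
        return v, ok   // the RType operand is the address of a static descriptor
    }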
@@ -800,7 +826,7 @@ func walkIndexMap(n *ir.IndexExpr, init *ir.Nodes) ir.Node {
switch {
case n.Assigned:
mapFn = mapfn(mapassign[fast], t, false)
- case t.Elem().Size() > zeroValSize:
+ case t.Elem().Size() > abi.ZeroValSize:
args = append(args, reflectdata.ZeroAddr(t.Elem().Size()))
mapFn = mapfn("mapaccess1_fat", t, true)
default:
@@ -945,57 +971,87 @@ func bounded(n ir.Node, max int64) bool {
return false
}
-// usemethod checks calls for uses of reflect.Type.{Method,MethodByName}.
+// usemethod checks calls for uses of Method and MethodByName of reflect.Value,
+// reflect.Type, reflect.(*rtype), and reflect.(*interfaceType).
func usemethod(n *ir.CallExpr) {
// Don't mark reflect.(*rtype).Method, etc. themselves in the reflect package.
// Those functions may be alive via the itab, which should not cause all methods
// alive. We only want to mark their callers.
if base.Ctxt.Pkgpath == "reflect" {
- switch ir.CurFunc.Nname.Sym().Name { // TODO: is there a better way than hardcoding the names?
- case "(*rtype).Method", "(*rtype).MethodByName", "(*interfaceType).Method", "(*interfaceType).MethodByName":
+ // TODO: is there a better way than hardcoding the names?
+ switch fn := ir.CurFunc.Nname.Sym().Name; {
+ case fn == "(*rtype).Method", fn == "(*rtype).MethodByName":
+ return
+ case fn == "(*interfaceType).Method", fn == "(*interfaceType).MethodByName":
+ return
+ case fn == "Value.Method", fn == "Value.MethodByName":
return
}
}
- dot, ok := n.X.(*ir.SelectorExpr)
+ dot, ok := n.Fun.(*ir.SelectorExpr)
if !ok {
return
}
- // Looking for either direct method calls and interface method calls of:
- // reflect.Type.Method - func(int) reflect.Method
- // reflect.Type.MethodByName - func(string) (reflect.Method, bool)
- var pKind types.Kind
+ // Looking for direct method calls or interface method calls of:
+ // reflect.Type.Method - func(int) reflect.Method
+ // reflect.Type.MethodByName - func(string) (reflect.Method, bool)
+ //
+ // reflect.Value.Method - func(int) reflect.Value
+ // reflect.Value.MethodByName - func(string) reflect.Value
+ methodName := dot.Sel.Name
+ t := dot.Selection.Type
+
+ // Check the number of arguments and return values.
+ if t.NumParams() != 1 || (t.NumResults() != 1 && t.NumResults() != 2) {
+ return
+ }
+
+ // Check the type of the argument.
+ switch pKind := t.Param(0).Type.Kind(); {
+ case methodName == "Method" && pKind == types.TINT,
+ methodName == "MethodByName" && pKind == types.TSTRING:
- switch dot.Sel.Name {
- case "Method":
- pKind = types.TINT
- case "MethodByName":
- pKind = types.TSTRING
default:
+ // not a call to Method or MethodByName of reflect.{Type,Value}.
return
}
- t := dot.Selection.Type
- if t.NumParams() != 1 || t.Params().Field(0).Type.Kind() != pKind {
+ // Check that first result type is "reflect.Method" or "reflect.Value".
+ // Note that we have to check sym name and sym package separately, as
+ // we can't check for exact string "reflect.Method" reliably
+ // (e.g., see #19028 and #38515).
+ switch s := t.Result(0).Type.Sym(); {
+ case s != nil && types.ReflectSymName(s) == "Method",
+ s != nil && types.ReflectSymName(s) == "Value":
+
+ default:
+ // not a call to Method or MethodByName of reflect.{Type,Value}.
return
}
- switch t.NumResults() {
- case 1:
- // ok
- case 2:
- if t.Results().Field(1).Type.Kind() != types.TBOOL {
- return
+
+ var targetName ir.Node
+ switch dot.Op() {
+ case ir.ODOTINTER:
+ if methodName == "MethodByName" {
+ targetName = n.Args[0]
+ }
+ case ir.OMETHEXPR:
+ if methodName == "MethodByName" {
+ targetName = n.Args[1]
}
default:
- return
+ base.FatalfAt(dot.Pos(), "usemethod: unexpected dot.Op() %s", dot.Op())
}
- // Check that first result type is "reflect.Method". Note that we have to check sym name and sym package
- // separately, as we can't check for exact string "reflect.Method" reliably (e.g., see #19028 and #38515).
- if s := t.Results().Field(0).Type.Sym(); s != nil && s.Name == "Method" && types.IsReflectPkg(s.Pkg) {
- ir.CurFunc.SetReflectMethod(true)
- // The LSym is initialized at this point. We need to set the attribute on the LSym.
+ if ir.IsConst(targetName, constant.String) {
+ name := constant.StringVal(targetName.Val())
+
+ r := obj.Addrel(ir.CurFunc.LSym)
+ r.Type = objabi.R_USENAMEDMETHOD
+ r.Sym = staticdata.StringSymNoCommon(name)
+ } else {
ir.CurFunc.LSym.Set(obj.AttrReflectMethod, true)
}
}
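An illustrative sketch of what the constant-name path enables (the type and function names are made up; the relocation and attribute names come from the code above):

    package sketch

    import "reflect"

    type server struct{}

    func (server) Handle() {}

    func keep(name string) {
        srv := server{}
        // Constant method name: a precise R_USENAMEDMETHOD relocation is recorded,
        // so the linker only needs to keep methods named "Handle" alive.
        _ = reflect.ValueOf(srv).MethodByName("Handle")
        // Non-constant name: falls back to AttrReflectMethod, which conservatively
        // keeps exported methods reachable.
        _ = reflect.ValueOf(srv).MethodByName(name)
    }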
diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go
index 057e0b75b8..179fbdb99e 100644
--- a/src/cmd/compile/internal/walk/order.go
+++ b/src/cmd/compile/internal/walk/order.go
@@ -11,6 +11,7 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssa"
"cmd/compile/internal/staticinit"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
@@ -73,7 +74,7 @@ func (o *orderState) newTemp(t *types.Type, clear bool) *ir.Name {
}
o.free[key] = a[:len(a)-1]
} else {
- v = typecheck.Temp(t)
+ v = typecheck.TempAt(base.Pos, ir.CurFunc, t)
}
if clear {
o.append(ir.NewAssignStmt(base.Pos, v, nil))
@@ -128,7 +129,7 @@ func (o *orderState) cheapExpr(n ir.Node) ir.Node {
if l == n.X {
return n
}
- a := ir.SepCopy(n).(*ir.UnaryExpr)
+ a := ir.Copy(n).(*ir.UnaryExpr)
a.X = l
return typecheck.Expr(a)
}
@@ -154,7 +155,7 @@ func (o *orderState) safeExpr(n ir.Node) ir.Node {
if l == n.X {
return n
}
- a := ir.SepCopy(n).(*ir.UnaryExpr)
+ a := ir.Copy(n).(*ir.UnaryExpr)
a.X = l
return typecheck.Expr(a)
@@ -164,7 +165,7 @@ func (o *orderState) safeExpr(n ir.Node) ir.Node {
if l == n.X {
return n
}
- a := ir.SepCopy(n).(*ir.SelectorExpr)
+ a := ir.Copy(n).(*ir.SelectorExpr)
a.X = l
return typecheck.Expr(a)
@@ -174,7 +175,7 @@ func (o *orderState) safeExpr(n ir.Node) ir.Node {
if l == n.X {
return n
}
- a := ir.SepCopy(n).(*ir.SelectorExpr)
+ a := ir.Copy(n).(*ir.SelectorExpr)
a.X = l
return typecheck.Expr(a)
@@ -184,7 +185,7 @@ func (o *orderState) safeExpr(n ir.Node) ir.Node {
if l == n.X {
return n
}
- a := ir.SepCopy(n).(*ir.StarExpr)
+ a := ir.Copy(n).(*ir.StarExpr)
a.X = l
return typecheck.Expr(a)
@@ -200,7 +201,7 @@ func (o *orderState) safeExpr(n ir.Node) ir.Node {
if l == n.X && r == n.Index {
return n
}
- a := ir.SepCopy(n).(*ir.IndexExpr)
+ a := ir.Copy(n).(*ir.IndexExpr)
a.X = l
a.Index = r
return typecheck.Expr(a)
@@ -231,14 +232,29 @@ func (o *orderState) addrTemp(n ir.Node) ir.Node {
vstat = typecheck.Expr(vstat).(*ir.Name)
return vstat
}
+
+ // Prevent taking the address of an SSA-able local variable (#63332).
+ //
+ // TODO(mdempsky): Note that OuterValue unwraps OCONVNOPs, but
+ // IsAddressable does not. It should be possible to skip copying for
+ // at least some of these OCONVNOPs (e.g., reinsert them after the
+ // OADDR operation), but at least walkCompare needs to be fixed to
+ // support that (see trybot failures on go.dev/cl/541715, PS1).
if ir.IsAddressable(n) {
+ if name, ok := ir.OuterValue(n).(*ir.Name); ok && name.Op() == ir.ONAME {
+ if name.Class == ir.PAUTO && !name.Addrtaken() && ssa.CanSSA(name.Type()) {
+ goto Copy
+ }
+ }
+
return n
}
+
+Copy:
return o.copyExpr(n)
}
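A sketch of the situation #63332 guards against (names illustrative): addressing a slow map key directly would force the local out of SSA form, so a temporary copy is addressed instead:

    func lookup(m map[[2]int]string, a, b int) string {
        k := [2]int{a, b} // k stays SSA-able; the slow-key runtime call addresses a copy of it
        return m[k]
    }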
// mapKeyTemp prepares n to be a key in a map runtime call and returns n.
-// It should only be used for map runtime calls which have *_fast* versions.
// The first parameter is the position of n's containing node, for use in case
// that n's position is not unique (e.g., if n is an ONAME).
func (o *orderState) mapKeyTemp(outerPos src.XPos, t *types.Type, n ir.Node) ir.Node {
@@ -538,14 +554,14 @@ func (o *orderState) call(nn ir.Node) {
n := nn.(*ir.CallExpr)
typecheck.AssertFixedCall(n)
- if isFuncPCIntrinsic(n) && isIfaceOfFunc(n.Args[0]) {
+ if ir.IsFuncPCIntrinsic(n) && ir.IsIfaceOfFunc(n.Args[0]) != nil {
// For internal/abi.FuncPCABIxxx(fn), if fn is a defined function,
// do not introduce temporaries here, so it is easier to rewrite it
// to symbol address reference later in walk.
return
}
- n.X = o.expr(n.X, nil)
+ n.Fun = o.expr(n.Fun, nil)
o.exprList(n.Args)
}
@@ -603,8 +619,38 @@ func (o *orderState) stmt(n ir.Node) {
case ir.OAS:
n := n.(*ir.AssignStmt)
t := o.markTemp()
+
+ // There's a delicate interaction here between two OINDEXMAP
+ // optimizations.
+ //
+ // First, we want to handle m[k] = append(m[k], ...) with a single
+ // runtime call to mapassign. This requires the m[k] expressions to
+ // satisfy ir.SameSafeExpr in walkAssign.
+ //
+ // But if k is a slow map key type that's passed by reference (e.g.,
+ // byte), then we want to avoid marking user variables as addrtaken,
+ // if that might prevent the compiler from keeping k in a register.
+ //
+ // TODO(mdempsky): It would be better if walk was responsible for
+ // inserting temporaries as needed.
+ mapAppend := n.X.Op() == ir.OINDEXMAP && n.Y.Op() == ir.OAPPEND &&
+ ir.SameSafeExpr(n.X, n.Y.(*ir.CallExpr).Args[0])
+
n.X = o.expr(n.X, nil)
- n.Y = o.expr(n.Y, n.X)
+ if mapAppend {
+ indexLHS := n.X.(*ir.IndexExpr)
+ indexLHS.X = o.cheapExpr(indexLHS.X)
+ indexLHS.Index = o.cheapExpr(indexLHS.Index)
+
+ call := n.Y.(*ir.CallExpr)
+ indexRHS := call.Args[0].(*ir.IndexExpr)
+ indexRHS.X = indexLHS.X
+ indexRHS.Index = indexLHS.Index
+
+ o.exprList(call.Args[1:])
+ } else {
+ n.Y = o.expr(n.Y, n.X)
+ }
o.mapAssign(n)
o.popTemp(t)
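The pattern the mapAppend special case preserves, as a sketch:

    func record(m map[string][]int, k string, v int) {
        // Both m[k] expressions must remain identical (ir.SameSafeExpr) so that
        // walkAssign can grow the slice in place with a single mapassign call.
        m[k] = append(m[k], v)
    }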
@@ -713,8 +759,6 @@ func (o *orderState) stmt(n ir.Node) {
case ir.OBREAK,
ir.OCONTINUE,
ir.ODCL,
- ir.ODCLCONST,
- ir.ODCLTYPE,
ir.OFALL,
ir.OGOTO,
ir.OLABEL,
@@ -755,7 +799,7 @@ func (o *orderState) stmt(n ir.Node) {
o.out = append(o.out, n)
o.popTemp(t)
- case ir.OPRINT, ir.OPRINTN, ir.ORECOVERFP:
+ case ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP:
n := n.(*ir.CallExpr)
t := o.markTemp()
o.call(n)
@@ -817,8 +861,14 @@ func (o *orderState) stmt(n ir.Node) {
// Mark []byte(str) range expression to reuse string backing storage.
// It is safe because the storage cannot be mutated.
n := n.(*ir.RangeStmt)
- if n.X.Op() == ir.OSTR2BYTES {
- n.X.(*ir.ConvExpr).SetOp(ir.OSTR2BYTESTMP)
+ if x, ok := n.X.(*ir.ConvExpr); ok {
+ switch x.Op() {
+ case ir.OSTR2BYTES:
+ x.SetOp(ir.OSTR2BYTESTMP)
+ fallthrough
+ case ir.OSTR2BYTESTMP:
+ x.MarkNonNil() // "range []byte(nil)" is fine
+ }
}
t := o.markTemp()
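What the OSTR2BYTES to OSTR2BYTESTMP rewrite buys at the source level, sketched below: the loop cannot mutate the conversion's result, so the bytes can alias the string's storage and no allocation is needed:

    func countSpaces(s string) int {
        n := 0
        for _, b := range []byte(s) { // marked OSTR2BYTESTMP: reuses s's backing store
            if b == ' ' {
                n++
            }
        }
        return n
    }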
@@ -826,11 +876,14 @@ func (o *orderState) stmt(n ir.Node) {
orderBody := true
xt := typecheck.RangeExprType(n.X.Type())
- switch xt.Kind() {
+ switch k := xt.Kind(); {
default:
base.Fatalf("order.stmt range %v", n.Type())
- case types.TARRAY, types.TSLICE:
+ case types.IsInt[k]:
+ // Used only once, no need to copy.
+
+ case k == types.TARRAY, k == types.TSLICE:
if n.Value == nil || ir.IsBlank(n.Value) {
// for i := range x will only use x once, to compute len(x).
// No need to copy it.
@@ -838,7 +891,7 @@ func (o *orderState) stmt(n ir.Node) {
}
fallthrough
- case types.TCHAN, types.TSTRING:
+ case k == types.TCHAN, k == types.TSTRING:
// chan, string, slice, array ranges use value multiple times.
// make copy.
r := n.X
@@ -851,7 +904,7 @@ func (o *orderState) stmt(n ir.Node) {
n.X = o.copyExpr(r)
- case types.TMAP:
+ case k == types.TMAP:
if isMapClear(n) {
// Preserve the body of the map clear pattern so it can
// be detected during walk. The loop body will not be used
@@ -868,7 +921,7 @@ func (o *orderState) stmt(n ir.Node) {
// n.Prealloc is the temp for the iterator.
// MapIterType contains pointers and needs to be zeroed.
- n.Prealloc = o.newTemp(reflectdata.MapIterType(xt), true)
+ n.Prealloc = o.newTemp(reflectdata.MapIterType(), true)
}
n.Key = o.exprInPlace(n.Key)
n.Value = o.exprInPlace(n.Value)
@@ -1151,7 +1204,7 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
}
}
- // key must be addressable
+ // key may need to be addressable
n.Index = o.mapKeyTemp(n.Pos(), n.X.Type(), n.Index)
if needCopy {
return o.copyExpr(n)
@@ -1160,7 +1213,7 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
// concrete type (not interface) argument might need an addressable
// temporary to pass to the runtime conversion routine.
- case ir.OCONVIFACE, ir.OCONVIDATA:
+ case ir.OCONVIFACE:
n := n.(*ir.ConvExpr)
n.X = o.expr(n.X, nil)
if n.X.Type().IsInterface() {
@@ -1169,7 +1222,7 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
if _, _, needsaddr := dataWordFuncName(n.X.Type()); needsaddr || isStaticCompositeLiteral(n.X) {
// Need a temp if we need to pass the address to the conversion function.
// We also process static composite literal node here, making a named static global
- // whose address we can put directly in an interface (see OCONVIFACE/OCONVIDATA case in walk).
+ // whose address we can put directly in an interface (see OCONVIFACE case in walk).
n.X = o.addrTemp(n.X)
}
return n
@@ -1495,18 +1548,3 @@ func (o *orderState) as2ok(n *ir.AssignListStmt) {
o.out = append(o.out, n)
o.stmt(typecheck.Stmt(as))
}
-
-// isFuncPCIntrinsic returns whether n is a direct call of internal/abi.FuncPCABIxxx functions.
-func isFuncPCIntrinsic(n *ir.CallExpr) bool {
- if n.Op() != ir.OCALLFUNC || n.X.Op() != ir.ONAME {
- return false
- }
- fn := n.X.(*ir.Name).Sym()
- return (fn.Name == "FuncPCABI0" || fn.Name == "FuncPCABIInternal") &&
- (fn.Pkg.Path == "internal/abi" || fn.Pkg == types.LocalPkg && base.Ctxt.Pkgpath == "internal/abi")
-}
-
-// isIfaceOfFunc returns whether n is an interface conversion from a direct reference of a func.
-func isIfaceOfFunc(n ir.Node) bool {
- return n.Op() == ir.OCONVIFACE && n.(*ir.ConvExpr).X.Op() == ir.ONAME && n.(*ir.ConvExpr).X.(*ir.Name).Class == ir.PFUNC
-}
diff --git a/src/cmd/compile/internal/walk/race.go b/src/cmd/compile/internal/walk/race.go
deleted file mode 100644
index 859e5c57f0..0000000000
--- a/src/cmd/compile/internal/walk/race.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package walk
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/types"
- "cmd/internal/src"
-)
-
-func instrument(fn *ir.Func) {
- if fn.Pragma&ir.Norace != 0 || (fn.Linksym() != nil && fn.Linksym().ABIWrapper()) {
- return
- }
-
- if !base.Flag.Race || !base.Compiling(base.NoRacePkgs) {
- fn.SetInstrumentBody(true)
- }
-
- if base.Flag.Race {
- lno := base.Pos
- base.Pos = src.NoXPos
- var init ir.Nodes
- fn.Enter.Prepend(mkcallstmt("racefuncenter", mkcall("getcallerpc", types.Types[types.TUINTPTR], &init)))
- if len(init) != 0 {
- base.Fatalf("race walk: unexpected init for getcallerpc")
- }
- fn.Exit.Append(mkcallstmt("racefuncexit"))
- base.Pos = lno
- }
-}
diff --git a/src/cmd/compile/internal/walk/range.go b/src/cmd/compile/internal/walk/range.go
index 38479b323f..93898b3a66 100644
--- a/src/cmd/compile/internal/walk/range.go
+++ b/src/cmd/compile/internal/walk/range.go
@@ -74,11 +74,25 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
var body []ir.Node
var init []ir.Node
- switch t.Kind() {
+ switch k := t.Kind(); {
default:
base.Fatalf("walkRange")
- case types.TARRAY, types.TSLICE, types.TPTR: // TPTR is pointer-to-array
+ case types.IsInt[k]:
+ hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, t)
+ hn := typecheck.TempAt(base.Pos, ir.CurFunc, t)
+
+ init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
+ init = append(init, ir.NewAssignStmt(base.Pos, hn, a))
+
+ nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)
+ nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(base.Pos, 1)))
+
+ if v1 != nil {
+ body = []ir.Node{rangeAssign(nrange, hv1)}
+ }
+
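The new integer case lowers "for i := range n" (which counts i from 0 to n-1) into the hv1/hn loop built above; a sketch of source that takes this path:

    func sumTo(n int) int {
        total := 0
        for i := range n { // becomes: hv1 := 0; hn := n; for hv1 < hn { ...; hv1++ }
            total += i
        }
        return total
    }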
+ case k == types.TARRAY, k == types.TSLICE, k == types.TPTR: // TPTR is pointer-to-array
if nn := arrayRangeClear(nrange, v1, v2, a); nn != nil {
base.Pos = lno
return nn
@@ -96,8 +110,8 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
// order.stmt arranged for a copy of the array/slice variable if needed.
ha := a
- hv1 := typecheck.Temp(types.Types[types.TINT])
- hn := typecheck.Temp(types.Types[types.TINT])
+ hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+ hn := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
init = append(init, ir.NewAssignStmt(base.Pos, hn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha)))
@@ -196,14 +210,14 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
ptr.SetBounded(true)
huVal := ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], ptr)
huVal = ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUINTPTR], huVal)
- hu := typecheck.Temp(types.Types[types.TUINTPTR])
+ hu := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR])
init = append(init, ir.NewAssignStmt(base.Pos, hu, huVal))
// Convert hu to hp at the top of the loop (after the condition has been checked).
hpVal := ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], hu)
hpVal.SetCheckPtr(true) // disable checkptr on this conversion
hpVal = ir.NewConvExpr(base.Pos, ir.OCONVNOP, elem.PtrTo(), hpVal)
- hp := typecheck.Temp(elem.PtrTo())
+ hp := typecheck.TempAt(base.Pos, ir.CurFunc, elem.PtrTo())
body = append(body, ir.NewAssignStmt(base.Pos, hp, hpVal))
// Assign variables on the LHS of the range statement. Use *hp to get the element.
@@ -219,7 +233,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
as := ir.NewAssignStmt(base.Pos, hu, ir.NewBinaryExpr(base.Pos, ir.OADD, huVal, ir.NewInt(base.Pos, elem.Size())))
nfor.Post = ir.NewBlockStmt(base.Pos, []ir.Node{nfor.Post, as})
- case types.TMAP:
+ case k == types.TMAP:
// order.stmt allocated the iterator for us.
// we only use a once, so no copy needed.
ha := a
@@ -231,36 +245,33 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
keysym := th.Field(0).Sym
elemsym := th.Field(1).Sym // ditto
- fn := typecheck.LookupRuntime("mapiterinit")
-
- fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), th)
+ fn := typecheck.LookupRuntime("mapiterinit", t.Key(), t.Elem(), th)
init = append(init, mkcallstmt1(fn, reflectdata.RangeMapRType(base.Pos, nrange), ha, typecheck.NodAddr(hit)))
nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), typecheck.NodNil())
- fn = typecheck.LookupRuntime("mapiternext")
- fn = typecheck.SubstArgTypes(fn, th)
+ fn = typecheck.LookupRuntime("mapiternext", th)
nfor.Post = mkcallstmt1(fn, typecheck.NodAddr(hit))
- key := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym))
+ key := ir.NewStarExpr(base.Pos, typecheck.ConvNop(ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), types.NewPtr(t.Key())))
if v1 == nil {
body = nil
} else if v2 == nil {
body = []ir.Node{rangeAssign(nrange, key)}
} else {
- elem := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, elemsym))
+ elem := ir.NewStarExpr(base.Pos, typecheck.ConvNop(ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, elemsym), types.NewPtr(t.Elem())))
body = []ir.Node{rangeAssign2(nrange, key, elem)}
}
- case types.TCHAN:
+ case k == types.TCHAN:
// order.stmt arranged for a copy of the channel variable.
ha := a
- hv1 := typecheck.Temp(t.Elem())
+ hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, t.Elem())
hv1.SetTypecheck(1)
if t.Elem().HasPointers() {
init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
}
- hb := typecheck.Temp(types.Types[types.TBOOL])
+ hb := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL])
nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, hb, ir.NewBool(base.Pos, false))
lhs := []ir.Node{hv1, hb}
@@ -278,7 +289,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
// See issue 15281.
body = append(body, ir.NewAssignStmt(base.Pos, hv1, nil))
- case types.TSTRING:
+ case k == types.TSTRING:
// Transform string range statements like "for v1, v2 = range a" into
//
// ha := a
@@ -297,9 +308,9 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
// order.stmt arranged for a copy of the string variable.
ha := a
- hv1 := typecheck.Temp(types.Types[types.TINT])
- hv1t := typecheck.Temp(types.Types[types.TINT])
- hv2 := typecheck.Temp(types.RuneType)
+ hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+ hv1t := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+ hv2 := typecheck.TempAt(base.Pos, ir.CurFunc, types.RuneType)
// hv1 := 0
init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
@@ -327,7 +338,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
// } else {
// hv2, hv1 = decoderune(ha, hv1)
fn := typecheck.LookupRuntime("decoderune")
- call := mkcall1(fn, fn.Type().Results(), &nif.Else, ha, hv1)
+ call := mkcall1(fn, fn.Type().ResultsTuple(), &nif.Else, ha, hv1)
a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{hv2, hv1}, []ir.Node{call})
nif.Else.Append(a)
@@ -454,8 +465,7 @@ func mapClear(m, rtyp ir.Node) ir.Node {
t := m.Type()
// instantiate mapclear(typ *type, hmap map[any]any)
- fn := typecheck.LookupRuntime("mapclear")
- fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
+ fn := typecheck.LookupRuntime("mapclear", t.Key(), t.Elem())
n := mkcallstmt1(fn, rtyp, m)
return walkStmt(typecheck.Stmt(n))
}
@@ -529,7 +539,7 @@ func arrayClear(wbPos src.XPos, a ir.Node, nrange *ir.RangeStmt) ir.Node {
n.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(base.Pos, 0))
// hp = &a[0]
- hp := typecheck.Temp(types.Types[types.TUNSAFEPTR])
+ hp := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUNSAFEPTR])
ix := ir.NewIndexExpr(base.Pos, a, ir.NewInt(base.Pos, 0))
ix.SetBounded(true)
@@ -537,7 +547,7 @@ func arrayClear(wbPos src.XPos, a ir.Node, nrange *ir.RangeStmt) ir.Node {
n.Body.Append(ir.NewAssignStmt(base.Pos, hp, addr))
// hn = len(a) * sizeof(elem(a))
- hn := typecheck.Temp(types.Types[types.TUINTPTR])
+ hn := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR])
mul := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(base.Pos, elemsize)), types.Types[types.TUINTPTR])
n.Body.Append(ir.NewAssignStmt(base.Pos, hn, mul))
@@ -564,18 +574,3 @@ func arrayClear(wbPos src.XPos, a ir.Node, nrange *ir.RangeStmt) ir.Node {
typecheck.Stmts(n.Body)
return walkStmt(n)
}
-
-// addptr returns (*T)(uintptr(p) + n).
-func addptr(p ir.Node, n int64) ir.Node {
- t := p.Type()
-
- p = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, p)
- p.SetType(types.Types[types.TUINTPTR])
-
- p = ir.NewBinaryExpr(base.Pos, ir.OADD, p, ir.NewInt(base.Pos, n))
-
- p = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, p)
- p.SetType(t)
-
- return p
-}
diff --git a/src/cmd/compile/internal/walk/select.go b/src/cmd/compile/internal/walk/select.go
index c676a765bc..ca6a76ad00 100644
--- a/src/cmd/compile/internal/walk/select.go
+++ b/src/cmd/compile/internal/walk/select.go
@@ -9,6 +9,7 @@ import (
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
+ "cmd/internal/src"
)
func walkSelect(sel *ir.SelectStmt) {
@@ -125,9 +126,9 @@ func walkSelectCases(cases []*ir.CommClause) []ir.Node {
if ir.IsBlank(elem) {
elem = typecheck.NodNil()
}
- cond = typecheck.Temp(types.Types[types.TBOOL])
+ cond = typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL])
fn := chanfn("selectnbrecv", 2, ch.Type())
- call := mkcall1(fn, fn.Type().Results(), r.PtrInit(), elem, ch)
+ call := mkcall1(fn, fn.Type().ResultsTuple(), r.PtrInit(), elem, ch)
as := ir.NewAssignListStmt(r.Pos(), ir.OAS2, []ir.Node{cond, n.Lhs[1]}, []ir.Node{call})
r.PtrInit().Append(typecheck.Stmt(as))
}
@@ -148,15 +149,15 @@ func walkSelectCases(cases []*ir.CommClause) []ir.Node {
// generate sel-struct
base.Pos = sellineno
- selv := typecheck.Temp(types.NewArray(scasetype(), int64(ncas)))
+ selv := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewArray(scasetype(), int64(ncas)))
init = append(init, typecheck.Stmt(ir.NewAssignStmt(base.Pos, selv, nil)))
// No initialization for order; runtime.selectgo is responsible for that.
- order := typecheck.Temp(types.NewArray(types.Types[types.TUINT16], 2*int64(ncas)))
+ order := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewArray(types.Types[types.TUINT16], 2*int64(ncas)))
var pc0, pcs ir.Node
if base.Flag.Race {
- pcs = typecheck.Temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas)))
+ pcs = typecheck.TempAt(base.Pos, ir.CurFunc, types.NewArray(types.Types[types.TUINTPTR], int64(ncas)))
pc0 = typecheck.Expr(typecheck.NodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(base.Pos, 0))))
} else {
pc0 = typecheck.NodNil()
@@ -220,13 +221,13 @@ func walkSelectCases(cases []*ir.CommClause) []ir.Node {
// run the select
base.Pos = sellineno
- chosen := typecheck.Temp(types.Types[types.TINT])
- recvOK := typecheck.Temp(types.Types[types.TBOOL])
+ chosen := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+ recvOK := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL])
r := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
r.Lhs = []ir.Node{chosen, recvOK}
fn := typecheck.LookupRuntime("selectgo")
var fnInit ir.Nodes
- r.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), &fnInit, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, ir.NewInt(base.Pos, int64(nsends)), ir.NewInt(base.Pos, int64(nrecvs)), ir.NewBool(base.Pos, dflt == nil))}
+ r.Rhs = []ir.Node{mkcall1(fn, fn.Type().ResultsTuple(), &fnInit, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, ir.NewInt(base.Pos, int64(nsends)), ir.NewInt(base.Pos, int64(nrecvs)), ir.NewBool(base.Pos, dflt == nil))}
init = append(init, fnInit...)
init = append(init, typecheck.Stmt(r))
@@ -287,11 +288,15 @@ var scase *types.Type
// Keep in sync with src/runtime/select.go.
func scasetype() *types.Type {
if scase == nil {
- scase = types.NewStruct([]*types.Field{
+ n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("scase"))
+ scase = types.NewNamed(n)
+ n.SetType(scase)
+ n.SetTypecheck(1)
+
+ scase.SetUnderlying(types.NewStruct([]*types.Field{
types.NewField(base.Pos, typecheck.Lookup("c"), types.Types[types.TUNSAFEPTR]),
types.NewField(base.Pos, typecheck.Lookup("elem"), types.Types[types.TUNSAFEPTR]),
- })
- scase.SetNoalg(true)
+ }))
}
return scase
}
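A sketch of the runtime-side layout that scasetype mirrors (see src/runtime/select.go); only the two fields written above are shown:

    package sketch

    import "unsafe"

    type scase struct {
        c    unsafe.Pointer // channel operand
        elem unsafe.Pointer // data element to send from or receive into
    }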
diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go
index c6a03d2bd8..b2a226e078 100644
--- a/src/cmd/compile/internal/walk/stmt.go
+++ b/src/cmd/compile/internal/walk/stmt.go
@@ -48,13 +48,14 @@ func walkStmt(n ir.Node) ir.Node {
ir.ODELETE,
ir.OSEND,
ir.OPRINT,
- ir.OPRINTN,
+ ir.OPRINTLN,
ir.OPANIC,
ir.ORECOVERFP,
ir.OGETG:
if n.Typecheck() == 0 {
base.Fatalf("missing typecheck: %+v", n)
}
+
init := ir.TakeInit(n)
n = walkExpr(n, &init)
if n.Op() == ir.ONAME {
@@ -87,9 +88,8 @@ func walkStmt(n ir.Node) ir.Node {
ir.OGOTO,
ir.OLABEL,
ir.OJUMPTABLE,
+ ir.OINTERFACESWITCH,
ir.ODCL,
- ir.ODCLCONST,
- ir.ODCLTYPE,
ir.OCHECKNIL:
return n
@@ -106,10 +106,11 @@ func walkStmt(n ir.Node) ir.Node {
n := n.(*ir.GoDeferStmt)
ir.CurFunc.SetHasDefer(true)
ir.CurFunc.NumDefers++
- if ir.CurFunc.NumDefers > maxOpenDefers {
+ if ir.CurFunc.NumDefers > maxOpenDefers || n.DeferAt != nil {
// Don't allow open-coded defers if there are more than
// 8 defers in the function, since we use a single
// byte to record active defers.
+ // Also don't allow if we need to use deferprocat.
ir.CurFunc.SetOpenCodedDeferDisallowed(true)
}
if n.Esc() != ir.EscNever {
@@ -138,7 +139,7 @@ func walkStmt(n ir.Node) ir.Node {
n := n.(*ir.TailCallStmt)
var init ir.Nodes
- n.Call.X = walkExpr(n.Call.X, &init)
+ n.Call.Fun = walkExpr(n.Call.Fun, &init)
if len(init) > 0 {
init.Append(n)
@@ -195,7 +196,7 @@ func walkFor(n *ir.ForStmt) ir.Node {
// call without arguments or results.
func validGoDeferCall(call ir.Node) bool {
if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC && len(call.KeepAlive) == 0 {
- sig := call.X.Type()
+ sig := call.Fun.Type()
return sig.NumParams()+sig.NumResults() == 0
}
return false
@@ -210,7 +211,7 @@ func walkGoDefer(n *ir.GoDeferStmt) ir.Node {
var init ir.Nodes
call := n.Call.(*ir.CallExpr)
- call.X = walkExpr(call.X, &init)
+ call.Fun = walkExpr(call.Fun, &init)
if len(init) > 0 {
init.Append(n)
diff --git a/src/cmd/compile/internal/walk/switch.go b/src/cmd/compile/internal/walk/switch.go
index 1a167d363e..b67d0114c7 100644
--- a/src/cmd/compile/internal/walk/switch.go
+++ b/src/cmd/compile/internal/walk/switch.go
@@ -5,15 +5,21 @@
package walk
import (
+ "fmt"
"go/constant"
"go/token"
+ "math/bits"
"sort"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/rttype"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
+ "cmd/internal/obj"
"cmd/internal/src"
)
@@ -232,7 +238,7 @@ func (s *exprSwitch) flush() {
s.done.Append(ir.NewBranchStmt(pos, ir.OGOTO, endLabel))
// Add length case to outer switch.
- cas := ir.NewBasicLit(pos, constant.MakeInt64(runLen(run)))
+ cas := ir.NewInt(pos, runLen(run))
jmp := ir.NewBranchStmt(pos, ir.OGOTO, label)
outer.Add(pos, cas, nil, jmp)
}
@@ -378,17 +384,19 @@ func endsInFallthrough(stmts []ir.Node) (bool, src.XPos) {
// type switch.
func walkSwitchType(sw *ir.SwitchStmt) {
var s typeSwitch
- s.facename = sw.Tag.(*ir.TypeSwitchGuard).X
- sw.Tag = nil
-
- s.facename = walkExpr(s.facename, sw.PtrInit())
- s.facename = copyExpr(s.facename, s.facename.Type(), &sw.Compiled)
- s.okname = typecheck.Temp(types.Types[types.TBOOL])
+ s.srcName = sw.Tag.(*ir.TypeSwitchGuard).X
+ s.srcName = walkExpr(s.srcName, sw.PtrInit())
+ s.srcName = copyExpr(s.srcName, s.srcName.Type(), &sw.Compiled)
+ s.okName = typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL])
+ s.itabName = typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINT8].PtrTo())
// Get interface descriptor word.
// For empty interfaces this will be the type.
// For non-empty interfaces this will be the itab.
- itab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s.facename)
+ srcItab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s.srcName)
+ srcData := ir.NewUnaryExpr(base.Pos, ir.OIDATA, s.srcName)
+ srcData.SetType(types.Types[types.TUINT8].PtrTo())
+ srcData.SetTypecheck(1)
// For empty interfaces, do:
// if e._type == nil {
@@ -397,42 +405,49 @@ func walkSwitchType(sw *ir.SwitchStmt) {
// h := e._type.hash
// Use a similar strategy for non-empty interfaces.
ifNil := ir.NewIfStmt(base.Pos, nil, nil, nil)
- ifNil.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, itab, typecheck.NodNil())
+ ifNil.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, srcItab, typecheck.NodNil())
base.Pos = base.Pos.WithNotStmt() // disable statement marks after the first check.
ifNil.Cond = typecheck.Expr(ifNil.Cond)
ifNil.Cond = typecheck.DefaultLit(ifNil.Cond, nil)
- // ifNil.Nbody assigned at end.
+ // ifNil.Nbody assigned later.
sw.Compiled.Append(ifNil)
// Load hash from type or itab.
- dotHash := typeHashFieldOf(base.Pos, itab)
- s.hashname = copyExpr(dotHash, dotHash.Type(), &sw.Compiled)
+ dotHash := typeHashFieldOf(base.Pos, srcItab)
+ s.hashName = copyExpr(dotHash, dotHash.Type(), &sw.Compiled)
+
+ // Make a label for each case body.
+ labels := make([]*types.Sym, len(sw.Cases))
+ for i := range sw.Cases {
+ labels[i] = typecheck.AutoLabel(".s")
+ }
+ // "jump" to execute if no case matches.
br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)
- var defaultGoto, nilGoto ir.Node
- var body ir.Nodes
- for _, ncase := range sw.Cases {
- caseVar := ncase.Var
-
- // For single-type cases with an interface type,
- // we initialize the case variable as part of the type assertion.
- // In other cases, we initialize it in the body.
- var singleType *types.Type
- if len(ncase.List) == 1 && ncase.List[0].Op() == ir.OTYPE {
- singleType = ncase.List[0].Type()
- }
- caseVarInitialized := false
- label := typecheck.AutoLabel(".s")
- jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label)
+ // Assemble a list of all the types we're looking for.
+ // This pass flattens the case lists and also handles
+ // some unusual cases, like the default and nil cases.
+ type oneCase struct {
+ pos src.XPos
+ jmp ir.Node // jump to body of selected case
+ // The case we're matching. Normally the type we're looking for
+ // is typ.Type(), but when typ is ODYNAMICTYPE the actual type
+ // we're looking for is not a compile-time constant (typ.Type()
+ // will be its shape).
+ typ ir.Node
+ }
+ var cases []oneCase
+ var defaultGoto, nilGoto ir.Node
+ for i, ncase := range sw.Cases {
+ jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, labels[i])
if len(ncase.List) == 0 { // default:
if defaultGoto != nil {
base.Fatalf("duplicate default case not detected during typechecking")
}
defaultGoto = jmp
}
-
for _, n1 := range ncase.List {
if ir.IsNil(n1) { // case nil:
if nilGoto != nil {
@@ -441,60 +456,233 @@ func walkSwitchType(sw *ir.SwitchStmt) {
nilGoto = jmp
continue
}
+ if n1.Op() == ir.ODYNAMICTYPE {
+ // Convert dynamic to static, if the dynamic is actually static.
+ // TODO: why isn't this OTYPE to begin with?
+ dt := n1.(*ir.DynamicType)
+ if dt.RType != nil && dt.RType.Op() == ir.OADDR {
+ addr := dt.RType.(*ir.AddrExpr)
+ if addr.X.Op() == ir.OLINKSYMOFFSET {
+ n1 = ir.TypeNode(n1.Type())
+ }
+ }
+ if dt.ITab != nil && dt.ITab.Op() == ir.OADDR {
+ addr := dt.ITab.(*ir.AddrExpr)
+ if addr.X.Op() == ir.OLINKSYMOFFSET {
+ n1 = ir.TypeNode(n1.Type())
+ }
+ }
+ }
+ cases = append(cases, oneCase{
+ pos: ncase.Pos(),
+ typ: n1,
+ jmp: jmp,
+ })
+ }
+ }
+ if defaultGoto == nil {
+ defaultGoto = br
+ }
+ if nilGoto == nil {
+ nilGoto = defaultGoto
+ }
+ ifNil.Body = []ir.Node{nilGoto}
- if singleType != nil && singleType.IsInterface() {
- s.Add(ncase.Pos(), n1, caseVar, jmp)
- caseVarInitialized = true
+ // Now go through the list of cases, processing groups as we find them.
+ var concreteCases []oneCase
+ var interfaceCases []oneCase
+ flush := func() {
+ // Process all the concrete types first. Because we handle shadowing
+ // below, it is correct to do all the concrete types before all of
+ // the interface types.
+ // The concrete cases can all be handled without a runtime call.
+ if len(concreteCases) > 0 {
+ var clauses []typeClause
+ for _, c := range concreteCases {
+ as := ir.NewAssignListStmt(c.pos, ir.OAS2,
+ []ir.Node{ir.BlankNode, s.okName}, // _, ok =
+ []ir.Node{ir.NewTypeAssertExpr(c.pos, s.srcName, c.typ.Type())}) // iface.(type)
+ nif := ir.NewIfStmt(c.pos, s.okName, []ir.Node{c.jmp}, nil)
+ clauses = append(clauses, typeClause{
+ hash: types.TypeHash(c.typ.Type()),
+ body: []ir.Node{typecheck.Stmt(as), typecheck.Stmt(nif)},
+ })
+ }
+ s.flush(clauses, &sw.Compiled)
+ concreteCases = concreteCases[:0]
+ }
+
+ // The "any" case, if it exists, must be the last interface case, because
+ // it would shadow all subsequent cases. Strip it off here so the runtime
+ // call only needs to handle non-empty interfaces.
+ var anyGoto ir.Node
+ if len(interfaceCases) > 0 && interfaceCases[len(interfaceCases)-1].typ.Type().IsEmptyInterface() {
+ anyGoto = interfaceCases[len(interfaceCases)-1].jmp
+ interfaceCases = interfaceCases[:len(interfaceCases)-1]
+ }
+
+ // Next, process all the interface types with a single call to the runtime.
+ if len(interfaceCases) > 0 {
+
+ // Build an internal/abi.InterfaceSwitch descriptor to pass to the runtime.
+ lsym := types.LocalPkg.Lookup(fmt.Sprintf(".interfaceSwitch.%d", interfaceSwitchGen)).LinksymABI(obj.ABI0)
+ interfaceSwitchGen++
+ c := rttype.NewCursor(lsym, 0, rttype.InterfaceSwitch)
+ c.Field("Cache").WritePtr(typecheck.LookupRuntimeVar("emptyInterfaceSwitchCache"))
+ c.Field("NCases").WriteInt(int64(len(interfaceCases)))
+ array, sizeDelta := c.Field("Cases").ModifyArray(len(interfaceCases))
+ for i, c := range interfaceCases {
+ array.Elem(i).WritePtr(reflectdata.TypeSym(c.typ.Type()).Linksym())
+ }
+ objw.Global(lsym, int32(rttype.InterfaceSwitch.Size()+sizeDelta), obj.LOCAL)
+ // The GC only needs to see the first pointer in the structure (all the others
+ // are to static locations). So the InterfaceSwitch type itself is fine, even
+ // though it might not cover the whole array we wrote above.
+ lsym.Gotype = reflectdata.TypeLinksym(rttype.InterfaceSwitch)
+
+ // Call runtime to do switch
+ // case, itab = runtime.interfaceSwitch(&descriptor, typeof(arg))
+ var typeArg ir.Node
+ if s.srcName.Type().IsEmptyInterface() {
+ typeArg = ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUINT8].PtrTo(), srcItab)
} else {
- s.Add(ncase.Pos(), n1, nil, jmp)
+ typeArg = itabType(srcItab)
+ }
+ caseVar := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+ isw := ir.NewInterfaceSwitchStmt(base.Pos, caseVar, s.itabName, typeArg, dotHash, lsym)
+ sw.Compiled.Append(isw)
+
+ // Switch on the result of the call (or cache lookup).
+ var newCases []*ir.CaseClause
+ for i, c := range interfaceCases {
+ newCases = append(newCases, &ir.CaseClause{
+ List: []ir.Node{ir.NewInt(base.Pos, int64(i))},
+ Body: []ir.Node{c.jmp},
+ })
}
+ // TODO: add len(newCases) case, mark switch as bounded
+ sw2 := ir.NewSwitchStmt(base.Pos, caseVar, newCases)
+ sw.Compiled.Append(typecheck.Stmt(sw2))
+ interfaceCases = interfaceCases[:0]
}
- body.Append(ir.NewLabelStmt(ncase.Pos(), label))
- if caseVar != nil && !caseVarInitialized {
- val := s.facename
- if singleType != nil {
- // We have a single concrete type. Extract the data.
- if singleType.IsInterface() {
- base.Fatalf("singleType interface should have been handled in Add")
- }
- val = ifaceData(ncase.Pos(), s.facename, singleType)
+ if anyGoto != nil {
+ // We've already handled the nil case, so everything
+ // that reaches here matches the "any" case.
+ sw.Compiled.Append(anyGoto)
+ }
+ }
+caseLoop:
+ for _, c := range cases {
+ if c.typ.Op() == ir.ODYNAMICTYPE {
+ flush() // process all previous cases
+ dt := c.typ.(*ir.DynamicType)
+ dot := ir.NewDynamicTypeAssertExpr(c.pos, ir.ODYNAMICDOTTYPE, s.srcName, dt.RType)
+ dot.ITab = dt.ITab
+ dot.SetType(c.typ.Type())
+ dot.SetTypecheck(1)
+
+ as := ir.NewAssignListStmt(c.pos, ir.OAS2, nil, nil)
+ as.Lhs = []ir.Node{ir.BlankNode, s.okName} // _, ok =
+ as.Rhs = []ir.Node{dot}
+ typecheck.Stmt(as)
+
+ nif := ir.NewIfStmt(c.pos, s.okName, []ir.Node{c.jmp}, nil)
+ sw.Compiled.Append(as, nif)
+ continue
+ }
+
+ // Check for shadowing (a case that will never fire because
+ // a previous case would have always fired first). This check
+ // allows us to reorder concrete and interface cases.
+ // (TODO: these should be vet failures, maybe?)
+ for _, ic := range interfaceCases {
+ // An interface type case will shadow all
+ // subsequent types that implement that interface.
+ if typecheck.Implements(c.typ.Type(), ic.typ.Type()) {
+ continue caseLoop
}
- if len(ncase.List) == 1 && ncase.List[0].Op() == ir.ODYNAMICTYPE {
- dt := ncase.List[0].(*ir.DynamicType)
- x := ir.NewDynamicTypeAssertExpr(ncase.Pos(), ir.ODYNAMICDOTTYPE, val, dt.RType)
- x.ITab = dt.ITab
- x.SetType(caseVar.Type())
- x.SetTypecheck(1)
- val = x
+ // Note that we don't need to worry about:
+ // 1. Two concrete types shadowing each other. That's
+ // disallowed by the spec.
+ // 2. A concrete type shadowing an interface type.
+ // That can never happen, as interface types can
+ // be satisfied by an infinite set of concrete types.
+ // The correctness of this step also depends on handling
+ // the dynamic type cases separately, as we do above.
+ }
+
+ if c.typ.Type().IsInterface() {
+ interfaceCases = append(interfaceCases, c)
+ } else {
+ concreteCases = append(concreteCases, c)
+ }
+ }
+ flush()
+
+ sw.Compiled.Append(defaultGoto) // if none of the cases matched
+
+ // Now generate all the case bodies
+ for i, ncase := range sw.Cases {
+ sw.Compiled.Append(ir.NewLabelStmt(ncase.Pos(), labels[i]))
+ if caseVar := ncase.Var; caseVar != nil {
+ val := s.srcName
+ if len(ncase.List) == 1 {
+ // single type. We have to downcast the input value to the target type.
+ if ncase.List[0].Op() == ir.OTYPE { // single compile-time known type
+ t := ncase.List[0].Type()
+ if t.IsInterface() {
+ // This case is an interface. Build case value from input interface.
+ // The data word will always be the same, but the itab/type changes.
+ if t.IsEmptyInterface() {
+ var typ ir.Node
+ if s.srcName.Type().IsEmptyInterface() {
+ // E->E, nothing to do, type is already correct.
+ typ = srcItab
+ } else {
+ // I->E, load type out of itab
+ typ = itabType(srcItab)
+ typ.SetPos(ncase.Pos())
+ }
+ val = ir.NewBinaryExpr(ncase.Pos(), ir.OMAKEFACE, typ, srcData)
+ } else {
+ // The itab we need was returned by a runtime.interfaceSwitch call.
+ val = ir.NewBinaryExpr(ncase.Pos(), ir.OMAKEFACE, s.itabName, srcData)
+ }
+ } else {
+ // This case is a concrete type, just read its value out of the interface.
+ val = ifaceData(ncase.Pos(), s.srcName, t)
+ }
+ } else if ncase.List[0].Op() == ir.ODYNAMICTYPE { // single runtime known type
+ dt := ncase.List[0].(*ir.DynamicType)
+ x := ir.NewDynamicTypeAssertExpr(ncase.Pos(), ir.ODYNAMICDOTTYPE, val, dt.RType)
+ x.ITab = dt.ITab
+ val = x
+ } else if ir.IsNil(ncase.List[0]) {
+ } else {
+ base.Fatalf("unhandled type switch case %v", ncase.List[0])
+ }
+ val.SetType(caseVar.Type())
+ val.SetTypecheck(1)
}
l := []ir.Node{
ir.NewDecl(ncase.Pos(), ir.ODCL, caseVar),
ir.NewAssignStmt(ncase.Pos(), caseVar, val),
}
typecheck.Stmts(l)
- body.Append(l...)
+ sw.Compiled.Append(l...)
}
- body.Append(ncase.Body...)
- body.Append(br)
- }
- sw.Cases = nil
-
- if defaultGoto == nil {
- defaultGoto = br
- }
- if nilGoto == nil {
- nilGoto = defaultGoto
+ sw.Compiled.Append(ncase.Body...)
+ sw.Compiled.Append(br)
}
- ifNil.Body = []ir.Node{nilGoto}
-
- s.Emit(&sw.Compiled)
- sw.Compiled.Append(defaultGoto)
- sw.Compiled.Append(body.Take()...)
walkStmtList(sw.Compiled)
+ sw.Tag = nil
+ sw.Cases = nil
}
+var interfaceSwitchGen int
+
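An illustrative example of a switch this rewrite targets (names are made up): concrete cases are matched by type-hash comparison, while interface-typed cases are grouped and resolved by a single runtime.interfaceSwitch call through the generated descriptor:

    package sketch

    import "fmt"

    func describe(x any) string {
        switch x.(type) {
        case int, string: // concrete cases: no runtime call needed
            return "basic"
        case error: // interface case, resolved via runtime.interfaceSwitch
            return "error"
        case fmt.Stringer: // grouped into the same interfaceSwitch descriptor
            return "stringer"
        default:
            return "other"
        }
    }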
// typeHashFieldOf returns an expression to select the type hash field
// from an interface's descriptor word (whether a *runtime._type or
// *runtime.itab pointer).
@@ -506,7 +694,7 @@ func typeHashFieldOf(pos src.XPos, itab *ir.UnaryExpr) *ir.SelectorExpr {
if itab.X.Type().IsEmptyInterface() {
// runtime._type's hash field
if rtypeHashField == nil {
- rtypeHashField = runtimeField("hash", int64(2*types.PtrSize), types.Types[types.TUINT32])
+ rtypeHashField = runtimeField("hash", rttype.Type.OffsetOf("Hash"), types.Types[types.TUINT32])
}
hashField = rtypeHashField
} else {
@@ -524,12 +712,10 @@ var rtypeHashField, itabHashField *types.Field
// A typeSwitch walks a type switch.
type typeSwitch struct {
// Temporary variables (i.e., ONAMEs) used by type switch dispatch logic:
- facename ir.Node // value being type-switched on
- hashname ir.Node // type hash of the value being type-switched on
- okname ir.Node // boolean used for comma-ok type assertions
-
- done ir.Nodes
- clauses []typeClause
+ srcName ir.Node // value being type-switched on
+ hashName ir.Node // type hash of the value being type-switched on
+ okName ir.Node // boolean used for comma-ok type assertions
+ itabName ir.Node // itab value to use for first word of non-empty interface
}
type typeClause struct {
@@ -537,68 +723,7 @@ type typeClause struct {
body ir.Nodes
}
-func (s *typeSwitch) Add(pos src.XPos, n1 ir.Node, caseVar *ir.Name, jmp ir.Node) {
- typ := n1.Type()
- var body ir.Nodes
- if caseVar != nil {
- l := []ir.Node{
- ir.NewDecl(pos, ir.ODCL, caseVar),
- ir.NewAssignStmt(pos, caseVar, nil),
- }
- typecheck.Stmts(l)
- body.Append(l...)
- } else {
- caseVar = ir.BlankNode.(*ir.Name)
- }
-
- // cv, ok = iface.(type)
- as := ir.NewAssignListStmt(pos, ir.OAS2, nil, nil)
- as.Lhs = []ir.Node{caseVar, s.okname} // cv, ok =
- switch n1.Op() {
- case ir.OTYPE:
- // Static type assertion (non-generic)
- dot := ir.NewTypeAssertExpr(pos, s.facename, typ) // iface.(type)
- as.Rhs = []ir.Node{dot}
- case ir.ODYNAMICTYPE:
- // Dynamic type assertion (generic)
- dt := n1.(*ir.DynamicType)
- dot := ir.NewDynamicTypeAssertExpr(pos, ir.ODYNAMICDOTTYPE, s.facename, dt.RType)
- dot.ITab = dt.ITab
- dot.SetType(typ)
- dot.SetTypecheck(1)
- as.Rhs = []ir.Node{dot}
- default:
- base.Fatalf("unhandled type case %s", n1.Op())
- }
- appendWalkStmt(&body, as)
-
- // if ok { goto label }
- nif := ir.NewIfStmt(pos, nil, nil, nil)
- nif.Cond = s.okname
- nif.Body = []ir.Node{jmp}
- body.Append(nif)
-
- if n1.Op() == ir.OTYPE && !typ.IsInterface() {
- // Defer static, noninterface cases so they can be binary searched by hash.
- s.clauses = append(s.clauses, typeClause{
- hash: types.TypeHash(n1.Type()),
- body: body,
- })
- return
- }
-
- s.flush()
- s.done.Append(body.Take()...)
-}
-
-func (s *typeSwitch) Emit(out *ir.Nodes) {
- s.flush()
- out.Append(s.done.Take()...)
-}
-
-func (s *typeSwitch) flush() {
- cc := s.clauses
- s.clauses = nil
+func (s *typeSwitch) flush(cc []typeClause, compiled *ir.Nodes) {
if len(cc) == 0 {
return
}
@@ -617,21 +742,100 @@ func (s *typeSwitch) flush() {
}
cc = merged
- // TODO: figure out if we could use a jump table using some low bits of the type hashes.
- binarySearch(len(cc), &s.done,
+ if s.tryJumpTable(cc, compiled) {
+ return
+ }
+ binarySearch(len(cc), compiled,
func(i int) ir.Node {
- return ir.NewBinaryExpr(base.Pos, ir.OLE, s.hashname, ir.NewInt(base.Pos, int64(cc[i-1].hash)))
+ return ir.NewBinaryExpr(base.Pos, ir.OLE, s.hashName, ir.NewInt(base.Pos, int64(cc[i-1].hash)))
},
func(i int, nif *ir.IfStmt) {
// TODO(mdempsky): Omit hash equality check if
// there's only one type.
c := cc[i]
- nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, s.hashname, ir.NewInt(base.Pos, int64(c.hash)))
+ nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, s.hashName, ir.NewInt(base.Pos, int64(c.hash)))
nif.Body.Append(c.body.Take()...)
},
)
}
+// Try to implement the clauses with a jump table. Returns true if successful.
+func (s *typeSwitch) tryJumpTable(cc []typeClause, out *ir.Nodes) bool {
+ const minCases = 5 // have at least minCases cases in the switch
+ if base.Flag.N != 0 || !ssagen.Arch.LinkArch.CanJumpTable || base.Ctxt.Retpoline {
+ return false
+ }
+ if len(cc) < minCases {
+ return false // not enough cases for it to be worth it
+ }
+ hashes := make([]uint32, len(cc))
+ // b = # of bits to use. Start with the minimum number of
+ // bits possible, but try a few larger sizes if needed.
+ b0 := bits.Len(uint(len(cc) - 1))
+ for b := b0; b < b0+3; b++ {
+ pickI:
+ for i := 0; i <= 32-b; i++ { // starting bit position
+ // Compute the hash we'd get from all the cases,
+ // selecting b bits starting at bit i.
+ hashes = hashes[:0]
+ for _, c := range cc {
+ h := c.hash >> i & (1<<b - 1)
+ hashes = append(hashes, h)
+ }
+ // Order by increasing hash.
+ sort.Slice(hashes, func(j, k int) bool {
+ return hashes[j] < hashes[k]
+ })
+ for j := 1; j < len(hashes); j++ {
+ if hashes[j] == hashes[j-1] {
+ // There is a duplicate hash; try a different b/i pair.
+ continue pickI
+ }
+ }
+
+ // All hashes are distinct. Use these values of b and i.
+ h := s.hashName
+ if i != 0 {
+ h = ir.NewBinaryExpr(base.Pos, ir.ORSH, h, ir.NewInt(base.Pos, int64(i)))
+ }
+ h = ir.NewBinaryExpr(base.Pos, ir.OAND, h, ir.NewInt(base.Pos, int64(1<<b-1)))
+ h = typecheck.Expr(h)
+
+ // Build jump table.
+ jt := ir.NewJumpTableStmt(base.Pos, h)
+ jt.Cases = make([]constant.Value, 1<<b)
+ jt.Targets = make([]*types.Sym, 1<<b)
+ out.Append(jt)
+
+ // Start with all hashes going to the didn't-match target.
+ noMatch := typecheck.AutoLabel(".s")
+ for j := 0; j < 1<<b; j++ {
+ jt.Cases[j] = constant.MakeInt64(int64(j))
+ jt.Targets[j] = noMatch
+ }
+ // This statement is not reachable, but it will make it obvious that we don't
+ // fall through to the first case.
+ out.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, noMatch))
+
+ // Emit each of the actual cases.
+ for _, c := range cc {
+ h := c.hash >> i & (1<<b - 1)
+ label := typecheck.AutoLabel(".s")
+ jt.Targets[h] = label
+ out.Append(ir.NewLabelStmt(base.Pos, label))
+ out.Append(c.body...)
+ // We reach here if the hash matches but the type equality test fails.
+ out.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, noMatch))
+ }
+ // Emit the label to jump to when the type doesn't match any case.
+ out.Append(ir.NewLabelStmt(base.Pos, noMatch))
+ return true
+ }
+ }
+ // Couldn't find a perfect hash. Fall back to binary search.
+ return false
+}
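A worked instance of the bit search above: with six clauses, b0 = bits.Len(5) = 3, so the loop first looks for a 3-bit window (then 4, then 5 bits) at some shift i where the extracted slots are pairwise distinct; the helper below simply restates the extraction expression:

    func slot(hash uint32, i, b int) uint32 {
        return hash >> i & (1<<b - 1) // the b bits of hash starting at bit i
    }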
+
// binarySearch constructs a binary search tree for handling n cases,
// and appends it to out. It's used for efficiently implementing
// switch statements.
@@ -736,6 +940,7 @@ func stringSearch(expr ir.Node, cc []exprClause, out *ir.Nodes) {
// Convert expr to a []int8
slice := ir.NewConvExpr(base.Pos, ir.OSTR2BYTESTMP, types.NewSlice(types.Types[types.TINT8]), expr)
slice.SetTypecheck(1) // legacy typechecker doesn't handle this op
+ slice.MarkNonNil()
// Load the byte we're splitting on.
load := ir.NewIndexExpr(base.Pos, slice, ir.NewInt(base.Pos, int64(bestIdx)))
// Compare with the value we're splitting on.
diff --git a/src/cmd/compile/internal/walk/temp.go b/src/cmd/compile/internal/walk/temp.go
index d2ffb226a9..886b5beec3 100644
--- a/src/cmd/compile/internal/walk/temp.go
+++ b/src/cmd/compile/internal/walk/temp.go
@@ -25,7 +25,7 @@ func initStackTemp(init *ir.Nodes, tmp *ir.Name, val ir.Node) *ir.AddrExpr {
// allocated temporary variable of the given type. Statements to
// zero-initialize tmp are appended to init.
func stackTempAddr(init *ir.Nodes, typ *types.Type) *ir.AddrExpr {
- return initStackTemp(init, typecheck.Temp(typ), nil)
+ return initStackTemp(init, typecheck.TempAt(base.Pos, ir.CurFunc, typ), nil)
}
// stackBufAddr returns the expression &tmp, where tmp is a newly
@@ -35,6 +35,6 @@ func stackBufAddr(len int64, elem *types.Type) *ir.AddrExpr {
if elem.HasPointers() {
base.FatalfAt(base.Pos, "%v has pointers", elem)
}
- tmp := typecheck.Temp(types.NewArray(elem, len))
+ tmp := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewArray(elem, len))
return typecheck.Expr(typecheck.NodAddr(tmp)).(*ir.AddrExpr)
}
diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go
index 149e23ab7c..001edcc332 100644
--- a/src/cmd/compile/internal/walk/walk.go
+++ b/src/cmd/compile/internal/walk/walk.go
@@ -5,7 +5,6 @@
package walk
import (
- "errors"
"fmt"
"cmd/compile/internal/base"
@@ -19,7 +18,6 @@ import (
// The constant is known to runtime.
const tmpstringbufsize = 32
-const zeroValSize = 1024 // must match value of runtime/map.go:maxZero
func Walk(fn *ir.Func) {
ir.CurFunc = fn
@@ -46,10 +44,6 @@ func Walk(fn *ir.Func) {
ir.DumpList(s, ir.CurFunc.Body)
}
- if base.Flag.Cfg.Instrumenting {
- instrument(fn)
- }
-
// Eagerly compute sizes of all variables for SSA.
for _, n := range fn.Dcl {
types.CalcSize(n.Type())
@@ -98,8 +92,6 @@ func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt {
return n
}
-var stop = errors.New("stop")
-
func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallExpr {
if init == nil {
base.Fatalf("mkcall with nil init: %v", fn)
@@ -144,42 +136,34 @@ func chanfn(name string, n int, t *types.Type) ir.Node {
if !t.IsChan() {
base.Fatalf("chanfn %v", t)
}
- fn := typecheck.LookupRuntime(name)
switch n {
- default:
- base.Fatalf("chanfn %d", n)
case 1:
- fn = typecheck.SubstArgTypes(fn, t.Elem())
+ return typecheck.LookupRuntime(name, t.Elem())
case 2:
- fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem())
+ return typecheck.LookupRuntime(name, t.Elem(), t.Elem())
}
- return fn
+ base.Fatalf("chanfn %d", n)
+ return nil
}
func mapfn(name string, t *types.Type, isfat bool) ir.Node {
if !t.IsMap() {
base.Fatalf("mapfn %v", t)
}
- fn := typecheck.LookupRuntime(name)
if mapfast(t) == mapslow || isfat {
- fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem())
- } else {
- fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Elem())
+ return typecheck.LookupRuntime(name, t.Key(), t.Elem(), t.Key(), t.Elem())
}
- return fn
+ return typecheck.LookupRuntime(name, t.Key(), t.Elem(), t.Elem())
}
func mapfndel(name string, t *types.Type) ir.Node {
if !t.IsMap() {
base.Fatalf("mapfn %v", t)
}
- fn := typecheck.LookupRuntime(name)
if mapfast(t) == mapslow {
- fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key())
- } else {
- fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
+ return typecheck.LookupRuntime(name, t.Key(), t.Elem(), t.Key())
}
- return fn
+ return typecheck.LookupRuntime(name, t.Key(), t.Elem())
}
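
The mapfast/mapslow check above chooses between the runtime's generic and specialized map helpers. A hypothetical, much-simplified sketch of that kind of selection (the real mapfast works from *types.Type and also considers pointer-ness and the isfat flag, not just key size):

	package main

	import "fmt"

	// pickMapAccess returns the name of a plausible runtime access helper
	// for a key described by its size and string-ness. Illustrative only;
	// it is not the compiler's actual selection logic.
	func pickMapAccess(keySize int64, keyIsString bool) string {
		switch {
		case keyIsString:
			return "mapaccess1_faststr"
		case keySize == 4:
			return "mapaccess1_fast32"
		case keySize == 8:
			return "mapaccess1_fast64"
		default:
			return "mapaccess1" // generic helper used when no fast variant applies
		}
	}

	func main() {
		fmt.Println(pickMapAccess(8, false))  // a fast variant for 8-byte keys
		fmt.Println(pickMapAccess(16, false)) // falls back to the generic helper
	}
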
const (
@@ -344,7 +328,7 @@ func mayCall(n ir.Node) bool {
return n.Type().IsString() || n.Type().IsFloat()
case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OLINKSYMOFFSET, ir.OMETHEXPR,
- ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOMPLEX, ir.OEFACE,
+ ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOMPLEX, ir.OMAKEFACE,
ir.OADDR, ir.OBITNOT, ir.ONOT, ir.OPLUS,
ir.OCAP, ir.OIMAG, ir.OLEN, ir.OREAL,
ir.OCONVNOP, ir.ODOT,