[MLton-commit] r5566
Matthew Fluet
fluet at mlton.org
Fri May 18 10:16:27 PDT 2007
Native AMD64 codegen.
This commit changes the amd64-* target platforms to use the native
amd64 codegen.
All regressions compile and pass (with -debug true); a self-compile
bootstraps without issue.
The amd64 codegen is basically a copy/paste/edit of the x86 codegen.
There is probably some commonality that could be factored, but there
are lots of minor differences scattered everywhere. And, heck, MLton
would defunctorize and monomorphise everything anyway, so we aren't
likely to save compile time or code size.
The most significant difference between the x86 codegen and the amd64
codegen is that on amd64, we use the SSE2 instructions and XMM
registers for floating-point operations, entirely ignoring the x87
floating-point stack. This supports the basic operations
(+,-,*,/,==,?=,<,<=), but all MATH functions go through libmath C
functions (gcc does the same thing). The nice bonus is that this
gives IEEE-compliant floating-point operations; i.e., Real64 ops
compute at 64 bits of precision and Real32 ops compute at 32 bits of
precision.
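
As a point of reference on the ?= comparison above (which the real.sml
diff below also touches): the Basis Library defines Real.?= as the IEEE
"unordered or equal" test. A minimal sketch of the semantics the native
comparison has to implement, with nan computed at run time:

  val nan : real = 0.0 / 0.0
  val _ = print (Bool.toString (Real.== (nan, nan)) ^ "\n")  (* false *)
  val _ = print (Bool.toString (Real.?= (nan, nan)) ^ "\n")  (* true: unordered *)
  val _ = print (Bool.toString (Real.?= (1.0, 2.0)) ^ "\n")  (* false: ordered, unequal *)
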
The other big difference is the larger register set, with 64/32/16/8-bit
variants of each register. The 8-bit high registers of RAX, RBX, RCX,
and RDX are ignored. This slightly complicates 8-bit multiplication and
division, which use AH rather than DL (in contrast to the 16/32/64-bit
multiplication and division, which use RDX). When an 8-bit mult or
div is emitted to the .S file, AH is copied to/from DL in the
appropriate manner.
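
A tiny probe of that path, with arbitrary Word8 values (multiplication,
division, and remainder on Word8 are what end up in the 8-bit mul/div
handling described above):

  val a : Word8.word = 0wx5B
  val b : Word8.word = 0wx07
  val () = print (concat [Word8.toString (Word8.* (a, b)), " ",
                          Word8.toString (Word8.div (a, b)), " ",
                          Word8.toString (Word8.mod (a, b)), "\n"])
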
The -codegen option has changed from
-codegen {native|c|bytecode}
to
-codegen {x86|amd64|c|bytecode}
in order to distinguish the two native codegens.
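
For example, a build that previously selected the native codegen on an
amd64-* target would now pass (foo.sml is just a placeholder file name):

  mlton -codegen amd64 foo.sml
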
----------------------------------------------------------------------
U mlton/branches/on-20050822-x86_64-branch/basis-library/primitive/prim-mlton.sml
U mlton/branches/on-20050822-x86_64-branch/basis-library/real/real.sml
U mlton/branches/on-20050822-x86_64-branch/doc/changelog
A mlton/branches/on-20050822-x86_64-branch/include/amd64-main.h
U mlton/branches/on-20050822-x86_64-branch/include/x86-main.h
U mlton/branches/on-20050822-x86_64-branch/mlton/Makefile
U mlton/branches/on-20050822-x86_64-branch/mlton/backend/ssa-to-rssa.fun
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-allocate-registers.fun
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-allocate-registers.sig
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-codegen.fun
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-codegen.sig
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-entry-transfer.fun
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-entry-transfer.sig
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-generate-transfers.fun
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-generate-transfers.sig
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-jump-info.fun
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-jump-info.sig
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-live-transfers.fun
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-live-transfers.sig
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-liveness.fun
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-liveness.sig
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-loop-info.fun
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-loop-info.sig
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-mlton-basic.fun
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-mlton-basic.sig
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-mlton.fun
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-mlton.sig
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-pseudo.sig
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-simplify.fun
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-simplify.sig
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-translate.fun
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-translate.sig
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-validate.fun
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-validate.sig
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64.fun
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64.sig
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/peephole.fun
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/peephole.sig
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/sources.cm
A mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/sources.mlb
U mlton/branches/on-20050822-x86_64-branch/mlton/codegen/sources.cm
U mlton/branches/on-20050822-x86_64-branch/mlton/codegen/sources.mlb
U mlton/branches/on-20050822-x86_64-branch/mlton/codegen/x86-codegen/x86-mlton.fun
U mlton/branches/on-20050822-x86_64-branch/mlton/codegen/x86-codegen/x86-simplify.fun
U mlton/branches/on-20050822-x86_64-branch/mlton/codegen/x86-codegen/x86-simplify.sig
U mlton/branches/on-20050822-x86_64-branch/mlton/control/control-flags.sig
U mlton/branches/on-20050822-x86_64-branch/mlton/control/control-flags.sml
U mlton/branches/on-20050822-x86_64-branch/mlton/main/compile.fun
U mlton/branches/on-20050822-x86_64-branch/mlton/main/lookup-constant.fun
U mlton/branches/on-20050822-x86_64-branch/mlton/main/main.fun
----------------------------------------------------------------------
Modified: mlton/branches/on-20050822-x86_64-branch/basis-library/primitive/prim-mlton.sml
===================================================================
--- mlton/branches/on-20050822-x86_64-branch/basis-library/primitive/prim-mlton.sml 2007-05-18 15:06:16 UTC (rev 5565)
+++ mlton/branches/on-20050822-x86_64-branch/basis-library/primitive/prim-mlton.sml 2007-05-18 17:16:17 UTC (rev 5566)
@@ -1,4 +1,4 @@
-(* Copyright (C) 1999-2006 Henry Cejtin, Matthew Fluet, Suresh
+(* Copyright (C) 1999-2007 Henry Cejtin, Matthew Fluet, Suresh
* Jagannathan, and Stephen Weeks.
* Copyright (C) 1997-2000 NEC Research Institute.
*
@@ -59,18 +59,21 @@
structure Codegen =
struct
- datatype t = Bytecode | C | Native
+ datatype t = Bytecode | C | x86 | amd64
val codegen =
case _build_const "MLton_Codegen_codegen": Int32.int; of
0 => Bytecode
| 1 => C
- | 2 => Native
+ | 2 => x86
+ | 3 => amd64
| _ => raise Primitive.Exn.Fail8 "MLton_Codegen_codegen"
val isBytecode = codegen = Bytecode
val isC = codegen = C
- val isNative = codegen = Native
+ val isX86 = codegen = x86
+ val isAmd64 = codegen = amd64
+ (* val isNative = isX86 orelse isAmd64 *)
end
structure Exn =
Modified: mlton/branches/on-20050822-x86_64-branch/basis-library/real/real.sml
===================================================================
--- mlton/branches/on-20050822-x86_64-branch/basis-library/real/real.sml 2007-05-18 15:06:16 UTC (rev 5565)
+++ mlton/branches/on-20050822-x86_64-branch/basis-library/real/real.sml 2007-05-18 17:16:17 UTC (rev 5566)
@@ -112,7 +112,7 @@
val class = IEEEReal.mkClass R.class
val abs =
- if MLton.Codegen.isNative
+ if MLton.Codegen.isX86
then abs
else
fn x =>
@@ -136,7 +136,7 @@
fun isNormal r = class r = NORMAL
val op ?= =
- if MLton.Codegen.isNative
+ if MLton.Codegen.isX86 orelse MLton.Codegen.isAmd64
then R.?=
else
fn (x, y) =>
@@ -238,7 +238,7 @@
man * (if Int.< (exp, 0) then zero else posInf)
val fromManExp =
- if MLton.Codegen.isNative
+ if MLton.Codegen.isX86
then fromManExp
else
fn {exp, man} =>
@@ -782,7 +782,7 @@
(* The x86 doesn't get exp right on infs. *)
val exp =
- if MLton.Codegen.isNative
+ if MLton.Codegen.isX86
andalso let open MLton.Platform.Arch in host = X86 end
then (fn x =>
case class x of
Modified: mlton/branches/on-20050822-x86_64-branch/doc/changelog
===================================================================
--- mlton/branches/on-20050822-x86_64-branch/doc/changelog 2007-05-18 15:06:16 UTC (rev 5565)
+++ mlton/branches/on-20050822-x86_64-branch/doc/changelog 2007-05-18 17:16:17 UTC (rev 5566)
@@ -1,5 +1,35 @@
Here are the changes since version 20051202.
+* 2007-05-18
+ - Native amd64 code generator for amd64-* targets.
+ - Eliminate native option from -codegen flag.
+ - Add x86 and amd64 options to -codegen flag.
+
+* 2007-04-29
+ - Improved type checking of RSSA and Machine ILs.
+
+* 2007-04-14
+ - Fixed aliasing issues with basis/Real/*.c files.
+ - Added real/word casts in MLton structure.
+
+* 2007-04-12
+ - Added primitives for bit cast of word to/from real.
+ - Implement PackReal<N>{Big,Little} using PackWord<N>{Big,Little}
+ and bit casts.
+
+* 2007-04-11
+ - Move all system header #include-s to platform/ os headers.
+ - Use C99 <assert.h>, rather than custom "assert.{h,c}".
+
+* 2007-03-13
+ - Implement PackWord<N>{Big,Little} entirely in ML, using an ML
+ byte swap function.
+
+* 2007-02-25
+ - Change amd64-* target platforms from 32-bit compatibility mode
+ (i.e., -m32) to 64-bit mode (i.e., -m64). Currently, only the C
+ codegen is able to generate 64-bit executables.
+
* 2007-02-23
- Removed expert command line switch -coalesce <n>.
- Added expert command line switch -chunkify {coalesce<n>|func|one}.
Copied: mlton/branches/on-20050822-x86_64-branch/include/amd64-main.h (from rev 5541, mlton/branches/on-20050822-x86_64-branch/include/x86-main.h)
===================================================================
--- mlton/branches/on-20050822-x86_64-branch/include/x86-main.h 2007-04-30 03:28:24 UTC (rev 5541)
+++ mlton/branches/on-20050822-x86_64-branch/include/amd64-main.h 2007-05-18 17:16:17 UTC (rev 5566)
@@ -0,0 +1,74 @@
+/* Copyright (C) 2000-2007 Henry Cejtin, Matthew Fluet, Suresh
+ * Jagannathan, and Stephen Weeks.
+ *
+ * MLton is released under a BSD-style license.
+ * See the file MLton-LICENSE for details.
+ */
+
+#ifndef _AMD64_MAIN_H_
+#define _AMD64_MAIN_H_
+
+#include "main.h"
+
+/* Globals */
+Word64 applyFFTempFun;
+Word64 applyFFTempStackArg;
+Word64 applyFFTempRegArg[6];
+Real64 applyFFTempXmmsRegArgD[8];
+Real32 applyFFTempXmmsRegArgS[8];
+Word32 checkTemp;
+Word64 cReturnTemp[16];
+Pointer c_stackP;
+Word64 fpcvtTemp;
+Word8 fpeqTemp;
+Word64 divTemp;
+Word32 indexTemp;
+Word64 raTemp1;
+Word64 spill[32];
+Word64 stackTopTemp;
+
+#ifndef DEBUG_AMD64CODEGEN
+#define DEBUG_AMD64CODEGEN FALSE
+#endif
+
+static GC_frameIndex returnAddressToFrameIndex (GC_returnAddress ra) {
+ return *((GC_frameIndex*)(ra - sizeof(GC_frameIndex)));
+}
+
+#define Main(al, mg, mfs, mmc, pk, ps, ml) \
+void MLton_jumpToSML (pointer jump); \
+void MLton_callFromC () { \
+ pointer jump; \
+ GC_state s; \
+ \
+ if (DEBUG_AMD64CODEGEN) \
+ fprintf (stderr, "MLton_callFromC() starting\n"); \
+ s = &gcState; \
+ s->savedThread = s->currentThread; \
+ s->atomicState += 3; \
+ /* Return to the C Handler thread. */ \
+ GC_switchToThread (s, s->callFromCHandlerThread, 0); \
+ jump = *(pointer*)(s->stackTop - GC_RETURNADDRESS_SIZE); \
+ MLton_jumpToSML(jump); \
+ GC_switchToThread (s, s->savedThread, 0); \
+ s->savedThread = BOGUS_OBJPTR; \
+ if (DEBUG_AMD64CODEGEN) \
+ fprintf (stderr, "MLton_callFromC() done\n"); \
+ return; \
+} \
+int main (int argc, char **argv) { \
+ pointer jump; \
+ extern pointer ml; \
+ \
+ Initialize (al, mg, mfs, mmc, pk, ps); \
+ if (gcState.amOriginal) { \
+ real_Init(); \
+ jump = (pointer)&ml; \
+ } else { \
+ jump = *(pointer*)(gcState.stackTop - GC_RETURNADDRESS_SIZE); \
+ } \
+ MLton_jumpToSML(jump); \
+ return 1; \
+}
+
+#endif /* #ifndef _AMD64_MAIN_H_ */
Modified: mlton/branches/on-20050822-x86_64-branch/include/x86-main.h
===================================================================
--- mlton/branches/on-20050822-x86_64-branch/include/x86-main.h 2007-05-18 15:06:16 UTC (rev 5565)
+++ mlton/branches/on-20050822-x86_64-branch/include/x86-main.h 2007-05-18 17:16:17 UTC (rev 5566)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2000-2005 Henry Cejtin, Matthew Fluet, Suresh
+/* Copyright (C) 2000-2007 Henry Cejtin, Matthew Fluet, Suresh
* Jagannathan, and Stephen Weeks.
*
* MLton is released under a BSD-style license.
@@ -39,7 +39,7 @@
#endif
static GC_frameIndex returnAddressToFrameIndex (GC_returnAddress ra) {
- return *((GC_frameIndex*)(ra - sizeof(GC_frameIndex*)));
+ return *((GC_frameIndex*)(ra - sizeof(GC_frameIndex)));
}
#define Main(al, mg, mfs, mmc, pk, ps, ml) \
Modified: mlton/branches/on-20050822-x86_64-branch/mlton/Makefile
===================================================================
--- mlton/branches/on-20050822-x86_64-branch/mlton/Makefile 2007-05-18 15:06:16 UTC (rev 5565)
+++ mlton/branches/on-20050822-x86_64-branch/mlton/Makefile 2007-05-18 17:16:17 UTC (rev 5566)
@@ -25,7 +25,7 @@
FILE := mlton.mlb
FLAGS += -default-ann 'sequenceNonUnit warn'
FLAGS += -default-ann 'warnUnused true'
- FLAGS += -type-check true -show-types true
+ # FLAGS += -type-check true -show-types true
else
ifeq (cygwin, $(HOST_OS))
# The stubs don't work on Cygwin, since they define spawn in terms of
Modified: mlton/branches/on-20050822-x86_64-branch/mlton/backend/ssa-to-rssa.fun
===================================================================
--- mlton/branches/on-20050822-x86_64-branch/mlton/backend/ssa-to-rssa.fun 2007-05-18 15:06:16 UTC (rev 5565)
+++ mlton/branches/on-20050822-x86_64-branch/mlton/backend/ssa-to-rssa.fun 2007-05-18 17:16:17 UTC (rev 5566)
@@ -489,6 +489,9 @@
| Real_qequal s => realCompare s
| Real_rndToReal (s1, s2) =>
coerce (real s1, realCType s1, real s2, realCType s2)
+ | Real_rndToWord (s1, s2, sg) =>
+ coerce (real s1, realCType s1,
+ word s2, wordCType (s2, sg))
| Real_round s => realUnary s
| Real_sub s => realBinary s
| Thread_returnToC => CFunction.returnToC ()
Copied: mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-allocate-registers.fun (from rev 5560, mlton/branches/on-20050822-x86_64-branch/mlton/codegen/x86-codegen/x86-allocate-registers.fun)
===================================================================
--- mlton/branches/on-20050822-x86_64-branch/mlton/codegen/x86-codegen/x86-allocate-registers.fun 2007-05-16 15:31:52 UTC (rev 5560)
+++ mlton/branches/on-20050822-x86_64-branch/mlton/codegen/amd64-codegen/amd64-allocate-registers.fun 2007-05-18 17:16:17 UTC (rev 5566)
@@ -0,0 +1,10296 @@
+(* Copyright (C) 1999-2007 Henry Cejtin, Matthew Fluet, Suresh
+ * Jagannathan, and Stephen Weeks.
+ * Copyright (C) 1997-2000 NEC Research Institute.
+ *
+ * MLton is released under a BSD-style license.
+ * See the file MLton-LICENSE for details.
+ *)
+
+functor amd64AllocateRegisters(S: AMD64_ALLOCATE_REGISTERS_STRUCTS) : AMD64_ALLOCATE_REGISTERS =
+struct
+
+ open S
+ open amd64
+
+ val tracer = amd64.tracer
+ val tracerTop = amd64.tracerTop
+
+ fun track memloc = let
+ val trackClasses
+ = ClassSet.+(ClassSet.+
+ (!amd64MLton.Classes.livenessClasses,
+ !amd64MLton.Classes.holdClasses),
+ ClassSet.fromList
+ [amd64MLton.Classes.StaticNonTemp,
+ amd64MLton.Classes.CArg])
+ in
+ ClassSet.contains(trackClasses, MemLoc.class memloc)
+ end
+ fun volatile memloc = let
+ val volatileClasses
+ = !amd64MLton.Classes.volatileClasses
+ in
+ ClassSet.contains(volatileClasses, MemLoc.class memloc)
+ end
+
+ fun partition(l, p)
+ = let
+ val rec partition'
+ = fn ([],PS) => PS
+ | (h::t,PS) => let
+ val rec partition''
+ = fn [] => [[h]]
+ | P::PS => if List.exists(P,fn x => p(h, x))
+ then (h::P)::PS
+ else P::(partition'' PS)
+ in
+ partition'(t,partition'' PS)
+ end
+ in
+ partition'(l,[])
+ end
+
+ fun totalOrder (l, plt)
+ = let
+ val rec totalOrder'
+ = fn ([],l) => l
+ | (h::t,l) => let
+ val rec split
+ = fn (lt,t)
+ => case List.splitPrefix
+ (t, fn x => plt(x,h))
+ of (nil,t) => lt@[h]@t
+ | (lt',t) => split(lt@lt',t)
+ in
+ totalOrder'(t,split([],l))
+ end
+ in
+ totalOrder'(l,[])
+ end
+
+ val bool_lt
+ = fn (false, true) => true
+ | _ => false
+
+ val bool_gt
+ = fn (true, false) => true
+ | _ => false
+
+ fun option_lt lt
+ = fn (SOME x, SOME y) => lt (x,y)
+ | (NONE, SOME _) => true
+ | _ => false
+
+ structure Liveness =
+ struct
+
+ datatype futureMemlocTag = FLIVE
+ | FCOMMIT | FREMOVE | FDEAD
+ | FUSE | FUSEDEF | FDEF
+
+ val futureMemlocTag_toString
+ = fn FLIVE => "FLIVE"
+ | FCOMMIT => "FCOMMIT"
+ | FREMOVE => "FREMOVE"
+ | FDEAD => "FDEAD"
+ | FUSE => "FUSE"
+ | FUSEDEF => "FUSEDEF"
+ | FDEF => "FDEF"
+
+ type futureMemloc = futureMemlocTag * MemLoc.t
+
+ datatype futureMemlocPredTag = FCOMMITP | FREMOVEP | FDEADP
+ | FMCOMMITP | FMREMOVEP
+
+ val futureMemlocPredTag_toString
+ = fn FCOMMITP => "FCOMMITP"
+ | FREMOVEP => "FREMOVEP"
+ | FDEADP => "FDEADP"
+ | FMCOMMITP => "FMCOMMITP"
+ | FMREMOVEP => "FMREMOVEP"
+
+ type futureMemlocPred = futureMemlocPredTag * (MemLoc.t -> bool)
+
+ datatype future = M of futureMemloc | MP of futureMemlocPred
+
+ val future_toString
+ = fn (M (tag, memloc))
+ => concat [futureMemlocTag_toString tag, " ", MemLoc.toString memloc]
+ | (MP (tag, _))
+ => concat [futureMemlocPredTag_toString tag]
+
+
+ type hint = Register.t * MemLoc.t list * MemLocSet.t
+ type xmmhint = XmmRegister.t * MemLoc.t list * MemLocSet.t
+
+ val hint_toString
+ = fn (register, memlocs, _)
+ => concat ["{ ",
+ List.fold
+ (memlocs,
+ "",
+ fn (memloc, s) => s ^ (MemLoc.toString memloc) ^ " "),
+ "} -> ",
+ Register.toString register]
+
+ val xmmhint_toString
+ = fn (register, memlocs, _)
+ => concat ["{ ",
+ List.fold
+ (memlocs,
+ "",
+ fn (memloc, s) => s ^ (MemLoc.toString memloc) ^ " "),
+ "} -> ",
+ XmmRegister.toString register]
+
+ type t = {dead: MemLocSet.t,
+ commit: MemLocSet.t,
+ remove: MemLocSet.t,
+ futures: {pre: future list,
+ post: future list},
+ hint: hint list,
+ xmmhint: xmmhint list}
+
+(*
+ fun toString {dead, commit, remove, futures = {pre, post}, hint, xmmhint}
+ = let
+ fun doit (name, l, toString, s)
+ = List.fold(l, s,
+ fn (x, s)
+ => concat [name, toString x, "\n", s])
+ fun doit' (name, l, toString, s)
+ = MemLocSet.fold(l, s,
+ fn (x, s)
+ => concat [name, toString x, "\n", s])
+ in
+ doit'("dead: ", dead, MemLoc.toString,
+ doit'("commit: ", commit, MemLoc.toString,
+ doit'("remove: ", remove, MemLoc.toString,
+ doit("future (pre): ", List.rev pre, future_toString,
+ doit("future (post): ", List.rev post, future_toString,
+ doit("hint: ", hint, hint_toString,
+ doit("xmmhint: ", xmmhint, xmmhint_toString, "")))))))
+ end
+*)
+
+ fun toComments {dead, commit, remove, futures = {pre, post}, hint, xmmhint}
+ = let
+ fun doit (name, l, toString, ac)
+ = List.fold(l, ac,
+ fn (x, ac)
+ => (Assembly.comment (concat [name, toString x]))::
+ ac)
+ fun doit' (name, l, toString, ac)
+ = MemLocSet.fold(l, ac,
+ fn (x, ac)
+ => (Assembly.comment (concat [name, toString x]))::
+ ac)
+ in
+ doit'("dead: ", dead, MemLoc.toString,
+ doit'("commit: ", commit, MemLoc.toString,
+ doit'("remove: ", remove, MemLoc.toString,
+ doit("future (pre): ", List.rev pre, future_toString,
+ doit("future (post): ", List.rev post, future_toString,
+ doit("hint: ", hint, hint_toString,
+ doit("xmmhint: ", xmmhint, xmmhint_toString, [])))))))
+ end
+
+
+ datatype commit = NO | COMMIT | REMOVE | DEAD
+
+ fun predict(future, memloc)
+ = let
+ val rec sawNothing
+ = fn [] => if track memloc then DEAD else REMOVE
+ | (M (tag',memloc'))::future
+ => if MemLoc.eq(memloc, memloc')
+ then case tag'
+ of FLIVE => NO
+ | FCOMMIT => sawCommit future
+ | FREMOVE => sawRemove future
+ | FDEAD => DEAD
+ | FUSE => sawUse future
+ | FUSEDEF => NO
+ | FDEF => DEAD
+ else if ((tag' = FUSEDEF) orelse (tag' = FDEF))
+ andalso
+ List.exists
+ (MemLoc.utilized memloc,
+ fn memloc'' => MemLoc.mayAlias(memloc'', memloc'))
+ then REMOVE
+ else if MemLoc.mayAlias(memloc, memloc')
+ then case tag'
+ of FUSE => sawCommit future
+ | FUSEDEF => REMOVE
+ | FDEF => REMOVE
+ | _ => sawNothing future
+ else sawNothing future
+ | (MP (tag',pred'))::future
+ => if pred' memloc
+ then case tag'
+ of FCOMMITP => sawCommit future
+ | FREMOVEP => sawRemove future
+ | FDEADP => DEAD
+ | FMCOMMITP => sawCommit future
+ | FMREMOVEP => sawRemove future
+ else sawNothing future
+ and sawCommit
+ = fn [] => REMOVE
+ | (M (tag',memloc'))::future
+ => if MemLoc.eq(memloc, memloc')
+ then case tag'
+ of FLIVE => COMMIT
+ | FCOMMIT => sawCommit future
+ | FREMOVE => REMOVE
+ | FDEAD => REMOVE
+ | FUSE => COMMIT
+ | FUSEDEF => COMMIT
+ | FDEF => REMOVE
+ else if MemLoc.mayAlias(memloc, memloc')
+ then case tag'
+ of FUSE => sawCommit future
+ | FUSEDEF => REMOVE
+ | FDEF => REMOVE
+ | _ => sawCommit future
+ else sawCommit future
+ | (MP (tag',pred'))::future
+ => if pred' memloc
+ then case tag'
+ of FCOMMITP => sawCommit future
+ | FREMOVEP => REMOVE
+ | FDEADP => REMOVE
+ | FMCOMMITP => sawCommit future
+ | FMREMOVEP => REMOVE
+ else sawCommit future
+ and sawRemove
+ = fn [] => REMOVE
+ | (M (tag',memloc'))::future
+ => if MemLoc.eq(memloc, memloc')
+ then case tag'
+ of FLIVE => REMOVE
+ | FCOMMIT => REMOVE
+ | FREMOVE => sawRemove future
+ | FDEAD => DEAD
+ | FUSE => REMOVE
+ | FUSEDEF => REMOVE
+ | FDEF => DEAD
+ else if MemLoc.mayAlias(memloc, memloc')
+ then case tag'
+ of FUSE => REMOVE
+ | FUSEDEF => REMOVE
+ | FDEF => REMOVE
+ | _ => sawRemove future
+ else sawRemove future
+ | (MP (tag',pred'))::future
+ => if pred' memloc
+ then case tag'
+ of FCOMMITP => REMOVE
+ | FREMOVEP => REMOVE
+ | FDEADP => DEAD
+ | FMCOMMITP => REMOVE
+ | FMREMOVEP => sawRemove future
+ else sawRemove future
+ and sawUse
+ = fn [] => if track memloc then NO else COMMIT
+ | (M (tag',memloc'))::future
+ => if MemLoc.eq(memloc, memloc')
+ then case tag'
+ of FLIVE => NO
+ | FCOMMIT => sawUseCommit future
+ | FREMOVE => NO
+ | FDEAD => NO
+ | FUSE => sawUse future
+ | FUSEDEF => NO
+ | FDEF => NO
+ else if MemLoc.mayAlias(memloc, memloc')
+ then case tag'
+ of FUSE => sawUseCommit future
+ | FUSEDEF => NO
+ | FDEF => NO
+ | _ => sawUse future
+ else sawUse future
+ | (MP (tag',pred'))::future
+ => if pred' memloc
+ then case tag'
+ of FCOMMITP => sawUseCommit future
+ | FREMOVEP => NO
+ | FDEADP => NO
+ | FMCOMMITP => sawUseCommit future
+ | FMREMOVEP => NO
+ else sawUse future
+ and sawUseCommit
+ = fn [] => if track memloc then NO else COMMIT
+ | (M (tag',memloc'))::future
+ => if MemLoc.eq(memloc, memloc')
+ then case tag'
+ of FLIVE => COMMIT
+ | FCOMMIT => sawUseCommit future
+ | FREMOVE => NO
+ | FDEAD => NO
+ | FUSE => COMMIT
+ | FUSEDEF => COMMIT
+ | FDEF => NO
+ else if MemLoc.mayAlias(memloc, memloc')
+ then case tag'
+ of FUSE => sawUseCommit future
+ | FUSEDEF => NO
+ | FDEF => NO
+ | _ => sawUseCommit future
+ else sawUseCommit future
+ | (MP (tag',pred'))::future
+ => if pred' memloc
+ then case tag'
+ of FCOMMITP => sawUseCommit future
+ | FREMOVEP => NO
+ | FDEADP => NO
+ | FMCOMMITP => sawUseCommit future
+ | FMREMOVEP => NO
+ else sawUseCommit future
+
+ fun check commit
+ = if List.exists
+ (MemLoc.utilized memloc,
+ fn memloc' => case predict (future, memloc')
+ of REMOVE => true
+ | DEAD => true
+ | _ => false)
+ then REMOVE
+ else commit
+
+ val default = case sawNothing future
+ of REMOVE => REMOVE
+ | DEAD => DEAD
+ | commit => check commit
+ in
+ default
+ end
+
+ val split
+ = fn (set, p)
+ => MemLocSet.fold
+ (set,
+ (MemLocSet.empty,MemLocSet.empty,MemLocSet.empty,MemLocSet.empty),
+ fn (memloc, (no, commit, remove, dead))
+ => let
+ val add = fn set => MemLocSet.add(set, memloc)
+ in
+ case p memloc
+ of NO => (add no, commit, remove, dead)
+ | COMMIT => (no, add commit, remove, dead)
+ | REMOVE => (no, commit, add remove, dead)
+ | DEAD => (no, commit, remove, add dead)
+ end)
+
+ fun liveness {uses: MemLocSet.t,
+ defs: MemLocSet.t,
+ future: future list} :
+ {dead: MemLocSet.t,
+ commit: MemLocSet.t,
+ remove: MemLocSet.t,
+ future: future list}
+ = let
+ local
+ fun doit' (memlocs, set)
+ = MemLocSet.fold
+ (memlocs,
+ set,
+ fn (memloc, set)
+ => MemLocSet.union
+ (set, MemLocSet.fromList (MemLoc.utilized memloc)))
+ in
+ val allUses
+ = doit'(defs,
+ doit'(uses,
+ uses))
+ val allDefs
+ = defs
+ end
+
+ val current
+ = MemLocSet.+(allUses, allDefs)
+ val current_usedef
+ = MemLocSet.intersect(allUses, allDefs)
+ val current_use
+ = MemLocSet.-(allUses, current_usedef)
+ val current_def
+ = MemLocSet.-(allDefs, current_usedef)
+
+ val (_,commit,remove,dead)
+ = split(current, fn memloc => predict(future, memloc))
+
+ val future
+ = let
+ fun doit(memlocs, tag, future)
+ = MemLocSet.fold
+ (memlocs,
+ future,
+ fn (memloc,future)
+ => (M (tag, memloc))::future)
+ in
+ doit(current_use, FUSE,
+ doit(current_usedef, FUSEDEF,
+ doit(current_def, FDEF,
+ future)))
+ end
+
+ val info
+ = {dead = dead,
+ commit = commit,
+ remove = remove,
+ future = future}
+ in
+ info
+ end
+
+ fun livenessInstruction {instruction: Instruction.t,
+ future: future list}
+ = let
+ val future_post = future
+
+ val {uses, defs, ...} = Instruction.uses_defs_kills instruction
+ local
+ fun doit operands
+ = List.fold
+ (operands,
+ MemLocSet.empty,
+ fn (operand, memlocs)
+ => case Operand.deMemloc operand
+ of SOME memloc => MemLocSet.add(memlocs, memloc)
+ | NONE => memlocs)
+ in
+ val uses = doit uses
+ val defs = doit defs
+ end
+
+ val {dead,commit,remove,future}
+ = liveness {uses = uses,
+ defs = defs,
+ future = future_post}
+ val future_pre = future
+
+ val info = {dead = dead,
+ commit = commit,
+ remove = remove,
+ futures = {pre = future_pre, post = future_post}}
+
+ in
+ info
+ end
+
+ fun livenessDirective {directive: Directive.t,
+ future: future list}
+ = let
+ val future_post = future
+
+ fun addLive (memlocsX, f)
+ = List.fold
+ (memlocsX,
+ future,
+ fn (X, future) => (M (FLIVE, f X))::future)
+ fun addLive' (memlocs)
+ = MemLocSet.fold
+ (memlocs,
+ future,
+ fn (memloc, future) => (M (FLIVE, memloc))::future)
+
+ val future_pre
+ = case directive
+ of Directive.Reset
+ => []
+ | Directive.Cache {caches, ...}
+ => addLive(caches, fn {memloc, ...} => memloc)
+ | Directive.XmmCache {caches, ...}
+ => addLive(caches, fn {memloc, ...} => memloc)
+ | Directive.Force {commit_memlocs,
+ commit_classes,
+ remove_memlocs,
+ remove_classes,
+ dead_memlocs,
+ dead_classes,
+ ...}
+ => MemLocSet.fold
+ (commit_memlocs,
+ MemLocSet.fold
+ (remove_memlocs,
+ MemLocSet.fold
+ (dead_memlocs,
+ (MP (FCOMMITP,
+ fn memloc
+ => ClassSet.contains(commit_classes,
+ MemLoc.class memloc)))::
+ (MP (FREMOVEP,
+ fn memloc
+ => ClassSet.contains(remove_classes,
+ MemLoc.class memloc)))::
+ (MP (FDEADP,
+ fn memloc
+ => ClassSet.contains(dead_classes,
+ MemLoc.class memloc)))::
+ future,
+ fn (memloc,future) => (M (FDEAD, memloc))::future),
+ fn (memloc,future) => (M (FREMOVE, memloc))::future),
+ fn (memloc,future) => (M (FCOMMIT, memloc))::future)
+ | Directive.CCall
+ => (MP (FCOMMITP,
+ fn memloc
+ => MemLoc.Class.eq
+ (MemLoc.class memloc,
+ MemLoc.Class.CStack)))::
+ (MP (FMREMOVEP,
+ fn memloc
+ => (not (MemLoc.Class.eq
+ (MemLoc.class memloc,
+ MemLoc.Class.CStack)))
+ andalso
+ (Size.class (MemLoc.size memloc) <> Size.INT)))::
+ future
+ | Directive.Return {returns}
+ => (List.map(returns, fn {dst, ...} => M (FDEF, dst))) @ future
+ | Directive.SaveRegAlloc {live, ...}
+ => addLive'(live)
+ | _ => future
+
+ val info = {dead = MemLocSet.empty,
+ commit = MemLocSet.empty,
+ remove = MemLocSet.empty,
+ futures = {pre = future_pre, post = future_post}}
+ in
+ info
+ end
+
+ fun livenessAssembly {assembly: Assembly.t,
+ future: future list,
+ hint: hint list,
+ xmmhint: xmmhint list} : t
+ = let
+ fun default () = {dead = MemLocSet.empty,
+ commit = MemLocSet.empty,
+ remove = MemLocSet.empty,
+ futures = {pre = future, post = future}}
+ val {dead, commit, remove, futures}
+ = case assembly
+ of Assembly.Comment _ => default ()
+ | Assembly.Directive d
+ => livenessDirective {directive = d,
+ future = future}
+ | Assembly.Instruction i
+ => livenessInstruction {instruction = i,
+ future = future}
+ | Assembly.Label _ => default ()
+ | Assembly.PseudoOp _ => default ()
+
+ val hint' = Assembly.hints assembly
+ val hint
+ = List.fold
+ (case assembly
+ of Assembly.Directive Directive.Reset => []
+ | _ => hint,
+ List.revMap
+ (hint',
+ fn (memloc, register)
+ => (register, [memloc], MemLocSet.empty)),
+ fn ((hint_register,hint_memlocs,hint_ignore),hint)
+ => if List.exists
+ (hint,
+ fn (hint_register',_,_) => Register.coincide(hint_register,
+ hint_register'))
+ then hint
+ else let
+ val hint_memloc = hd hint_memlocs
+ in
+ if List.fold
+ (hint,
+ false,
+ fn ((_,hint_memlocs',_),b)
+ => b orelse List.contains
+ (hint_memlocs',
+ hint_memloc,
+ MemLoc.eq))
+ then hint
+ else (hint_register,
+ [hint_memloc],
+ MemLocSet.union(dead, hint_ignore))::hint
+ end)
+ val hint
+ = case assembly
+ of (Assembly.Instruction (Instruction.MOV
+ {src = Operand.MemLoc src',
+ dst = Operand.MemLoc dst',
+ ...}))
+ => List.revMap
+ (hint,
+ fn (hint_register,hint_memlocs,hint_ignore)
+ => if List.contains(hint_memlocs, dst', MemLoc.eq)
+ then (hint_register,
+ src'::hint_memlocs,
+ hint_ignore)
+ else (hint_register,hint_memlocs,hint_ignore))
+ | _ => hint
+ val xmmhint
+ = case assembly
+ of (Assembly.Instruction (Instruction.SSE_MOVS
+ {src = Operand.MemLoc src',
+ dst = Operand.MemLoc dst',
+ ...}))
+ => List.revMap
+ (xmmhint,
+ fn (hint_register,hint_memlocs,hint_ignore)
+ => if List.contains(hint_memlocs, dst', MemLoc.eq)
+ then (hint_register,
+ src'::hint_memlocs,
+ hint_ignore)
+ else (hint_register,hint_memlocs,hint_ignore))
+ | _ => xmmhint
+
+ val info = {dead = dead,
+ commit = commit,
+ remove = remove,
+ futures = futures,
+ hint = hint,
+ xmmhint = xmmhint}
+ in
+ info
+ end
+
+ fun toLiveness (assembly: Assembly.t list) : ((Assembly.t * t) list)
+ = let
+ val {assembly,...}
+ = List.foldr
+ (assembly,
+ {assembly = [], future = [], hint = [], xmmhint = []},
+ fn (asm, {assembly,future,hint,xmmhint})
+ => let
+ val info as {futures = {pre, ...}, hint, xmmhint, ...}
+ = livenessAssembly {assembly = asm,
+ future = future,
+ hint = hint,
+ xmmhint = xmmhint}
+ in
+ {assembly = (asm,info)::assembly,
+ future = pre,
+ hint = hint,
+ xmmhint = xmmhint}
+ end)
+ in
+ assembly
+ end
+
+ val (toLiveness,toLiveness_msg)
+ = tracer
+ "toLiveness"
+ toLiveness
+
+ fun toNoLiveness (assembly: Assembly.t list) : ((Assembly.t * t) list)
+ = List.map(assembly, fn asm => (asm,{dead = MemLocSet.empty,
+ commit = MemLocSet.empty,
+ remove = MemLocSet.empty,
+ futures = {pre = [], post = []},
+ hint = [],
+ xmmhint = []}))
+
+ val (toNoLiveness,toNoLiveness_msg)
+ = tracer
+ "toNoLiveness"
+ toNoLiveness
+ end
+
+ structure RegisterAllocation =
+ struct
+ exception Spill
+ val spill : Int.t ref = ref 0
+ val spillLabel = Label.fromString "spill"
+ val depth : Int.t ref = ref 0
+
+ datatype commit
+ = NO
+ | COMMIT of int
+ | REMOVE of int
+ | TRYCOMMIT of int
+ | TRYREMOVE of int
+
+ val commit_toString
+ = fn NO => "NO"
+ | COMMIT i => "COMMIT " ^ (Int.toString i)
+ | REMOVE i => "REMOVE " ^ (Int.toString i)
+ | TRYCOMMIT i => "TRYCOMMIT " ^ (Int.toString i)
+ | TRYREMOVE i => "TRYREMOVE " ^ (Int.toString i)
+
+ type value = {register: Register.t,
+ memloc: MemLoc.t,
+ weight: int,
+ sync: bool,
+ commit: commit}
+
+ fun value_toString {register, memloc, weight, sync, commit}
+ = concat [Register.toString register, " ",
+ MemLoc.toString memloc, " ",
+ Int.toString weight, " ",
+ Bool.toString sync, " ",
+ commit_toString commit]
+
+ type xmmvalue = {register: XmmRegister.t,
+ memloc: MemLoc.t,
+ weight: int,
+ sync: bool,
+ commit: commit}
+
+ fun xmmvalue_toString {register, memloc, weight, sync, commit}
+ = concat [XmmRegister.toString register, " ",
+ MemLoc.toString memloc, " ",
+ Int.toString weight, " ",
+ Bool.toString sync, " ",
+ commit_toString commit]
+
+ type t = {entries: value list,
+ reserved: Register.t list,
+ xmmentries: xmmvalue list,
+ xmmreserved: XmmRegister.t list}
+
+ fun toString ({entries, reserved, xmmentries, xmmreserved}: t)
+ = let
+ fun doit (name, l, toString, ac)
+ = (name ^ "\n") ^
+ (List.fold(l, ac,
+ fn (x, ac)
+ => (toString x) ^ "\n" ^ ac))
+ in
+ doit("entries:", entries, value_toString,
+ doit("reserved:", reserved, Register.toString,
+ doit("xmmentries:", xmmentries, xmmvalue_toString,
+ doit("xmmreserved:", xmmreserved, XmmRegister.toString, ""))))
+ end
+
+ fun toComments ({entries, reserved, xmmentries, xmmreserved}: t)
+ = let
+ fun doit (name, l, toString, ac)
+ = (Assembly.comment name)::
+ (List.fold(l, ac,
+ fn (x, ac)
+ => (Assembly.comment (toString x))::
+ ac))
+ in
+ AppendList.fromList
+ (doit("entries:", entries, value_toString,
+ doit("reserved:", reserved, Register.toString,
+ doit("xmmentries:", xmmentries, xmmvalue_toString,
+ doit("xmmreserved:", xmmreserved, XmmRegister.toString,
+ [])))))
+ end
+
+ val {get = getRA : Directive.Id.t -> {registerAllocation: t},
+ set = setRA, ...}
+ = Property.getSetOnce
+ (Directive.Id.plist,
+ Property.initRaise ("getRA", fn _ => Layout.empty))
+
+ fun empty () : t
+ = {entries = [],
+ reserved = [],
+ xmmentries = [],
+ xmmreserved = []}
+
+ fun reserve' {register: Register.t,
+ registerAllocation = {entries, reserved,
+ xmmentries, xmmreserved}: t}
+ = {assembly = AppendList.empty,
+ registerAllocation = {entries = entries,
+ reserved = register::reserved,
+ xmmentries = xmmentries,
+ xmmreserved = xmmreserved}}
+
+ fun xmmreserve' {register: XmmRegister.t,
+ registerAllocation = {entries, reserved,
+ xmmentries, xmmreserved}: t}
+ = {assembly = AppendList.empty,
+ registerAllocation = {entries = entries,
+ reserved = reserved,
+ xmmentries = xmmentries,
+ xmmreserved = register::xmmreserved}}
+
+ fun reserve {registers: Register.t list,
+ registerAllocation = {entries, reserved,
+ xmmentries, xmmreserved}: t}
+ = {assembly = AppendList.empty,
+ registerAllocation = {entries = entries,
+ reserved = registers @ reserved,
+ xmmentries = xmmentries,
+ xmmreserved = xmmreserved}}
+
+ fun xmmreserve {registers: XmmRegister.t list,
+ registerAllocation = {entries, reserved,
+ xmmentries, xmmreserved}: t}
+ = {assembly = AppendList.empty,
+ registerAllocation = {entries = entries,
+ reserved = reserved,
+ xmmentries = xmmentries,
+ xmmreserved = registers @ xmmreserved}}
+
+ fun unreserve' {register: Register.t,
+ registerAllocation = {entries, reserved,
+ xmmentries, xmmreserved}: t}
+ = {assembly = AppendList.empty,
+ registerAllocation = {entries = entries,
+ reserved = List.revRemoveAll
+ (reserved,
+ fn register'
+ => Register.eq
+ (register',
+ register)),
+ xmmentries = xmmentries,
+ xmmreserved = xmmreserved}}
+
+ fun xmmunreserve' {register: XmmRegister.t,
+ registerAllocation = {entries, reserved,
+ xmmentries, xmmreserved}: t}
+ = {assembly = AppendList.empty,
+ registerAllocation = {entries = entries,
+ reserved = reserved,
+ xmmentries = xmmentries,
+ xmmreserved = List.revRemoveAll
+ (xmmreserved,
+ fn register'
+ => XmmRegister.eq
+ (register',
+ register))}}
+
+ fun unreserve {registers: Register.t list,
+ registerAllocation = {entries, reserved,
+ xmmentries, xmmreserved}: t}
+ = {assembly = AppendList.empty,
+ registerAllocation = {entries = entries,
+ reserved = List.revRemoveAll
+ (reserved,
+ fn register'
+ => List.contains
+ (registers,
+ register',
+ Register.eq)),
+ xmmentries = xmmentries,
+ xmmreserved = xmmreserved}}
+
+ fun xmmunreserve {registers: XmmRegister.t list,
+ registerAllocation = {entries, reserved,
+ xmmentries, xmmreserved}: t}
+ = {assembly = AppendList.empty,
+ registerAllocation = {entries = entries,
+ reserved = reserved,
+ xmmentries = xmmentries,
+ xmmreserved = List.revRemoveAll
+ (xmmreserved,
+ fn register'
+ => List.contains
+ (registers,
+ register',
+ XmmRegister.eq))}}
+
+ fun valueMap {map,
+ registerAllocation = {entries,
+ reserved,
+ xmmentries,
+ xmmreserved}: t}
+ = {entries = List.revMap(entries, map),
+ reserved = reserved,
+ xmmentries = xmmentries,
+ xmmreserved = xmmreserved}
+
+ fun xmmvalueMap {map,
+ registerAllocation = {entries,
+ reserved,
+ xmmentries,
+ xmmreserved}: t}
+ = {entries = entries,
+ reserved = reserved,
+ xmmentries = List.revMap(xmmentries, map),
+ xmmreserved = xmmreserved}
+
+ fun valueFilter {filter,
+ registerAllocation = {entries,
+ ...}: t}
+ = List.revKeepAll(entries, filter)
+
+ fun xmmvalueFilter {filter,
+ registerAllocation = {xmmentries,
+ ...}: t}
+ = List.revKeepAll(xmmentries, filter)
+
+ fun valueRegister {register,
+ registerAllocation}
+ = case valueFilter {filter = fn {register = register', ...}
+ => Register.eq(register, register'),
+ registerAllocation = registerAllocation}
+ of [] => NONE
+ | [value] => SOME value
+ | _ => Error.bug "amd64AllocateRegisters.RegisterAllocation.valueRegister"
+
+(*
+ fun xmmvalueRegister {register,
+ registerAllocation}
+ = case xmmvalueFilter {filter = fn {register = register', ...}
+ => XmmRegister.eq(register, register'),
+ registerAllocation = registerAllocation}
+ of [] => NONE
+ | [value] => SOME value
+ | _ => Error.bug "amd64AllocateRegisters.RegisterAllocation.xmmvalueRegister"
+*)
+
+ fun valuesRegister {register = Register.T {reg, ...},
+ registerAllocation = {entries,
+ ...}: t}
+ = List.revKeepAll(entries,
+ fn {register
+ = Register.T {reg = reg',
+ ...},
+ ...}
+ => reg = reg')
+
+ fun xmmvaluesXmmRegister {register = XmmRegister.T {reg, ...},
+ registerAllocation = {xmmentries,
+ ...}: t}
+ = List.revKeepAll(xmmentries,
+ fn {register
+ = XmmRegister.T {reg = reg',
+ ...},
+ ...}
+ => reg = reg')
+
+ fun update {value as {register,...},
+ registerAllocation = {entries, reserved,
+ xmmentries, xmmreserved}: t}
+ = {entries = let
+ val entries
+ = List.revRemoveAll(entries,
+ fn {register = register',...}
+ => Register.eq(register,register'))
+ in
+ value::entries
+ end,
+ reserved = reserved,
+ xmmentries = xmmentries,
+ xmmreserved = xmmreserved}
+
+ fun xmmupdate {value as {register,...},
+ registerAllocation = {entries, reserved,
+ xmmentries, xmmreserved}: t}
+ = {entries = entries,
+ reserved = reserved,
+ xmmentries = let
+ val xmmentries
+ = List.revRemoveAll(xmmentries,
+ fn {register = register',...}
+ => XmmRegister.eq(register,register'))
+ in
+ value::xmmentries
+ end,
+ xmmreserved = xmmreserved}
+
+ fun delete {register,
+ registerAllocation = {entries, reserved,
+ xmmentries, xmmreserved}: t}
+ = {entries = List.revRemoveAll(entries,
+ fn {register = register',...}
+ => Register.eq(register, register')),
+ reserved = reserved,
+ xmmentries = xmmentries,
+ xmmreserved = xmmreserved}
+
+ fun xmmdelete {register,
+ registerAllocation = {entries, reserved,
+ xmmentries, xmmreserved}: t}
+ = {entries = entries,
+ reserved = reserved,
+ xmmentries = List.revRemoveAll(xmmentries,
+ fn {register = register',...}
+ => XmmRegister.eq(register, register')),
+ xmmreserved = xmmreserved}
+
+ fun deletes {registers, registerAllocation: t}
+ = List.fold(registers,
+ registerAllocation,
+ fn (register, registerAllocation)
+ => delete {register = register,
+ registerAllocation = registerAllocation})
+
+ fun xmmdeletes {registers, registerAllocation: t}
+ = List.fold(registers,
+ registerAllocation,
+ fn (register, registerAllocation)
+ => xmmdelete {register = register,
+ registerAllocation = registerAllocation})
+
+ fun allocated {memloc,
+ registerAllocation: t}
+ = case valueFilter {filter = fn {memloc = memloc',...}
+ => MemLoc.eq(memloc,memloc'),
+ registerAllocation = registerAllocation}
+ of [] => NONE
+ | [value] => SOME value
+ | _ => Error.bug "amd64AllocateRegisters.RegisterAllocation.allocated"
+
+ fun xmmallocated {memloc,
+ registerAllocation: t}
+ = case xmmvalueFilter {filter = fn {memloc = memloc',...}
+ => MemLoc.eq(memloc,memloc'),
+ registerAllocation = registerAllocation}
+ of [] => NONE
+ | [value] => SOME value
+ | _ => Error.bug "amd64AllocateRegisters.RegisterAllocation.xmmallocated"
+
+ fun remove {memloc,
+ registerAllocation: t}
+ = case allocated {memloc = memloc,
+ registerAllocation = registerAllocation}
+ of SOME {register, ...}
+ => delete {register = register,
+ registerAllocation = registerAllocation}
+ | NONE => registerAllocation
+
+ fun xmmremove {memloc,
+ registerAllocation: t}
+ = case xmmallocated {memloc = memloc,
+ registerAllocation = registerAllocation}
+ of SOME {register, ...}
+ => xmmdelete {register = register,
+ registerAllocation = registerAllocation}
+ | NONE => registerAllocation
+
+ fun removes {memlocs,
+ registerAllocation: t}
+ = List.fold(memlocs,
+ registerAllocation,
+ fn (memloc,registerAllocation)
+ => remove {memloc = memloc,
+ registerAllocation = registerAllocation})
+
+ fun xmmremoves {memlocs,
+ registerAllocation: t}
+ = List.fold(memlocs,
+ registerAllocation,
+ fn (memloc,registerAllocation)
+ => xmmremove {memloc = memloc,
+ registerAllocation = registerAllocation})
+
+ local
+ val commitPush'
+ = fn NO => NO
+ | COMMIT i => COMMIT (i + 1)
+ | REMOVE i => REMOVE (i + 1)
+ | TRYCOMMIT i => TRYCOMMIT (i + 1)
+ | TRYREMOVE i => TRYREMOVE (i + 1)
+
+ val commitPop'
+ = fn NO => NO
+ | COMMIT i => COMMIT (i - 1)
+ | REMOVE i => REMOVE (i - 1)
+ | TRYCOMMIT i => TRYCOMMIT (i - 1)
+ | TRYREMOVE i => TRYREMOVE (i - 1)
+ in
+ fun commitPush {registerAllocation: t}
+ = valueMap {map = fn {register,memloc,weight,sync,commit}
+ => {register = register,
+ memloc = memloc,
+ weight = weight,
+ sync = sync,
+ commit = commitPush' commit},
+ registerAllocation = registerAllocation}
+
+ fun xmmcommitPush {registerAllocation: t}
+ = xmmvalueMap {map = fn {register,memloc,weight,sync,commit}
+ => {register = register,
+ memloc = memloc,
+ weight = weight,
+ sync = sync,
+ commit = commitPush' commit},
+ registerAllocation = registerAllocation}
+
+ fun commitPop {registerAllocation: t}
+ = valueMap {map = fn {register,memloc,weight,sync,commit}
+ => {register = register,
+ memloc = memloc,
+ weight = weight,
+ sync = sync,
+ commit = commitPop' commit},
+ registerAllocation = registerAllocation}
+
+ fun xmmcommitPop {registerAllocation: t}
+ = xmmvalueMap {map = fn {register,memloc,weight,sync,commit}
+ => {register = register,
+ memloc = memloc,
+ weight = weight,
+ sync = sync,
+ commit = commitPop' commit},
+ registerAllocation = registerAllocation}
+ end
+
+ fun savedRegisters {saves: Operand.t list,
+ registerAllocation: t} :
+ Register.t list
+ = List.concatMap
+ (saves,
+ fn Operand.MemLoc m
+ => (case allocated {memloc = m,
+ registerAllocation = registerAllocation}
+ of SOME {register, ...} => [register]
+ | NONE => [])
+ | Operand.Register r => [r]
+ | Operand.Address (Address.T {base, index, ...})
+ => (case (base, index)
+ of (NONE, NONE ) => []
+ | (SOME rb, NONE ) => [rb]
+ | (NONE, SOME ro) => [ro]
+ | (SOME rb, SOME ro) => [rb,ro])
+ | _ => [])
+
+ fun savedXmmRegisters {saves: Operand.t list,
+ registerAllocation: t} :
+ XmmRegister.t list
+ = List.concatMap
+ (saves,
+ fn Operand.MemLoc m
+ => (case xmmallocated {memloc = m,
+ registerAllocation = registerAllocation}
+ of SOME {register, ...} => [register]
+ | NONE => [])
+ | Operand.XmmRegister r => [r]
+ | _ => [])
+
+ fun supportedRegisters {supports: Operand.t list,
+ registerAllocation: t} :
+ Register.t list
+ = let
+ fun supportedRegisters' memloc
+ = case (allocated {memloc = memloc,
+ registerAllocation = registerAllocation},
+ xmmallocated {memloc = memloc,
+ registerAllocation = registerAllocation})
+ of (SOME {register, ...}, _) => [register]
+ | (_, SOME _) => []
+ | (NONE, NONE) => List.concatMap(MemLoc.utilized memloc,
+ supportedRegisters')
+ in
+ List.concatMap
+ (supports,
+ fn Operand.MemLoc m => supportedRegisters' m
+ | _ => [])
+ end
+
+ fun supportedXmmRegisters {supports: Operand.t list,
+ registerAllocation: t} :
+ XmmRegister.t list
+ = let
+ fun supportedXmmRegisters' memloc
+ = case (allocated {memloc = memloc,
+ registerAllocation = registerAllocation},
+ xmmallocated {memloc = memloc,
+ registerAllocation = registerAllocation})
+ of (SOME _, _) => []
+ | (_, SOME {register, ...}) => [register]
+ | (NONE, NONE) => List.concatMap(MemLoc.utilized memloc,
+ supportedXmmRegisters')
+ in
+ List.concatMap
+ (supports,
+ fn Operand.MemLoc m => supportedXmmRegisters' m
+ | _ => [])
+ end
+
+ fun supportedMemLocs {supports: Operand.t list,
+ registerAllocation: t} :
+ MemLoc.t list
+ = let
+ fun supportedMemLocs' memloc
+ = case (allocated {memloc = memloc,
+ registerAllocation = registerAllocation},
+ xmmallocated {memloc = memloc,
+ registerAllocation = registerAllocation})
+ of (SOME _, _) => [memloc]
+ | (_, SOME _) => [memloc]
+ | (NONE, NONE) => List.concatMap(MemLoc.utilized memloc,
+ supportedMemLocs')
+ in
+ List.concatMap
+ (supports,
+ fn Operand.MemLoc m => supportedMemLocs' m
+ | _ => [])
+ end
+
+
+ fun 'a spillAndReissue {info: Liveness.t,
+ supports: Operand.t list,
+ saves: Operand.t list,
+ registerAllocation: t,
+ spiller : {info: Liveness.t,
+ supports: Operand.t list,
+ saves: Operand.t list,
+ registerAllocation: t} ->
+ {assembly: Assembly.t AppendList.t,
+ registerAllocation: t},
+ msg : string,
+ reissue : {assembly: Assembly.t AppendList.t,
+ registerAllocation: t} -> 'a} : 'a
+ = (Int.dec depth;
+ if !depth = 0
+ then let
+ val _ = Int.inc depth
+ val {assembly, registerAllocation}
+ = spiller
+ {info = info,
+ supports = supports,
+ saves = saves,
+ registerAllocation = registerAllocation}
+ val return
+ = reissue {assembly = assembly,
+ registerAllocation = registerAllocation}
+ handle Spill
+ => (Error.bug (concat [msg, ":reSpill"]))
+ val _ = Int.dec depth
+ in
+ return
+ end
+ else raise Spill)
+
+ fun potentialRegisters ({size, force, ...}:
+ {size: Size.t,
+ saves: Operand.t list,
+ force: Register.t list,
+ registerAllocation: t}):
+ Register.t list
+ = case force
+ of [] => Register.registers size
+ | registers => List.revKeepAll(Register.registers size,
+ fn register
+ => List.contains(registers,
+ register,
+