{-# LANGUAGE TypeFamilies #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
module Futhark.Pass.ExplicitAllocations.GPU
( explicitAllocations,
explicitAllocationsInStms,
)
where
import Control.Monad
import Data.Set qualified as S
import Futhark.IR.GPU
import Futhark.IR.GPUMem
import Futhark.IR.Mem.LMAD qualified as LMAD
import Futhark.Pass.ExplicitAllocations
import Futhark.Pass.ExplicitAllocations.SegOp
-- | Size-substitution instance for host operations: only the
-- 'GetSize' and 'GetSizeMax' size queries count as constants.
instance SizeSubst (HostOp rep op) where
  opIsConst (SizeOp GetSize {}) = True
  opIsConst (SizeOp GetSizeMax {}) = True
  opIsConst _ = False
-- | Run an allocation action in the environment appropriate for the
-- given 'SegLevel': block-level constructs allocate in @"shared"@
-- memory, thread-level constructs in @"device"@ memory.  Nested host
-- operations are handled with the level recorded as the outer level.
allocAtLevel :: SegLevel -> AllocM GPU GPUMem a -> AllocM GPU GPUMem a
allocAtLevel lvl = local $ \env ->
  env
    { allocSpace = space,
      allocInOp = handleHostOp (Just lvl)
    }
  where
    space = case lvl of
      SegBlock {} -> Space "shared"
      SegThread {} -> Space "device"
      SegThreadInBlock {} -> Space "device"
-- | Perform memory allocation inside a 'SegOp'.  First computes the
-- number of threads: if the outer level is a 'SegBlock' with a known
-- grid, the block size is used directly; otherwise it is the product
-- of the grid dimensions (when a grid is known) or of the segmented
-- space dimensions (when it is not).  The body and the reduction
-- lambdas are then allocated at the op's own level.
handleSegOp ::
  Maybe SegLevel ->
  SegOp SegLevel GPU ->
  AllocM GPU GPUMem (SegOp SegLevel GPUMem)
handleSegOp outer_lvl op = do
  num_threads <-
    case (outer_lvl, segLevel op) of
      (Just (SegBlock _ (Just grid)), _) ->
        pure $ unCount $ gridBlockSize grid
      _ ->
        letSubExp "num_threads"
          =<< case maybe_grid of
            Just grid ->
              pure . BasicOp $
                BinOp
                  (Mul Int64 OverflowUndef)
                  (unCount (gridNumBlocks grid))
                  (unCount (gridBlockSize grid))
            Nothing ->
              foldBinOp
                (Mul Int64 OverflowUndef)
                (intConst Int64 1)
                (segSpaceDims $ segSpace op)
  allocAtLevel (segLevel op) $ mapSegOpM (mapper num_threads) op
  where
    -- Prefer a grid attached to the outer level; fall back to the
    -- grid of the op's own level, if any.
    maybe_grid =
      case (outer_lvl, segLevel op) of
        (Just (SegThread _ (Just grid)), _) -> Just grid
        (Just (SegBlock _ (Just grid)), _) -> Just grid
        (_, SegThread _ (Just grid)) -> Just grid
        (_, SegBlock _ (Just grid)) -> Just grid
        _ -> Nothing
    scope = scopeOfSegSpace $ segSpace op
    mapper num_threads =
      identitySegOpMapper
        { mapOnSegOpBody =
            localScope scope . local f . allocInKernelBody,
          mapOnSegOpLambda =
            local inThread
              . allocInBinOpLambda num_threads (segSpace op)
        }
    -- Expression hints depend on whether we are inside a block or a
    -- thread.
    f = case segLevel op of
      SegThread {} -> inThread
      SegThreadInBlock {} -> inThread
      SegBlock {} -> inGroup
    inThread env = env {envExpHints = inThreadExpHints}
    inGroup env = env {envExpHints = inGroupExpHints}
-- | Perform memory allocation inside a host-level operation.  Size
-- operations pass through unchanged, SOACs must have been removed by
-- earlier passes (hence 'error'), 'SegOp's are delegated to
-- 'handleSegOp', and 'GPUBody' bodies have their statements allocated.
handleHostOp ::
  Maybe SegLevel ->
  HostOp SOAC GPU ->
  AllocM GPU GPUMem (MemOp (HostOp NoOp) GPUMem)
handleHostOp _ (SizeOp op) =
  pure $ Inner $ SizeOp op
handleHostOp _ (OtherOp op) =
  error $ "Cannot allocate memory in SOAC: " ++ prettyString op
handleHostOp outer_lvl (SegOp op) =
  Inner . SegOp <$> handleSegOp outer_lvl op
handleHostOp _ (GPUBody ts (Body _ stms res)) =
  fmap (Inner . GPUBody ts) . buildBody_ . allocInStms stms $ pure res
-- | Allocation hints for kernel-level expressions.  A 'Manifest' gets
-- an LMAD permuted by the inverse of the manifest permutation, in
-- @"device"@ space.  Thread-level 'SegMap' and 'SegRed' results are
-- hinted per-result via 'mapResultHint' (reduction results get no
-- hint).  Everything else falls back to 'defaultExpHints'.
kernelExpHints :: Exp GPUMem -> AllocM GPU GPUMem [ExpHint]
kernelExpHints (BasicOp (Manifest perm v)) = do
  dims <- arrayDims <$> lookupType v
  let perm_inv = rearrangeInverse perm
      dims' = rearrangeShape perm dims
      lmad = LMAD.permute (LMAD.iota 0 $ map pe64 dims') perm_inv
  pure [Hint lmad $ Space "device"]
kernelExpHints (Op (Inner (SegOp (SegMap lvl@(SegThread _ _) space ts body)))) =
  zipWithM (mapResultHint lvl space) ts $ kernelBodyResult body
kernelExpHints (Op (Inner (SegOp (SegRed lvl@(SegThread _ _) space reds ts body)))) =
  (map (const NoHint) red_res <>)
    <$> zipWithM (mapResultHint lvl space) (drop num_reds ts) map_res
  where
    num_reds = segBinOpResults reds
    (red_res, map_res) = splitAt num_reds $ kernelBodyResult body
kernelExpHints e = defaultExpHints e
-- | Hint for a single result of a thread-level map.  A 'Returns'
-- result whose element rows are large enough to benefit from
-- coalescing (more than 4 bytes per innermost row, or a non-trivial
-- shape) is hinted with an "innermost" LMAD in @"device"@ space;
-- everything else gets 'NoHint'.
mapResultHint ::
  SegLevel ->
  SegSpace ->
  Type ->
  KernelResult ->
  AllocM GPU GPUMem ExpHint
mapResultHint _lvl space = hint
  where
    -- Heuristic: worth coalescing unless the returned row is a known
    -- single dimension whose byte size is at most 4.
    coalesceReturnOfShape _ [] = False
    coalesceReturnOfShape bs [Constant (IntValue (Int64Value d))] = bs * d > 4
    coalesceReturnOfShape _ _ = True

    hint t Returns {}
      | coalesceReturnOfShape (primByteSize (elemType t)) $ arrayDims t = do
          let space_dims = segSpaceDims space
          pure $ Hint (innermost space_dims (arrayDims t)) $ Space "device"
    hint _ _ = pure NoHint
-- | Construct an LMAD where the result dimensions are stored
-- innermost: the row-major index function is built over the result
-- dimensions followed by the space dimensions, then permuted back so
-- the logical order is space dimensions first.
innermost :: [SubExp] -> [SubExp] -> LMAD
innermost space_dims t_dims =
  let r = length t_dims
      dims = space_dims ++ t_dims
      -- Move the r result dimensions in front of the space dimensions.
      perm =
        [length space_dims .. length space_dims + r - 1]
          ++ [0 .. length space_dims - 1]
      perm_inv = rearrangeInverse perm
      dims_perm = rearrangeShape perm dims
      lmad_base = LMAD.iota 0 $ map pe64 dims_perm
      lmad_rearranged = LMAD.permute lmad_base perm_inv
   in lmad_rearranged
-- | Is this 'SubExp' either a constant or a variable from the given
-- set of known constants?
semiStatic :: S.Set VName -> SubExp -> Bool
semiStatic _ Constant {} = True
semiStatic consts (Var v) = v `S.member` consts
-- | Allocation hints inside a block.  A 'SegMap' with at least one
-- 'ResultPrivate' result hints those results into a 'ScalarSpace'
-- (private memory) when their dimensions are semi-static, using an
-- LMAD sliced so that the segmented dimensions are nil slices; other
-- results get 'NoHint'.  Everything else uses 'defaultExpHints'.
inGroupExpHints :: Exp GPUMem -> AllocM GPU GPUMem [ExpHint]
inGroupExpHints (Op (Inner (SegOp (SegMap _ space ts body))))
  | any private $ kernelBodyResult body = do
      consts <- asks envConsts
      pure $ do
        (t, r) <- zip ts $ kernelBodyResult body
        pure $
          if private r && all (semiStatic consts) (arrayDims t)
            then
              let seg_dims = map pe64 $ segSpaceDims space
                  dims = seg_dims ++ map pe64 (arrayDims t)
                  nilSlice d = DimSlice 0 d 0
               in Hint
                    ( LMAD.slice (LMAD.iota 0 dims) $
                        fullSliceNum dims (map nilSlice seg_dims)
                    )
                    $ ScalarSpace (arrayDims t)
                    $ elemType t
            else NoHint
  where
    private (Returns ResultPrivate _ _) = True
    private _ = False
inGroupExpHints e = defaultExpHints e
-- | Allocation hints inside a thread.  Results whose static shape is
-- fully semi-static are hinted into a 'ScalarSpace' (private memory)
-- with a row-major LMAD; all other results get 'NoHint'.
inThreadExpHints :: Exp GPUMem -> AllocM GPU GPUMem [ExpHint]
inThreadExpHints e = do
  consts <- asks envConsts
  mapM (maybePrivate consts) =<< expExtType e
  where
    maybePrivate consts t
      | Just (Array pt shape _) <- hasStaticShape t,
        all (semiStatic consts) $ shapeDims shape = do
          let lmad = LMAD.iota 0 $ map pe64 $ shapeDims shape
          pure $ Hint lmad $ ScalarSpace (shapeDims shape) pt
      | otherwise =
          pure NoHint
-- | The pass from 'GPU' to 'GPUMem': allocations default to
-- @"device"@ space, host operations are handled by 'handleHostOp'
-- (with no outer level), and expression hints come from
-- 'kernelExpHints'.
explicitAllocations :: Pass GPU GPUMem
explicitAllocations =
  explicitAllocationsGeneric (Space "device") (handleHostOp Nothing) kernelExpHints
-- | Convert some number of statements from 'GPU' to 'GPUMem', using
-- the same configuration as 'explicitAllocations'.
explicitAllocationsInStms ::
  (MonadFreshNames m, HasScope GPUMem m) =>
  Stms GPU ->
  m (Stms GPUMem)
explicitAllocationsInStms =
  explicitAllocationsInStmsGeneric (Space "device") (handleHostOp Nothing) kernelExpHints