Use Map-based data structure for LLVM allocation histories. #550

Merged
merged 8 commits on Nov 9, 2020
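
This pull request replaces the flat allocation history [MemAlloc sym] with an abstract MemAllocs sym type in which consecutive allocations are collected into a Map keyed by block number, so a block can be looked up without scanning the whole history. The module that defines the new type is not part of this diff; the following is a minimal sketch of a plausible shape for it, inferred from how AllocInfo and MemAllocs are used in the hunks below, not the definition that was merged. The helper operations the diff relies on (sizeMemAllocs, muxMemAllocs, allocMemAllocs, freeMemAllocs, popMemAllocs, possibleAllocInfo, isAllocatedGeneric) are sketched next to the hunks that call them.

-- Sketch only (assumed, not shown in this diff). AllocEntry is a hypothetical
-- name; MemFree and AllocMerge echo the constructors of the removed MemAlloc type.
data AllocInfo sym =
  forall w. (1 <= w) =>
    AllocInfo AllocType (Maybe (SymBV sym w)) Mutability Alignment String

data AllocEntry sym
  = Allocations (Map Natural (AllocInfo sym))             -- consecutive allocations, keyed by block number
  | MemFree (SymNat sym) String                           -- a freed (possibly symbolic) block
  | AllocMerge (Pred sym) (MemAllocs sym) (MemAllocs sym) -- mux of two histories

newtype MemAllocs sym = MemAllocs [AllocEntry sym]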
13 changes: 5 additions & 8 deletions crucible-llvm/src/Lang/Crucible/LLVM/ArraySizeProfile.hs
@@ -33,7 +33,6 @@ module Lang.Crucible.LLVM.ArraySizeProfile

import Control.Lens.TH

import Control.Monad
import Control.Lens

import Data.Type.Equality (testEquality)
@@ -97,13 +96,11 @@ ptrAllocSize ::
G.Mem sym ->
C.LLVMPtr sym w ->
Maybe Int
ptrAllocSize mem (C.llvmPointerView -> (blk, _)) = msum $ inAlloc <$> G.memAllocs mem
where inAlloc :: G.MemAlloc sym -> Maybe Int
inAlloc memAlloc
| G.Alloc _ a (Just sz) _ _ _ <- memAlloc
, Just a == W4.asNat blk =
fromIntegral <$> BV.asUnsigned <$> W4.asBV sz
| otherwise = Nothing
ptrAllocSize mem (C.llvmPointerView -> (blk, _)) =
do a <- W4.asNat blk
G.AllocInfo _ msz _ _ _ <- G.possibleAllocInfo a (G.memAllocs mem)
sz <- msz
fromIntegral <$> BV.asUnsigned <$> W4.asBV sz

ptrArraySize ::
C.IsSymInterface sym =>
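
The new ptrAllocSize chains three checks in the Maybe monad: the pointer's block number must be concrete (W4.asNat), the block must appear in the allocation history (G.possibleAllocInfo), and the recorded size, if any, must be a concrete bitvector. The helper below restates the last two steps in the same style. It is a sketch under the assumption that possibleAllocInfo has type Natural -> MemAllocs sym -> Maybe (AllocInfo sym) (only the name appears in this diff), and concreteSize is a hypothetical name.

-- Sketch (assumptions as above); G, W4, and BV are the qualified imports
-- already used in this file.
concreteSize :: W4.IsExpr (W4.SymExpr sym) => G.AllocInfo sym -> Maybe Int
concreteSize (G.AllocInfo _ msz _ _ _) =
  do sz <- msz                                    -- a size was recorded at allocation time
     fromIntegral . BV.asUnsigned <$> W4.asBV sz  -- and it is concrete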
175 changes: 50 additions & 125 deletions crucible-llvm/src/Lang/Crucible/LLVM/MemModel/Generic.hs
@@ -30,7 +30,8 @@ module Lang.Crucible.LLVM.MemModel.Generic
, emptyMem
, AllocType(..)
, Mutability(..)
, MemAlloc(..)
, AllocInfo(..)
, MemAllocs
, memAllocs
, memEndian
, memAllocCount
@@ -58,12 +59,12 @@ module Lang.Crucible.LLVM.MemModel.Generic

, SomeAlloc(..)
, possibleAllocs
, possibleAllocInfo
, ppSomeAlloc

-- * Pretty printing
, ppType
, ppPtr
, ppAlloc
, ppAllocs
, ppMem
, ppTermExpr
@@ -817,18 +818,10 @@ memWritesSize (MemWrites writes) = getSum $ foldMap

muxChanges :: IsExpr (SymExpr sym) => Pred sym -> MemChanges sym -> MemChanges sym -> MemChanges sym
muxChanges c (left_allocs, lhs_writes) (rhs_allocs, rhs_writes) =
( muxAllocs c left_allocs rhs_allocs
( muxMemAllocs c left_allocs rhs_allocs
, muxWrites c lhs_writes rhs_writes
)

muxAllocs :: IsExpr (SymExpr sym) => Pred sym -> [MemAlloc sym] -> [MemAlloc sym] -> [MemAlloc sym]
muxAllocs _ [] [] = []
muxAllocs c xs ys =
case asConstantPred c of
Just True -> xs
Just False -> ys
Nothing -> [ AllocMerge c xs ys ]

muxWrites :: IsExpr (SymExpr sym) => Pred sym -> MemWrites sym -> MemWrites sym -> MemWrites sym
muxWrites _ (MemWrites []) (MemWrites []) = MemWrites []

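muxAllocs is removed; merging allocation histories at a branch is now handled by muxMemAllocs, which is defined with the MemAllocs type outside this diff. A minimal sketch of its presumed behaviour, keeping the removed function's short cut on constant conditions and using the assumed constructors from the sketch near the top of this page:

-- Sketch only; not the merged definition.
muxMemAllocs :: IsExpr (SymExpr sym) => Pred sym -> MemAllocs sym -> MemAllocs sym -> MemAllocs sym
muxMemAllocs c xs ys =
  case asConstantPred c of
    Just True  -> xs
    Just False -> ys
    Nothing    -> MemAllocs [AllocMerge c xs ys]
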
@@ -874,7 +867,7 @@ memChanges f m = go (m^.memState)
go (StackFrame _ _ _ l s) = f l <> go s
go (BranchFrame _ _ l s) = f l <> go s

memAllocs :: Mem sym -> [MemAlloc sym]
memAllocs :: Mem sym -> MemAllocs sym
memAllocs = memChanges fst

memWrites :: Mem sym -> MemWrites sym
@@ -918,11 +911,11 @@ memAllocCount m = memStateAllocCount (m ^. memState)
memWriteCount :: Mem sym -> Int
memWriteCount m = memStateWriteCount (m ^. memState)

memAddAlloc :: MemAlloc sym -> Mem sym -> Mem sym
memAddAlloc x = memState %~ \case
EmptyMem ac wc (a, w) -> EmptyMem (ac+1) wc (x:a, w)
StackFrame ac wc nm (a, w) s -> StackFrame (ac+1) wc nm (x:a, w) s
BranchFrame ac wc (a, w) s -> BranchFrame (ac+1) wc (x:a, w) s
memAddAlloc :: (MemAllocs sym -> MemAllocs sym) -> Mem sym -> Mem sym
memAddAlloc f = memState %~ \case
EmptyMem ac wc (a, w) -> EmptyMem (ac+1) wc (f a, w)
StackFrame ac wc nm (a, w) s -> StackFrame (ac+1) wc nm (f a, w) s
BranchFrame ac wc (a, w) s -> BranchFrame (ac+1) wc (f a, w) s

memAddWrite ::
(IsExprBuilder sym, 1 <= w) =>
@@ -943,17 +936,16 @@ memAddWrite ptr src = do
memStateAddChanges :: MemChanges sym -> MemState sym -> MemState sym
memStateAddChanges (a, w) = \case
EmptyMem ac wc (a0, w0) ->
EmptyMem (length a + ac) (memWritesSize w + wc) (a ++ a0, w <> w0)
EmptyMem (sizeMemAllocs a + ac) (memWritesSize w + wc) (a <> a0, w <> w0)
StackFrame ac wc nm (a0, w0) s ->
StackFrame (length a + ac) (memWritesSize w + wc) nm (a ++ a0, w <> w0) s
StackFrame (sizeMemAllocs a + ac) (memWritesSize w + wc) nm (a <> a0, w <> w0) s
BranchFrame ac wc (a0, w0) s ->
BranchFrame (length a + ac) (memWritesSize w + wc) (a ++ a0, w <> w0) s
BranchFrame (sizeMemAllocs a + ac) (memWritesSize w + wc) (a <> a0, w <> w0) s

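memAddAlloc now takes an update function on the allocation history rather than a single record to cons, so callers pass allocMemAllocs or freeMemAllocs (see the allocMem and freeMem hunks below) and the innermost frame applies it. memStateAddChanges correspondingly counts with sizeMemAllocs and appends with <>, which presumes a Semigroup instance for MemAllocs. The call pattern looks like the sketch below; the block number 7 and the name exampleAddAlloc are made up, and noAlignment is assumed to be the usual crucible-llvm Alignment constant.

-- Illustrative call pattern only (not part of this diff): record a fresh
-- mutable heap allocation of the given size as block 7.
exampleAddAlloc :: (1 <= w) => SymBV sym w -> Mem sym -> Mem sym
exampleAddAlloc sz =
  memAddAlloc (allocMemAllocs 7 (AllocInfo HeapAlloc (Just sz) Mutable noAlignment "example"))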

--------------------------------------------------------------------------------
-- Pointer validity


-- | @isAllocatedMut isMut sym w p sz m@ returns the condition required to
-- prove range @[p..p+sz)@ lies within a single allocation in @m@.
--
@@ -978,13 +970,18 @@ isAllocatedMut ::
Mem sym ->
IO (Pred sym)
isAllocatedMut mutOk sym w minAlign (llvmPointerView -> (blk, off)) sz m =
go (pure (falsePred sym)) (memAllocs m)
isAllocatedGeneric sym inAllocation blk (memAllocs m)
where
-- @inThisAllocation a allocatedSz@ produces the predicate that
inAllocation :: AllocInfo sym -> IO (Pred sym)
inAllocation (AllocInfo _ asz mut alignment _)
| mutOk mut && alignment >= minAlign = inBounds asz
| otherwise = pure (falsePred sym)

-- @inBounds a allocatedSz@ produces the predicate that
-- records whether the pointer @ptr@ of size @sz@ falls within the
-- allocation of block @a@ of size @allocatedSz@.
inThisAllocation :: forall w'. Maybe (SymBV sym w') -> IO (Pred sym)
inThisAllocation Nothing =
inBounds :: forall w'. Maybe (SymBV sym w') -> IO (Pred sym)
inBounds Nothing =
case sz of
Nothing ->
-- Unbounded access of an unbounded allocation must start at offset 0.
@@ -998,7 +995,7 @@ isAllocatedMut mutOk sym w minAlign (llvmPointerView -> (blk, off)) sz m =
noWrap <- bvUle sym off =<< bvNeg sym currSize
orPred sym zeroSize noWrap

inThisAllocation (Just allocSize)
inBounds (Just allocSize)
-- If the allocation is done at pointer width is equal to @w@, check
-- if this allocation covers the required range
| Just Refl <- testEquality w (bvWidth allocSize)
@@ -1008,32 +1005,13 @@ isAllocatedMut mutOk sym w minAlign (llvmPointerView -> (blk, off)) sz m =
inRange <- bvUle sym off maxOffset -- offset(ptr) <= maxOffset
andPred sym smallSize inRange

inThisAllocation (Just _allocSize)
inBounds (Just _allocSize)
-- If the allocation is done at pointer width not equal to @w@,
-- then this is not the allocation we're looking for. Similarly,
-- if @sz@ is @Nothing@ (indicating we are accessing the entire
-- address space) then any bounded allocation is too small.
| otherwise = return $ falsePred sym

go :: IO (Pred sym) -> [MemAlloc sym] -> IO (Pred sym)
go fallback [] = fallback
go fallback (Alloc _ a asz mut alignment _ : r)
| mutOk mut && alignment >= minAlign =
-- If the block ID matches, then call 'inThisAllocation',
-- otherwise search the remainder of the allocation history.
do sameBlock <- natEq sym blk =<< natLit sym a
itePredM sym sameBlock (inThisAllocation asz) (go fallback r)
| otherwise = go fallback r
go fallback (MemFree a _ : r) =
do notSameBlock <- notPred sym =<< natEq sym blk a
andPred sym notSameBlock =<< go fallback r
go fallback (AllocMerge _ [] [] : r) = go fallback r
go fallback (AllocMerge c xr yr : r) =
do p <- go fallback r -- TODO: wrap this in a delay
px <- go (return p) xr
py <- go (return p) yr
itePred sym c px py

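The block-number search, free handling, and merge handling that the removed go loop performed by hand are delegated to isAllocatedGeneric, whose definition lives with the MemAllocs type outside this diff. Its interface, as assumed from the three call sites in this file (isAllocatedMut above, notAliasable and freeMem below), is sketched here:

-- Assumed interface; the definition is not shown in this diff. Returns the
-- predicate that the given block number refers to a live allocation
-- satisfying the supplied per-allocation test, presumably conjoining
-- disequalities for freed blocks and muxing across merges, as the
-- traversals it replaces did.
isAllocatedGeneric ::
  (IsExpr (SymExpr sym), IsExprBuilder sym) =>
  sym ->
  (AllocInfo sym -> IO (Pred sym)) ->
  SymNat sym ->
  MemAllocs sym ->
  IO (Pred sym)
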
-- | @isAllocated sym w p sz m@ returns the condition required to prove
-- range @[p..p+sz)@ lies within a single allocation in @m@.
--
@@ -1138,23 +1116,13 @@ notAliasable ::
IO (Pred sym)
notAliasable sym (llvmPointerView -> (blk1, _)) (llvmPointerView -> (blk2, _)) mem =
do p0 <- natEq sym blk1 blk2
p1 <- isMutable blk1 (memAllocs mem)
p2 <- isMutable blk2 (memAllocs mem)
p1 <- isAllocatedGeneric sym isMutable blk1 (memAllocs mem)
p2 <- isAllocatedGeneric sym isMutable blk2 (memAllocs mem)
orPred sym p0 =<< orPred sym p1 p2
where
isMutable _blk [] = return (falsePred sym)
isMutable blk (Alloc _ _ _ Immutable _ _ : r) = isMutable blk r
isMutable blk (Alloc _ a _ Mutable _ _ : r) =
do p1 <- natEq sym blk =<< natLit sym a
p2 <- isMutable blk r
orPred sym p1 p2
isMutable blk (MemFree _ _ : r) = isMutable blk r
isMutable blk (AllocMerge c x y : r) =
do px <- isMutable blk x
py <- isMutable blk y
p1 <- itePred sym c px py
p2 <- isMutable blk r
orPred sym p1 p2
isMutable :: AllocInfo sym -> IO (Pred sym)
isMutable (AllocInfo _ _ Mutable _ _) = pure (truePred sym)
isMutable (AllocInfo _ _ Immutable _ _) = pure (falsePred sym)

--------------------------------------------------------------------------------
-- Other memory operations
@@ -1353,7 +1321,8 @@ allocMem :: (1 <= w) =>
-> String -- ^ Source location
-> Mem sym
-> Mem sym
allocMem a b sz alignment mut loc = memAddAlloc (Alloc a b sz mut alignment loc)
allocMem a b sz alignment mut loc =
memAddAlloc (allocMemAllocs b (AllocInfo a sz mut alignment loc))

-- | Allocate and initialize a new memory region.
allocAndWriteMem ::
@@ -1372,18 +1341,19 @@ allocAndWriteMem sym w a b tp alignment mut loc v m =
base <- natLit sym b
off <- bvLit sym w (BV.zero w)
let p = LLVMPointer base off
return (m & memAddAlloc (Alloc a b (Just sz) mut alignment loc)
return (m & allocMem a b (Just sz) alignment mut loc
& memAddWrite p (MemStore v tp alignment))

pushStackFrameMem :: Text -> Mem sym -> Mem sym
pushStackFrameMem nm = memState %~ \s ->
StackFrame (memStateAllocCount s) (memStateWriteCount s) nm emptyChanges s

popStackFrameMem :: Mem sym -> Mem sym
popStackFrameMem :: forall sym. Mem sym -> Mem sym
popStackFrameMem m = m & memState %~ popf
where popf (StackFrame _ _ _ (a,w) s) =
where popf :: MemState sym -> MemState sym
popf (StackFrame _ _ _ (a,w) s) =
s & memStateAddChanges c
where c = (mapMaybe pa a, w)
where c = (popMemAllocs a, w)

-- WARNING: The following code executes a stack pop underneath a branch
-- frame. This is necessary to get merges to work correctly
@@ -1394,16 +1364,11 @@ popStackFrameMem m = m & memState %~ popf
-- This does not appear to be causing problems for our
-- examples, but may be a source of subtle errors.
popf (BranchFrame _ wc (a,w) s) =
BranchFrame (length (fst c)) wc c $ popf s
where c = (mapMaybe pa a, w)
BranchFrame (sizeMemAllocs (fst c)) wc c $ popf s
where c = (popMemAllocs a, w)

popf _ = error "popStackFrameMem given unexpected memory"
popf EmptyMem{} = error "popStackFrameMem given unexpected memory"

pa (Alloc StackAlloc _ _ _ _ _) = Nothing
pa a@(Alloc HeapAlloc _ _ _ _ _) = Just a
pa a@(Alloc GlobalAlloc _ _ _ _ _) = Just a
pa a@(MemFree _ _) = Just a
pa (AllocMerge c x y) = Just (AllocMerge c (mapMaybe pa x) (mapMaybe pa y))

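The local pa helper, which dropped stack allocations while keeping heap and global allocations, frees, and merges, is replaced by popMemAllocs. A sketch of its presumed behaviour in terms of the assumed constructors from the top of this page (Map here would be Data.Map imported qualified):

-- Sketch only; not the merged definition.
popMemAllocs :: MemAllocs sym -> MemAllocs sym
popMemAllocs (MemAllocs xs) = MemAllocs (map popEntry xs)
  where
    popEntry (Allocations m)    = Allocations (Map.filter keep m)
    popEntry (MemFree blk loc)  = MemFree blk loc
    popEntry (AllocMerge c l r) = AllocMerge c (popMemAllocs l) (popMemAllocs r)

    keep (AllocInfo StackAlloc _ _ _ _) = False
    keep _                              = True
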
-- | Free a heap-allocated block of memory.
--
@@ -1423,37 +1388,12 @@ freeMem :: forall sym w .
IO (Mem sym, Pred sym, Pred sym)
freeMem sym w (LLVMPointer blk off) m loc =
do p1 <- bvEq sym off =<< bvLit sym w (BV.zero w)
p2 <- isHeapAllocated (return (falsePred sym)) (memAllocs m)
return (memAddAlloc (MemFree blk loc) m, p1, p2)
p2 <- isAllocatedGeneric sym isHeapMutable blk (memAllocs m)
return (memAddAlloc (freeMemAllocs blk loc) m, p1, p2)
where
isHeapAllocated :: IO (Pred sym) -> [MemAlloc sym] -> IO (Pred sym)
isHeapAllocated fallback [] = fallback
isHeapAllocated fallback (alloc : r) =
case alloc of
Alloc HeapAlloc a _ Mutable _ _ ->
do sameBlock <- natEq sym blk =<< natLit sym a
case asConstantPred sameBlock of
Just True -> return (truePred sym)
Just False -> isHeapAllocated fallback r
Nothing -> orPred sym sameBlock =<< isHeapAllocated fallback r
Alloc{} ->
isHeapAllocated fallback r
MemFree a _ ->
do sameBlock <- natEq sym blk a
case asConstantPred sameBlock of
Just True -> return (falsePred sym)
Just False -> isHeapAllocated fallback r
Nothing ->
do notSameBlock <- notPred sym sameBlock
andPred sym notSameBlock =<< isHeapAllocated fallback r
AllocMerge _ [] [] ->
isHeapAllocated fallback r
AllocMerge c xr yr ->
do p <- isHeapAllocated fallback r -- TODO: wrap this in a delay
px <- isHeapAllocated (return p) xr
py <- isHeapAllocated (return p) yr
itePred sym c px py

isHeapMutable :: AllocInfo sym -> IO (Pred sym)
isHeapMutable (AllocInfo HeapAlloc _ Mutable _ _) = pure (truePred sym)
isHeapMutable _ = pure (falsePred sym)

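freeMem now records the free with a single history update, freeMemAllocs, and checks that the pointer names a live mutable heap block by reusing isAllocatedGeneric with the isHeapMutable predicate above, replacing the bespoke isHeapAllocated traversal. A sketch of freeMemAllocs, assuming the constructors from the top of this page; only the name and its use at the call site appear in the diff:

-- Sketch only; not the merged definition. Mark the given block as freed,
-- tagged with the source location of the free.
freeMemAllocs :: SymNat sym -> String -> MemAllocs sym -> MemAllocs sym
freeMemAllocs blk loc (MemAllocs xs) = MemAllocs (MemFree blk loc : xs)
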
branchMem :: Mem sym -> Mem sym
branchMem = memState %~ \s ->
@@ -1491,35 +1431,20 @@ instance IsSymInterface sym => Eq (SomeAlloc sym) where

ppSomeAlloc :: forall sym. IsExprBuilder sym => SomeAlloc sym -> Doc
ppSomeAlloc (SomeAlloc atp base sz mut alignment loc) =
ppAlloc (Alloc atp base sz mut alignment loc :: MemAlloc sym)
ppAllocInfo (base, AllocInfo atp sz mut alignment loc :: AllocInfo sym)

-- | Find an overapproximation of the set of allocations with this number.
--
-- Ultimately, only one of these could have happened.
--
-- It may be possible to be more precise than this function currently is.
-- In particular, if we find an 'Alloc' with a matching block number before a
-- 'AllocMerge', then we can (maybe?) just return that 'Alloc'. And if one
-- call of @helper@ on a 'MemAlloc' returns anything nonempty, that can just
-- be returned (?).
possibleAllocs ::
forall sym .
(IsSymInterface sym) =>
Natural ->
Mem sym ->
[SomeAlloc sym]
possibleAllocs n = helper . memAllocs
where helper =
foldMap $
\case
MemFree _ _ -> []
Alloc atp base sz mut alignment loc ->
if base == n
then [SomeAlloc atp base sz mut alignment loc]
else []
AllocMerge (asConstantPred -> Just True) as1 _as2 -> helper as1
AllocMerge (asConstantPred -> Just False) _as1 as2 -> helper as2
AllocMerge _ as1 as2 -> helper as1 ++ helper as2
possibleAllocs n mem =
case possibleAllocInfo n (memAllocs mem) of
Nothing -> []
Just (AllocInfo atp sz mut alignment loc) ->
[SomeAlloc atp n sz mut alignment loc]

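Because possibleAllocInfo resolves a block number to at most one AllocInfo, the rewritten possibleAllocs returns a list of length at most one, rather than collecting candidates from both sides of unresolved merges as the removed fold did. A minimal caller, using only names whose types appear in this file; reportAlloc itself is a hypothetical name:

-- Hypothetical usage, not part of this diff.
reportAlloc :: IsSymInterface sym => Natural -> Mem sym -> Maybe Doc
reportAlloc n mem =
  case possibleAllocs n mem of
    []      -> Nothing
    (a : _) -> Just (ppSomeAlloc a)
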
-- | Check if @LLVMPtr sym w@ points inside an allocation that is backed
-- by an SMT array store. If true, return a predicate that indicates