diff --git a/Cargo.toml b/Cargo.toml
index 4975b6b2..34b64f21 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,12 +1,14 @@
 [workspace]
 
 members = [
-    "chain",
+    "command",
+    "factory",
+    "frame",
     "memory",
+    "mesh",
+    "renderer",
+    "rendy",
     "resource",
-    "command",
-    "layout",
-    "layout-derive",
-    "graph",
-    "rendy"
+    "shader",
+    "wsi",
 ]
diff --git a/chain/Cargo.toml b/chain/Cargo.toml
index c8b19b68..a8704e74 100644
--- a/chain/Cargo.toml
+++ b/chain/Cargo.toml
@@ -4,7 +4,5 @@ version = "0.1.0"
 authors = ["omni-viral <scareaangel@gmail.com>"]
 
 [dependencies]
-bitflags = "1.0"
+ash = { path = "../../ash/ash" }
 fnv = "1.0"
-failure = "0.1"
-rendy-resource = { path = "../resource" }
diff --git a/chain/src/access.rs b/chain/src/access.rs
index 814f2b96..1fad92e6 100644
--- a/chain/src/access.rs
+++ b/chain/src/access.rs
@@ -1,89 +1,82 @@
-bitflags! {
-    /// Bitmask specifying memory access types that will participate in a memory dependency.
-    /// See Vulkan docs for detailed info:
-    /// <https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/VkAccessFlagBits.html>
-    #[repr(transparent)]
-    pub struct AccessFlags: u32 {
-        /// Access type performed by the device to read commands from indirect command buffer.
-        const INDIRECT_COMMAND_READ = 0x00000001;
 
-        /// Access type performed by the device to read from index buffer.
-        const INDEX_READ = 0x00000002;
+use ash::vk::AccessFlags;
 
-        /// Access type performed by the device to read from vertex attributes.
-        const VERTEX_ATTRIBUTE_READ = 0x00000004;
-
-        /// Access type performed by the device to read from uniform buffers.
-        const UNIFORM_READ = 0x00000008;
-
-        /// Access type performed by the device to read from input attachment.
-        const INPUT_ATTACHMENT_READ = 0x00000010;
-
-        /// Access type performed by the device to read from storage/uniform-texel/storage-texel buffers or sampled/storage images.
-        const SHADER_READ = 0x00000020;
-
-        /// Access type performed by the device to write to storage/storage-texel buffers or storage images.
-        const SHADER_WRITE = 0x00000040;
-
-        /// Access type performed by the device to read from color attachment.
-        const COLOR_ATTACHMENT_READ = 0x00000080;
-
-        /// Access type performed by the device to write to color attachment.
-        const COLOR_ATTACHMENT_WRITE = 0x00000100;
-
-        /// Access type performed by the device to read from depth-stencil attachment.
-        const DEPTH_STENCIL_ATTACHMENT_READ = 0x00000200;
-
-        /// Access type performed by the device to write to depth-stencil attachment.
-        const DEPTH_STENCIL_ATTACHMENT_WRITE = 0x00000400;
-
-        /// Access type performed by the device to read content from source of transfer operations.
-        const TRANSFER_READ = 0x00000800;
-
-        /// Access type performed by the device to write content to destination of transfer operations.
-        const TRANSFER_WRITE = 0x00001000;
-
-        /// Access type performed by host reading.
-        const HOST_READ = 0x00002000;
-
-        /// Access type performed by host writing.
-        const HOST_WRITE = 0x00004000;
+/// Add methods to fetch higher-level info from access flags.
+pub trait AccessFlagsExt {
+    /// Check if this access must be exclusive.
+    ///
+    /// Basically this returns `true` unless every set flag is a known read flag.
+    fn exclusive(&self) -> bool;
+}
 
-        /// Access type performed to read data via non-specific entities.
-        const MEMORY_READ = 0x00008000;
+#[inline]
+#[allow(unused)]
+fn known_flags() -> AccessFlags { AccessFlags::INDIRECT_COMMAND_READ
+    | AccessFlags::INDEX_READ
+    | AccessFlags::VERTEX_ATTRIBUTE_READ
+    | AccessFlags::UNIFORM_READ
+    | AccessFlags::INPUT_ATTACHMENT_READ
+    | AccessFlags::SHADER_READ
+    | AccessFlags::SHADER_WRITE
+    | AccessFlags::COLOR_ATTACHMENT_READ
+    | AccessFlags::COLOR_ATTACHMENT_WRITE
+    | AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ
+    | AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE
+    | AccessFlags::TRANSFER_READ
+    | AccessFlags::TRANSFER_WRITE
+    | AccessFlags::HOST_READ
+    | AccessFlags::HOST_WRITE
+    | AccessFlags::MEMORY_READ
+    | AccessFlags::MEMORY_WRITE
+    // | AccessFlags::TRANSFORM_FEEDBACK_WRITE_EXT
+    // | AccessFlags::TRANSFORM_FEEDBACK_COUNTER_READ_EXT
+    // | AccessFlags::TRANSFORM_FEEDBACK_COUNTER_WRITE_EXT
+    // | AccessFlags::CONDITIONAL_RENDERING_READ_EXT
+    | AccessFlags::COMMAND_PROCESS_READ_NVX
+    | AccessFlags::COMMAND_PROCESS_WRITE_NVX
+    | AccessFlags::COLOR_ATTACHMENT_READ_NONCOHERENT_EXT
+    // | AccessFlags::SHADING_RATE_IMAGE_READ_NV
+    // | AccessFlags::ACCELERATION_STRUCTURE_READ_NVX
+    // | AccessFlags::ACCELERATION_STRUCTURE_WRITE_NVX
+}
 
-        /// Access type performed to write data via non-specific entities.
-        const MEMORY_WRITE = 0x00010000;
-    }
+#[inline]
+#[allow(unused)]
+fn write_flags() -> AccessFlags { AccessFlags::SHADER_WRITE
+    | AccessFlags::COLOR_ATTACHMENT_WRITE
+    | AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE
+    | AccessFlags::TRANSFER_WRITE
+    | AccessFlags::HOST_WRITE
+    | AccessFlags::MEMORY_WRITE
+    // | AccessFlags::TRANSFORM_FEEDBACK_WRITE_EXT
+    // | AccessFlags::TRANSFORM_FEEDBACK_COUNTER_WRITE_EXT
+    | AccessFlags::COMMAND_PROCESS_WRITE_NVX
+    // | AccessFlags::ACCELERATION_STRUCTURE_WRITE_NVX
 }
 
-impl AccessFlags {
-    /// Check if flags contains at least on write flag.
-    pub fn is_write(&self) -> bool {
-        self.intersects(
-            AccessFlags::SHADER_WRITE
-                | AccessFlags::COLOR_ATTACHMENT_WRITE
-                | AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE
-                | AccessFlags::TRANSFER_WRITE
-                | AccessFlags::HOST_WRITE
-                | AccessFlags::MEMORY_WRITE,
-        )
-    }
+#[inline]
+fn read_flags() -> AccessFlags { AccessFlags::INDIRECT_COMMAND_READ
+    | AccessFlags::INDEX_READ
+    | AccessFlags::VERTEX_ATTRIBUTE_READ
+    | AccessFlags::UNIFORM_READ
+    | AccessFlags::INPUT_ATTACHMENT_READ
+    | AccessFlags::SHADER_READ
+    | AccessFlags::COLOR_ATTACHMENT_READ
+    | AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ
+    | AccessFlags::TRANSFER_READ
+    | AccessFlags::HOST_READ
+    | AccessFlags::MEMORY_READ
+    // | AccessFlags::TRANSFORM_FEEDBACK_COUNTER_READ_EXT
+    // | AccessFlags::CONDITIONAL_RENDERING_READ_EXT
+    | AccessFlags::COMMAND_PROCESS_READ_NVX
+    | AccessFlags::COLOR_ATTACHMENT_READ_NONCOHERENT_EXT
+    // | AccessFlags::SHADING_RATE_IMAGE_READ_NV
+    // | AccessFlags::ACCELERATION_STRUCTURE_READ_NVX
+}
 
-    /// Check if flags contains at least on read flag.
-    pub fn is_read(&self) -> bool {
-        self.intersects(
-            AccessFlags::INDIRECT_COMMAND_READ
-                | AccessFlags::INDEX_READ
-                | AccessFlags::VERTEX_ATTRIBUTE_READ
-                | AccessFlags::UNIFORM_READ
-                | AccessFlags::INPUT_ATTACHMENT_READ
-                | AccessFlags::SHADER_READ
-                | AccessFlags::COLOR_ATTACHMENT_READ
-                | AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ
-                | AccessFlags::TRANSFER_READ
-                | AccessFlags::HOST_READ
-                | AccessFlags::MEMORY_READ,
-        )
+impl AccessFlagsExt for AccessFlags {
+    #[inline]
+    fn exclusive(&self) -> bool {
+        !read_flags().subset(*self)
     }
 }
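+
+// A minimal sketch of the intended `exclusive` contract: read-only access can be
+// shared, while any access containing a write must be exclusive. The test names
+// are illustrative.
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn read_only_access_is_not_exclusive() {
+        assert!(!(AccessFlags::SHADER_READ | AccessFlags::UNIFORM_READ).exclusive());
+    }
+
+    #[test]
+    fn write_access_is_exclusive() {
+        assert!((AccessFlags::SHADER_READ | AccessFlags::SHADER_WRITE).exclusive());
+    }
+}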
diff --git a/chain/src/chain/link.rs b/chain/src/chain/link.rs
index f1854770..d3f1e04b 100644
--- a/chain/src/chain/link.rs
+++ b/chain/src/chain/link.rs
@@ -1,8 +1,10 @@
-use access::AccessFlags;
+
+use ash::vk;
+
+use access::AccessFlagsExt;
 use node::State;
 use resource::Resource;
-use schedule::{FamilyId, QueueId, SubmissionId};
-use stage::PipelineStageFlags;
+use schedule::{FamilyIndex, QueueId, SubmissionId};
 
 /// State of the link associated with queue.
 /// Contains submissions range, combined access and stages bits by submissions from the range.
@@ -10,8 +12,8 @@ use stage::PipelineStageFlags;
 pub(crate) struct LinkQueueState {
     pub(crate) first: usize,
     pub(crate) last: usize,
-    pub(crate) access: AccessFlags,
-    pub(crate) stages: PipelineStageFlags,
+    pub(crate) access: vk::AccessFlags,
+    pub(crate) stages: vk::PipelineStageFlags,
 }
 
 impl LinkQueueState {
@@ -39,7 +41,7 @@ impl LinkQueueState {
 #[derive(Clone, Debug)]
 pub(crate) struct Link<R: Resource> {
     /// Combination of all accesses.
-    access: AccessFlags,
+    access: vk::AccessFlags,
 
     /// Combination of all usages.
     usage: R::Usage,
@@ -48,7 +50,7 @@ pub(crate) struct Link<R: Resource> {
     layout: R::Layout,
 
     /// Combination of all stages.
-    stages: PipelineStageFlags,
+    stages: vk::PipelineStageFlags,
 
     /// Number of queues involved.
     queue_count: usize,
@@ -57,7 +59,7 @@ pub(crate) struct Link<R: Resource> {
     queues: Vec<Option<LinkQueueState>>,
 
     /// Family of queues.
-    family: FamilyId,
+    family: FamilyIndex,
 }
 
 /// Node for the link.
@@ -107,7 +109,7 @@ where
 
     /// Get queue family that owns the resource at the link.
     /// All associated submissions must be from the same queue family.
-    pub(crate) fn family(&self) -> FamilyId {
+    pub(crate) fn family(&self) -> FamilyIndex {
         self.family
     }
 
@@ -122,7 +124,7 @@ where
     }
 
     /// Get access.
-    pub(crate) fn access(&self) -> AccessFlags {
+    pub(crate) fn access(&self) -> vk::AccessFlags {
         self.access
     }
 
@@ -137,7 +139,7 @@ where
     }
 
     // /// Get usage.
-    // pub(crate) fn stages(&self) -> PipelineStageFlags {
+    // pub(crate) fn stages(&self) -> vk::PipelineStageFlags {
     //     self.stages
     // }
 
@@ -150,7 +152,7 @@ where
     /// If compatible then the submission can be associated with the link.
     pub(crate) fn compatible(&self, node: &LinkNode<R>) -> bool {
         // If queue the same and states are compatible.
-        self.family == node.sid.family() && !(self.access | node.state.access).is_write()
+        self.family == node.sid.family() && !(self.access | node.state.access).exclusive()
     }
 
     /// Insert submission with specified state to the link.
diff --git a/chain/src/collect.rs b/chain/src/collect.rs
index f7e8d299..234eb862 100644
--- a/chain/src/collect.rs
+++ b/chain/src/collect.rs
@@ -8,7 +8,7 @@ use chain::{BufferChains, Chain, ImageChains, Link, LinkNode};
 use node::{Node, State};
 use resource::{Buffer, Image, Resource};
 
-use schedule::{FamilyId, Queue, QueueId, Schedule, Submission, SubmissionId};
+use schedule::{FamilyIndex, Queue, QueueId, Schedule, Submission, SubmissionId};
 
 use Id;
 
@@ -18,9 +18,9 @@ pub struct Unsynchronized;
 
 /// Result of node scheduler.
 #[derive(Debug)]
-pub struct Chains<S = Unsynchronized> {
+pub struct Chains {
     /// Contains submissions for nodes spread among queue schedule.
-    pub schedule: Schedule<S>,
+    pub schedule: Schedule<Unsynchronized>,
 
     /// Contains all buffer chains.
     pub buffers: BufferChains,
@@ -37,7 +37,7 @@ struct Fitness {
 
 struct ResolvedNode {
     id: usize,
-    family: FamilyId,
+    family: FamilyIndex,
     queues: Range<usize>,
     rev_deps: Vec<usize>,
     buffers: Vec<(usize, State<Buffer>)>,
@@ -48,7 +48,7 @@ impl Default for ResolvedNode {
     fn default() -> Self {
         ResolvedNode {
             id: 0,
-            family: FamilyId(0),
+            family: FamilyIndex(0),
             queues: 0..0,
             rev_deps: Vec::new(),
             buffers: Vec::new(),
@@ -68,7 +68,7 @@ struct ChainData<R: Resource> {
     chain: Chain<R>,
     last_link_wait_factor: usize,
     current_link_wait_factor: usize,
-    current_family: Option<FamilyId>,
+    current_family: Option<FamilyIndex>,
 }
 impl<R: Resource> Default for ChainData<R> {
     fn default() -> Self {
@@ -90,7 +90,7 @@ struct QueueData {
 /// This function tries to find most appropriate schedule for nodes execution.
 pub fn collect<Q>(nodes: Vec<Node>, max_queues: Q) -> Chains
 where
-    Q: Fn(FamilyId) -> usize,
+    Q: Fn(FamilyIndex) -> usize,
 {
     // Resolve nodes into a form faster to work with.
     let (nodes, mut unscheduled_nodes) = resolve_nodes(nodes, max_queues);
@@ -205,7 +205,7 @@ impl<I: Hash + Eq + Copy> LookupBuilder<I> {
 
 fn resolve_nodes<Q>(nodes: Vec<Node>, max_queues: Q) -> (ResolvedNodeSet, Vec<usize>)
 where
-    Q: Fn(FamilyId) -> usize,
+    Q: Fn(FamilyIndex) -> usize,
 {
     let node_count = nodes.len();
 
@@ -382,7 +382,7 @@ fn schedule_node<'a>(
 
 fn add_to_chain<R, S>(
     id: Id,
-    family: FamilyId,
+    family: FamilyIndex,
     chain_data: &mut ChainData<R>,
     sid: SubmissionId,
     submission: &mut Submission<S>,
diff --git a/chain/src/lib.rs b/chain/src/lib.rs
index f15576c9..48614785 100644
--- a/chain/src/lib.rs
+++ b/chain/src/lib.rs
@@ -1,55 +1,44 @@
 //! This crate can derive synchronization required
 //! for the dependency chain of the whole execution graph.
 
-// #![forbid(overflowing_literals)]
-// #![deny(missing_copy_implementations)]
-// #![deny(missing_debug_implementations)]
-// #![deny(missing_docs)]
-// #![deny(intra_doc_link_resolution_failure)]
-// #![deny(path_statements)]
-// #![deny(trivial_bounds)]
-// #![deny(type_alias_bounds)]
-// #![deny(unconditional_recursion)]
-// #![deny(unions_with_drop_fields)]
-// #![deny(while_true)]
-// #![deny(unused)]
-// #![deny(bad_style)]
-// #![deny(future_incompatible)]
-// #![warn(rust_2018_compatibility)]
-// #![warn(rust_2018_idioms)]
-
-#[macro_use]
-extern crate bitflags;
-
+#![forbid(overflowing_literals)]
+#![deny(missing_copy_implementations)]
+#![deny(missing_debug_implementations)]
+#![deny(missing_docs)]
+#![deny(intra_doc_link_resolution_failure)]
+#![deny(path_statements)]
+#![deny(trivial_bounds)]
+#![deny(type_alias_bounds)]
+#![deny(unconditional_recursion)]
+#![deny(unions_with_drop_fields)]
+#![deny(while_true)]
+#![deny(unused)]
+#![deny(bad_style)]
+#![deny(future_incompatible)]
+#![deny(rust_2018_compatibility)]
+#![deny(rust_2018_idioms)]
+
+extern crate ash;
 extern crate fnv;
 
-extern crate rendy_resource;
-
 /// Unique resource id.
 #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub struct Id(pub u64);
 
-/// ???
 mod access;
-/// ???
 mod chain;
-/// ???
 mod collect;
-/// ???
 mod node;
-/// ???
 mod resource;
-/// ???
 mod schedule;
-/// ???
 mod stage;
-/// ???
 mod sync;
 
+pub use access::AccessFlagsExt;
 pub use chain::Chain;
-pub use node::{Node, State};
+pub use collect::{collect, Chains, Unsynchronized};
+pub use node::{Node, State, BufferState, ImageState};
 pub use resource::{Buffer, Image, Resource};
-pub use stage::{PipelineStageFlags, GraphicsPipelineStage, ComputePipelineStage};
-pub use sync::SyncData;
-pub use schedule::Schedule;
-
+pub use schedule::{Family, FamilyIndex, Queue, QueueId, Schedule, Submission, SubmissionId};
+pub use stage::{ComputePipelineStage, GraphicsPipelineStage};
+pub use sync::{sync, SyncData, Barriers, BufferBarriers, ImageBarriers, Guard, Wait, Signal};
diff --git a/chain/src/node.rs b/chain/src/node.rs
index 647ee04f..e835c082 100644
--- a/chain/src/node.rs
+++ b/chain/src/node.rs
@@ -1,27 +1,33 @@
 use std::collections::hash_map::{HashMap, Iter as HashMapIter};
 
-use access::AccessFlags;
+use ash::vk;
+
 use resource::{Buffer, Image, Resource};
-use schedule::FamilyId;
-use stage::PipelineStageFlags;
+use schedule::FamilyIndex;
 use Id;
 
 /// State in which node uses resource and usage flags.
 #[derive(Clone, Copy, Debug)]
 pub struct State<R: Resource> {
     /// Access performed by the node.
-    pub access: AccessFlags,
+    pub access: vk::AccessFlags,
 
     /// Optional layout in which node can use resource.
     pub layout: R::Layout,
 
     /// Stages at which resource is accessed.
-    pub stages: PipelineStageFlags,
+    pub stages: vk::PipelineStageFlags,
 
     /// Usage flags required for resource.
     pub usage: R::Usage,
 }
 
+/// Type alias for `State<Buffer>`
+pub type BufferState = State<Buffer>;
+
+/// Type alias for `State<Image>`
+pub type ImageState = State<Image>;
+
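+// A minimal sketch, assuming a node that reads a uniform buffer in the vertex
+// shader: this is how a `BufferState` would be filled for such use.
+#[allow(unused)]
+fn uniform_read_state() -> BufferState {
+    State {
+        access: vk::AccessFlags::UNIFORM_READ,
+        layout: (),
+        stages: vk::PipelineStageFlags::VERTEX_SHADER,
+        usage: vk::BufferUsageFlags::UNIFORM_BUFFER,
+    }
+}
+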
 /// Description of node.
 #[derive(Clone, Debug)]
 pub struct Node {
@@ -29,7 +35,7 @@ pub struct Node {
     pub id: usize,
 
     /// Family required to execute the node.
-    pub family: FamilyId,
+    pub family: FamilyIndex,
 
     /// Dependencies of the node.
     /// Those are indices of other nodes in array.
@@ -44,7 +50,7 @@ pub struct Node {
 
 impl Node {
     /// Get family on which this node will be executed.
-    pub fn family(&self) -> FamilyId {
+    pub fn family(&self) -> FamilyIndex {
         self.family
     }
 
diff --git a/chain/src/resource.rs b/chain/src/resource.rs
index 2d891480..19e5c38b 100644
--- a/chain/src/resource.rs
+++ b/chain/src/resource.rs
@@ -1,10 +1,9 @@
-use rendy_resource::{buffer, image};
 use std::{
     fmt::Debug,
     ops::{BitOr, BitOrAssign},
 };
 
-use access::AccessFlags;
+use ash::vk;
 
 /// Abstracts resource types that uses different usage flags and layouts types.
 pub trait Resource: 'static {
@@ -18,21 +17,21 @@ pub trait Resource: 'static {
     fn no_usage() -> Self::Usage;
 
     /// Layout suitable for specified accesses.
-    fn layout_for(access: AccessFlags) -> Self::Layout;
+    fn layout_for(access: vk::AccessFlags) -> Self::Layout;
 
     /// Check if all usage flags required for access are set.
-    fn valid_usage(access: AccessFlags, usage: Self::Usage) -> bool;
+    fn valid_usage(access: vk::AccessFlags, usage: Self::Usage) -> bool;
 }
 
-const BUFFER_ACCESSES: [AccessFlags; 8] = [
-    AccessFlags::INDIRECT_COMMAND_READ,
-    AccessFlags::INDEX_READ,
-    AccessFlags::VERTEX_ATTRIBUTE_READ,
-    AccessFlags::UNIFORM_READ,
-    AccessFlags::SHADER_READ,
-    AccessFlags::SHADER_WRITE,
-    AccessFlags::TRANSFER_READ,
-    AccessFlags::TRANSFER_WRITE,
+const BUFFER_ACCESSES: [vk::AccessFlags; 8] = [
+    vk::AccessFlags::INDIRECT_COMMAND_READ,
+    vk::AccessFlags::INDEX_READ,
+    vk::AccessFlags::VERTEX_ATTRIBUTE_READ,
+    vk::AccessFlags::UNIFORM_READ,
+    vk::AccessFlags::SHADER_READ,
+    vk::AccessFlags::SHADER_WRITE,
+    vk::AccessFlags::TRANSFER_READ,
+    vk::AccessFlags::TRANSFER_WRITE,
 ];
 
 /// Buffer resource type.
@@ -40,48 +39,48 @@ const BUFFER_ACCESSES: [AccessFlags; 8] = [
 pub struct Buffer;
 
 impl Resource for Buffer {
-    type Usage = buffer::UsageFlags;
+    type Usage = vk::BufferUsageFlags;
     type Layout = ();
 
     fn no_usage() -> Self::Usage {
-        buffer::UsageFlags::empty()
+        vk::BufferUsageFlags::empty()
     }
 
-    fn layout_for(_access: AccessFlags) {}
+    fn layout_for(_access: vk::AccessFlags) {}
 
-    fn valid_usage(access: AccessFlags, usage: buffer::UsageFlags) -> bool {
+    fn valid_usage(access: vk::AccessFlags, usage: vk::BufferUsageFlags) -> bool {
         BUFFER_ACCESSES.iter().all(|&access_bit| {
-            !access.contains(access_bit) || usage.intersects(match access_bit {
-                AccessFlags::INDIRECT_COMMAND_READ => buffer::UsageFlags::INDIRECT_BUFFER,
-                AccessFlags::INDEX_READ => buffer::UsageFlags::INDEX_BUFFER,
-                AccessFlags::VERTEX_ATTRIBUTE_READ => buffer::UsageFlags::VERTEX_BUFFER,
-                AccessFlags::UNIFORM_READ => buffer::UsageFlags::UNIFORM_BUFFER,
-                AccessFlags::SHADER_READ => {
-                    buffer::UsageFlags::STORAGE_BUFFER
-                        | buffer::UsageFlags::UNIFORM_TEXEL_BUFFER
-                        | buffer::UsageFlags::STORAGE_TEXEL_BUFFER
+            !access.subset(access_bit) || usage.intersects(match access_bit {
+                vk::AccessFlags::INDIRECT_COMMAND_READ => vk::BufferUsageFlags::INDIRECT_BUFFER,
+                vk::AccessFlags::INDEX_READ => vk::BufferUsageFlags::INDEX_BUFFER,
+                vk::AccessFlags::VERTEX_ATTRIBUTE_READ => vk::BufferUsageFlags::VERTEX_BUFFER,
+                vk::AccessFlags::UNIFORM_READ => vk::BufferUsageFlags::UNIFORM_BUFFER,
+                vk::AccessFlags::SHADER_READ => {
+                    vk::BufferUsageFlags::STORAGE_BUFFER
+                        | vk::BufferUsageFlags::UNIFORM_TEXEL_BUFFER
+                        | vk::BufferUsageFlags::STORAGE_TEXEL_BUFFER
                 }
-                AccessFlags::SHADER_WRITE => {
-                    buffer::UsageFlags::STORAGE_BUFFER | buffer::UsageFlags::STORAGE_TEXEL_BUFFER
+                vk::AccessFlags::SHADER_WRITE => {
+                    vk::BufferUsageFlags::STORAGE_BUFFER | vk::BufferUsageFlags::STORAGE_TEXEL_BUFFER
                 }
-                AccessFlags::TRANSFER_READ => buffer::UsageFlags::TRANSFER_SRC,
-                AccessFlags::TRANSFER_WRITE => buffer::UsageFlags::TRANSFER_DST,
+                vk::AccessFlags::TRANSFER_READ => vk::BufferUsageFlags::TRANSFER_SRC,
+                vk::AccessFlags::TRANSFER_WRITE => vk::BufferUsageFlags::TRANSFER_DST,
                 _ => unreachable!(),
             })
         })
     }
 }
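+
+// A minimal sketch of the `valid_usage` contract for buffers: a uniform read is
+// only valid when the buffer was created with the UNIFORM_BUFFER usage bit.
+#[cfg(test)]
+mod buffer_usage_tests {
+    use super::*;
+
+    #[test]
+    fn uniform_read_requires_uniform_usage() {
+        assert!(Buffer::valid_usage(
+            vk::AccessFlags::UNIFORM_READ,
+            vk::BufferUsageFlags::UNIFORM_BUFFER,
+        ));
+        assert!(!Buffer::valid_usage(
+            vk::AccessFlags::UNIFORM_READ,
+            vk::BufferUsageFlags::VERTEX_BUFFER,
+        ));
+    }
+}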
 
-const IMAGE_ACCESSES: [AccessFlags; 9] = [
-    AccessFlags::INPUT_ATTACHMENT_READ,
-    AccessFlags::COLOR_ATTACHMENT_READ,
-    AccessFlags::COLOR_ATTACHMENT_WRITE,
-    AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ,
-    AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE,
-    AccessFlags::SHADER_READ,
-    AccessFlags::SHADER_WRITE,
-    AccessFlags::TRANSFER_READ,
-    AccessFlags::TRANSFER_WRITE,
+const IMAGE_ACCESSES: [vk::AccessFlags; 9] = [
+    vk::AccessFlags::INPUT_ATTACHMENT_READ,
+    vk::AccessFlags::COLOR_ATTACHMENT_READ,
+    vk::AccessFlags::COLOR_ATTACHMENT_WRITE,
+    vk::AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ,
+    vk::AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE,
+    vk::AccessFlags::SHADER_READ,
+    vk::AccessFlags::SHADER_WRITE,
+    vk::AccessFlags::TRANSFER_READ,
+    vk::AccessFlags::TRANSFER_WRITE,
 ];
 
 /// Image resource type.
@@ -89,68 +88,68 @@ const IMAGE_ACCESSES: [AccessFlags; 9] = [
 pub struct Image;
 
 impl Resource for Image {
-    type Usage = image::UsageFlags;
+    type Usage = vk::ImageUsageFlags;
 
-    type Layout = image::Layout;
+    type Layout = vk::ImageLayout;
 
     fn no_usage() -> Self::Usage {
-        image::UsageFlags::empty()
+        vk::ImageUsageFlags::empty()
     }
 
-    fn layout_for(access: AccessFlags) -> image::Layout {
+    fn layout_for(access: vk::AccessFlags) -> vk::ImageLayout {
         IMAGE_ACCESSES
             .iter()
             .fold(None, |acc, &access_bit| {
-                if access.contains(access_bit) {
+                if access.subset(access_bit) {
                     let layout = match access_bit {
-                        AccessFlags::INPUT_ATTACHMENT_READ => image::Layout::ShaderReadOnlyOptimal,
-                        AccessFlags::COLOR_ATTACHMENT_READ => image::Layout::ColorAttachmentOptimal,
-                        AccessFlags::COLOR_ATTACHMENT_WRITE => {
-                            image::Layout::ColorAttachmentOptimal
+                        vk::AccessFlags::INPUT_ATTACHMENT_READ => vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL,
+                        vk::AccessFlags::COLOR_ATTACHMENT_READ => vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL,
+                        vk::AccessFlags::COLOR_ATTACHMENT_WRITE => {
+                            vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL
                         }
-                        AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ => {
-                            image::Layout::DepthStencilReadOnlyOptimal
+                        vk::AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ => {
+                            vk::ImageLayout::DEPTH_STENCIL_READ_ONLY_OPTIMAL
                         }
-                        AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE => {
-                            image::Layout::DepthStencilAttachmentOptimal
+                        vk::AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE => {
+                            vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL
                         }
-                        AccessFlags::TRANSFER_READ => image::Layout::TransferSrcOptimal,
-                        AccessFlags::TRANSFER_WRITE => image::Layout::TransferDstOptimal,
+                        vk::AccessFlags::TRANSFER_READ => vk::ImageLayout::TRANSFER_SRC_OPTIMAL,
+                        vk::AccessFlags::TRANSFER_WRITE => vk::ImageLayout::TRANSFER_DST_OPTIMAL,
                         _ => unreachable!(),
                     };
                     Some(match (acc, layout) {
                         (None, layout) => layout,
                         (Some(left), right) if left == right => left,
                         (
-                            Some(image::Layout::DepthStencilReadOnlyOptimal),
-                            image::Layout::DepthStencilAttachmentOptimal,
-                        ) => image::Layout::DepthStencilAttachmentOptimal,
+                            Some(vk::ImageLayout::DEPTH_STENCIL_READ_ONLY_OPTIMAL),
+                            vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
+                        ) => vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                         (
-                            Some(image::Layout::DepthStencilAttachmentOptimal),
-                            image::Layout::DepthStencilReadOnlyOptimal,
-                        ) => image::Layout::DepthStencilAttachmentOptimal,
-                        (Some(_), _) => image::Layout::General,
+                            Some(vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL),
+                            vk::ImageLayout::DEPTH_STENCIL_READ_ONLY_OPTIMAL,
+                        ) => vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
+                        (Some(_), _) => vk::ImageLayout::GENERAL,
                     })
                 } else {
                     acc
                 }
-            }).unwrap_or(image::Layout::General)
+            }).unwrap_or(vk::ImageLayout::GENERAL)
     }
 
-    fn valid_usage(access: AccessFlags, usage: image::UsageFlags) -> bool {
+    fn valid_usage(access: vk::AccessFlags, usage: vk::ImageUsageFlags) -> bool {
         IMAGE_ACCESSES.iter().all(|&access_bit| {
-            !access.contains(access_bit) || usage.intersects(match access_bit {
-                AccessFlags::INPUT_ATTACHMENT_READ => image::UsageFlags::INPUT_ATTACHMENT,
-                AccessFlags::COLOR_ATTACHMENT_READ => image::UsageFlags::COLOR_ATTACHMENT,
-                AccessFlags::COLOR_ATTACHMENT_WRITE => image::UsageFlags::COLOR_ATTACHMENT,
-                AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ => {
-                    image::UsageFlags::DEPTH_STENCIL_ATTACHMENT
+            !access.subset(access_bit) || usage.intersects(match access_bit {
+                vk::AccessFlags::INPUT_ATTACHMENT_READ => vk::ImageUsageFlags::INPUT_ATTACHMENT,
+                vk::AccessFlags::COLOR_ATTACHMENT_READ => vk::ImageUsageFlags::COLOR_ATTACHMENT,
+                vk::AccessFlags::COLOR_ATTACHMENT_WRITE => vk::ImageUsageFlags::COLOR_ATTACHMENT,
+                vk::AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ => {
+                    vk::ImageUsageFlags::DEPTH_STENCIL_ATTACHMENT
                 }
-                AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE => {
-                    image::UsageFlags::DEPTH_STENCIL_ATTACHMENT
+                vk::AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE => {
+                    vk::ImageUsageFlags::DEPTH_STENCIL_ATTACHMENT
                 }
-                AccessFlags::TRANSFER_READ => image::UsageFlags::TRANSFER_SRC,
-                AccessFlags::TRANSFER_WRITE => image::UsageFlags::TRANSFER_DST,
+                vk::AccessFlags::TRANSFER_READ => vk::ImageUsageFlags::TRANSFER_SRC,
+                vk::AccessFlags::TRANSFER_WRITE => vk::ImageUsageFlags::TRANSFER_DST,
                 _ => unreachable!(),
             })
         })
diff --git a/chain/src/schedule/family.rs b/chain/src/schedule/family.rs
index b95d40fd..5d5bf246 100644
--- a/chain/src/schedule/family.rs
+++ b/chain/src/schedule/family.rs
@@ -5,19 +5,19 @@ use super::{
 
 /// Family id value.
 #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub struct FamilyId(pub u32);
+pub struct FamilyIndex(pub u32);
 
 /// Instances of this type contains array of `Queue`s.
 /// All contained queues has identical capabilities.
 #[derive(Clone, Debug)]
 pub struct Family<S> {
-    id: FamilyId,
+    id: FamilyIndex,
     queues: Vec<Queue<S>>,
 }
 
 impl<S> Family<S> {
     /// Create new empty `Family`
-    pub fn new(id: FamilyId) -> Self {
+    pub fn new(id: FamilyIndex) -> Self {
         Family {
             id,
             queues: Vec::default(),
@@ -25,7 +25,7 @@ impl<S> Family<S> {
     }
 
     /// Get id of the family.
-    pub fn id(&self) -> FamilyId {
+    pub fn id(&self) -> FamilyIndex {
         self.id
     }
 
diff --git a/chain/src/schedule/mod.rs b/chain/src/schedule/mod.rs
index 71732f1a..d504a29e 100644
--- a/chain/src/schedule/mod.rs
+++ b/chain/src/schedule/mod.rs
@@ -14,8 +14,9 @@ mod submission;
 
 use std::ops::{Index, IndexMut};
 
+#[allow(unreachable_pub)]
 pub use self::{
-    family::{Family, FamilyId},
+    family::{Family, FamilyIndex},
     queue::{Queue, QueueId},
     submission::{Submission, SubmissionId},
 };
@@ -25,7 +26,7 @@ use fnv::FnvHashMap;
 /// Whole passes schedule.
 #[derive(Clone, Debug)]
 pub struct Schedule<S> {
-    map: FnvHashMap<FamilyId, Family<S>>,
+    map: FnvHashMap<FamilyIndex, Family<S>>,
     ordered: Vec<SubmissionId>,
 }
 
@@ -71,12 +72,12 @@ impl<S> Schedule<S> {
     }
 
     /// Get reference to `Family` instance by the id.
-    pub fn family(&self, fid: FamilyId) -> Option<&Family<S>> {
+    pub fn family(&self, fid: FamilyIndex) -> Option<&Family<S>> {
         self.map.get(&fid)
     }
 
     /// Get mutable reference to `Family` instance by the id.
-    pub fn family_mut(&mut self, fid: FamilyId) -> Option<&mut Family<S>> {
+    pub fn family_mut(&mut self, fid: FamilyIndex) -> Option<&mut Family<S>> {
         self.map.get_mut(&fid)
     }
 
@@ -112,7 +113,7 @@ impl<S> Schedule<S> {
 
     /// Get mutable reference to `Family` instance by the id.
     /// This function will add empty `Family` if id is not present.
-    fn ensure_family(&mut self, fid: FamilyId) -> &mut Family<S> {
+    fn ensure_family(&mut self, fid: FamilyIndex) -> &mut Family<S> {
         self.map.entry(fid).or_insert_with(|| Family::new(fid))
     }
 
diff --git a/chain/src/schedule/queue.rs b/chain/src/schedule/queue.rs
index 32bec53d..4080a69a 100644
--- a/chain/src/schedule/queue.rs
+++ b/chain/src/schedule/queue.rs
@@ -1,5 +1,5 @@
 use super::{
-    family::FamilyId,
+    family::FamilyIndex,
     submission::{Submission, SubmissionId},
 };
 
@@ -7,7 +7,7 @@ use super::{
 #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub struct QueueId {
     /// Family id of the queue.
-    pub family: FamilyId,
+    pub family: FamilyIndex,
 
     /// Index of the queue.
     pub index: usize,
@@ -15,7 +15,7 @@ pub struct QueueId {
 
 impl QueueId {
     /// Create queue id from family id and index.
-    pub fn new(family: FamilyId, index: usize) -> Self {
+    pub fn new(family: FamilyIndex, index: usize) -> Self {
         QueueId {
             family: family,
             index,
@@ -23,7 +23,7 @@ impl QueueId {
     }
 
     /// Get family id.
-    pub fn family(&self) -> FamilyId {
+    pub fn family(&self) -> FamilyIndex {
         self.family
     }
 
diff --git a/chain/src/schedule/submission.rs b/chain/src/schedule/submission.rs
index bd83910a..a8d63d0b 100644
--- a/chain/src/schedule/submission.rs
+++ b/chain/src/schedule/submission.rs
@@ -1,6 +1,6 @@
 use fnv::FnvHashMap;
 
-use super::{family::FamilyId, queue::QueueId};
+use super::{family::FamilyIndex, queue::QueueId};
 use Id;
 
 /// Submission id.
@@ -20,7 +20,7 @@ impl SubmissionId {
     }
 
     /// Get family id.
-    pub fn family(&self) -> FamilyId {
+    pub fn family(&self) -> FamilyIndex {
         self.queue.family()
     }
 
@@ -52,7 +52,7 @@ impl<S> Submission<S> {
         self.node
     }
 
-    /// Get synchronization for `Submission`.
+    /// Get id of the `Submission`.
     pub fn id(&self) -> SubmissionId {
         self.id
     }
@@ -78,7 +78,13 @@ impl<S> Submission<S> {
     }
 
     /// Create new submission with specified pass.
-    pub(crate) fn new(node: usize, wait_factor: usize, submit_order: usize, id: SubmissionId, sync: S) -> Self {
+    pub(crate) fn new(
+        node: usize,
+        wait_factor: usize,
+        submit_order: usize,
+        id: SubmissionId,
+        sync: S,
+    ) -> Self {
         Submission {
             node,
             resource_links: FnvHashMap::default(),
diff --git a/chain/src/stage.rs b/chain/src/stage.rs
index 860cddfe..efe1ff85 100644
--- a/chain/src/stage.rs
+++ b/chain/src/stage.rs
@@ -1,89 +1,5 @@
-bitflags! {
-    /// Pipeline stages flags.
-    /// See Vulkan docs for detailed info:
-    /// <https://www.khronos.org/registry/vulkan/specs/1.1/html/vkspec.html#synchronization-pipeline-stages>
-    /// Man page: <https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/VkPipelineStageFlagBits.html>
-    #[repr(transparent)]
-    pub struct PipelineStageFlags: u32 {
-        /// Specifies the stage of the pipeline where any commands are initially received by the queue.
-        const TOP_OF_PIPE = 0x00000001;
-
-        /// Specifies the stage of the pipeline where Draw/DispatchIndirect data structures are consumed. This stage also includes reading commands written by vkCmdProcessCommandsNVX.
-        const DRAW_INDIRECT = 0x00000002;
-
-        /// Specifies the stage of the pipeline where vertex and index buffers are consumed.
-        const VERTEX_INPUT = 0x00000004;
-
-        /// Specifies the vertex shader stage.
-        const VERTEX_SHADER = 0x00000008;
-
-        /// Specifies the tessellation control shader stage.
-        const TESSELLATION_CONTROL_SHADER = 0x00000010;
-
-        /// Specifies the tessellation evaluation shader stage.
-        const TESSELLATION_EVALUATION_SHADER = 0x00000020;
-
-        /// Specifies the geometry shader stage.
-        const GEOMETRY_SHADER = 0x00000040;
-
-        /// Specifies the fragment shader stage.
-        const FRAGMENT_SHADER = 0x00000080;
-
-        /// Specifies the stage of the pipeline where early fragment tests
-        /// (depth and stencil tests before fragment shading) are performed.
-        /// This stage also includes subpass load operations for framebuffer attachments with a depth/stencil format.
-        const EARLY_FRAGMENT_TESTS = 0x00000100;
-
-        /// Specifies the stage of the pipeline where late fragment tests
-        /// (depth and stencil tests after fragment shading) are performed.
-        /// This stage also includes subpass store operations for framebuffer attachments with a depth/stencil format.
-        const LATE_FRAGMENT_TESTS = 0x00000200;
-
-        /// Specifies the stage of the pipeline after blending where the final color values are output from the pipeline.
-        /// This stage also includes subpass load and store operations and multisample resolve operations for framebuffer attachments with a color format.
-        const COLOR_ATTACHMENT_OUTPUT = 0x00000400;
-
-        /// Specifies the execution of a compute shader.
-        const COMPUTE_SHADER = 0x00000800;
-
-        /// Specifies the execution of copy commands.
-        /// This includes the operations resulting from all copy commands, clear commands
-        /// (with the exception of vkCmdClearAttachments), and vkCmdCopyQueryPoolResults.
-        const TRANSFER = 0x00001000;
-
-        /// Specifies the final stage in the pipeline where operations generated by all commands complete execution.
-        const BOTTOM_OF_PIPE = 0x00002000;
-
-        /// Specifies a pseudo-stage indicating execution on the host of reads/writes of device memory.
-        /// This stage is not invoked by any commands recorded in a command buffer.
-        const HOST = 0x00004000;
-
-        /// Specifies the execution of all graphics pipeline stages, and is equivalent to the logical OR of:
-        /// * STAGE_TOP_OF_PIPE
-        /// * STAGE_DRAW_INDIRECT
-        /// * STAGE_VERTEX_INPUT
-        /// * STAGE_VERTEX_SHADER
-        /// * STAGE_TESSELLATION_CONTROL_SHADER
-        /// * STAGE_TESSELLATION_EVALUATION_SHADER
-        /// * STAGE_GEOMETRY_SHADER
-        /// * STAGE_FRAGMENT_SHADER
-        /// * STAGE_EARLY_FRAGMENT_TESTS
-        /// * STAGE_LATE_FRAGMENT_TESTS
-        /// * STAGE_COLOR_ATTACHMENT_OUTPUT
-        /// * STAGE_BOTTOM_OF_PIPE
-        /// * STAGE_CONDITIONAL_RENDERING
-        const ALL_GRAPHICS = 0x00008000;
-
-        /// Is equivalent to the logical OR of every other pipeline stage flag that is supported on the queue it is used with.
-        const ALL_COMMANDS = 0x00010000;
-
-        /// Specifies the stage of the pipeline where the predicate of conditional rendering is consumed.
-        const CONDITIONAL_RENDERING_EXT = 0x00040000;
-
-        /// Specifies the stage of the pipeline where device-side generation of commands via vkCmdProcessCommandsNVX is handled.
-        const COMMAND_PROCESS_NVX = 0x00020000;
-    }
-}
+
+use ash::vk::PipelineStageFlags;
 
 /// Graphics pipeline stage.
 #[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq)]
@@ -133,9 +49,7 @@ impl From<GraphicsPipelineStage> for PipelineStageFlags {
             GraphicsPipelineStage::VertexInput => Self::VERTEX_INPUT,
             GraphicsPipelineStage::VertexShader => Self::VERTEX_SHADER,
             GraphicsPipelineStage::TessellationControlShader => Self::TESSELLATION_CONTROL_SHADER,
-            GraphicsPipelineStage::TessellationEvaluationShader => {
-                Self::TESSELLATION_EVALUATION_SHADER
-            }
+            GraphicsPipelineStage::TessellationEvaluationShader => Self::TESSELLATION_EVALUATION_SHADER,
             GraphicsPipelineStage::GeometryShader => Self::GEOMETRY_SHADER,
             GraphicsPipelineStage::EarlyFragmentTests => Self::EARLY_FRAGMENT_TESTS,
             GraphicsPipelineStage::FragmentShader => Self::FRAGMENT_SHADER,
diff --git a/chain/src/sync.rs b/chain/src/sync.rs
index 3297cea2..b89fee60 100644
--- a/chain/src/sync.rs
+++ b/chain/src/sync.rs
@@ -1,16 +1,16 @@
 //! This module provide functions for find all required synchronizations (barriers and semaphores).
 //!
 
-use fnv::FnvHashMap;
 use std::ops::{Range, RangeFrom, RangeTo};
 
-use access::AccessFlags;
+use fnv::FnvHashMap;
+use ash::vk;
+
 use chain::{Chain, Link};
-use collect::{Chains, Unsynchronized};
+use collect::Chains;
 use node::State;
 use resource::{Buffer, Image, Resource};
 use schedule::{Queue, QueueId, Schedule, SubmissionId};
-use stage::PipelineStageFlags;
 use Id;
 
 /// Semaphore identifier.
@@ -50,13 +50,13 @@ impl<S> Signal<S> {
 /// Semaphore wait info.
 /// There must be paired signal.
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub struct Wait<S>(S, PipelineStageFlags);
+pub struct Wait<S>(S, vk::PipelineStageFlags);
 
 impl<S> Wait<S> {
     /// Create waiting for specified point.
     /// At this point `Signal` must be created as well.
     /// `id` and `point` combination must be unique.
-    fn new(semaphore: S, stages: PipelineStageFlags) -> Self {
+    fn new(semaphore: S, stages: vk::PipelineStageFlags) -> Self {
         Wait(semaphore, stages)
     }
 
@@ -66,7 +66,7 @@ impl<S> Wait<S> {
     }
 
     /// Stage at which to wait.
-    pub fn stage(&self) -> PipelineStageFlags {
+    pub fn stage(&self) -> vk::PipelineStageFlags {
         self.1
     }
 }
@@ -78,7 +78,7 @@ pub struct Barrier<R: Resource> {
     pub queues: Option<Range<QueueId>>,
 
     /// State transition.
-    pub states: Range<(AccessFlags, R::Layout, PipelineStageFlags)>,
+    pub states: Range<(vk::AccessFlags, R::Layout, vk::PipelineStageFlags)>,
 }
 
 impl<R> Barrier<R>
@@ -96,18 +96,18 @@ where
         }
     }
 
-    fn transfer(queues: Range<QueueId>, states: Range<(AccessFlags, R::Layout)>) -> Self {
+    fn transfer(queues: Range<QueueId>, states: Range<(vk::AccessFlags, R::Layout)>) -> Self {
         Barrier {
             queues: Some(queues),
             states: (
                 states.start.0,
                 states.start.1,
-                PipelineStageFlags::TOP_OF_PIPE,
+                vk::PipelineStageFlags::TOP_OF_PIPE,
             )
                 ..(
                     states.end.0,
                     states.end.1,
-                    PipelineStageFlags::BOTTOM_OF_PIPE,
+                    vk::PipelineStageFlags::BOTTOM_OF_PIPE,
                 ),
         }
     }
@@ -115,22 +115,22 @@ where
     fn acquire(
         queues: Range<QueueId>,
         left: RangeFrom<R::Layout>,
-        right: RangeTo<(AccessFlags, R::Layout)>,
+        right: RangeTo<(vk::AccessFlags, R::Layout)>,
     ) -> Self {
         Self::transfer(
             queues,
-            (AccessFlags::empty(), left.start)..(right.end.0, right.end.1),
+            (vk::AccessFlags::empty(), left.start)..(right.end.0, right.end.1),
         )
     }
 
     fn release(
         queues: Range<QueueId>,
-        left: RangeFrom<(AccessFlags, R::Layout)>,
+        left: RangeFrom<(vk::AccessFlags, R::Layout)>,
         right: RangeTo<R::Layout>,
     ) -> Self {
         Self::transfer(
             queues,
-            (left.start.0, left.start.1)..(AccessFlags::empty(), right.end),
+            (left.start.0, left.start.1)..(vk::AccessFlags::empty(), right.end),
         )
     }
 }
@@ -255,10 +255,7 @@ impl SyncTemp {
 }
 
 /// Find required synchronization for all submissions in `Chains`.
-pub fn sync<F, S, W>(
-    chains: &Chains<Unsynchronized>,
-    mut new_semaphore: F,
-) -> Schedule<SyncData<S, W>>
+pub fn sync<F, S, W>(chains: &Chains, mut new_semaphore: F) -> Schedule<SyncData<S, W>>
 where
     F: FnMut() -> (S, W),
 {
diff --git a/command/Cargo.toml b/command/Cargo.toml
index 4d9931b8..f8bf4916 100644
--- a/command/Cargo.toml
+++ b/command/Cargo.toml
@@ -4,16 +4,7 @@ version = "0.1.0"
 authors = ["omni-viral <scareaangel@gmail.com>"]
 
 [dependencies]
-bitflags = "1.0"
+ash = { path = "../../ash/ash" }
 failure = "0.1"
 relevant = "0.2"
-rendy-memory = { path = "../memory" }
-rendy-resource = { path = "../resource" }
-rendy-chain = { path = "../chain" }
-gfx-hal = { git = "https://github.com/gfx-rs/gfx.git", optional = true }
-ash = { version = "0.24", optional = true }
-share = "0.1"
-
-[features]
-hal = ["gfx-hal", "rendy-memory/hal", "rendy-resource/hal"]
-vulkan = ["ash", "rendy-memory/vulkan", "rendy-resource/vulkan"]
+smallvec = "0.6"
diff --git a/command/src/buffer.rs b/command/src/buffer.rs
index 09108c64..2dbc7871 100644
--- a/command/src/buffer.rs
+++ b/command/src/buffer.rs
@@ -1,124 +1,139 @@
-//! Buffer module docs.
+//! Command buffer module docs.
 
+use ash::{version::DeviceV1_0, vk};
 use relevant::Relevant;
-use std::fmt::Debug;
+use std::borrow::Borrow;
 
-use device::CommandBuffer;
-use encoder::Encoder;
-use family::FamilyId;
-use frame::FrameBound;
+use crate::{capability::Capability, family::FamilyIndex};
 
 /// Command buffers of this level can be submitted to the command queues.
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, Default)]
 pub struct PrimaryLevel;
 
 /// Command buffers of this level can be executed as part of the primary buffers.
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, Default)]
 pub struct SecondaryLevel;
 
+/// Command buffer level.
+pub trait Level: Copy {
+    /// Get raw level value.
+    fn level(&self) -> vk::CommandBufferLevel;
+}
+
+impl Level for PrimaryLevel {
+    fn level(&self) -> vk::CommandBufferLevel {
+        vk::CommandBufferLevel::PRIMARY
+    }
+}
+
+impl Level for SecondaryLevel {
+    fn level(&self) -> vk::CommandBufferLevel {
+        vk::CommandBufferLevel::SECONDARY
+    }
+}
+
+impl Level for vk::CommandBufferLevel {
+    fn level(&self) -> vk::CommandBufferLevel {
+        *self
+    }
+}
+
 /// This flag specify that buffer can be reset individually.
-/// Without this flag buffer can be reset only together with all other buffers from pool.
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, Default)]
 pub struct IndividualReset;
 
+/// This flag specifies that the buffer cannot be reset individually.
+#[derive(Clone, Copy, Debug, Default)]
+pub struct NoIndividualReset;
+
+/// Specifies the command pool creation flags required for the desired buffer reset behavior.
+pub trait Reset: Copy {
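+    /// Get flags for command pool creation.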
+    fn flags(&self) -> vk::CommandPoolCreateFlags;
+}
+
+impl Reset for IndividualReset {
+    fn flags(&self) -> vk::CommandPoolCreateFlags {
+        vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER
+    }
+}
+
+impl Reset for NoIndividualReset {
+    fn flags(&self) -> vk::CommandPoolCreateFlags {
+        vk::CommandPoolCreateFlags::empty()
+    }
+}
+
 /// Command buffer state in which all buffers start.
 /// Resetting also moves buffer to this state.
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, Default)]
 pub struct InitialState;
 
 /// Command buffer in recording state could be populated with commands.
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, Default)]
 pub struct RecordingState<U>(U);
 
 /// Command buffer in executable state can be submitted.
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, Default)]
 pub struct ExecutableState<U>(U);
 
 /// Command buffer in pending state are submitted to the device.
-/// Buffer in pending state must never be invalidated or reset because device may read it at the moment.
+/// Command buffer in pending state must never be invalidated or reset because device may read it at the moment.
 /// Proving device is done with buffer requires nontrivial strategies.
 /// Therefore moving buffer from pending state requires `unsafe` method.
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, Default)]
 pub struct PendingState<N>(N);
 
 /// One-shot buffers move to invalid state after execution.
 /// Invalidating any resource referenced in any command recorded to the buffer implicitly move it to the invalid state.
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, Default)]
 pub struct InvalidState;
 
-/// States in which command buffer can be destroyed.
-pub trait Droppable {}
-impl Droppable for InitialState {}
-impl<U> Droppable for RecordingState<U> {}
-impl<U> Droppable for ExecutableState<U> {}
-impl Droppable for InvalidState {}
-
 /// States in which command buffer can de reset.
-pub trait Resettable: Droppable {}
+pub trait Resettable {}
+impl Resettable for InitialState {}
 impl<U> Resettable for RecordingState<U> {}
 impl<U> Resettable for ExecutableState<U> {}
 impl Resettable for InvalidState {}
 
-/// Buffer with this usage flag will move to invalid state after execution.
+/// Command buffer with this usage flag will move to invalid state after execution.
 /// Resubmitting will require reset and rerecording commands.
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, Default)]
 pub struct OneShot;
 
-/// Buffer with this usage flag will move back to executable state after execution.
-#[derive(Clone, Copy, Debug)]
-pub struct MultiShot<S = ()>(S);
+/// Command buffer with this usage flag will move back to executable state after execution.
+#[derive(Clone, Copy, Debug, Default)]
+pub struct MultiShot<S = ()>(pub S);
 
 /// Additional flag for `MultiShot` that allows to resubmit buffer in pending state.
 /// Note that resubmitting pending buffers can hurt performance.
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, Default)]
 pub struct SimultaneousUse;
 
 /// Buffers with this usage flag must be secondary buffers executed entirely in render-pass.
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, Default)]
 pub struct RenderPassContinue;
 
-bitflags!{
-    /// Bitmask specifying usage behavior for command buffer
-    /// See Vulkan docs for detailed info:
-    /// <https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/VkCommandBufferUsageFlagBits.html>
-    #[repr(transparent)]
-    pub struct UsageFlags: u32 {
-        /// Specifies that each recording of the command buffer will only be submitted once,
-        /// and the command buffer will be reset and recorded again between each submission.
-        const ONE_TIME_SUBMIT = 0x00000001;
-
-        /// Specifies that a secondary command buffer is considered to be entirely inside a render pass.
-        /// If this is a primary command buffer, then this bit is ignored.
-        const RENDER_PASS_CONTINUE = 0x00000002;
-
-        /// Specifies that a command buffer can be resubmitted to a queue while it is in the pending state,
-        /// and recorded into multiple primary command buffers.
-        const SIMULTANEOUS_USE = 0x00000004;
-    }
-}
-
 /// Trait implemented by all usage types.
 pub trait Usage {
-    /// State in which command buffer moves after completion.
-
-    fn flags(&self) -> UsageFlags;
+    /// Get usage flags to pass in `CommandBufferBeginInfo`.
+    fn flags(&self) -> vk::CommandBufferUsageFlags;
 }
 
 impl Usage for OneShot {
-    fn flags(&self) -> UsageFlags {
-        UsageFlags::ONE_TIME_SUBMIT
+    fn flags(&self) -> vk::CommandBufferUsageFlags {
+        vk::CommandBufferUsageFlags::ONE_TIME_SUBMIT
     }
 }
 
 impl Usage for MultiShot {
-    fn flags(&self) -> UsageFlags {
-        UsageFlags::empty()
+    fn flags(&self) -> vk::CommandBufferUsageFlags {
+        vk::CommandBufferUsageFlags::empty()
     }
 }
 
 impl Usage for MultiShot<SimultaneousUse> {
-    fn flags(&self) -> UsageFlags {
-        UsageFlags::SIMULTANEOUS_USE
+    fn flags(&self) -> vk::CommandBufferUsageFlags {
+        vk::CommandBufferUsageFlags::SIMULTANEOUS_USE
     }
 }
 
@@ -126,143 +141,249 @@ impl Usage for MultiShot<SimultaneousUse> {
 /// This wrapper defines state with usage, level and ability to be individually reset at type level.
 /// This way many methods become safe.
 #[derive(Debug)]
-pub struct Buffer<B, C, S, L, R = ()> {
-    inner: B,
+pub struct CommandBuffer<C, S, L = PrimaryLevel, R = NoIndividualReset> {
+    raw: vk::CommandBuffer,
     capability: C,
     state: S,
     level: L,
     reset: R,
-    family: FamilyId,
+    family: FamilyIndex,
     relevant: Relevant,
 }
 
-impl<B, C, R> Buffer<B, C, InitialState, PrimaryLevel, R> {
+impl<C, S, L, R> CommandBuffer<C, S, L, R> {
+    /// Wrap raw buffer handle.
+    ///
+    /// # Safety
+    ///
+    /// * `raw` must be a valid command buffer handle.
+    /// * `capability` must be a subset of the `family` capability.
+    /// * `state` must represent the actual state the buffer is currently in.
+    /// * The command buffer must be allocated with the specified `level`.
+    /// * If `reset` is `IndividualReset` then the buffer must be allocated from a pool created with the `IndividualReset` marker.
+    /// * The command buffer must be allocated from a pool created for `family`.
+    pub unsafe fn from_raw(
+        raw: vk::CommandBuffer,
+        capability: C,
+        state: S,
+        level: L,
+        reset: R,
+        family: FamilyIndex,
+    ) -> Self {
+        CommandBuffer {
+            raw,
+            capability,
+            state,
+            level,
+            reset,
+            family,
+            relevant: Relevant,
+        }
+    }
+
+    /// Get raw command buffer handle.
+    ///
+    /// # Safety
+    ///
+    /// * Valid usage rules for the command buffer must not be violated.
+    ///   In particular, the command buffer must not change its state,
+    ///   or `change_state` must be used afterwards to reflect the accumulated change.
+    pub unsafe fn raw(&self) -> vk::CommandBuffer {
+        self.raw
+    }
+
+    /// Consume this wrapper and get the raw command buffer handle.
+    ///
+    /// # Safety
+    ///
+    /// * Valid usage rules for the command buffer must not be violated.
+    pub unsafe fn into_raw(self) -> vk::CommandBuffer {
+        self.relevant.dispose();
+        self.raw
+    }
+
+    /// Change state of the command buffer.
+    ///
+    /// # Safety
+    ///
+    /// * This method must be used only to reflect state changed due to raw handle usage.
+    pub unsafe fn change_state<U>(self, f: impl FnOnce(S) -> U) -> CommandBuffer<C, U, L, R> {
+        CommandBuffer {
+            raw: self.raw,
+            capability: self.capability,
+            state: f(self.state),
+            level: self.level,
+            reset: self.reset,
+            family: self.family,
+            relevant: self.relevant,
+        }
+    }
+
+    /// Get the buffer's capability.
+    pub fn capability(&self) -> C
+    where
+        C: Capability,
+    {
+        self.capability
+    }
+}
+
+impl<C, R> CommandBuffer<C, InitialState, PrimaryLevel, R> {
     /// Begin recording command buffer.
     ///
     /// # Parameters
     ///
     /// `usage` - specifies usage of the command buffer. Possible types are `OneShot`, `MultiShot`.
-    pub fn begin<U>(self, usage: U) -> Buffer<B, C, RecordingState<U>, PrimaryLevel, R>
+    pub fn begin<U>(
+        self,
+        device: &impl DeviceV1_0,
+        usage: U,
+    ) -> CommandBuffer<C, RecordingState<U>, PrimaryLevel, R>
+    where
+        U: Usage,
+    {
+        unsafe {
+            device
+                .begin_command_buffer(
+                    self.raw,
+                    &vk::CommandBufferBeginInfo::builder()
+                        .flags(usage.flags())
+                        .build(),
+                ).expect("Panic on OOM");
+
+            self.change_state(|_| RecordingState(usage))
+        }
+    }
+}
+
+impl<C, U, R> CommandBuffer<C, RecordingState<U>, PrimaryLevel, R> {
+    /// Finish recording command buffer.
+    pub fn finish(
+        self,
+        device: &impl DeviceV1_0,
+    ) -> CommandBuffer<C, ExecutableState<U>, PrimaryLevel, R>
     where
         U: Usage,
     {
-        unimplemented!()
+        unsafe {
+            device.end_command_buffer(self.raw).expect("Panic on OOM");
+            self.change_state(|RecordingState(usage)| ExecutableState(usage))
+        }
     }
 }
 
 /// Structure contains command buffer ready for submission.
 #[derive(Debug)]
-pub struct Submit<S> {
-    raw: S,
-    family: FamilyId,
+#[allow(missing_copy_implementations)]
+pub struct Submit {
+    raw: vk::CommandBuffer,
+    family: FamilyIndex,
 }
 
-impl<S> Submit<S> {
+impl Submit {
     /// Get family this submit is associated with.
-    pub fn family(&self) -> FamilyId {
+    pub fn family(&self) -> FamilyIndex {
         self.family
     }
 
-    /// Unwrap inner submit value.
-    pub fn into_inner(self) -> S {
+    /// Get raw command buffer.
+    pub fn raw(&self) -> vk::CommandBuffer {
         self.raw
     }
 }
 
-impl<B, C, R> Buffer<B, C, ExecutableState<OneShot>, PrimaryLevel, R>
-where
-    B: CommandBuffer,
-{
+impl<C, S, R> CommandBuffer<C, ExecutableState<S>, PrimaryLevel, R> {
     /// produce `Submit` object that can be used to populate submission.
     pub fn submit_once(
         self,
     ) -> (
-        Submit<B::Submit>,
-        Buffer<B, C, PendingState<InvalidState>, PrimaryLevel, R>,
+        Submit,
+        CommandBuffer<C, PendingState<InvalidState>, PrimaryLevel, R>,
     ) {
-        unimplemented!()
+        let buffer = unsafe { self.change_state(|_| PendingState(InvalidState)) };
+
+        let submit = Submit {
+            raw: buffer.raw,
+            family: buffer.family,
+        };
+
+        (submit, buffer)
     }
 }
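+
+// A minimal sketch of driving the typestate API above for a one-shot primary
+// command buffer. The helper name is illustrative; actual commands would be
+// recorded through the raw handle between `begin` and `finish`.
+#[allow(unused)]
+fn record_one_shot<C>(
+    device: &impl DeviceV1_0,
+    buffer: CommandBuffer<C, InitialState, PrimaryLevel, NoIndividualReset>,
+) -> (
+    Submit,
+    CommandBuffer<C, PendingState<InvalidState>, PrimaryLevel, NoIndividualReset>,
+) {
+    let recording = buffer.begin(device, OneShot);
+    // Commands would be recorded here via the raw handle.
+    let executable = recording.finish(device);
+    // The caller submits `Submit::raw()` to a queue and, once a fence guarding that
+    // submission is signaled, may call `complete` (unsafe) on the returned pending buffer.
+    executable.submit_once()
+}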
 
-impl<B, C, S, R> Buffer<B, C, ExecutableState<MultiShot<S>>, PrimaryLevel, R>
-where
-    B: CommandBuffer,
-{
+impl<C, S, R> CommandBuffer<C, ExecutableState<MultiShot<S>>, PrimaryLevel, R> {
     /// Produce `Submit` object that can be used to populate submission.
     pub fn submit(
         self,
     ) -> (
-        Submit<B::Submit>,
-        Buffer<B, C, PendingState<ExecutableState<MultiShot<S>>>, PrimaryLevel, R>,
+        Submit,
+        CommandBuffer<C, PendingState<ExecutableState<MultiShot<S>>>, PrimaryLevel, R>,
     ) {
-        unimplemented!()
+        let buffer = unsafe { self.change_state(|state| PendingState(state)) };
+
+        let submit = Submit {
+            raw: buffer.raw,
+            family: buffer.family,
+        };
+
+        (submit, buffer)
     }
 }
 
-impl<B, C, N, L, R> Buffer<B, C, PendingState<N>, L, R> {
+impl<C, N, L, R> CommandBuffer<C, PendingState<N>, L, R> {
     /// Mark command buffer as complete.
     ///
     /// # Safety
     ///
-    /// User must ensure that recorded commands are complete.
-    pub unsafe fn complete(self) -> Buffer<B, C, N, L, R> {
-        unimplemented!()
+    /// * Commands recorded to this buffer must be complete.
+    ///   Normally the command buffer moves to this state when the [`Submit`] object is created.
+    ///   To ensure that recorded commands are complete, one can [wait] for the [`Fence`] specified
+    ///   when [submitting] the created [`Submit`] object, or in a later submission to the same queue.
+    ///
+    /// [`Submit`]: struct.Submit.html
+    /// [wait]: ../ash/version/trait.DeviceV1_0.html#method.wait_for_fences
+    /// [`Fence`]: ../ash/vk/struct.Fence.html
+    /// [submitting]: ../ash/version/trait.DeviceV1_0.html#method.queue_submit
+    pub unsafe fn complete(self) -> CommandBuffer<C, N, L, R> {
+        self.change_state(|PendingState(state)| state)
+    }
+
+    /// Release command buffer.
+    ///
+    /// # Safety
+    ///
+    /// * It must be owned by `OwningCommandPool`.
+    /// TODO: Use lifetimes to tie `CommandBuffer` to `OwningCommandPool`.
+    pub unsafe fn release(self) {
+        self.relevant.dispose();
     }
 }
 
-impl<B, C, S, L> Buffer<B, C, S, L, IndividualReset>
+impl<C, S, L> CommandBuffer<C, S, L, IndividualReset>
 where
     S: Resettable,
 {
     /// Reset command buffer.
-    pub fn reset(self) -> Buffer<B, C, InitialState, L, IndividualReset> {
-        unimplemented!()
+    pub fn reset(self) -> CommandBuffer<C, InitialState, L, IndividualReset> {
+        unsafe { self.change_state(|_| InitialState) }
     }
 }
 
-impl<B, C, S, L> Buffer<B, C, S, L>
+impl<C, S, L> CommandBuffer<C, S, L>
 where
     S: Resettable,
 {
-    /// Reset command buffer.
+    /// Mark command buffer as reset.
     ///
     /// # Safety
     ///
-    /// Mark command buffer as reset.
-    /// User must reset buffer via command pool and call this method for all commands buffers affected.
-    pub unsafe fn mark_reset(self) -> Buffer<B, C, InitialState, L> {
-        unimplemented!()
-    }
-}
-
-impl<B, C, U, L, R> Encoder<C> for Buffer<B, C, RecordingState<U>, L, R>
-where
-    B: CommandBuffer,
-{
-    type Buffer = B;
-
-    unsafe fn buffer(&mut self) -> &mut B {
-        &mut self.inner
-    }
-}
-
-impl<'a, F: 'a, B> CommandBuffer for FrameBound<'a, F, B>
-where
-    B: CommandBuffer,
-    F: Debug,
-{
-    type Submit = FrameBound<'a, F, B::Submit>;
-
-    unsafe fn submit(&self) -> FrameBound<'a, F, B::Submit> {
-        FrameBound::bind(self.inner_ref().submit(), self.frame())
-    }
-}
-
-impl<'a, F: 'a, B, S, L, C> Buffer<FrameBound<'a, F, B>, C, S, L> {
-    /// Release borrowed buffer. This allows to acquire next buffer from pool.
-    /// Whatever state this buffer was in it will be reset only after bounded frame is complete.
-    /// This allows safely to release borrowed buffer in pending state.
-    pub fn release(self) {
-        unimplemented!()
+    /// * This function must be used only to reflect the command buffer being reset implicitly.
+    /// For instance by:
+    /// * [`CommandPool::reset`](struct.CommandPool.html#method.reset) on the pool from which the command buffer was allocated.
+    /// * Raw handle usage.
+    pub unsafe fn mark_reset(self) -> CommandBuffer<C, InitialState, L> {
+        self.change_state(|_| InitialState)
     }
 }
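
Not part of the patch, but a rough sketch of the intended typestate flow may help review. It assumes a `pool: CommandPool<Graphics, IndividualReset>` and a `device: impl DeviceV1_0` are in scope, that `begin` takes `(device, usage)` as the first hunk suggests, and that `InvalidState` implements `Resettable`:

```rust
// Hypothetical flow; `pool`, `device`, and the fence handling are assumptions.
let mut buffers = pool.allocate_buffers(&device, PrimaryLevel, 1);
let buffer = buffers.remove(0);              // CommandBuffer<_, InitialState, PrimaryLevel, _>
let buffer = buffer.begin(&device, OneShot); // InitialState -> RecordingState<OneShot>
// ... record commands through an `Encoder` here ...
let buffer = buffer.finish(&device);         // RecordingState -> ExecutableState
let (submit, buffer) = buffer.submit_once(); // ExecutableState -> PendingState<InvalidState>
// `submit.raw()` is what goes into `vk::SubmitInfo`; once the fence passed to
// `queue_submit` is signaled, the buffer can be recycled:
let buffer = unsafe { buffer.complete() };   // PendingState -> InvalidState
let _buffer = buffer.reset();                // back to InitialState (IndividualReset pools only)
```
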
diff --git a/command/src/capability.rs b/command/src/capability.rs
index 23537a95..6ecd4dfc 100644
--- a/command/src/capability.rs
+++ b/command/src/capability.rs
@@ -1,29 +1,6 @@
 //! Capability module docs.
 
-use chain::PipelineStageFlags;
-
-bitflags! {
-    /// Bitmask specifying capabilities of queues in a queue family.
-    /// See Vulkan docs for detailed info:
-    /// <https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/VkQueueFlagBits.html>
-    #[repr(transparent)]
-    pub struct CapabilityFlags: u32 {
-        /// Queues from families with this capability flag set are able to perform graphics commands.
-        const GRAPHICS = 0x00000001;
-
-        /// Queues from families with this capability flag set are able to perform compute commands.
-        const COMPUTE = 0x00000002;
-
-        /// Queues from families with this capability flag set are able to perform transfer commands.
-        const TRANSFER = 0x00000004;
-
-        /// ???
-        const SPARSE_BINDING = 0x00000008;
-
-        /// ???
-        const PROTECTED = 0x00000010;
-    }
-}
+use ash::vk;
 
 /// Capable of transfer only.
 #[derive(Clone, Copy, Debug)]
@@ -49,94 +26,94 @@ pub struct General;
 pub trait Capability: Copy {
     /// Try to create capability instance from flags.
     /// Instance will be created if all required flags set.
-    fn from_flags(flags: CapabilityFlags) -> Option<Self>;
+    fn from_flags(flags: vk::QueueFlags) -> Option<Self>;
 
-    /// Convert into `CapabilityFlags`
-    fn into_flags(self) -> CapabilityFlags;
+    /// Convert into `vk::QueueFlags`
+    fn into_flags(self) -> vk::QueueFlags;
 }
 
-impl Capability for CapabilityFlags {
-    fn from_flags(flags: CapabilityFlags) -> Option<Self> {
+impl Capability for vk::QueueFlags {
+    fn from_flags(flags: vk::QueueFlags) -> Option<Self> {
         Some(flags)
     }
 
-    fn into_flags(self) -> CapabilityFlags {
+    fn into_flags(self) -> vk::QueueFlags {
         self
     }
 }
 
 impl Capability for Transfer {
-    fn from_flags(flags: CapabilityFlags) -> Option<Self> {
-        if flags.contains(CapabilityFlags::TRANSFER) {
+    fn from_flags(flags: vk::QueueFlags) -> Option<Self> {
+        if flags.subset(vk::QueueFlags::TRANSFER) {
             Some(Transfer)
         } else {
             None
         }
     }
 
-    fn into_flags(self) -> CapabilityFlags {
-        CapabilityFlags::TRANSFER
+    fn into_flags(self) -> vk::QueueFlags {
+        vk::QueueFlags::TRANSFER
     }
 }
 
 impl Capability for Execute {
-    fn from_flags(flags: CapabilityFlags) -> Option<Self> {
-        if flags.intersects(CapabilityFlags::COMPUTE | CapabilityFlags::GRAPHICS) {
+    fn from_flags(flags: vk::QueueFlags) -> Option<Self> {
+        if flags.intersects(vk::QueueFlags::COMPUTE | vk::QueueFlags::GRAPHICS) {
             Some(Execute)
         } else {
             None
         }
     }
 
-    fn into_flags(self) -> CapabilityFlags {
-        CapabilityFlags::COMPUTE | CapabilityFlags::GRAPHICS
+    fn into_flags(self) -> vk::QueueFlags {
+        vk::QueueFlags::COMPUTE | vk::QueueFlags::GRAPHICS
     }
 }
 
 impl Capability for Compute {
-    fn from_flags(flags: CapabilityFlags) -> Option<Self> {
-        if flags.contains(CapabilityFlags::COMPUTE) {
+    fn from_flags(flags: vk::QueueFlags) -> Option<Self> {
+        if flags.subset(vk::QueueFlags::COMPUTE) {
             Some(Compute)
         } else {
             None
         }
     }
 
-    fn into_flags(self) -> CapabilityFlags {
-        CapabilityFlags::COMPUTE
+    fn into_flags(self) -> vk::QueueFlags {
+        vk::QueueFlags::COMPUTE
     }
 }
 
 impl Capability for Graphics {
-    fn from_flags(flags: CapabilityFlags) -> Option<Self> {
-        if flags.contains(CapabilityFlags::GRAPHICS) {
+    fn from_flags(flags: vk::QueueFlags) -> Option<Self> {
+        if flags.subset(vk::QueueFlags::GRAPHICS) {
             Some(Graphics)
         } else {
             None
         }
     }
 
-    fn into_flags(self) -> CapabilityFlags {
-        CapabilityFlags::GRAPHICS
+    fn into_flags(self) -> vk::QueueFlags {
+        vk::QueueFlags::GRAPHICS
     }
 }
 
 impl Capability for General {
-    fn from_flags(flags: CapabilityFlags) -> Option<Self> {
-        if flags.contains(CapabilityFlags::GRAPHICS | CapabilityFlags::COMPUTE) {
+    fn from_flags(flags: vk::QueueFlags) -> Option<Self> {
+        if flags.subset(vk::QueueFlags::GRAPHICS | vk::QueueFlags::COMPUTE) {
             Some(General)
         } else {
             None
         }
     }
 
-    fn into_flags(self) -> CapabilityFlags {
-        CapabilityFlags::GRAPHICS | CapabilityFlags::COMPUTE
+    fn into_flags(self) -> vk::QueueFlags {
+        vk::QueueFlags::GRAPHICS | vk::QueueFlags::COMPUTE
     }
 }
 
 /// Check if capability supported.
-pub trait Supports<C> {
+pub trait Supports<C>: Capability {
     /// Check runtime capability.
     fn supports(&self) -> Option<C>;
 }
@@ -207,72 +184,71 @@ impl Supports<Graphics> for General {
     }
 }
 
-impl Supports<Transfer> for CapabilityFlags {
+impl Supports<Transfer> for vk::QueueFlags {
     fn supports(&self) -> Option<Transfer> {
         Transfer::from_flags(*self)
     }
 }
 
-impl Supports<Execute> for CapabilityFlags {
+impl Supports<Execute> for vk::QueueFlags {
     fn supports(&self) -> Option<Execute> {
         Execute::from_flags(*self)
     }
 }
 
-impl Supports<Compute> for CapabilityFlags {
+impl Supports<Compute> for vk::QueueFlags {
     fn supports(&self) -> Option<Compute> {
         Compute::from_flags(*self)
     }
 }
 
-impl Supports<Graphics> for CapabilityFlags {
+impl Supports<Graphics> for vk::QueueFlags {
     fn supports(&self) -> Option<Graphics> {
         Graphics::from_flags(*self)
     }
 }
 
 /// Get capabilities required by pipeline stages.
-pub fn required_queue_capability(stages: PipelineStageFlags) -> CapabilityFlags {
-    let mut capability = CapabilityFlags::empty();
-    if stages.contains(PipelineStageFlags::DRAW_INDIRECT) {
-        capability |= CapabilityFlags::GRAPHICS | CapabilityFlags::COMPUTE;
+pub fn required_queue_capability(stages: vk::PipelineStageFlags) -> vk::QueueFlags {
+    let mut capability = vk::QueueFlags::empty();
+    if stages.subset(vk::PipelineStageFlags::DRAW_INDIRECT) {
+        capability |= vk::QueueFlags::GRAPHICS | vk::QueueFlags::COMPUTE;
     }
-    if stages.contains(PipelineStageFlags::VERTEX_INPUT) {
-        capability |= CapabilityFlags::GRAPHICS;
+    if stages.subset(vk::PipelineStageFlags::VERTEX_INPUT) {
+        capability |= vk::QueueFlags::GRAPHICS;
     }
-    if stages.contains(PipelineStageFlags::VERTEX_SHADER) {
-        capability |= CapabilityFlags::GRAPHICS;
+    if stages.subset(vk::PipelineStageFlags::VERTEX_SHADER) {
+        capability |= vk::QueueFlags::GRAPHICS;
     }
-    if stages.contains(PipelineStageFlags::TESSELLATION_CONTROL_SHADER) {
-        capability |= CapabilityFlags::GRAPHICS;
+    if stages.subset(vk::PipelineStageFlags::TESSELLATION_CONTROL_SHADER) {
+        capability |= vk::QueueFlags::GRAPHICS;
     }
-    if stages.contains(PipelineStageFlags::TESSELLATION_EVALUATION_SHADER) {
-        capability |= CapabilityFlags::GRAPHICS;
+    if stages.subset(vk::PipelineStageFlags::TESSELLATION_EVALUATION_SHADER) {
+        capability |= vk::QueueFlags::GRAPHICS;
     }
-    if stages.contains(PipelineStageFlags::GEOMETRY_SHADER) {
-        capability |= CapabilityFlags::GRAPHICS;
+    if stages.subset(vk::PipelineStageFlags::GEOMETRY_SHADER) {
+        capability |= vk::QueueFlags::GRAPHICS;
     }
-    if stages.contains(PipelineStageFlags::FRAGMENT_SHADER) {
-        capability |= CapabilityFlags::GRAPHICS;
+    if stages.subset(vk::PipelineStageFlags::FRAGMENT_SHADER) {
+        capability |= vk::QueueFlags::GRAPHICS;
     }
-    if stages.contains(PipelineStageFlags::EARLY_FRAGMENT_TESTS) {
-        capability |= CapabilityFlags::GRAPHICS;
+    if stages.subset(vk::PipelineStageFlags::EARLY_FRAGMENT_TESTS) {
+        capability |= vk::QueueFlags::GRAPHICS;
     }
-    if stages.contains(PipelineStageFlags::LATE_FRAGMENT_TESTS) {
-        capability |= CapabilityFlags::GRAPHICS;
+    if stages.subset(vk::PipelineStageFlags::LATE_FRAGMENT_TESTS) {
+        capability |= vk::QueueFlags::GRAPHICS;
     }
-    if stages.contains(PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT) {
-        capability |= CapabilityFlags::GRAPHICS;
+    if stages.subset(vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT) {
+        capability |= vk::QueueFlags::GRAPHICS;
     }
-    if stages.contains(PipelineStageFlags::COMPUTE_SHADER) {
-        capability |= CapabilityFlags::COMPUTE;
+    if stages.subset(vk::PipelineStageFlags::COMPUTE_SHADER) {
+        capability |= vk::QueueFlags::COMPUTE;
     }
-    if stages.contains(PipelineStageFlags::TRANSFER) {
-        capability |=
-            CapabilityFlags::GRAPHICS | CapabilityFlags::COMPUTE | CapabilityFlags::TRANSFER;
+    if stages.subset(vk::PipelineStageFlags::TRANSFER) {
+        capability |= vk::QueueFlags::GRAPHICS | vk::QueueFlags::COMPUTE | vk::QueueFlags::TRANSFER;
     }
-    if stages.contains(PipelineStageFlags::ALL_GRAPHICS) {
-        capability |= CapabilityFlags::GRAPHICS;
+    if stages.subset(vk::PipelineStageFlags::ALL_GRAPHICS) {
+        capability |= vk::QueueFlags::GRAPHICS;
     }
     capability
 }
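
A small illustrative sketch of how the flag-based capabilities are meant to be used; only items from this file plus `ash::vk` are assumed:

```rust
use ash::vk;

// Raw flags as reported by the device for some queue family.
let flags = vk::QueueFlags::GRAPHICS | vk::QueueFlags::COMPUTE | vk::QueueFlags::TRANSFER;

// Typed capabilities are recovered with `Capability::from_flags`.
assert!(Graphics::from_flags(flags).is_some());
assert!(General::from_flags(flags).is_some());

// `required_queue_capability` maps pipeline stages back onto queue requirements.
let required = required_queue_capability(vk::PipelineStageFlags::COMPUTE_SHADER);
assert!(flags.subset(required));
```
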
diff --git a/command/src/device.rs b/command/src/device.rs
deleted file mode 100644
index dc7ad454..00000000
--- a/command/src/device.rs
+++ /dev/null
@@ -1,94 +0,0 @@
-//! Device module docs.
-
-use std::{borrow::Borrow, fmt::Debug};
-
-use resource;
-use fence::FenceCreateInfo;
-
-/// Abstract logical device.
-/// It inherits methods to allocate memory and create resources.
-pub trait Device: resource::Device {
-    /// Semaphore type that can be used with this device.
-    type Semaphore: Debug + 'static;
-
-    /// Fence type that can be used with this device.
-    type Fence: Debug + 'static;
-
-    /// Finished command buffer that can be submitted to the queues of this device.
-    type Submit: 'static;
-
-    /// Command pool type that can be used with this device.
-    type CommandPool: 'static;
-
-    /// Command buffer type that can be used with this device.
-    type CommandBuffer: CommandBuffer<Submit = Self::Submit> + 'static;
-
-    /// Command queue type that can be used with this device.
-    type CommandQueue: CommandQueue<
-            Semaphore = Self::Semaphore,
-            Fence = Self::Fence,
-            Submit = Self::Submit,
-        > + 'static;
-
-    /// Create new fence.
-    unsafe fn create_fence(&self, info: FenceCreateInfo) -> Self::Fence;
-
-    /// Reset fence.
-    unsafe fn reset_fence(&self, fence: &Self::Fence) {
-        self.reset_fences(Some(fence))
-    }
-
-    /// Reset multiple fences at once.
-    unsafe fn reset_fences<F>(&self, fences: F)
-    where
-        F: IntoIterator,
-        F::Item: Borrow<Self::Fence>,
-    {
-        fences.into_iter().for_each(|fence| self.reset_fence(fence.borrow()));
-    }
-}
-
-/// Abstract command buffer.
-/// It defines all methods required to begin/end recording and record commands.
-pub trait CommandBuffer {
-    /// This type is `Device::CommandBuffer` of device that created pool from which this buffer is allocated.
-    /// Raw command buffer can be cloned.
-    type Submit;
-
-    /// Get submittable object.
-    /// Buffer must be in executable state.
-    unsafe fn submit(&self) -> Self::Submit;
-}
-
-impl<'a, B: 'a> CommandBuffer for &'a mut B
-where
-    B: CommandBuffer,
-{
-    type Submit = B::Submit;
-
-    unsafe fn submit(&self) -> B::Submit {
-        B::submit(&**self)
-    }
-}
-
-/// Abstract command queue.
-/// It defines methods for submitting command buffers along with semaphores and fences.
-pub trait CommandQueue {
-    /// Semaphore type that can be used with this device.
-    type Semaphore: Debug + 'static;
-
-    /// Fence type that can be used with this device.
-    type Fence: Debug + 'static;
-
-    /// Finished command buffer that can be submitted to the queue.
-    type Submit: 'static;
-}
-
-impl<'a, Q: 'a> CommandQueue for &'a mut Q
-where
-    Q: CommandQueue,
-{
-    type Semaphore = Q::Semaphore;
-    type Fence = Q::Fence;
-    type Submit = Q::Submit;
-}
diff --git a/command/src/encoder/mod.rs b/command/src/encoder/mod.rs
index 31062008..baba0cf6 100644
--- a/command/src/encoder/mod.rs
+++ b/command/src/encoder/mod.rs
@@ -3,23 +3,25 @@
 
 mod clear;
 
-pub use self::clear::*;
+use ash::vk;
+use crate::buffer::{CommandBuffer, RecordingState};
 
-use capability::CapabilityFlags;
-use device::CommandBuffer;
+pub use self::clear::*;
 
 /// Encoder allow command recording in safe-ish abstract manner.
-pub trait Encoder<C = CapabilityFlags> {
-    /// Get command buffer.
-    type Buffer: CommandBuffer;
-
-    /// Get inner raw command buffer.
+pub trait Encoder<C = vk::QueueFlags> {
+    /// Get raw command buffer.
     ///
     /// # Safety
     ///
     /// Safety of commands recording through raw buffer is covered by corresponding functions.
-    /// Yet this method is unsafe because:
-    /// * Moving out raw buffer is very unsafe and should be avoided.
-    /// * Creating copies can be safe only if copies don't outlive encoder instance.
-    unsafe fn buffer(&mut self) -> &mut Self::Buffer;
+    /// Handle must not be used outside of `Encoder` scope.
+    /// Encoder implicitly finishes buffer recording.
+    unsafe fn raw(&mut self) -> vk::CommandBuffer;
+}
+
+impl<C, U, L, R> Encoder<C> for CommandBuffer<C, RecordingState<U>, L, R> {
+    unsafe fn raw(&mut self) -> vk::CommandBuffer {
+        CommandBuffer::raw(self)
+    }
 }
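
As a sketch of what the leaner `Encoder` trait buys: recording helpers can be written against any buffer in `RecordingState`, touching the raw handle only inside an `unsafe` block. The `cmd_pipeline_barrier` call below is the stock ash method; everything else comes from this module, and the helper itself is hypothetical:

```rust
use ash::{version::DeviceV1_0, vk};

/// Illustrative helper: record a full pipeline barrier through any encoder.
fn record_full_barrier<C>(encoder: &mut impl Encoder<C>, device: &impl DeviceV1_0) {
    unsafe {
        device.cmd_pipeline_barrier(
            encoder.raw(),
            vk::PipelineStageFlags::TOP_OF_PIPE,
            vk::PipelineStageFlags::BOTTOM_OF_PIPE,
            vk::DependencyFlags::empty(),
            &[], // memory barriers
            &[], // buffer memory barriers
            &[], // image memory barriers
        );
    }
}
```
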
diff --git a/command/src/error.rs b/command/src/error.rs
deleted file mode 100644
index c51f2e49..00000000
--- a/command/src/error.rs
+++ /dev/null
@@ -1,11 +0,0 @@
-//! Error module docs.
-
-/// Error that can be returned by some functions
-/// indicating that logical device is lost.
-/// Those methods on objects created from the device will likely result in this error again.
-/// When device is lost user should free all objects created from it and destroy the device.
-/// After that user can create new device to continue.
-/// Note: physical device may be lost as well.
-#[derive(Clone, Copy, Debug, Fail)]
-#[fail(display = "Device lost. Re-initialization required")]
-pub struct DeviceLost;
diff --git a/command/src/family.rs b/command/src/family.rs
index f00e7735..a9e47387 100644
--- a/command/src/family.rs
+++ b/command/src/family.rs
@@ -1,75 +1,155 @@
 //! Family module docs.
 
-use capability::{Capability, CapabilityFlags};
-use device::Device;
-use pool::Pool;
-use queue::Queue;
+use ash::{version::DeviceV1_0, vk};
+
+use failure::Error;
+use relevant::Relevant;
+
+use crate::{
+    buffer::{Level, NoIndividualReset, Reset},
+    capability::Capability,
+    pool::{CommandPool, OwningCommandPool},
+};
 
 /// Unique family index.
 #[derive(Clone, Copy, Debug)]
-pub struct FamilyId(pub u32);
+pub struct FamilyIndex(pub u32);
 
 /// Family of the command queues.
 /// Queues from one family can share resources and execute command buffers associated with the family.
 /// All queues of the family have same capabilities.
 #[derive(Clone, Debug)]
-pub struct Family<Q, C> {
-    index: FamilyId,
-    queues: Vec<Queue<Q, C>>,
+pub struct Family<C: Capability = vk::QueueFlags> {
+    index: FamilyIndex,
+    queues: Vec<vk::Queue>,
+    min_image_transfer_granularity: vk::Extent3D,
     capability: C,
+    relevant: Relevant,
+}
+
+impl Family {
+    /// Query queue family from device.
+    ///
+    /// # Safety
+    ///
+    /// This function shouldn't be used more than once with the same parameters.
+    /// A raw queue handle queried from the device can make `Family` usage invalid.
+    /// `family` must be one of the family indices used during `device` creation.
+    /// `queues` must equal the number of queues specified for `family` during `device` creation.
+    /// `properties` must be the properties returned for this queue family by the physical device.
+    pub unsafe fn from_device(
+        device: &impl DeviceV1_0,
+        family: FamilyIndex,
+        queues: u32,
+        properties: &vk::QueueFamilyProperties,
+    ) -> Self {
+        Family {
+            index: family,
+            queues: (0..queues)
+                .map(|queue_index| device.get_device_queue(family.0, queue_index))
+                .collect(),
+            min_image_transfer_granularity: properties.min_image_transfer_granularity,
+            capability: properties.queue_flags,
+            relevant: Relevant,
+        }
+    }
 }
 
-impl<Q, C> Family<Q, C> {
+impl<C: Capability> Family<C> {
+    /// Get id of the family.
+    pub fn index(&self) -> FamilyIndex {
+        self.index
+    }
+
     /// Get queues of the family.
-    pub fn queues(&mut self) -> &mut [Queue<Q, C>] {
+    pub fn queues(&mut self) -> &mut [vk::Queue] {
         &mut self.queues
     }
 
     /// Create command pool associated with the family.
     /// Command buffers created from the pool could be submitted to the queues of the family.
-    pub fn create_pool<D, R>(&mut self, device: &mut D, reset: R) -> Pool<D::CommandPool, C, R>
+    pub fn create_pool<R>(
+        &self,
+        device: &impl DeviceV1_0,
+        reset: R,
+    ) -> Result<CommandPool<C, R>, Error>
+    where
+        R: Reset,
+    {
+        let pool = unsafe {
+            // The family must belong to the specified device.
+            let raw = device.create_command_pool(
+                &vk::CommandPoolCreateInfo::builder()
+                    .queue_family_index(self.index.0)
+                    .flags(reset.flags())
+                    .build(),
+                None,
+            )?;
+
+            CommandPool::from_raw(raw, self.capability, reset, self.index)
+        };
+
+        Ok(pool)
+    }
+
+    /// Create command pool associated with the family.
+    /// Command buffers created from the pool could be submitted to the queues of the family.
+    /// Created pool owns its command buffers.
+    pub fn create_owning_pool<L>(
+        &self,
+        device: &impl DeviceV1_0,
+        level: L,
+    ) -> Result<OwningCommandPool<C, L>, Error>
     where
-        D: Device,
+        L: Level,
     {
-        unimplemented!()
+        self.create_pool(device, NoIndividualReset)
+            .map(|pool| unsafe { OwningCommandPool::from_inner(pool, level) })
+    }
+
+    /// Get family capability.
+    pub fn capability(&self) -> C {
+        self.capability
+    }
+
+    /// Dispose of queue family container.
+    pub fn dispose(self, device: &impl DeviceV1_0) {
+        for queue in self.queues {
+            unsafe {
+                let _ = device.queue_wait_idle(queue);
+            }
+        }
+
+        self.relevant.dispose();
     }
 }
 
-impl<Q, C> Family<Q, C>
+impl<C> Family<C>
 where
     C: Capability,
 {
-    /// Convert from some `Family<Q, C>` where `C` is something that implements
+    /// Convert from some `Family<C>` where `C` is something that implements
     /// `Capability`
-    pub fn from(family: Self) -> Family<Q, CapabilityFlags> {
+    pub fn into_flags(self) -> Family<vk::QueueFlags> {
         Family {
-            index: family.index,
-            queues: family
-                .queues
-                .into_iter()
-                .map(|queue| Queue {
-                    inner: queue.inner,
-                    capability: queue.capability.into_flags(),
-                }).collect::<Vec<_>>(),
-            capability: family.capability.into_flags(),
+            index: self.index,
+            queues: self.queues,
+            min_image_transfer_granularity: self.min_image_transfer_granularity,
+            capability: self.capability.into_flags(),
+            relevant: self.relevant,
         }
     }
 
-    /// Convert into a `Family<Q, C>` where `C` something that implements
+    /// Convert into a `Family<C>` where `C` something that implements
     /// `Capability`
-    pub fn into(family: Family<Q, CapabilityFlags>) -> Option<Self> {
+    pub fn from_flags(family: Family<vk::QueueFlags>) -> Option<Self> {
         if let Some(capability) = C::from_flags(family.capability) {
             Some(Family {
                 index: family.index,
-                queues: family
-                    .queues
-                    .into_iter()
-                    .map(|queue| Queue {
-                        inner: queue.inner,
-                        capability: C::from_flags(queue.capability)
-                            .expect("Unable to convert queue capability to a CapabilityFlag"),
-                    }).collect::<Vec<_>>(),
+                queues: family.queues,
+                min_image_transfer_granularity: family.min_image_transfer_granularity,
                 capability,
+                relevant: family.relevant,
             })
         } else {
             None
@@ -77,25 +157,22 @@ where
     }
 }
 
-/// Collection of all families.
-#[derive(Clone, Debug)]
-pub struct Families<Q> {
-    families: Vec<Family<Q, CapabilityFlags>>,
-}
-
-impl<Q> Families<Q> {
-    /// Create a new Families collection that is empty
-    pub fn new() -> Self {
-        Families {
-            families: Vec::new(),
-        }
-    }
-
-    /// Add a family to the `Families<Q>` group
-    pub fn add_family<C>(&mut self, family: Family<Q, C>)
-    where
-        C: Capability,
-    {
-        self.families.push(Family::from(family));
-    }
+/// Query queue families from device.
+///
+/// # Safety
+///
+/// This function shouldn't be used more than once with the same parameters.
+/// A raw queue handle queried from the device can make the returned `Family` usage invalid.
+/// The `families` iterator must yield unique family indices with the queue counts used during `device` creation.
+/// `properties` must contain the properties returned by the physical device for each family index yielded by `families`.
+pub unsafe fn families_from_device(
+    device: &impl DeviceV1_0,
+    families: impl IntoIterator<Item = (FamilyIndex, u32)>,
+    properties: &[vk::QueueFamilyProperties],
+) -> Vec<Family> {
+    families
+        .into_iter()
+        .map(|(index, queues)| {
+            Family::from_device(device, index, queues, &properties[index.0 as usize])
+        }).collect()
 }
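
A hypothetical setup sketch for the new family API. `device`, `family_requests` (the `(FamilyIndex, count)` pairs used at device creation) and `queue_properties` are assumptions; everything else is defined in this patch:

```rust
// Safety: called once, right after device creation, with the same parameters.
let families = unsafe {
    families_from_device(&device, family_requests, &queue_properties)
};

// Any family can create pools; the typed capability parameter only narrows
// what the type system lets us record later.
let mut pool = families[0]
    .create_pool(&device, IndividualReset)
    .expect("command pool creation");
```
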
diff --git a/command/src/fence.rs b/command/src/fence.rs
deleted file mode 100644
index 58c398f4..00000000
--- a/command/src/fence.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-bitflags!{
-    /// Flags to specify initial state and behavior of the fence.
-    #[derive(Default)]
-    pub struct FenceCreateFlags: u32 {
-        /// Create fence in signaled state.
-        const CREATE_SIGNALED = 0x00000001;
-    }
-}
-
-/// Create info for fence.
-#[derive(Clone, Copy, Debug, Default)]
-pub struct FenceCreateInfo {
-    /// Creation flags.
-    pub flags: FenceCreateFlags,
-}
-
diff --git a/command/src/frame.rs b/command/src/frame.rs
deleted file mode 100644
index 94e055e5..00000000
--- a/command/src/frame.rs
+++ /dev/null
@@ -1,197 +0,0 @@
-//! Frame module docs.
-
-use error::DeviceLost;
-
-/// Unique index of the frame.
-/// It must be unique per render instance.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub struct FrameIndex(u64);
-
-/// Generate `Frame`s.
-#[derive(Debug)]
-#[allow(missing_copy_implementations)]
-pub struct FrameGen(u64);
-
-impl FrameGen {
-    /// Only one `FrameGen` should be used.
-    pub unsafe fn new() -> Self {
-        FrameGen(0)
-    }
-
-    /// Generate next `Frame`.
-    pub fn next<F>(&mut self) -> Frame<F> {
-        self.0 += 1;
-        unsafe {
-            Frame::new(FrameIndex(self.0))
-        }
-    }
-
-    /// Generate next `Frame`, fences included.
-    pub fn next_with_fences<F>(&mut self, fences: Vec<F>) -> Frame<F> {
-        self.0 += 1;
-        unsafe {
-            Frame::with_fences(FrameIndex(self.0), fences)
-        }
-    }
-}
-
-/// Single frame rendering task.
-/// Command buffers can be submitted as part of the `Frame`.
-/// Internally frame is just an index and fences.
-/// But semantically it owns list of submissions submitted through it.
-#[derive(Debug)]
-pub struct Frame<F> {
-    index: FrameIndex,
-    fences: Vec<F>,
-}
-
-impl<F> Frame<F> {
-    /// Create new frame instance.
-    ///
-    /// # Safety
-    ///
-    /// Index must be unique.
-    pub unsafe fn new(index: FrameIndex) -> Self {
-        Frame {
-            index,
-            fences: Vec::new(),
-        }
-    }
-    /// Create new frame instance.
-    ///
-    /// # Safety
-    ///
-    /// Index must be unique.
-    pub unsafe fn with_fences(index: FrameIndex, fences: Vec<F>) -> Self {
-        Frame {
-            index,
-            fences,
-        }
-    }
-
-    /// Get frame index.
-    pub fn index(&self) -> FrameIndex {
-        self.index
-    }
-
-    /// Takes slice of fences associated with this frame.
-    ///
-    pub unsafe fn fences(&self) -> &[F] {
-        &self.fences
-    }
-
-    /// Finish frame.
-    /// Returned `PendingFrame` can be used to wait the frame to complete on device.
-    pub fn finish(self) -> PendingFrame<F> {
-        PendingFrame {
-            index: self.index,
-            fences: self.fences,
-        }
-    }
-}
-
-/// Frame that is fully submitted for execution.
-/// User can wait for it to become `CompleteFrame`.
-#[derive(Debug)]
-pub struct PendingFrame<F> {
-    index: FrameIndex,
-    fences: Vec<F>,
-}
-
-impl<F> PendingFrame<F> {
-    /// Get frame index.
-    pub fn index(&self) -> FrameIndex {
-        self.index
-    }
-
-    /// Check if frame is complete on device.
-    pub fn is_complete<D>(&self, device: &D) -> bool {
-        unimplemented!("Check the fences")
-    }
-
-    /// Try to complete the frame.
-    /// Returns `Ok(CompleteFrame {...})` if `is_complete` will return `true.
-    /// Returns `Err(self)` otherwise.
-    pub fn complete<D>(self, device: &D) -> Result<CompleteFrame<F>, Self> {
-        if self.is_complete(device) {
-            Ok(CompleteFrame {
-                index: self.index,
-                fences: self.fences,
-            })
-        } else {
-            Err(self)
-        }
-    }
-
-    /// Wait for the frame to complete and return `CompleteFrame` as a proof.
-    pub fn wait<D>(self, device: &D) -> Result<CompleteFrame<F>, DeviceLost> {
-        unimplemented!("Wait for the fences");
-        Ok(CompleteFrame {
-            index: self.index,
-            fences: self.fences,
-        })
-    }
-}
-
-/// Proof that frame is complete.
-#[derive(Debug)]
-pub struct CompleteFrame<F> {
-    index: FrameIndex,
-    fences: Vec<F>,
-}
-
-impl<F> CompleteFrame<F> {
-    /// Get frame index.
-    pub fn index(&self) -> FrameIndex {
-        self.index
-    }
-}
-
-/// Frame bound instance.
-#[derive(Clone, Copy, Debug)]
-pub struct FrameBound<'a, F: 'a, T> {
-    frame: &'a Frame<F>,
-    value: T,
-}
-
-impl<'a, F: 'a, T> FrameBound<'a, F, T> {
-    /// Bind value to frame
-    pub fn bind(value: T, frame: &'a Frame<F>) -> Self {
-        FrameBound { frame, value }
-    }
-
-    /// Get reference to bound value.
-    ///
-    /// # Safety
-    ///
-    /// Unbound value usage must not break frame-binding semantics.
-    ///
-    pub unsafe fn inner_ref(&self) -> &T {
-        &self.value
-    }
-
-    /// Get mutable reference to bound value.
-    ///
-    /// # Safety
-    ///
-    /// Unbound value usage must not break frame-binding semantics.
-    ///
-    pub unsafe fn inner_mut(&mut self) -> &mut T {
-        &mut self.value
-    }
-
-    /// Unbind value from frame.
-    ///
-    /// # Safety
-    ///
-    /// Unbound value usage must not break frame-binding semantics.
-    ///
-    pub unsafe fn unbind(self) -> T {
-        self.value
-    }
-
-    /// Get frame this value bound to.
-    pub fn frame(&self) -> &'a Frame<F> {
-        self.frame
-    }
-}
diff --git a/command/src/impls/ash.rs b/command/src/impls/ash.rs
deleted file mode 100644
index 19833664..00000000
--- a/command/src/impls/ash.rs
+++ /dev/null
@@ -1,51 +0,0 @@
-use ash::{
-    self,
-    version::{DeviceV1_0, FunctionPointers},
-    vk,
-};
-
-use device::{CommandBuffer, CommandQueue, Device};
-use fence;
-
-impl From<fence::FenceCreateFlags> for vk::FenceCreateFlags {
-    fn from(flags: fence::FenceCreateFlags) -> Self {
-        Self::from_flags(flags.bits()).expect("Unsupported flags")
-    }
-}
-
-impl<V> Device for ash::Device<V>
-where
-    V: FunctionPointers,
-    ash::Device<V>: DeviceV1_0,
-{
-    type Semaphore = vk::Semaphore;
-    type Fence = vk::Fence;
-    type Submit = vk::CommandBuffer;
-    type CommandPool = (vk::DeviceFnV1_0, vk::CommandBuffer);
-    type CommandBuffer = (vk::DeviceFnV1_0, vk::CommandBuffer);
-    type CommandQueue = vk::Queue;
-
-    unsafe fn create_fence(&self, info: fence::FenceCreateInfo) -> Self::Fence {
-        use std::ptr::null;
-
-        DeviceV1_0::create_fence(self, &vk::FenceCreateInfo {
-            s_type: vk::StructureType::FenceCreateInfo,
-            p_next: null(),
-            flags: info.flags.into(),
-        }, None).unwrap()
-    }
-}
-
-impl CommandBuffer for (vk::DeviceFnV1_0, vk::CommandBuffer) {
-    type Submit = vk::CommandBuffer;
-
-    unsafe fn submit(&self) -> Self::Submit {
-        self.1
-    }
-}
-
-impl CommandQueue for vk::Queue {
-    type Semaphore = vk::Semaphore;
-    type Fence = vk::Fence;
-    type Submit = vk::CommandBuffer;
-}
diff --git a/command/src/impls/hal.rs b/command/src/impls/hal.rs
deleted file mode 100644
index c712b4a2..00000000
--- a/command/src/impls/hal.rs
+++ /dev/null
@@ -1,46 +0,0 @@
-use std::borrow::Borrow;
-use std::marker::PhantomData;
-
-use hal;
-
-use device::{CommandBuffer, CommandQueue, Device};
-use fence;
-
-impl<D, B> Device for (D, PhantomData<B>)
-where
-    B: hal::Backend,
-    D: Borrow<B::Device>,
-{
-    type Semaphore = B::Semaphore;
-    type Fence = B::Fence;
-    type Submit = B::CommandBuffer;
-    type CommandPool = B::CommandPool;
-    type CommandBuffer = (B::CommandBuffer, PhantomData<B>);
-    type CommandQueue = (B::CommandQueue, PhantomData<B>);
-
-    unsafe fn create_fence(&self, info: fence::FenceCreateInfo) -> Self::Fence {
-        hal::Device::create_fence(self.0.borrow(), info.flags.contains(fence::FenceCreateFlags::CREATE_SIGNALED))
-    }
-}
-
-impl<C, B> CommandBuffer for (C, PhantomData<B>)
-where
-    B: hal::Backend,
-    C: Borrow<B::CommandBuffer>,
-{
-    type Submit = B::CommandBuffer;
-
-    unsafe fn submit(&self) -> Self::Submit {
-        self.0.borrow().clone()
-    }
-}
-
-impl<C, B> CommandQueue for (C, PhantomData<B>)
-where
-    B: hal::Backend,
-    C: Borrow<B::CommandQueue>,
-{
-    type Semaphore = B::Semaphore;
-    type Fence = B::Fence;
-    type Submit = B::CommandBuffer;
-}
diff --git a/command/src/impls/mod.rs b/command/src/impls/mod.rs
deleted file mode 100644
index 0a3cc9cc..00000000
--- a/command/src/impls/mod.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-#[cfg(feature = "hal")]
-mod hal;
-
-#[cfg(feature = "ash")]
-mod ash;
diff --git a/command/src/lib.rs b/command/src/lib.rs
index 577ab5db..c9debe7c 100644
--- a/command/src/lib.rs
+++ b/command/src/lib.rs
@@ -17,41 +17,27 @@
 #![warn(rust_2018_compatibility)]
 #![warn(rust_2018_idioms)]
 
+extern crate ash;
 
-#[macro_use]
-extern crate bitflags;
 #[macro_use]
 extern crate failure;
 extern crate relevant;
+extern crate smallvec;
 
-extern crate rendy_chain as chain;
-extern crate rendy_resource as resource;
-
-#[cfg(feature = "hal")]
-extern crate gfx_hal as hal;
-
-#[cfg(feature = "ash")]
-extern crate ash;
-
-mod impls;
-
-mod device;
-mod error;
-mod family;
-mod fence;
-mod frame;
 mod buffer;
 mod capability;
 mod encoder;
+mod family;
 mod pool;
-mod queue;
 
-pub use buffer::{Buffer, Submit};
-pub use capability::{Capability, CapabilityFlags};
-pub use device::{CommandBuffer, Device};
-pub use encoder::Encoder;
-pub use family::{Family, FamilyId, Families};
-pub use fence::{FenceCreateInfo, FenceCreateFlags};
-pub use frame::{Frame, FrameBound, FrameIndex, CompleteFrame, FrameGen};
-pub use pool::{Pool, OwningPool, FramePool};
-pub use queue::{Submission, Queue};
+pub use crate::{
+    buffer::{
+        CommandBuffer, ExecutableState, IndividualReset, InitialState, InvalidState, Level,
+        MultiShot, NoIndividualReset, OneShot, PendingState, PrimaryLevel, RecordingState,
+        RenderPassContinue, Resettable, SecondaryLevel, SimultaneousUse, Submit, Usage,
+    },
+    capability::{Capability, Compute, General, Graphics, Transfer},
+    encoder::Encoder,
+    family::{families_from_device, Family, FamilyIndex},
+    pool::{CommandPool, OwningCommandPool},
+};
diff --git a/command/src/pool.rs b/command/src/pool.rs
index 8b889419..e8cb5afd 100644
--- a/command/src/pool.rs
+++ b/command/src/pool.rs
@@ -1,71 +1,135 @@
-//! Pool module docs.
+//! CommandPool module docs.
 
-use std::fmt::Debug;
+use ash::{version::DeviceV1_0, vk};
 
+use failure::Error;
 use relevant::Relevant;
 
-use buffer::*;
-use capability::*;
-use device::{CommandBuffer, Device};
-use family::FamilyId;
-use frame::{CompleteFrame, Frame, FrameBound, FrameIndex};
+use crate::{buffer::*, capability::*, family::FamilyIndex};
 
 /// Simple pool wrapper.
 /// Doesn't provide any guarantees.
-/// Wraps raw buffers into `Buffer`.
+/// Wraps raw buffers into `CommandBuffer`.
 #[derive(Debug)]
-pub struct Pool<P, C, R = ()> {
-    inner: P,
+pub struct CommandPool<C = vk::QueueFlags, R = NoIndividualReset> {
+    raw: vk::CommandPool,
     capability: C,
     reset: R,
-    family: FamilyId,
+    family: FamilyIndex,
     relevant: Relevant,
 }
 
-impl<P, C, R> Pool<P, C, R> {
-    /// Allocate new buffer.
-    fn allocate_buffers<D, L>(
+impl<C, R> CommandPool<C, R>
+where
+    C: Capability,
+    R: Reset,
+{
+    /// Wrap raw command pool.
+    ///
+    /// # Safety
+    ///
+    /// * `raw` must be a valid command pool handle.
+    /// * The command pool must be created for the specified `family` index.
+    /// * `capability` must be a subset of the capabilities of the `family` the pool was created for.
+    /// * If `reset` is `IndividualReset` the pool must be created with the individual command buffer reset flag set.
+    pub unsafe fn from_raw(
+        raw: vk::CommandPool,
+        capability: C,
+        reset: R,
+        family: FamilyIndex,
+    ) -> Self {
+        CommandPool {
+            raw,
+            capability,
+            reset,
+            family,
+            relevant: Relevant,
+        }
+    }
+
+    /// Allocate new command buffers.
+    pub fn allocate_buffers<L>(
         &mut self,
-        device: &D,
+        device: &impl DeviceV1_0,
         level: L,
         count: usize,
-    ) -> Vec<Buffer<D::CommandBuffer, C, InitialState, L, R>>
+    ) -> Vec<CommandBuffer<C, InitialState, L, R>>
     where
-        P: Debug,
-        D: Device<CommandPool = P>,
+        L: Level,
     {
-        unimplemented!()
+        let buffers = unsafe {
+            device.allocate_command_buffers(
+                &vk::CommandBufferAllocateInfo::builder()
+                    .command_pool(self.raw)
+                    .level(level.level())
+                    .command_buffer_count(count as u32)
+                    .build(),
+            )
+        }.expect("Panic on OOM");
+
+        buffers
+            .into_iter()
+            .map(|raw| unsafe {
+                CommandBuffer::from_raw(
+                    raw,
+                    self.capability,
+                    InitialState,
+                    level,
+                    self.reset,
+                    self.family,
+                )
+            }).collect()
     }
 
     /// Free buffers.
     /// Buffers must be in droppable state.
-    fn free_buffers<D, L, S>(
+    /// TODO: Validate buffers were allocated from this pool.
+    pub fn free_buffers(
         &mut self,
-        device: &D,
-        buffers: Vec<Buffer<D::CommandBuffer, C, S, L, R>>,
-    ) where
-        P: Debug,
-        D: Device<CommandPool = P>,
-        S: Droppable,
-    {
-        unimplemented!()
+        device: &impl DeviceV1_0,
+        buffers: impl IntoIterator<Item = CommandBuffer<C, impl Resettable, impl Level, R>>,
+    ) {
+        let buffers = buffers
+            .into_iter()
+            .map(|buffer| unsafe { buffer.into_raw() })
+            .collect::<Vec<_>>();
+        unsafe {
+            device.free_command_buffers(self.raw, &buffers);
+        }
     }
 
     /// Reset all buffers of this pool.
-    pub unsafe fn reset(&mut self) {
-        unimplemented!()
+    ///
+    /// # Safety
+    ///
+    /// All buffers allocated from this pool must be marked reset.
+    /// See [`CommandBuffer::mark_reset`](struct.CommandBuffer.html#method.mark_reset)
+    pub unsafe fn reset(&mut self, device: &impl DeviceV1_0) {
+        device
+            .reset_command_pool(self.raw, Default::default())
+            .expect("Panic if OOM");
+    }
+
+    /// Dispose of command pool.
+    ///
+    /// # Safety
+    ///
+    /// * All buffers allocated from this pool must be [freed](#method.free_buffers).
+    pub unsafe fn dispose(self, device: &impl DeviceV1_0) {
+        device.destroy_command_pool(self.raw, None);
+        self.relevant.dispose();
     }
 }
 
-impl<P, R> Pool<P, CapabilityFlags, R> {
+impl<R> CommandPool<vk::QueueFlags, R> {
     /// Convert capability level
-    pub fn cast_capability<C>(self) -> Result<Pool<P, C, R>, Self>
+    pub fn from_flags<C>(self) -> Result<CommandPool<C, R>, Self>
     where
         C: Capability,
     {
         if let Some(capability) = C::from_flags(self.capability) {
-            Ok(Pool {
-                inner: self.inner,
+            Ok(CommandPool {
+                raw: self.raw,
                 capability,
                 reset: self.reset,
                 family: self.family,
@@ -82,153 +146,126 @@ impl<P, R> Pool<P, CapabilityFlags, R> {
 /// All buffers will be reset together via pool.
 /// Prior reset user must ensure all buffers are complete.
 #[derive(Debug)]
-pub struct OwningPool<P, B, C, R = ()> {
-    inner: Pool<P, C, R>,
-    buffers: Vec<B>,
+pub struct OwningCommandPool<C = vk::QueueFlags, L = PrimaryLevel> {
+    inner: CommandPool<C>,
+    level: L,
+    buffers: Vec<vk::CommandBuffer>,
     next: usize,
 }
 
-impl<P, B, C, R> OwningPool<P, B, C, R> {
+impl<C, L> OwningCommandPool<C, L>
+where
+    C: Capability,
+    L: Level,
+{
+    /// Wrap simple pool into owning version.
+    ///
+    /// # Safety
+    ///
+    /// * All buffers allocated from this pool must be [freed](#method.free_buffers).
+    pub unsafe fn from_inner(inner: CommandPool<C>, level: L) -> Self {
+        OwningCommandPool {
+            inner,
+            level,
+            buffers: Vec::new(),
+            next: 0,
+        }
+    }
+
     /// Reserve at least `count` buffers.
     /// Allocate if there are not enough unused buffers.
-    pub fn reserve(&mut self, count: usize) {
-        unimplemented!()
-    }
+    pub fn reserve(&mut self, device: &impl DeviceV1_0, count: usize) {
+        let total = self.next + count;
+        if total >= self.buffers.len() {
+            let add = total - self.buffers.len();
 
-    /// Acquire command buffer from pool.
-    /// The command buffer could be submitted only as part of submission for associated frame.
-    /// TODO: Check that buffer cannot be moved out.
-    pub fn acquire_buffer<D, L>(
-        &mut self,
-        device: &D,
-        level: L,
-    ) -> Buffer<&mut B, C, InitialState, L>
-    where
-        B: CommandBuffer + Debug + 'static,
-        D: Device<CommandBuffer = B, Submit = B::Submit>,
-    {
-        unimplemented!()
+            // TODO: avoid Vec allocation.
+            self.buffers.extend(
+                unsafe {
+                    device.allocate_command_buffers(
+                        &vk::CommandBufferAllocateInfo::builder()
+                            .command_pool(self.inner.raw)
+                            .level(self.level.level())
+                            .command_buffer_count(add as u32)
+                            .build(),
+                    )
+                }.expect("Panic on OOM"),
+            );
+        }
     }
 
-    /// Reset all buffers at once.
+    /// Acquire next unused command buffer from pool.
     ///
     /// # Safety
     ///
-    /// All buffers from this pool must be in resettable state.
-    /// Any primary buffer that references secondary buffer from this pool will be invalidated.
-    pub unsafe fn reset(&mut self) {
-        unimplemented!()
-    }
-}
-
-impl<P, B, R> OwningPool<P, B, CapabilityFlags, R> {
-    /// Convert capability level
-    pub fn cast_capability<C>(self) -> Result<OwningPool<P, B, C, R>, Self>
-    where
-        C: Capability,
-    {
-        match self.inner.cast_capability::<C>() {
-            Ok(inner) => Ok(OwningPool {
-                inner,
-                buffers: self.buffers,
-                next: self.next,
-            }),
-            Err(inner) => Err(OwningPool {
-                inner,
-                buffers: self.buffers,
-                next: self.next,
-            }),
+    /// * Acquired buffer must be [released](struct.CommandBuffer.html#method.release) when no longer needed.
+    pub fn acquire_buffer(
+        &mut self,
+        device: &impl DeviceV1_0,
+    ) -> CommandBuffer<C, InitialState, L> {
+        self.reserve(device, 1);
+        self.next += 1;
+        unsafe {
+            CommandBuffer::from_raw(
+                self.buffers[self.next - 1],
+                self.inner.capability,
+                InitialState,
+                self.level,
+                self.inner.reset,
+                self.inner.family,
+            )
         }
     }
-}
 
-/// `OwningPool` that can be bound to frame execution.
-/// All command buffers acquired from bound `FramePool` are guarantee
-/// to complete when frame's fence is set, and buffers can be reset.
-#[derive(Debug)]
-pub struct FramePool<P, B, C> {
-    inner: OwningPool<P, B, C>,
-    frame: Option<FrameIndex>,
-}
-
-impl<P, B, C> FramePool<P, B, C> {
-    /// Bind pool to particular frame.
-    ///
-    /// Command pools acquired from the bound pool could be submitted only within frame borrowing lifetime.
-    /// This ensures that frame's fences will be signaled after all commands from all command buffers from this pool
-    /// are complete.
-    ///
-    /// `reset` method must be called with `CompleteFrame` created from the bound `Frame` before binding to the another `Frame`.
+    /// Reset all buffers at once.
+    /// [`OwningCommandPool::acquire_buffer`](#method.acquire_buffer) will reuse allocated buffers.
     ///
-    /// # Panics
+    /// # Safety
     ///
-    /// This function will panic if pool is still bound to frame.
+    /// * All buffers acquired from this pool must be released.
+    /// * Commands in buffers must be [complete](struct.CommandBuffer.html#method.complete).
     ///
-    pub fn bind<'a, 'b, F>(&'a mut self, frame: &'b Frame<F>) -> FrameBound<'b, F, &'a mut Self> {
-        assert!(
-            self.frame.is_none(),
-            "`FramePool::reset` must be called before binding to another frame"
-        );
-
-        self.frame = Some(frame.index());
-
-        FrameBound::bind(self, frame)
+    /// Note:
+    /// * Any primary buffer that references a secondary buffer from this pool will be invalidated.
+    pub unsafe fn reset(&mut self, device: &impl DeviceV1_0) {
+        self.inner.reset(device);
+        self.next = 0;
     }
 
-    /// Reset all buffers at once.
-    ///
-    /// # Panics
+    /// Dispose of command pool.
     ///
-    /// This function will panic if pool wasn't bound to the specified frame.
+    /// # Safety
     ///
-    pub fn reset<F>(&mut self, complete: &CompleteFrame<F>) {
-        assert_eq!(
-            self.frame.take(),
-            Some(complete.index()),
-            "Pool must be bound to the specified frame"
-        );
-        unimplemented!()
+    /// Same as for [`OwningCommandPool::reset`](#method.reset).
+    pub unsafe fn dispose(mut self, device: &impl DeviceV1_0) {
+        self.reset(device);
+        if !self.buffers.is_empty() {
+            device.free_command_buffers(self.inner.raw, &self.buffers);
+        }
+
+        self.inner.dispose(device);
     }
 }
 
-impl<P, B> FramePool<P, B, CapabilityFlags> {
-    /// Convert capability level
-    pub fn cast_capability<C>(self) -> Result<FramePool<P, B, C>, Self>
+impl<L> OwningCommandPool<vk::QueueFlags, L> {
+    /// Convert capability level.
+    pub fn from_flags<C>(self) -> Result<OwningCommandPool<C, L>, Self>
     where
         C: Capability,
     {
-        match self.inner.cast_capability::<C>() {
-            Ok(inner) => Ok(FramePool {
+        match self.inner.from_flags::<C>() {
+            Ok(inner) => Ok(OwningCommandPool {
                 inner,
-                frame: self.frame,
+                level: self.level,
+                buffers: self.buffers,
+                next: self.next,
             }),
-            Err(inner) => Err(FramePool {
+            Err(inner) => Err(OwningCommandPool {
                 inner,
-                frame: self.frame,
+                level: self.level,
+                buffers: self.buffers,
+                next: self.next,
             }),
         }
     }
 }
-
-impl<'a, 'b, P: 'b, B: 'b, C: 'b, F: 'a> FrameBound<'a, F, &'b mut FramePool<P, B, C>> {
-    /// Reserve at least `count` buffers.
-    /// Allocate if there are not enough unused buffers.
-    pub fn reserve(&mut self, count: usize) {
-        unimplemented!()
-    }
-
-    /// Acquire command buffer from pool.
-    /// The command buffer could be submitted only as part of submission for associated frame.
-    /// TODO: Check that buffer cannot be moved out.
-    pub fn acquire_buffer<D, L>(
-        &mut self,
-        device: &D,
-        level: L,
-    ) -> Buffer<FrameBound<'b, &mut B, F>, C, InitialState, L>
-    where
-        B: CommandBuffer + Debug + 'static,
-        D: Device<CommandBuffer = B, Submit = B::Submit>,
-    {
-        unimplemented!()
-    }
-}
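
A sketch of the two pool flavors defined above; `device` and `family: Family<Graphics>` are assumptions, and `InitialState` is assumed to implement `Resettable`:

```rust
// Plain pool: the caller owns the buffers and must free them explicitly.
let mut pool = family
    .create_pool(&device, IndividualReset)
    .expect("command pool creation");
let buffers = pool.allocate_buffers(&device, PrimaryLevel, 4);
// ... record, submit, wait, then `complete()` / `reset()` the buffers ...
pool.free_buffers(&device, buffers);
unsafe { pool.dispose(&device) };

// Owning pool: buffers are recycled in bulk by resetting the whole pool.
let mut owning = family
    .create_owning_pool(&device, PrimaryLevel)
    .expect("owning command pool creation");
let buffer = owning.acquire_buffer(&device);
// ... record and submit, wait for the fence, then `complete()` and `release()` ...
unsafe { owning.reset(&device) };
unsafe { owning.dispose(&device) };
```
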
diff --git a/command/src/queue.rs b/command/src/queue.rs
deleted file mode 100644
index f75eb1fe..00000000
--- a/command/src/queue.rs
+++ /dev/null
@@ -1,56 +0,0 @@
-//! Queue module docs.
-
-use std::borrow::Borrow;
-
-use buffer::Submit;
-use device::CommandQueue;
-
-/// Submission is a list of command buffers in executable state (in form of `Submit`s)
-/// together with semaphores to wait and semaphores signal.
-#[derive(Clone, Copy, Debug)]
-pub struct Submission<W, L, S> {
-    /// Semaphores to wait before executing commands.
-    pub waits: W,
-
-    /// Buffers with commands to execute.
-    pub buffers: L,
-
-    /// Semaphores to signal after executing commands.
-    pub signals: S,
-}
-
-/// Command queue with known capabilities.
-#[derive(Clone, Debug)]
-pub struct Queue<Q, C> {
-    pub(super) inner: Q,
-    pub(super) capability: C,
-}
-
-impl<Q, C> Queue<Q, C> {
-    /// Submit command buffers to the queue.
-    ///
-    /// # Panics
-    ///
-    /// This function panics if a command buffer in submission was created from
-    /// command pool associated with another queue family.
-    ///
-    /// # Safety
-    ///
-    /// User must ensure that for each semaphore to wait there must be queued signal of that semaphore.
-    /// [See Vulkan spec for details](https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#commandbuffers-submission-progress)
-    ///
-    pub unsafe fn submit<I, WI, BI, SI, W, B, S, F>(&mut self, submission: I, fence: Option<F>)
-    where
-        Q: CommandQueue,
-        I: IntoIterator<Item = Submission<WI, BI, SI>>,
-        WI: IntoIterator<Item = W>,
-        BI: IntoIterator<Item = Submit<B>>,
-        SI: IntoIterator<Item = S>,
-        W: Borrow<Q::Semaphore>,
-        B: Borrow<Q::Submit>,
-        S: Borrow<Q::Semaphore>,
-        F: Borrow<Q::Fence>,
-    {
-        unimplemented!()
-    }
-}
diff --git a/factory/Cargo.toml b/factory/Cargo.toml
new file mode 100644
index 00000000..68833a31
--- /dev/null
+++ b/factory/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+name = "rendy-factory"
+version = "0.1.0"
+authors = ["omni-viral <scareaangel@gmail.com>"]
+
+[dependencies]
+ash = { path = "../../ash/ash" }
+derivative = "1.0"
+failure = "0.1"
+log = "0.4"
+relevant = "0.2"
+serde = { version = "1.0", optional = true, features = ["derive"] }
+smallvec = "0.6"
+winit = "0.17"
+
+rendy-memory = { path = "../memory" }
+rendy-resource = { path = "../resource" }
+rendy-command = { path = "../command" }
+rendy-wsi = { path = "../wsi" }
diff --git a/factory/src/config.rs b/factory/src/config.rs
new file mode 100644
index 00000000..6e21793b
--- /dev/null
+++ b/factory/src/config.rs
@@ -0,0 +1,175 @@
+use std::cmp::min;
+
+use ash::vk;
+
+use command::FamilyIndex;
+use memory::{allocator, HeapsConfig};
+
+#[derive(Clone, Derivative)]
+#[derivative(Debug, Default)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct Config<H = BasicHeapsConfigure, Q = OneGraphicsQueue> {
+    /// Application name.
+    #[derivative(Default(value = "From::from(\"Rendy\")"))]
+    pub app_name: String,
+
+    /// Application version.
+    #[derivative(Default(value = "vk_make_version!(0,1,0)"))]
+    // #[derivative(Debug(format_with = "fmt_version"))]
+    pub app_version: u32,
+
+    /// Config for memory::Heaps.
+    pub heaps: H,
+
+    /// Config for queue families.
+    pub queues: Q,
+}
+/// Trait that represents some method to select queue families and their priorities.
+pub unsafe trait QueuesConfigure {
+    type Priorities: AsRef<[f32]>;
+    type Families: IntoIterator<Item = (FamilyIndex, Self::Priorities)>;
+
+    fn configure(self, families: &[vk::QueueFamilyProperties]) -> Self::Families;
+}
+
+/// Queue configuration that picks the first graphics queue family.
+/// If possible it checks that queues of the family are capable of presenting.
+
+#[derive(Clone, Debug, Default)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct OneGraphicsQueue;
+
+unsafe impl QueuesConfigure for OneGraphicsQueue {
+    type Priorities = [f32; 1];
+    type Families = Option<(FamilyIndex, [f32; 1])>;
+    fn configure(self, families: &[vk::QueueFamilyProperties]) -> Option<(FamilyIndex, [f32; 1])> {
+        families
+            .iter()
+            .position(|f| f.queue_flags.subset(vk::QueueFlags::GRAPHICS) && f.queue_count > 0)
+            .map(|p| (FamilyIndex(p as u32), [1.0]))
+    }
+}
+
+/// Saved config for queues.
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct SavedQueueConfig(Vec<(FamilyIndex, Vec<f32>)>);
+
+unsafe impl QueuesConfigure for SavedQueueConfig {
+    type Priorities = Vec<f32>;
+    type Families = Vec<(FamilyIndex, Vec<f32>)>;
+    fn configure(self, families: &[vk::QueueFamilyProperties]) -> Vec<(FamilyIndex, Vec<f32>)> {
+        if !self.0.iter().all(|&(index, ref priorities)| {
+            families
+                .get(index.0 as usize)
+                .map_or(false, |p| p.queue_count as usize >= priorities.len())
+        }) {
+            panic!("Config is out of date");
+        } else {
+            self.0
+        }
+    }
+}
+
+pub unsafe trait HeapsConfigure {
+    type Types: IntoIterator<Item = (vk::MemoryPropertyFlags, u32, HeapsConfig)>;
+    type Heaps: IntoIterator<Item = u64>;
+
+    fn configure(
+        self,
+        properties: &vk::PhysicalDeviceMemoryProperties,
+    ) -> (Self::Types, Self::Heaps);
+}
+
+/// Basic heaps config.
+#[derive(Clone, Debug, Default)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct BasicHeapsConfigure;
+
+unsafe impl HeapsConfigure for BasicHeapsConfigure {
+    type Types = Vec<(vk::MemoryPropertyFlags, u32, HeapsConfig)>;
+    type Heaps = Vec<u64>;
+
+    fn configure(
+        self,
+        properties: &vk::PhysicalDeviceMemoryProperties,
+    ) -> (Self::Types, Self::Heaps) {
+        let types = (0..properties.memory_type_count)
+            .map(|index| &properties.memory_types[index as usize])
+            .map(|mt| {
+                let config = HeapsConfig {
+                    arena: if mt
+                        .property_flags
+                        .subset(allocator::ArenaAllocator::properties_required())
+                    {
+                        Some(allocator::ArenaConfig {
+                            arena_size: min(
+                                256 * 1024 * 1024,
+                                properties.memory_heaps[mt.heap_index as usize].size / 8,
+                            ),
+                        })
+                    } else {
+                        None
+                    },
+                    dynamic: if mt
+                        .property_flags
+                        .subset(allocator::DynamicAllocator::properties_required())
+                    {
+                        Some(allocator::DynamicConfig {
+                            max_block_size: min(
+                                32 * 1024 * 1024,
+                                properties.memory_heaps[mt.heap_index as usize].size / 8,
+                            ),
+                            block_size_granularity: min(
+                                256,
+                                properties.memory_heaps[mt.heap_index as usize].size / 1024,
+                            ),
+                            blocks_per_chunk: 64,
+                        })
+                    } else {
+                        None
+                    },
+                };
+
+                (mt.property_flags, mt.heap_index, config)
+            }).collect();
+
+        let heaps = (0..properties.memory_heap_count)
+            .map(|index| &properties.memory_heaps[index as usize])
+            .map(|heap| heap.size)
+            .collect();
+
+        (types, heaps)
+    }
+}
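+
+// Worked example of the heuristics above (illustrative, not part of this PR):
+// for a memory type backed by a 1 GiB heap,
+//   arena_size             = min(256 MiB, 1 GiB / 8)    = 128 MiB
+//   max_block_size         = min(32 MiB,  1 GiB / 8)    = 32 MiB
+//   block_size_granularity = min(256,     1 GiB / 1024) = 256 bytes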
+
+/// Saved config for heaps.
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct SavedHeapsConfig {
+    types: Vec<(vk::MemoryPropertyFlags, u32, HeapsConfig)>,
+    heaps: Vec<u64>,
+}
+
+unsafe impl HeapsConfigure for SavedHeapsConfig {
+    type Types = Vec<(vk::MemoryPropertyFlags, u32, HeapsConfig)>;
+    type Heaps = Vec<u64>;
+
+    fn configure(
+        self,
+        _properties: &vk::PhysicalDeviceMemoryProperties,
+    ) -> (Self::Types, Self::Heaps) {
+        (self.types, self.heaps)
+    }
+}
+
+#[allow(unused)]
+fn fmt_version(version: &u32, fmt: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
+    write!(
+        fmt,
+        "{}.{}.{}",
+        vk_version_major!(*version),
+        vk_version_minor!(*version),
+        vk_version_patch!(*version)
+    )
+}
diff --git a/factory/src/factory.rs b/factory/src/factory.rs
new file mode 100644
index 00000000..ff3f7544
--- /dev/null
+++ b/factory/src/factory.rs
@@ -0,0 +1,485 @@
+use std::{
+    ffi::{CStr, CString},
+    os::raw::c_char,
+};
+
+use ash::{
+    extensions::{Surface, Swapchain},
+    version::{DeviceV1_0, EntryV1_0, InstanceV1_0, V1_0},
+    vk, Device, Entry, Instance, LoadingError,
+};
+use failure::Error;
+use relevant::Relevant;
+use smallvec::SmallVec;
+use winit::Window;
+
+use command::{families_from_device, Family, FamilyIndex};
+use memory::{Block, Heaps, MemoryError, MemoryUsage, Write};
+use resource::{buffer::Buffer, image::Image, Resources};
+use wsi::{NativeSurface, Target};
+
+use config::{Config, HeapsConfigure, QueuesConfigure};
+use queue::Queue;
+
+/// Failure to load the Vulkan entry points.
+#[derive(Debug, Fail)]
+#[fail(display = "{:#?}", _0)]
+pub struct EntryError(LoadingError);
+
+#[derive(Debug)]
+struct PhysicalDeviceInfo {
+    handle: vk::PhysicalDevice,
+    properties: vk::PhysicalDeviceProperties,
+    memory: vk::PhysicalDeviceMemoryProperties,
+    queues: Vec<vk::QueueFamilyProperties>,
+    features: vk::PhysicalDeviceFeatures,
+    extensions: Vec<vk::ExtensionProperties>,
+}
+
+/// The `Factory` type represents the overall creation and management type for `rendy`.
+pub struct Factory {
+    instance: Instance<V1_0>,
+    physical: PhysicalDeviceInfo,
+    device: Device<V1_0>,
+    families: Vec<Family>,
+    heaps: Heaps,
+    resources: Resources,
+    surface: Surface,
+    swapchain: Swapchain,
+    native_surface: NativeSurface,
+    relevant: Relevant,
+}
+
+impl Factory {
+    /// Creates a new `Factory` from a `Config` with `HeapsConfigure` and `QueuesConfigure`
+    /// implementations; a suitable `vk::PhysicalDevice` is picked automatically.
+    pub fn new(config: Config<impl HeapsConfigure, impl QueuesConfigure>) -> Result<Self, Error> {
+        let entry = Entry::<V1_0>::new().map_err(EntryError)?;
+
+        let layers = entry.enumerate_instance_layer_properties()?;
+        info!("Available layers:\n{:#?}", layers);
+
+        let extensions = entry.enumerate_instance_extension_properties()?;
+        info!("Available extensions:\n{:#?}", extensions);
+
+        let instance = unsafe {
+            // Only present layers and extensions are enabled.
+            // Other parameters trivially valid.
+            entry.create_instance(
+                &vk::InstanceCreateInfo::builder()
+                    .application_info(
+                        &vk::ApplicationInfo::builder()
+                            .application_name(&CString::new(config.app_name)?)
+                            .application_version(config.app_version)
+                            .engine_name(CStr::from_bytes_with_nul_unchecked(b"rendy\0"))
+                            .engine_version(1)
+                            .api_version(vk_make_version!(1, 0, 0))
+                            .build(),
+                    ).enabled_extension_names(&extensions_to_enable(&extensions)?)
+                    .build(),
+                None,
+            )
+        }?;
+        // trace!("Instance created");
+
+        let surface = Surface::new(&entry, &instance)
+            .map_err(|missing| format_err!("{:#?} functions are missing", missing))?;
+        let native_surface = NativeSurface::new(&entry, &instance)
+            .map_err(|missing| format_err!("{:#?} functions are missing", missing))?;
+
+        let mut physicals = unsafe {
+            // Instance is valid.
+            // Physical device handlers are valid (enumerated from instance).
+            instance
+                .enumerate_physical_devices()?
+                .into_iter()
+                .map(|p| PhysicalDeviceInfo {
+                    handle: p,
+                    properties: instance.get_physical_device_properties(p),
+                    memory: instance.get_physical_device_memory_properties(p),
+                    queues: instance.get_physical_device_queue_family_properties(p),
+                    features: instance.get_physical_device_features(p),
+                    extensions: instance.enumerate_device_extension_properties(p).unwrap(),
+                })
+        }.collect::<Vec<_>>();
+
+        info!("Physical devices:\n{:#?}", physicals);
+
+        physicals.retain(|p| match extensions_to_enable(&p.extensions) {
+            Ok(_) => true,
+            Err(missing) => {
+                // trace!("{:#?} missing extensions {:#?}", p, missing);
+                false
+            }
+        });
+
+        let physical = physicals
+            .into_iter()
+            .min_by_key(|info| match info.properties.device_type {
+                vk::PhysicalDeviceType::DISCRETE_GPU => 0,
+                vk::PhysicalDeviceType::INTEGRATED_GPU => 1,
+                vk::PhysicalDeviceType::VIRTUAL_GPU => 2,
+                vk::PhysicalDeviceType::CPU => 3,
+                _ => 4,
+            }).ok_or(format_err!("No suitable physical devices found"))?;
+
+        let device_name = unsafe {
+            // Pointer is valid.
+            CStr::from_ptr(&physical.properties.device_name[0]).to_string_lossy()
+        };
+
+        info!("Physical device picked: {}", device_name);
+
+        let families = config.queues.configure(&physical.queues);
+
+        let (create_queues, get_queues): (SmallVec<[_; 32]>, SmallVec<[_; 32]>) = families
+            .into_iter()
+            .map(|(index, priorities)| {
+                let info = vk::DeviceQueueCreateInfo::builder()
+                    .queue_family_index(index.0)
+                    .queue_priorities(priorities.as_ref())
+                    .build();
+                let get = (index, priorities.as_ref().len() as u32);
+                (info, get)
+            }).unzip();
+
+        info!("Queues: {:#?}", get_queues);
+
+        let device = unsafe {
+            instance.create_device(
+                physical.handle,
+                &vk::DeviceCreateInfo::builder()
+                    .queue_create_infos(&create_queues)
+                    .enabled_extension_names(&extensions_to_enable(&physical.extensions).unwrap())
+                    // .enabled_features(&physical.features)
+                    .build(),
+                None,
+            )
+        }?;
+
+        let swapchain = Swapchain::new(&instance, &device)
+            .map_err(|missing| format_err!("{:#?} functions are missing", missing))?;
+
+        let (types, heaps) = config.heaps.configure(&physical.memory);
+        let heaps = heaps.into_iter().collect::<SmallVec<[_; 16]>>();
+        let types = types.into_iter().collect::<SmallVec<[_; 32]>>();
+
+        info!("Heaps: {:#?}\nTypes: {:#?}", heaps, types);
+
+        let heaps = unsafe { Heaps::new(types, heaps) };
+
+        let families = unsafe { families_from_device(&device, get_queues, &physical.queues) };
+
+        let factory = Factory {
+            instance,
+            physical,
+            device,
+            families,
+            heaps,
+            resources: Resources::new(),
+            surface,
+            swapchain,
+            native_surface,
+            relevant: Relevant,
+        };
+
+        // trace!("Factory created");
+
+        Ok(factory)
+    }
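+
+    // Illustrative usage sketch (not part of this PR). The `Config` field names are
+    // taken from how they are used above; the concrete values are assumptions.
+    //
+    // let factory = Factory::new(Config {
+    //     app_name: "example".into(),
+    //     app_version: vk_make_version!(0, 1, 0),
+    //     queues: OneGraphicsQueue,
+    //     heaps: BasicHeapsConfigure,
+    // })?;
+    // // ... create resources, render ...
+    // factory.dispose();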
+
+    /// Wait for the device to become idle.
+    pub fn wait_idle(&self) {
+        unsafe {
+            self.device.device_wait_idle().unwrap();
+        }
+    }
+
+    /// Dispose of the factory: wait for the device to become idle and destroy all owned objects.
+    pub fn dispose(mut self) {
+        self.wait_idle();
+        for family in self.families {
+            family.dispose(&self.device);
+        }
+
+        unsafe {
+            // All queues complete.
+            self.resources.cleanup(&self.device, &mut self.heaps);
+        }
+
+        self.heaps.dispose(&self.device);
+        unsafe {
+            self.device.destroy_device(None);
+            self.instance.destroy_instance(None);
+        }
+
+        self.relevant.dispose();
+        // trace!("Factory destroyed");
+    }
+
+    /// Creates a buffer that is managed with the specified properties.
+    pub fn create_buffer(
+        &mut self,
+        info: vk::BufferCreateInfo,
+        align: u64,
+        memory_usage: impl MemoryUsage,
+    ) -> Result<Buffer, MemoryError> {
+        self.resources
+            .create_buffer(&self.device, &mut self.heaps, info, align, memory_usage)
+    }
+
+    /// Upload buffer content.
+    ///
+    /// # Safety
+    ///
+    /// * Buffer must be created by this `Factory`.
+    /// * Caller must ensure that device won't write to or read from
+    /// the memory region occupied by this buffer.
+    pub unsafe fn upload_buffer(
+        &mut self,
+        buffer: &mut Buffer,
+        offset: u64,
+        content: &[u8],
+        family: FamilyIndex,
+        access: vk::AccessFlags,
+    ) -> Result<(), Error> {
+        if buffer
+            .block()
+            .properties()
+            .subset(vk::MemoryPropertyFlags::HOST_VISIBLE)
+        {
+            self.upload_visible_buffer(buffer, offset, content)
+        } else {
+            unimplemented!("Staging is not supported yet");
+        }
+    }
+
+    /// Update a buffer bound to host-visible memory.
+    ///
+    /// # Safety
+    ///
+    /// * Caller must ensure that device won't write to or read from
+    /// the memory region occupied by this buffer.
+    pub unsafe fn upload_visible_buffer(
+        &mut self,
+        buffer: &mut Buffer,
+        offset: u64,
+        content: &[u8],
+    ) -> Result<(), Error> {
+        let block = buffer.block_mut();
+        assert!(
+            block
+                .properties()
+                .subset(vk::MemoryPropertyFlags::HOST_VISIBLE)
+        );
+        let mut mapped = block.map(&self.device, offset..offset + content.len() as u64)?;
+        mapped
+            .write(&self.device, 0..content.len() as u64)?
+            .write(content);
+        Ok(())
+    }
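+
+    // Illustrative sketch (not part of this PR): fill a freshly created buffer through
+    // host-visible memory. `memory::Dynamic` is assumed to be a host-visible
+    // `MemoryUsage` implementation; `factory` and `data` are owned by the caller.
+    //
+    // let mut buffer = factory.create_buffer(
+    //     vk::BufferCreateInfo::builder()
+    //         .size(data.len() as u64)
+    //         .usage(vk::BufferUsageFlags::UNIFORM_BUFFER)
+    //         .build(),
+    //     1,
+    //     memory::Dynamic,
+    // )?;
+    // unsafe { factory.upload_visible_buffer(&mut buffer, 0, &data)? };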
+
+    /// Creates an image that is managed with the specified properties.
+    pub fn create_image(
+        &mut self,
+        info: vk::ImageCreateInfo,
+        align: u64,
+        memory_usage: impl MemoryUsage,
+    ) -> Result<Image, MemoryError> {
+        self.resources
+            .create_image(&self.device, &mut self.heaps, info, align, memory_usage)
+    }
+
+    /// Create render target from window.
+    pub fn create_target(&self, window: Window, image_count: u32) -> Result<Target, Error> {
+        Target::new(
+            window,
+            image_count,
+            self.physical.handle,
+            &self.native_surface,
+            &self.surface,
+            &self.swapchain,
+        )
+    }
+
+    /// Destroy the render target, returning the underlying window.
+    pub fn destroy_target(&self, target: Target) -> Window {
+        unsafe {
+            let (window, surface, swapchain) = target.dispose();
+            self.swapchain.destroy_swapchain_khr(swapchain, None);
+            // trace!("Swapchain destroyed");
+            self.surface.destroy_surface_khr(surface, None);
+            // trace!("Surface destroyed");
+            window
+        }
+    }
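+
+    // Illustrative sketch (not part of this PR): render target lifecycle.
+    // `window` is a hypothetical `winit::Window`; `3` is the requested image count.
+    //
+    // let target = factory.create_target(window, 3)?;
+    // assert!(factory.target_support(FamilyIndex(0), &target));
+    // let window = factory.destroy_target(target);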
+
+    /// Get queue families of the factory.
+    pub fn families(&self) -> &[Family] {
+        &self.families
+    }
+
+    /// Get a queue handle by family index and queue index within the family.
+    pub fn queue(&mut self, family: FamilyIndex, queue: usize) -> Queue<'_> {
+        let raw = self.families[family.0 as usize].queues()[queue];
+        Queue {
+            fp: self.device().fp_v1_0(),
+            raw,
+        }
+    }
+
+    /// Get surface support for family.
+    pub fn target_support(&self, family: FamilyIndex, target: &Target) -> bool {
+        unsafe {
+            let surface = target.surface();
+            self.surface.get_physical_device_surface_support_khr(
+                self.physical.handle,
+                family.0,
+                surface,
+            )
+        }
+    }
+
+    /// Get device.
+    pub fn device(&self) -> &impl DeviceV1_0 {
+        &self.device
+    }
+
+    /// Get physical device.
+    pub fn physical(&self) -> vk::PhysicalDevice {
+        self.physical.handle
+    }
+
+    /// Get surface capabilities.
+    pub fn surface_capabilities(
+        &self,
+        target: &Target,
+    ) -> Result<vk::SurfaceCapabilitiesKHR, Error> {
+        unsafe {
+            self.surface.get_physical_device_surface_capabilities_khr(
+                self.physical.handle,
+                target.surface(),
+            )
+        }.map_err(Error::from)
+    }
+
+    /// Create new semaphore
+    pub fn create_semaphore(&self) -> vk::Semaphore {
+        unsafe {
+            self.device
+                .create_semaphore(&vk::SemaphoreCreateInfo::builder().build(), None)
+        }.expect("Panic on OOM")
+    }
+
+    /// Create new fence
+    pub fn create_fence(&self, signaled: bool) -> vk::Fence {
+        unsafe {
+            self.device.create_fence(
+                &vk::FenceCreateInfo::builder()
+                    .flags(if signaled {
+                        vk::FenceCreateFlags::SIGNALED
+                    } else {
+                        vk::FenceCreateFlags::empty()
+                    }).build(),
+                None,
+            )
+        }.expect("Panic on OOM")
+    }
+
+    /// Reset the fence to the unsignaled state.
+    /// TODO:
+    /// * Add multi-fence version.
+    pub fn reset_fence(&self, fence: vk::Fence) {
+        unsafe {
+            // TODO: Handle device lost.
+            self.device.reset_fences(&[fence]).expect("Panic on OOM")
+        }
+    }
+
+    /// Wait for the fence to become signaled.
+    /// TODO:
+    /// * Add timeout.
+    /// * Add multi-fence version.
+    pub fn wait_for_fence(&self, fence: vk::Fence) {
+        unsafe {
+            self.device
+                .wait_for_fences(&[fence], true, !0)
+                .expect("Panic on OOM") // TODO: Handle device lost.
+        }
+    }
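+
+    // Illustrative sketch (not part of this PR): typical per-submission fence usage
+    // with the helpers above. `submit_info` is a hypothetical `vk::SubmitInfo`.
+    //
+    // let fence = factory.create_fence(false);
+    // factory.queue(FamilyIndex(0), 0).submit(&[submit_info], fence);
+    // factory.wait_for_fence(fence);
+    // factory.reset_fence(fence);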
+
+    // /// Inefficiently upload image data.
+    // pub fn _inefficiently_upload_image(
+    //     &mut self,
+    //     image: &mut Image,
+    //     data: &[u8],
+    //     layout: vk::ImageLayout,
+    // ) {
+    //     let mut staging_buffer = self.create_buffer(
+    //         vk::BufferCreateInfo::builder()
+    //             .size(data.len() as u64)
+    //             .usage(vk::BufferUsageFlags::TRANSFER_SRC)
+    //             .build(),
+    //         1,
+    //         Upload,
+    //     ).unwrap();
+
+    //     self.upload_visible_buffer(&mut staging_buffer, 0, data).unwrap();
+
+    //     let extent = image.extent();
+
+    //     let command_pool = self.families[0].create_owning_pool(&self.device, crate::command::PrimaryLevel).unwrap();
+    //     let command_buffer = command_pool.acquire_buffer(&self.device);
+    //     let command_buffer = command_buffer.begin(&self.device, crate::command::OneShot);
+    //     self.device.cmd_copy_buffer_to_image(
+    //         command_buffer.raw(),
+    //         staging_buffer.raw(),
+    //         image.raw(),
+    //         layout,
+    //         &[
+    //             vk::BufferImageCopy::builder()
+    //                 .buffer_row_length(extent.width * 4)
+    //                 .buffer_image_height(extent.height * extent.width * 4)
+    //                 .image_extent(extent)
+    //                 .build(),
+    //         ]
+    //     )
+    // }
+}
+
+// Interpret the fixed-size `extension_name` array as a nul-terminated C string.
+// Safe only when the array contains a nul terminator, which holds for
+// extension properties returned by the driver.
+unsafe fn extension_name_cstr(e: &vk::ExtensionProperties) -> &CStr {
+    CStr::from_ptr(e.extension_name[..].as_ptr())
+}
+
+fn extensions_to_enable(
+    available: &[vk::ExtensionProperties],
+) -> Result<Vec<*const c_char>, Error> {
+    let names = vec![
+        Surface::name().as_ptr(),
+        Swapchain::name().as_ptr(),
+        NativeSurface::name().as_ptr(),
+    ];
+
+    let not_found = unsafe {
+        names
+            .iter()
+            .cloned()
+            .filter_map(|name| {
+                let cstr_name = CStr::from_ptr(name);
+                if available
+                    .iter()
+                    .find(|e| extension_name_cstr(e) == cstr_name)
+                    .is_none()
+                {
+                    Some(cstr_name)
+                } else {
+                    None
+                }
+            }).collect::<Vec<_>>()
+    };
+
+    if not_found.is_empty() {
+        Ok(names)
+    } else {
+        Err(format_err!(
+            "Extensions {:#?} are not available: {:#?}",
+            not_found,
+            available
+        ))
+    }
+}
diff --git a/factory/src/lib.rs b/factory/src/lib.rs
new file mode 100644
index 00000000..2700e38c
--- /dev/null
+++ b/factory/src/lib.rs
@@ -0,0 +1,31 @@
+// TODO: module docs
+
+#[macro_use]
+pub extern crate ash;
+#[macro_use]
+extern crate derivative;
+#[macro_use]
+extern crate failure;
+#[macro_use]
+extern crate log;
+extern crate relevant;
+#[cfg(feature = "serde")]
+extern crate serde;
+extern crate smallvec;
+extern crate winit;
+
+pub extern crate rendy_command as command;
+pub extern crate rendy_memory as memory;
+pub extern crate rendy_resource as resource;
+pub extern crate rendy_wsi as wsi;
+
+mod config;
+mod factory;
+mod queue;
+
+pub use config::{
+    BasicHeapsConfigure, Config, HeapsConfigure, OneGraphicsQueue, QueuesConfigure,
+    SavedHeapsConfig, SavedQueueConfig,
+};
+pub use factory::Factory;
+pub use queue::Queue;
diff --git a/factory/src/queue.rs b/factory/src/queue.rs
new file mode 100644
index 00000000..53e391d2
--- /dev/null
+++ b/factory/src/queue.rs
@@ -0,0 +1,30 @@
+use ash::vk;
+
+/// Lightweight handle to a device queue together with the function pointers needed to use it.
+pub struct Queue<'a> {
+    pub(crate) fp: &'a vk::DeviceFnV1_0,
+    pub(crate) raw: vk::Queue,
+}
+
+impl<'a> Queue<'a> {
+    /// Wait for the queue to become idle.
+    pub fn wait_idle(&self) {
+        let result = unsafe { self.fp.queue_wait_idle(self.raw) };
+        match result {
+            vk::Result::SUCCESS => (),
+            result => panic!("{:#?}", result),
+        }
+    }
+
+    /// Get raw handle.
+    pub fn raw(&self) -> vk::Queue {
+        self.raw
+    }
+
+    /// Submit to the queue.
+    pub fn submit(&mut self, submits: &[vk::SubmitInfo], fence: vk::Fence) {
+        let _ = unsafe {
+            self.fp
+                .queue_submit(self.raw, submits.len() as u32, submits.as_ptr(), fence)
+        };
+    }
+}
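+
+// Illustrative sketch (not part of this PR): building a submission for `Queue::submit`.
+// `command_buffer`, `wait_semaphore` and `fence` are hypothetical handles owned by the caller.
+//
+// let submit = vk::SubmitInfo::builder()
+//     .wait_semaphores(&[wait_semaphore])
+//     .wait_dst_stage_mask(&[vk::PipelineStageFlags::TOP_OF_PIPE])
+//     .command_buffers(&[command_buffer])
+//     .build();
+// queue.submit(&[submit], fence);
+// queue.wait_idle();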
diff --git a/frame/Cargo.toml b/frame/Cargo.toml
new file mode 100644
index 00000000..b33875b1
--- /dev/null
+++ b/frame/Cargo.toml
@@ -0,0 +1,11 @@
+[package]
+name = "rendy-frame"
+version = "0.1.0"
+authors = ["omni-viral <scareaangel@gmail.com>"]
+
+[dependencies]
+ash = { path = "../../ash/ash" }
+failure = "0.1"
+relevant = "0.2"
+smallvec = "0.6"
+rendy-command = { path = "../command" }
diff --git a/frame/src/frame.rs b/frame/src/frame.rs
new file mode 100644
index 00000000..0f7233b5
--- /dev/null
+++ b/frame/src/frame.rs
@@ -0,0 +1,303 @@
+//! Frame module docs.
+
+use ash::{version::DeviceV1_0, vk};
+use failure::Error;
+use smallvec::SmallVec;
+use std::borrow::Borrow;
+
+use command::{
+    Capability, CommandBuffer, Encoder, ExecutableState, InitialState, MultiShot, OneShot,
+    OwningCommandPool, PrimaryLevel, RecordingState, Resettable, Submit,
+};
+
+/// Fences collection.
+pub type Fences = SmallVec<[vk::Fence; 8]>;
+
+/// Unique index of the frame.
+/// It must be unique per render instance.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[repr(transparent)]
+pub struct FrameIndex(u64);
+
+/// Generate `Frame`s.
+#[derive(Debug)]
+#[allow(missing_copy_implementations)]
+#[repr(transparent)]
+pub struct FrameGen {
+    next: u64,
+}
+
+impl FrameGen {
+    /// Create new `FrameGen`
+    pub fn new() -> Self {
+        FrameGen { next: 0 }
+    }
+
+    /// Generate next `Frame`.
+    pub fn next(&mut self) -> Frame {
+        self.next += 1;
+        Frame {
+            index: FrameIndex(self.next - 1),
+        }
+    }
+}
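+
+// Illustrative sketch (not part of this PR): frame indices grow monotonically,
+// so one `FrameGen` per render instance keeps `FrameIndex` values unique.
+//
+// let mut gen = FrameGen::new();
+// let first = gen.next();  // FrameIndex(0)
+// let second = gen.next(); // FrameIndex(1)
+// assert!(first.index() < second.index());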
+
+/// Single frame rendering task.
+/// Command buffers can be submitted as part of the `Frame`.
+#[allow(missing_copy_implementations)]
+#[derive(Debug)]
+#[repr(transparent)]
+pub struct Frame {
+    index: FrameIndex,
+}
+
+impl Frame {
+    /// Get frame index.
+    pub fn index(&self) -> FrameIndex {
+        self.index
+    }
+}
+
+/// Frame that is fully submitted for execution.
+/// User can wait for it to become `CompleteFrame`.
+#[derive(Debug)]
+pub struct PendingFrame {
+    index: FrameIndex,
+    fences: Fences,
+}
+
+impl PendingFrame {
+    /// Get frame index.
+    pub fn index(&self) -> FrameIndex {
+        self.index
+    }
+
+    /// Check if frame is complete on device.
+    pub fn is_complete<D>(&self, device: &D) -> bool {
+        unimplemented!("Check the fences")
+    }
+
+    /// Try to complete the frame.
+    /// Returns `Ok((CompleteFrame, Fences))` if `is_complete` returns `true`,
+    /// `Err(self)` otherwise.
+    pub fn complete<D>(self, device: &D) -> Result<(CompleteFrame, Fences), Self> {
+        if self.is_complete(device) {
+            Ok((CompleteFrame { index: self.index }, self.fences))
+        } else {
+            Err(self)
+        }
+    }
+
+    /// Wait for the frame to complete and return `CompleteFrame` as a proof.
+    pub fn wait<D>(self, device: &D) -> Result<(CompleteFrame, Fences), Error> {
+        unimplemented!("Wait for the fences");
+        Ok((CompleteFrame { index: self.index }, self.fences))
+    }
+}
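+
+// Illustrative sketch (not part of this PR): once fence checking is implemented,
+// a `PendingFrame` is either polled or waited on before its buffers are reused.
+// `device`, `pending` and `pool` are hypothetical values owned by the caller.
+//
+// let (complete, _fences) = match pending.complete(&device) {
+//     Ok(done) => done,                        // fences already signaled
+//     Err(pending) => pending.wait(&device)?,  // block until signaled
+// };
+// pool.reset(&complete);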
+
+/// Proof that frame is complete.
+#[derive(Debug)]
+#[allow(missing_copy_implementations)]
+pub struct CompleteFrame {
+    index: FrameIndex,
+}
+
+impl CompleteFrame {
+    /// Get frame index.
+    pub fn index(&self) -> FrameIndex {
+        self.index
+    }
+}
+
+/// Frame bound instance.
+#[derive(Clone, Copy, Debug)]
+pub struct FrameBound<'a, T> {
+    frame: &'a Frame,
+    value: T,
+}
+
+impl<'a, T> FrameBound<'a, T> {
+    /// Bind value to frame
+    pub fn bind(value: T, frame: &'a Frame) -> Self {
+        FrameBound { frame, value }
+    }
+
+    /// Get reference to bound value.
+    ///
+    /// # Safety
+    ///
+    /// Unbound value usage must not break frame-binding semantics.
+    ///
+    pub unsafe fn value_ref(&self) -> &T {
+        &self.value
+    }
+
+    /// Get mutable reference to bound value.
+    ///
+    /// # Safety
+    ///
+    /// Unbound value usage must not break frame-binding semantics.
+    ///
+    pub unsafe fn value_mut(&mut self) -> &mut T {
+        &mut self.value
+    }
+
+    /// Unbind value from frame.
+    ///
+    /// # Safety
+    ///
+    /// Unbound value usage must not break frame-binding semantics.
+    ///
+    pub unsafe fn unbind(self) -> T {
+        self.value
+    }
+
+    /// Get frame this value bound to.
+    pub fn frame(&self) -> &'a Frame {
+        self.frame
+    }
+}
+
+/// Timeline of frames, complete, pending and next.
+#[derive(Debug)]
+pub struct Frames {
+    pending: SmallVec<[PendingFrame; 5]>,
+    next: Frame,
+}
+
+impl Frames {
+    /// Get next frame reference.
+    fn next(&self) -> &Frame {
+        &self.next
+    }
+
+    /// Bind value to next frame.
+    fn bind_to_next<T>(&self, value: T) -> FrameBound<'_, T> {
+        FrameBound::bind(value, &self.next)
+    }
+
+    /// Get the exclusive upper bound of complete frames.
+    /// Every frame with a lower index is complete.
+    fn complete_until(&self) -> FrameIndex {
+        self.pending.first().map_or(self.next.index, |p| p.index)
+    }
+
+    fn complete(&self, index: FrameIndex) -> Option<CompleteFrame> {
+        if self.complete_until() > index {
+            Some(CompleteFrame { index })
+        } else {
+            None
+        }
+    }
+}
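+
+// Illustrative sketch (not part of this PR): within the crate, `complete` hands out
+// a `CompleteFrame` proof only for indices strictly below `complete_until()`,
+// i.e. below the oldest pending frame (or below `next` when nothing is pending).
+//
+// if let Some(proof) = frames.complete(frame_index) {
+//     pool.reset(&proof);
+// }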
+
+/// `OwningCommandPool` that can be bound to frame execution.
+/// All command buffers acquired from a bound `FramePool` are guaranteed
+/// to be complete when the frame's fences are signaled, at which point the buffers can be reset.
+#[derive(Debug)]
+pub struct FramePool<C, R> {
+    inner: OwningCommandPool<C, R>,
+    frame: Option<FrameIndex>,
+}
+
+impl<C, R> FramePool<C, R> {
+    /// Bind pool to particular frame.
+    ///
+    /// Command buffers acquired from the bound pool can be submitted only within the frame's borrowing lifetime.
+    /// This ensures that the frame's fences are signaled only after all commands from all command buffers of this pool
+    /// are complete.
+    ///
+    /// The `reset` method must be called with a `CompleteFrame` derived from the bound `Frame` before binding to another `Frame`.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if pool is still bound to frame.
+    ///
+    pub fn bind<'a>(&'a mut self, frame: &'a Frame) -> FrameBound<'a, &'a mut Self> {
+        assert!(
+            self.frame.is_none(),
+            "`FramePool::reset` must be called before binding to another frame"
+        );
+
+        self.frame = Some(frame.index());
+
+        FrameBound::bind(self, frame)
+    }
+
+    /// Reset all buffers at once.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if pool wasn't bound to the specified frame.
+    ///
+    pub fn reset(&mut self, complete: &CompleteFrame) {
+        assert_eq!(
+            self.frame.take(),
+            Some(complete.index()),
+            "CommandPool must be bound to the specified frame"
+        );
+        unimplemented!()
+    }
+}
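+
+// Illustrative sketch (not part of this PR): intended bind/record/reset cycle.
+// `frame`, `device` and `complete` are hypothetical values owned by the caller.
+//
+// let mut bound = pool.bind(&frame);
+// let buffer = bound.acquire_buffer(&device, PrimaryLevel);
+// // record and submit `buffer` as part of `frame`, then once the frame is complete:
+// pool.reset(&complete);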
+
+impl<R> FramePool<vk::QueueFlags, R> {
+    /// Convert capability level
+    pub fn from_flags<C>(self) -> Result<FramePool<C, R>, Self>
+    where
+        C: Capability,
+    {
+        match self.inner.from_flags::<C>() {
+            Ok(inner) => Ok(FramePool {
+                inner,
+                frame: self.frame,
+            }),
+            Err(inner) => Err(FramePool {
+                inner,
+                frame: self.frame,
+            }),
+        }
+    }
+}
+
+impl<'a, C: 'a, R: 'a> FrameBound<'a, &'a mut FramePool<C, R>> {
+    /// Reserve at least `count` buffers.
+    /// Allocate if there are not enough unused buffers.
+    pub fn reserve(&mut self, count: usize) {
+        unimplemented!()
+    }
+
+    /// Acquire a command buffer from the pool.
+    /// The command buffer can be submitted only as part of a submission for the associated frame.
+    /// TODO: Check that buffer cannot be moved out.
+    pub fn acquire_buffer<L>(
+        &mut self,
+        device: &impl DeviceV1_0,
+        level: L,
+    ) -> FrameBound<'a, CommandBuffer<C, InitialState, L>> {
+        unimplemented!()
+    }
+}
+
+impl<'a, S, L, C> FrameBound<'a, CommandBuffer<C, S, L>>
+where
+    S: Resettable,
+{
+    /// Release the borrowed buffer so that the next buffer can be acquired from the pool.
+    /// Whatever state this buffer is in, it will be reset only after the bound frame is complete,
+    /// which makes it safe to release a buffer that is still in the pending state.
+    pub fn release(self) {
+        unimplemented!()
+    }
+}
+
+impl<'a, C, R> FrameBound<'a, CommandBuffer<C, ExecutableState<OneShot>, PrimaryLevel, R>> {
+    /// Produce `Submit` object that can be used to populate submission.
+    pub fn submit(self) -> (FrameBound<'a, Submit>,) {
+        unimplemented!()
+    }
+}
+
+impl<'a, C, U, L, R> Encoder<C> for FrameBound<'a, CommandBuffer<C, RecordingState<U>, L, R>> {
+    unsafe fn raw(&mut self) -> vk::CommandBuffer {
+        CommandBuffer::raw(&self.value)
+    }
+}
diff --git a/frame/src/lib.rs b/frame/src/lib.rs
new file mode 100644
index 00000000..bd498177
--- /dev/null
+++ b/frame/src/lib.rs
@@ -0,0 +1,8 @@
+extern crate ash;
+extern crate failure;
+extern crate rendy_command as command;
+extern crate smallvec;
+
+mod frame;
+
+pub use frame::{CompleteFrame, Fences, Frame, FrameBound, FrameGen, FrameIndex, FramePool, Frames, PendingFrame};
diff --git a/graph/Cargo.toml b/graph/Cargo.toml
index 414e22f0..6c22a681 100644
--- a/graph/Cargo.toml
+++ b/graph/Cargo.toml
@@ -4,6 +4,7 @@ version = "0.1.0"
 authors = ["omni-viral <scareaangel@gmail.com>"]
 
 [dependencies]
+ash = { path = "../../ash/ash" }
 bitflags = "1.0"
 derivative = "1.0"
 failure = "0.1"
diff --git a/graph/src/graph/mod.rs b/graph/src/graph/mod.rs
index 2e44689d..b0cbd343 100644
--- a/graph/src/graph/mod.rs
+++ b/graph/src/graph/mod.rs
@@ -4,7 +4,7 @@
 // };
 
 use chain;
-use command::{Device, FamilyId, Queue, FrameGen, Submission, CapabilityFlags};
+use command::{Device, FamilyIndex, Queue, FrameGen, Submission, CapabilityFlags};
 use resource::{buffer, image};
 
 // use smallvec::SmallVec;
@@ -24,6 +24,7 @@ pub struct Graph<D: Device, T> {
     buffers: Vec<buffer::Buffer<D::Memory, D::Buffer>>,
     images: Vec<image::Image<D::Memory, D::Image>>,
     frame_gen: FrameGen,
+    fences: Vec<D::Fence>,
 }
 
 impl<D, T> Graph<D, T>
@@ -53,21 +54,20 @@ where
         mut queues: Q,
         device: &mut D,
         aux: &mut T,
-        mut fences: Vec<D::Fence>,
     ) -> Vec<D::Fence>
     where
-        Q: FnMut(FamilyId, usize) -> Option<&'a mut Queue<D::CommandQueue, CapabilityFlags>>
+        Q: FnMut(FamilyIndex, usize) -> Option<&'a mut Queue<D::CommandQueue, CapabilityFlags>>
     {
         unsafe {
-            device.reset_fences(&fences);
+            device.reset_fences(&self.fences);
         }
-        while fences.len() < self.schedule.queue_count() {
-            fences.push(unsafe { // ?
+        while self.fences.len() < self.schedule.queue_count() {
+            self.fences.push(unsafe { // ?
                 device.create_fence(Default::default())
             });
         }
 
-        let frame = self.frame_gen.next_with_fences(fences);
+        let frame = self.frame_gen.next_with_fences(std::mem::replace(&mut self.fences, Vec::new()));
 
         let ref semaphores = self.semaphores;
 
@@ -77,7 +77,7 @@ where
         for submission in self.schedule.ordered() {
             let sid = submission.id();
             let qid = sid.queue();
-            let queue = queues(FamilyId(qid.family().0), qid.index()).expect("`queues` must contain all queues");
+            let queue = queues(FamilyIndex(qid.family().0), qid.index()).expect("`queues` must contain all queues");
 
             let node = self.nodes.get_mut(submission.node()).expect("`Node` must exist");
 
@@ -86,14 +86,14 @@ where
             node.run(
                 device,
                 aux,
-                unimplemented!(),
+                &frame,
                 &mut node_submits,
             );
 
             let last_in_queue = sid.index() + 1 == self.schedule.queue(qid).unwrap().len();
             let fence = if last_in_queue {
                 fence_index += 1;
-                Some(&fences[fence_index - 1])
+                Some(&self.fences[fence_index - 1])
             } else {
                 None
             };
@@ -194,7 +194,7 @@ where
 //         Y: FnMut(image::Kind, Format, image::Usage, &mut D, &mut T) -> I,
 //         P: IntoIterator<Item = PresentBuilder<'a, B>>,
 //     {
-//         trace!("Build Graph");
+//         // trace!("Build Graph");
 //         use chain::{build, pass::Pass};
 
 //         let families = families.into_iter().collect::<Vec<_>>();
@@ -209,7 +209,7 @@ where
 //             )));
 //         }
 
-//         trace!("Schedule nodes execution");
+//         // trace!("Schedule nodes execution");
 //         let passes: Vec<Pass> = nodes
 //             .iter()
 //             .enumerate()
@@ -220,9 +220,9 @@ where
 //             find_family::<B, _>(families.iter().cloned(), qid).max_queues()
 //         });
 
-//         trace!("Scheduled nodes execution {:#?}", chains);
+//         // trace!("Scheduled nodes execution {:#?}", chains);
 
-//         trace!("Allocate buffers");
+//         // trace!("Allocate buffers");
 //         let buffers = self
 //             .buffers
 //             .iter()
@@ -239,7 +239,7 @@ where
 //             })
 //             .collect::<Vec<_>>();
 
-//         trace!("Allocate images");
+//         // trace!("Allocate images");
 //         let images = self
 //             .images
 //             .iter()
@@ -261,23 +261,23 @@ where
 //         let mut built_nodes: Vec<Option<Box<AnyNode<B, D, T>>>> =
 //             (0..nodes.len()).map(|_| None).collect();
 
-//         trace!("Synchronize");
+//         // trace!("Synchronize");
 //         let mut semaphores = GenId::new();
 //         let schedule = sync(&chains, || {
 //             let id = semaphores.next();
 //             (id, id)
 //         });
-//         trace!("Schedule: {:#?}", schedule);
+//         // trace!("Schedule: {:#?}", schedule);
 
-//         trace!("Build nodes");
+//         // trace!("Build nodes");
 //         for family in schedule.iter() {
-//             trace!("For family {:#?}", family);
+//             // trace!("For family {:#?}", family);
 //             for queue in family.iter() {
-//                 trace!("For queue {:#?}", queue.id());
+//                 // trace!("For queue {:#?}", queue.id());
 //                 for (sid, submission) in queue.iter() {
-//                     trace!("For submission {:#?}", sid);
+//                     // trace!("For submission {:#?}", sid);
 //                     let builder = nodes[submission.pass().0].take().unwrap();
-//                     trace!("Build node {}", builder.name());
+//                     // trace!("Build node {}", builder.name());
 //                     let node = builder.build(
 //                         submission,
 //                         &chains.buffers,
diff --git a/graph/src/lib.rs b/graph/src/lib.rs
index fe21104a..1cd571d0 100644
--- a/graph/src/lib.rs
+++ b/graph/src/lib.rs
@@ -17,6 +17,7 @@
 #![warn(rust_2018_compatibility)]
 #![warn(rust_2018_idioms)]
 
+extern crate ash;
 extern crate rendy_chain as chain;
 extern crate rendy_command as command;
 extern crate rendy_resource as resource;
@@ -25,6 +26,5 @@ extern crate smallvec;
 mod node;
 mod graph;
 
-
 pub use node::{Node, NodeDesc, NodeBuilder};
 pub use graph::Graph;
diff --git a/graph/src/node/mod.rs b/graph/src/node/mod.rs
index 1381f61d..d96515ae 100644
--- a/graph/src/node/mod.rs
+++ b/graph/src/node/mod.rs
@@ -1,29 +1,24 @@
 //! Defines node - building block for framegraph.
 //!
 
+use std::any::Any;
+use ash::{version::DeviceV1_0, vk};
+
 use chain::{
     State,
-    Buffer,
-    Image,
+    BufferState,
+    ImageState,
     Id,
 };
 
 use command::{
-    Submit,
+    Encoder,
     Capability,
-    CapabilityFlags,
-    Device,
-    FamilyId,
-    Frame,
-    FrameBound,
-    FramePool,
+    CapabilityFlags,
+    CompleteFrame,
+    FamilyIndex,
+    Frames,
 };
-// use resource::{buffer, image};
 
-#[doc(hidden)]
-pub trait FrameBoundSubmits<'a, D: Device + ?Sized> {
-    type Submits: IntoIterator<Item = Submit<FrameBound<'a, D::Fence, D::Submit>>>;
-}
+use resource::{Buffer, Image};
 
 /// The node is building block of the framegraph.
 /// Node defines set of resources and operations to perform over them.
@@ -34,17 +29,15 @@ pub trait FrameBoundSubmits<'a, D: Device + ?Sized> {
 /// `D` - device type.
 /// `T` - auxiliary data type.
 ///
-pub trait Node<D: Device + ?Sized, T: ?Sized>:
-    for<'a> FrameBoundSubmits<'a, D> + Sized + 'static
-{
+pub trait Node<T: ?Sized>: Sized + Sync + Send + 'static {
     /// Capability required by node.
     /// Graph will execute this node on command queue that supports this capability level.
     type Capability: Capability;
 
     /// Description type to instantiate the node.
-    type Desc: NodeDesc<D, T, Node = Self>;
+    type Desc: NodeDesc<T, Node = Self>;
 
-    /// Builder creation.
+    /// Desc creation.
     /// Convenient method if builder implements `Default`.
     fn desc() -> Self::Desc
     where
@@ -53,48 +46,57 @@ pub trait Node<D: Device + ?Sized, T: ?Sized>:
         Default::default()
     }
 
+    /// Builder creation.
+    fn builder(self) -> NodeBuilder<T>
+    where
+        Self::Desc: Default,
+    {
+        Self::desc().builder()
+    }
+
     /// Record commands required by node.
     /// Returned submits are guaranteed to be submitted within specified frame.
-    fn run<'a>(
+    fn run<'a, E>(
         &mut self,
-        device: &D,
-        aux: &T,
-        frame: &'a Frame<D::Fence>,
-    ) -> <Self as FrameBoundSubmits<'a, D>>::Submits;
+        device: &impl DeviceV1_0,
+        aux: &mut T,
+        complete_frame: &'a CompleteFrame,
+        frames: &'a Frames,
+        encoder: E,
+    )
+    where
+        E: Encoder<Self::Capability>,
+    ;
 }
 
 /// Resources wrapper.
 /// Wraps resources requested by the node.
 /// This wrapper guarantees that lifetime of resources is bound to the node lifetime.
-/// Also it automatically inserts synchronization required to make access declared by node correct.
 #[derive(Clone, Debug)]
-pub struct Resources<'a, B: 'a, I: 'a> {
-    buffers: Vec<&'a B>,
-    images: Vec<&'a I>,
-    barriers: Barriers,
+pub struct Resources<'a> {
+    buffers: Vec<&'a Buffer>,
+    images: Vec<&'a Image>,
 }
 
-/// Set of barriers the node must insert before and after commands.
-#[derive(Clone, Copy, Debug)]
-pub struct Barriers;
-
 /// Builder of the node.
 /// Implementation of the builder type provide framegraph with static information about node
 /// that is used for building the node.
-pub trait NodeDesc<D: Device + ?Sized, T: ?Sized>: Sized + 'static {
+pub trait NodeDesc<T: ?Sized>: Sized + 'static {
     /// Node this builder builds.
-    type Node: Node<D, T>;
+    type Node: Node<T>;
 
-    /// Capability required by node.
-    /// Graph will execute this node on command queue that supports this capability level.
+    /// Builder creation.
+    fn builder(self) -> NodeBuilder<T> {
+        NodeBuilder::new(self)
+    }
 
     /// Get set or buffer resources the node uses.
-    fn buffers(&self) -> Vec<State<Buffer>> {
+    fn buffers(&self) -> Vec<BufferState> {
         Vec::new()
     }
 
     /// Get set or image resources the node uses.
-    fn images(&self) -> Vec<State<Image>> {
+    fn images(&self) -> Vec<ImageState> {
         Vec::new()
     }
 
@@ -111,85 +113,74 @@ pub trait NodeDesc<D: Device + ?Sized, T: ?Sized>: Sized + 'static {
     ///
     fn build(
         &self,
-        device: &D,
+        device: &impl DeviceV1_0,
         aux: &mut T,
-        pool: FramePool<D::CommandPool, D::CommandBuffer, <Self::Node as Node<D, T>>::Capability>,
-        resources: Resources<'_, D::Buffer, D::Image>,
+        resources: Resources<'_>,
     ) -> Self::Node;
 }
 
 /// Trait-object safe `Node`.
-pub trait AnyNode<D: Device + ?Sized, T: ?Sized> {
+pub unsafe trait AnyNode<T: ?Sized>: Any + Sync + Send {
     /// Record commands required by node.
     /// Recorded buffers go into `submits`.
     fn run(
         &mut self,
-        device: &D,
-        aux: &T,
+        device: &impl DeviceV1_0,
+        aux: &mut T,
         frame: &Frame<D::Fence>,
-        raw_submits: &mut Vec<D::Submit>,
+        encoder: &mut AnyEncoder,
     );
 }
 
-impl<D, T, N> AnyNode<D, T> for N
+unsafe impl<T, N> AnyNode<T> for N
 where
-    D: Device + ?Sized,
     T: ?Sized,
-    N: Node<D, T>,
+    N: Node<T>,
 {
     fn run(
         &mut self,
-        device: &D,
-        aux: &T,
+        device: &impl DeviceV1_0,
+        aux: &mut T,
         frame: &Frame<D::Fence>,
-        raw_submits: &mut Vec<D::Submit>,
+        encoder: &mut AnyEncoder,
     ) {
-        let submits = Node::run(self, device, aux, frame)
-            .into_iter()
-            .map(|submit| unsafe {
-                // Graph guarantee to submit those within frame to the correct queue.
-                submit.into_inner().unbind()
-            });
-
-        raw_submits.extend(submits);
+        Node::run(self, device, aux, frame, encoder.capability::<N::Capability>())
     }
 }
 
 /// Trait-object safe `NodeDesc`.
-pub trait AnyNodeDesc<D: Device + ?Sized, T: ?Sized> {
+pub unsafe trait AnyNodeDesc<T: ?Sized> {
     /// Find family suitable for the node.
-    fn family(&self, families: &[(CapabilityFlags, FamilyId)]) -> Option<FamilyId>;
+    fn family(&self, families: &[(CapabilityFlags, FamilyIndex)]) -> Option<FamilyIndex>;
 
     /// Build the node.
     fn build(
         &self,
-        device: &D,
+        device: &impl DeviceV1_0,
         aux: &mut T,
-        pool: FramePool<D::CommandPool, D::CommandBuffer, CapabilityFlags>,
-        resources: Resources<'_, D::Buffer, D::Image>,
+        resources: Resources<'_>,
-    ) -> Box<dyn AnyNode<D, T>>;
+    ) -> Box<dyn AnyNode<T>>;
 }
 
-impl<D, T, N> AnyNodeDesc<D, T> for N
+unsafe impl<T, N> AnyNodeDesc<T> for N
 where
-    D: Device + ?Sized,
     T: ?Sized,
-    N: NodeDesc<D, T>,
+    N: NodeDesc<T>,
 {
-    fn family(&self, families: &[(CapabilityFlags, FamilyId)]) -> Option<FamilyId> {
+    fn family(&self, families: &[(CapabilityFlags, FamilyIndex)]) -> Option<FamilyIndex> {
         families
             .iter()
-            .find(|&(cap, _)| <N::Node as Node<D, T>>::Capability::from_flags(*cap).is_some())
+            .find(|&(cap, _)| <N::Node as Node<T>>::Capability::from_flags(*cap).is_some())
             .map(|&(_, id)| id)
     }
 
     fn build(
         &self,
-        device: &D,
+        device: &impl DeviceV1_0,
         aux: &mut T,
-        pool: FramePool<D::CommandPool, D::CommandBuffer, CapabilityFlags>,
-        resources: Resources<'_, D::Buffer, D::Image>,
+        resources: Resources<'_>,
-    ) -> Box<dyn AnyNode<D, T>> {
+    ) -> Box<dyn AnyNode<T>> {
         let node = NodeDesc::build(
             self,
             device,
@@ -205,26 +196,25 @@ where
 
 /// Builder for the node.
 #[allow(missing_debug_implementations)]
-pub struct NodeBuilder<D: Device + ?Sized, T: ?Sized> {
-    pub(crate) desc: Box<dyn AnyNodeDesc<D, T>>,
+pub struct NodeBuilder<T: ?Sized> {
+    pub(crate) desc: Box<dyn AnyNodeDesc<T>>,
     pub(crate) buffers: Vec<Id>,
     pub(crate) images: Vec<Id>,
     pub(crate) dependencies: Vec<usize>,
 }
 
-impl<D, T> NodeBuilder<D, T>
+impl<T> NodeBuilder<T>
 where
-    D: Device + ?Sized,
     T: ?Sized,
 {
     /// Create new builder.
-    pub fn new<N>() -> Self
+    pub fn new<N>(desc: N) -> Self
     where
-        N: Node<D, T>,
-        N::Desc: Default,
+        N: NodeDesc<T>,
     {
         NodeBuilder {
-            desc: Box::new(N::desc()),
+            desc: Box::new(desc),
             buffers: Vec::new(),
             images: Vec::new(),
             dependencies: Vec::new(),
@@ -277,11 +267,14 @@ where
     #[allow(unused)]
     pub(crate) fn build(
         &self,
-        device: &D,
+        device: &impl DeviceV1_0,
         aux: &mut T,
-        pool: FramePool<D::CommandPool, D::CommandBuffer, CapabilityFlags>,
-        resources: Resources<'_, D::Buffer, D::Image>,
-    ) -> Box<dyn AnyNode<D, T>> {
-        self.desc.build(device, aux, pool, resources)
+        resources: Resources<'_>,
+    ) -> Box<dyn AnyNode<T>> {
+        self.desc.build(device, aux, resources)
     }
 }
+
+/// Type-erased encoder handed to nodes by the graph.
+/// TODO: implement and add capability checking.
+pub struct AnyEncoder {}
diff --git a/memory/Cargo.toml b/memory/Cargo.toml
index 5ea34572..cbc57d69 100644
--- a/memory/Cargo.toml
+++ b/memory/Cargo.toml
@@ -4,20 +4,15 @@ version = "0.1.0"
 authors = ["omni-viral <scareaangel@gmail.com>"]
 
 [dependencies]
-bitflags = "1.0"
+ash = { path = "../../ash/ash" }
+derivative = "1.0"
 failure = "0.1"
-veclist = "0.1"
-gfx-hal = { git = "https://github.com/gfx-rs/gfx.git", optional = true }
-ash = { version = "0.24", optional = true }
-serde = { version = "1.0", optional = true, features = ["derive"] }
-smallvec = "0.6"
+log = "0.4"
 hibitset = "0.5"
 relevant = "0.2"
-derivative = "1.0"
+serde = { version = "1.0", optional = true, features = ["derive"] }
+smallvec = "0.6"
+veclist = "0.1"
 
 [dev-dependencies]
 rand = "0.5"
-
-[features]
-hal = ["gfx-hal"]
-vulkan = ["ash"]
diff --git a/memory/src/allocator/arena.rs b/memory/src/allocator/arena.rs
index 7c1ba133..1071ccd3 100644
--- a/memory/src/allocator/arena.rs
+++ b/memory/src/allocator/arena.rs
@@ -1,10 +1,11 @@
-use std::{collections::VecDeque, fmt::Debug, ops::Range, ptr::NonNull};
+use std::{collections::VecDeque, ops::Range, ptr::NonNull};
+
+use ash::{version::DeviceV1_0, vk};
 
 use relevant::Relevant;
 
 use allocator::Allocator;
 use block::Block;
-use device::Device;
 use error::*;
 use mapping::*;
 use memory::*;
@@ -13,9 +14,9 @@ use util::*;
 /// Memory block allocated from `ArenaAllocator`
 #[derive(Derivative)]
 #[derivative(Debug)]
-pub struct ArenaBlock<T> {
-    #[derivative(Debug(bound = "T: Debug", format_with = "super::memory_ptr_fmt"))]
-    memory: *const Memory<T>,
+pub struct ArenaBlock {
+    // #[derivative(Debug(format_with = "::memory::memory_ptr_fmt"))]
+    memory: *const Memory,
     arena_index: u64,
     ptr: NonNull<u8>,
     range: Range<u64>,
@@ -23,11 +24,11 @@ pub struct ArenaBlock<T> {
     relevant: Relevant,
 }
 
-unsafe impl<T: Send> Send for ArenaBlock<T> {}
-unsafe impl<T: Sync> Sync for ArenaBlock<T> {}
+unsafe impl Send for ArenaBlock {}
+unsafe impl Sync for ArenaBlock {}
 
-impl<T> ArenaBlock<T> {
-    fn shared_memory(&self) -> &Memory<T> {
+impl ArenaBlock {
+    fn shared_memory(&self) -> &Memory {
         // Memory won't be freed until last block created from it deallocated.
         unsafe { &*self.memory }
     }
@@ -41,16 +42,14 @@ impl<T> ArenaBlock<T> {
     }
 }
 
-impl<T: 'static> Block for ArenaBlock<T> {
-    type Memory = T;
-
+impl Block for ArenaBlock {
     #[inline]
-    fn properties(&self) -> Properties {
+    fn properties(&self) -> vk::MemoryPropertyFlags {
         self.shared_memory().properties()
     }
 
     #[inline]
-    fn memory(&self) -> &T {
+    fn memory(&self) -> vk::DeviceMemory {
         self.shared_memory().raw()
     }
 
@@ -60,11 +59,11 @@ impl<T: 'static> Block for ArenaBlock<T> {
     }
 
     #[inline]
-    fn map<'a, D>(
+    fn map<'a>(
         &'a mut self,
-        _device: &D,
+        _device: &impl DeviceV1_0,
         range: Range<u64>,
-    ) -> Result<MappedRange<'a, T>, MappingError> {
+    ) -> Result<MappedRange<'a>, MappingError> {
         assert!(
             range.start <= range.end,
             "Memory mapping region must have valid size"
@@ -80,7 +79,7 @@ impl<T: 'static> Block for ArenaBlock<T> {
     }
 
     #[inline]
-    fn unmap<D>(&mut self, _device: &D) {
+    fn unmap(&mut self, _device: &impl DeviceV1_0) {
         debug_assert!(self.shared_memory().host_visible());
     }
 }
@@ -104,31 +103,31 @@ pub struct ArenaConfig {
 /// Allocation strategy requires minimal overhead and implementation is fast.
 /// But holding single block will completely stop memory recycling.
 #[derive(Debug)]
-pub struct ArenaAllocator<T> {
+pub struct ArenaAllocator {
     memory_type: u32,
-    memory_properties: Properties,
+    memory_properties: vk::MemoryPropertyFlags,
     arena_size: u64,
     offset: u64,
-    arenas: VecDeque<Arena<T>>,
+    arenas: VecDeque<Arena>,
 }
 
 #[derive(Derivative)]
 #[derivative(Debug)]
-struct Arena<T> {
+struct Arena {
     used: u64,
     free: u64,
     #[derivative(Debug = "ignore")]
-    memory: Box<Memory<T>>,
+    memory: Box<Memory>,
     ptr: NonNull<u8>,
 }
 
-unsafe impl<T: Send> Send for Arena<T> {}
-unsafe impl<T: Sync> Sync for Arena<T> {}
+unsafe impl Send for Arena {}
+unsafe impl Sync for Arena {}
 
-impl<T: 'static> ArenaAllocator<T> {
+impl ArenaAllocator {
     /// Get properties required by the allocator.
-    pub fn properties_required() -> Properties {
-        Properties::HOST_VISIBLE
+    pub fn properties_required() -> vk::MemoryPropertyFlags {
+        vk::MemoryPropertyFlags::HOST_VISIBLE
     }
 
     /// Maximum allocation size.
@@ -139,8 +138,16 @@ impl<T: 'static> ArenaAllocator<T> {
     /// Create new `ArenaAllocator`
     /// for `memory_type` with `memory_properties` specified,
     /// with `ArenaConfig` provided.
-    pub fn new(memory_type: u32, memory_properties: Properties, config: ArenaConfig) -> Self {
-        assert!(memory_properties.contains(Self::properties_required()));
+    pub fn new(
+        memory_type: u32,
+        memory_properties: vk::MemoryPropertyFlags,
+        config: ArenaConfig,
+    ) -> Self {
+        info!(
+            "Create new 'arena' allocator: type: '{}', properties: '{}' config: '{:#?}'",
+            memory_type, memory_properties, config
+        );
+        assert!(memory_properties.subset(Self::properties_required()));
         assert!(
             fits_usize(config.arena_size),
             "Arena size must fit in both usize and u64"
@@ -155,22 +162,17 @@ impl<T: 'static> ArenaAllocator<T> {
     }
 
     /// Perform full cleanup of the memory allocated.
-    pub fn dispose<D>(mut self, device: &D)
-    where
-        D: Device<Memory = T>,
-    {
+    pub fn dispose(mut self, device: &impl DeviceV1_0) {
         self.cleanup(device, 0);
-        assert!(
-            self.arenas.is_empty(),
-            "Arenas are not empty during allocator disposal. Arenas: {:#?}",
-            self.arenas
-        );
+        if !self.arenas.is_empty() {
+            error!(
+                "Arenas are not empty during allocator disposal. Arenas: {:#?}",
+                self.arenas
+            );
+        }
     }
 
-    fn cleanup<D>(&mut self, device: &D, off: usize) -> u64
-    where
-        D: Device<Memory = T>,
-    {
+    fn cleanup(&mut self, device: &impl DeviceV1_0, off: usize) -> u64 {
         let mut freed = 0;
         while self.arenas.len() > off {
             if self.arenas[0].used > self.arenas[0].free {
@@ -180,31 +182,30 @@ impl<T: 'static> ArenaAllocator<T> {
             let arena = self.arenas.pop_front().unwrap();
 
             unsafe {
-                device.unmap(arena.memory.raw());
+                // trace!("Unmap memory: {:#?}", arena.memory);
+                device.unmap_memory(arena.memory.raw());
 
                 freed += arena.memory.size();
-                device.free(arena.memory.into_raw());
+                device.free_memory(arena.memory.raw(), None);
             }
         }
         freed
     }
 }
 
-impl<T: 'static> Allocator for ArenaAllocator<T> {
-    type Memory = T;
-
-    type Block = ArenaBlock<T>;
+impl Allocator for ArenaAllocator {
+    type Block = ArenaBlock;
 
-    fn alloc<D>(
+    fn alloc(
         &mut self,
-        device: &D,
+        device: &impl DeviceV1_0,
         size: u64,
         align: u64,
-    ) -> Result<(ArenaBlock<T>, u64), MemoryError>
-    where
-        D: Device<Memory = T>,
-    {
-        debug_assert!(self.memory_properties.host_visible());
+    ) -> Result<(ArenaBlock, u64), MemoryError> {
+        debug_assert!(
+            self.memory_properties
+                .subset(vk::MemoryPropertyFlags::HOST_VISIBLE)
+        );
 
         assert!(size <= self.arena_size);
         assert!(align <= self.arena_size);
@@ -234,12 +235,19 @@ impl<T: 'static> Allocator for ArenaAllocator<T> {
         }
 
         let (memory, ptr) = unsafe {
-            let raw = device.allocate(self.memory_type, self.arena_size)?;
-
-            let ptr = match device.map(&raw, 0..self.arena_size) {
-                Ok(ptr) => ptr,
+            let raw = device.allocate_memory(
+                &vk::MemoryAllocateInfo::builder()
+                    .memory_type_index(self.memory_type)
+                    .allocation_size(self.arena_size)
+                    .build(),
+                None,
+            )?;
+
+            let ptr = match device.map_memory(raw, 0, self.arena_size, vk::MemoryMapFlags::empty())
+            {
+                Ok(ptr) => NonNull::new_unchecked(ptr as *mut u8),
                 Err(error) => {
-                    device.free(raw);
+                    device.free_memory(raw, None);
                     return Err(error.into());
                 }
             };
@@ -271,10 +279,7 @@ impl<T: 'static> Allocator for ArenaAllocator<T> {
         Ok((block, self.arena_size))
     }
 
-    fn free<D>(&mut self, device: &D, block: Self::Block) -> u64
-    where
-        D: Device<Memory = T>,
-    {
+    fn free(&mut self, device: &impl DeviceV1_0, block: Self::Block) -> u64 {
         let index = block.arena_index - self.offset;
         assert!(
             fits_usize(index),
diff --git a/memory/src/allocator/dedicated.rs b/memory/src/allocator/dedicated.rs
index 386185c0..7cfbeb34 100644
--- a/memory/src/allocator/dedicated.rs
+++ b/memory/src/allocator/dedicated.rs
@@ -1,32 +1,33 @@
-use std::{marker::PhantomData, ops::Range, ptr::NonNull};
+use std::{ops::Range, ptr::NonNull};
+
+use ash::{version::DeviceV1_0, vk};
 
 use allocator::Allocator;
 use block::Block;
-use device::Device;
 use error::*;
 use mapping::{mapped_fitting_range, MappedRange};
 use memory::*;
 
 /// Memory block allocated from `DedicatedAllocator`
 #[derive(Debug)]
-pub struct DedicatedBlock<T> {
-    memory: Memory<T>,
+pub struct DedicatedBlock {
+    memory: Memory,
     mapping: Option<(NonNull<u8>, Range<u64>)>,
 }
 
-unsafe impl<T: Send> Send for DedicatedBlock<T> {}
-unsafe impl<T: Sync> Sync for DedicatedBlock<T> {}
+unsafe impl Send for DedicatedBlock {}
+unsafe impl Sync for DedicatedBlock {}
 
-impl<T> DedicatedBlock<T> {
+impl DedicatedBlock {
     /// Get inner memory.
     /// Panics if mapped.
-    pub fn unwrap_memory(self) -> Memory<T> {
+    pub fn unwrap_memory(self) -> Memory {
         assert!(self.mapping.is_none());
         self.memory
     }
 
     /// Make unmapped block.
-    pub fn from_memory(memory: Memory<T>) -> Self {
+    pub fn from_memory(memory: Memory) -> Self {
         DedicatedBlock {
             memory,
             mapping: None,
@@ -34,16 +35,14 @@ impl<T> DedicatedBlock<T> {
     }
 }
 
-impl<T: 'static> Block for DedicatedBlock<T> {
-    type Memory = T;
-
+impl Block for DedicatedBlock {
     #[inline]
-    fn properties(&self) -> Properties {
+    fn properties(&self) -> vk::MemoryPropertyFlags {
         self.memory.properties()
     }
 
     #[inline]
-    fn memory(&self) -> &T {
+    fn memory(&self) -> vk::DeviceMemory {
         self.memory.raw()
     }
 
@@ -52,14 +51,11 @@ impl<T: 'static> Block for DedicatedBlock<T> {
         0..self.memory.size()
     }
 
-    fn map<'a, D>(
+    fn map<'a>(
         &'a mut self,
-        device: &D,
+        device: &impl DeviceV1_0,
         range: Range<u64>,
-    ) -> Result<MappedRange<'a, T>, MappingError>
-    where
-        D: Device<Memory = T>,
-    {
+    ) -> Result<MappedRange<'a>, MappingError> {
         assert!(
             range.start <= range.end,
             "Memory mapping region must have valid size"
@@ -73,9 +69,7 @@ impl<T: 'static> Block for DedicatedBlock<T> {
             {
                 Ok(MappedRange::from_raw(&self.memory, ptr, range))
             } else {
-                if self.mapping.take().is_some() {
-                    device.unmap(&self.memory.raw());
-                }
+                self.unmap(device);
                 let mapping = MappedRange::new(&self.memory, device, range.clone())?;
                 self.mapping = Some((mapping.ptr(), mapping.range()));
                 Ok(mapping)
@@ -83,13 +77,11 @@ impl<T: 'static> Block for DedicatedBlock<T> {
         }
     }
 
-    fn unmap<D>(&mut self, device: &D)
-    where
-        D: Device<Memory = T>,
-    {
+    fn unmap(&mut self, device: &impl DeviceV1_0) {
         if self.mapping.take().is_some() {
             unsafe {
-                device.unmap(self.memory());
+                // trace!("Unmap memory: {:#?}", self.memory);
+                device.unmap_memory(self.memory.raw());
             }
         }
     }
@@ -103,48 +95,48 @@ impl<T: 'static> Block for DedicatedBlock<T> {
 /// `Heaps` use this allocator when none of sub-allocators bound to the memory type
 /// can handle size required.
 #[derive(Debug)]
-pub struct DedicatedAllocator<T> {
+pub struct DedicatedAllocator {
     memory_type: u32,
-    memory_properties: Properties,
+    memory_properties: vk::MemoryPropertyFlags,
     used: u64,
-    pd: PhantomData<T>,
 }
 
-impl<T> DedicatedAllocator<T> {
+impl DedicatedAllocator {
     /// Get properties required by the allocator.
-    pub fn properties_required() -> Properties {
-        Properties::empty()
+    pub fn properties_required() -> vk::MemoryPropertyFlags {
+        vk::MemoryPropertyFlags::empty()
     }
 
     /// Create new `ArenaAllocator`
     /// for `memory_type` with `memory_properties` specified
-    pub fn new(memory_type: u32, memory_properties: Properties) -> Self {
+    pub fn new(memory_type: u32, memory_properties: vk::MemoryPropertyFlags) -> Self {
         DedicatedAllocator {
             memory_type,
             memory_properties,
             used: 0,
-            pd: PhantomData,
         }
     }
 }
 
-impl<T: 'static> Allocator for DedicatedAllocator<T> {
-    type Memory = T;
-    type Block = DedicatedBlock<T>;
+impl Allocator for DedicatedAllocator {
+    type Block = DedicatedBlock;
 
     #[inline]
-    fn alloc<D>(
+    fn alloc(
         &mut self,
-        device: &D,
+        device: &impl DeviceV1_0,
         size: u64,
         _align: u64,
-    ) -> Result<(DedicatedBlock<T>, u64), MemoryError>
-    where
-        D: Device<Memory = T>,
-    {
+    ) -> Result<(DedicatedBlock, u64), MemoryError> {
         let memory = unsafe {
             Memory::from_raw(
-                device.allocate(self.memory_type, size)?,
+                device.allocate_memory(
+                    &vk::MemoryAllocateInfo::builder()
+                        .memory_type_index(self.memory_type)
+                        .allocation_size(size)
+                        .build(),
+                    None,
+                )?,
                 size,
                 self.memory_properties,
             )
@@ -156,21 +148,18 @@ impl<T: 'static> Allocator for DedicatedAllocator<T> {
     }
 
     #[inline]
-    fn free<D>(&mut self, device: &D, mut block: DedicatedBlock<T>) -> u64
-    where
-        D: Device<Memory = T>,
-    {
+    fn free(&mut self, device: &impl DeviceV1_0, mut block: DedicatedBlock) -> u64 {
         block.unmap(device);
         let size = block.memory.size();
         self.used -= size;
         unsafe {
-            device.free(block.memory.into_raw());
+            device.free_memory(block.memory.raw(), None);
         }
         size
     }
 }
 
-impl<T> Drop for DedicatedAllocator<T> {
+impl Drop for DedicatedAllocator {
     fn drop(&mut self) {
         assert_eq!(self.used, 0);
     }
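
For orientation, here is a minimal round-trip through the reworked dedicated allocator written against the signatures above. This is a sketch only: the crate is assumed to be imported as `rendy_memory` with `DedicatedAllocator` re-exported from its `allocator` module, and the `device`, memory type index and property flags are assumed to come from the caller's ash setup.

```rust
use ash::{version::DeviceV1_0, vk};
use rendy_memory::allocator::{Allocator, DedicatedAllocator};
use rendy_memory::MemoryError;

fn dedicated_round_trip(
    device: &impl DeviceV1_0,
    memory_type: u32,
    properties: vk::MemoryPropertyFlags,
) -> Result<(), MemoryError> {
    let mut allocator = DedicatedAllocator::new(memory_type, properties);
    // Each allocation maps directly to one `vkAllocateMemory` call.
    let (block, _allocated) = allocator.alloc(device, 1024, 256)?;
    // Freeing unmaps (if mapped) and calls `vkFreeMemory`; the allocator's
    // `Drop` impl then asserts that everything was returned.
    let _released = allocator.free(device, block);
    Ok(())
}
```
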
diff --git a/memory/src/allocator/dynamic.rs b/memory/src/allocator/dynamic.rs
index 235e1b86..f16b3cbd 100644
--- a/memory/src/allocator/dynamic.rs
+++ b/memory/src/allocator/dynamic.rs
@@ -1,11 +1,12 @@
+use std::{ops::Range, ptr::NonNull};
+
+use ash::{version::DeviceV1_0, vk};
 use hibitset::{BitSet, BitSetLike};
 use relevant::Relevant;
-use std::{fmt::Debug, ops::Range, ptr::NonNull};
 use veclist::VecList;
 
 use allocator::Allocator;
 use block::Block;
-use device::Device;
 use error::*;
 use mapping::*;
 use memory::*;
@@ -14,21 +15,21 @@ use util::*;
 /// Memory block allocated from `DynamicAllocator`
 #[derive(Derivative)]
 #[derivative(Debug)]
-pub struct DynamicBlock<T> {
+pub struct DynamicBlock {
     index: u32,
-    #[derivative(Debug(bound = "T: Debug", format_with = "super::memory_ptr_fmt"))]
-    memory: *const Memory<T>,
+    // #[derivative(Debug(format_with = "super::memory_ptr_fmt"))]
+    memory: *const Memory,
     ptr: Option<NonNull<u8>>,
     range: Range<u64>,
     #[derivative(Debug = "ignore")]
     relevant: Relevant,
 }
 
-unsafe impl<T: Send> Send for DynamicBlock<T> {}
-unsafe impl<T: Sync> Sync for DynamicBlock<T> {}
+unsafe impl Send for DynamicBlock {}
+unsafe impl Sync for DynamicBlock {}
 
-impl<T> DynamicBlock<T> {
-    fn shared_memory(&self) -> &Memory<T> {
+impl DynamicBlock {
+    fn shared_memory(&self) -> &Memory {
         // Memory won't be freed until last block created from it deallocated.
         unsafe { &*self.memory }
     }
@@ -42,16 +43,14 @@ impl<T> DynamicBlock<T> {
     }
 }
 
-impl<T: 'static> Block for DynamicBlock<T> {
-    type Memory = T;
-
+impl Block for DynamicBlock {
     #[inline]
-    fn properties(&self) -> Properties {
+    fn properties(&self) -> vk::MemoryPropertyFlags {
         self.shared_memory().properties()
     }
 
     #[inline]
-    fn memory(&self) -> &T {
+    fn memory(&self) -> vk::DeviceMemory {
         self.shared_memory().raw()
     }
 
@@ -61,11 +60,11 @@ impl<T: 'static> Block for DynamicBlock<T> {
     }
 
     #[inline]
-    fn map<'a, D>(
+    fn map<'a>(
         &'a mut self,
-        _device: &D,
+        _device: &impl DeviceV1_0,
         range: Range<u64>,
-    ) -> Result<MappedRange<'a, T>, MappingError> {
+    ) -> Result<MappedRange<'a>, MappingError> {
         assert!(
             range.start <= range.end,
             "Memory mapping region must have valid size"
@@ -87,7 +86,7 @@ impl<T: 'static> Block for DynamicBlock<T> {
     }
 
     #[inline]
-    fn unmap<D>(&mut self, _device: &D) {}
+    fn unmap(&mut self, _device: &impl DeviceV1_0) {}
 }
 
 /// Config for `DynamicAllocator`.
@@ -112,12 +111,12 @@ pub struct DynamicConfig {
 /// Every freed block can be recycled independently.
 /// Memory objects can be returned to the system if whole memory object become unused (not implemented yet).
 #[derive(Debug)]
-pub struct DynamicAllocator<T> {
+pub struct DynamicAllocator {
     /// Memory type that this allocator allocates.
     memory_type: u32,
 
     /// Memory properties of the memory type.
-    memory_properties: Properties,
+    memory_properties: vk::MemoryPropertyFlags,
 
     /// Number of blocks per chunk.
     blocks_per_chunk: u32,
@@ -127,14 +126,14 @@ pub struct DynamicAllocator<T> {
 
     /// List of chunk lists.
     /// Each index corresponds to `block_size_granularity * index` size.
-    sizes: Vec<Size<T>>,
+    sizes: Vec<Size>,
 }
 
 /// List of chunks
 #[derive(Debug)]
-struct Size<T> {
+struct Size {
     /// List of chunks.
-    chunks: VecList<Chunk<T>>,
+    chunks: VecList<Chunk>,
 
     /// Total chunks count.
     total_chunks: u32,
@@ -143,10 +142,10 @@ struct Size<T> {
     blocks: BitSet,
 }
 
-impl<T: 'static> DynamicAllocator<T> {
+impl DynamicAllocator {
     /// Get properties required by the allocator.
-    pub fn properties_required() -> Properties {
-        Properties::empty()
+    pub fn properties_required() -> vk::MemoryPropertyFlags {
+        vk::MemoryPropertyFlags::empty()
     }
 
     /// Maximum allocation size.
@@ -157,9 +156,17 @@ impl<T: 'static> DynamicAllocator<T> {
     /// Create new `ArenaAllocator`
     /// for `memory_type` with `memory_properties` specified,
     /// with `ArenaConfig` provided.
-    pub fn new(memory_type: u32, memory_properties: Properties, mut config: DynamicConfig) -> Self {
+    pub fn new(
+        memory_type: u32,
+        memory_properties: vk::MemoryPropertyFlags,
+        mut config: DynamicConfig,
+    ) -> Self {
+        info!(
+            "Create new allocator: type: '{}', properties: '{:#?}', config: '{:#?}'",
+            memory_type, memory_properties, config
+        );
         // This is hack to simplify implementation of chunk cleaning.
-        config.blocks_per_chunk = ::std::mem::size_of::<usize>() as u32 * 8;
+        config.blocks_per_chunk = std::mem::size_of::<usize>() as u32 * 8;
 
         assert_ne!(
             config.block_size_granularity, 0,
@@ -170,7 +177,7 @@ impl<T: 'static> DynamicAllocator<T> {
             .max_block_size
             .checked_mul(config.blocks_per_chunk.into())
             .expect("Max chunk size must fit u64 to allocate it from Vulkan");
-        if memory_properties.host_visible() {
+        if memory_properties.subset(vk::MemoryPropertyFlags::HOST_VISIBLE) {
             assert!(
                 fits_usize(max_chunk_size),
                 "Max chunk size must fit usize for mapping"
@@ -223,21 +230,34 @@ impl<T: 'static> DynamicAllocator<T> {
     }
 
     /// Allocate super-block to use as chunk memory.
-    fn alloc_chunk<D>(&mut self, device: &D, size: u64) -> Result<(Chunk<T>, u64), MemoryError>
-    where
-        D: Device<Memory = T>,
-    {
+    fn alloc_chunk(
+        &mut self,
+        device: &impl DeviceV1_0,
+        size: u64,
+    ) -> Result<(Chunk, u64), MemoryError> {
+        // trace!("Allocate new chunk: size: {}", size);
         if size > self.max_block_size() {
             // Allocate from device.
             let (memory, mapping) = unsafe {
                 // Valid memory type specified.
-                let raw = device.allocate(self.memory_type, size)?;
-
-                let mapping = if self.memory_properties.host_visible() {
-                    match device.map(&raw, 0..size) {
-                        Ok(mapping) => Some(mapping),
+                let raw = device.allocate_memory(
+                    &vk::MemoryAllocateInfo {
+                        memory_type_index: self.memory_type,
+                        allocation_size: size,
+                        ..Default::default()
+                    },
+                    None,
+                )?;
+
+                let mapping = if self
+                    .memory_properties
+                    .subset(vk::MemoryPropertyFlags::HOST_VISIBLE)
+                {
+                    // trace!("Map new memory object");
+                    match device.map_memory(raw, 0, size, vk::MemoryMapFlags::empty()) {
+                        Ok(mapping) => Some(NonNull::new_unchecked(mapping as *mut u8)),
                         Err(error) => {
-                            device.free(raw);
+                            device.free_memory(raw, None);
                             return Err(error.into());
                         }
                     }
@@ -257,16 +277,21 @@ impl<T: 'static> DynamicAllocator<T> {
 
     /// Allocate super-block to use as chunk memory.
     #[warn(dead_code)]
-    fn free_chunk<D>(&mut self, device: &D, chunk: Chunk<T>) -> u64
-    where
-        D: Device<Memory = T>,
-    {
+    fn free_chunk(&mut self, device: &impl DeviceV1_0, chunk: Chunk) -> u64 {
+        // trace!("Free chunk: {:#?}", chunk);
         match chunk {
             Chunk::Dedicated(boxed, _) => {
                 let size = boxed.size();
                 unsafe {
-                    device.unmap(boxed.raw());
-                    device.free(boxed.into_raw());
+                    if self
+                        .memory_properties
+                        .subset(vk::MemoryPropertyFlags::HOST_VISIBLE)
+                    {
+                        // trace!("Unmap memory: {:#?}", boxed);
+                        device.unmap_memory(boxed.raw());
+                    }
+                    device.free_memory(boxed.raw(), None);
+                    boxed.dispose();
                 }
                 size
             }
@@ -275,14 +300,12 @@ impl<T: 'static> DynamicAllocator<T> {
     }
 
     /// Allocate from chunk.
-    fn alloc_from_chunk<D>(
+    fn alloc_from_chunk(
         &mut self,
-        device: &D,
+        device: &impl DeviceV1_0,
         size: u64,
-    ) -> Result<(DynamicBlock<T>, u64), MemoryError>
-    where
-        D: Device<Memory = T>,
-    {
+    ) -> Result<(DynamicBlock, u64), MemoryError> {
+        // trace!("Allocate block. type: {}, size: {}", self.memory_type, size);
         let size_index = self.size_index(size);
         let (block_index, allocated) = match (&self.sizes[size_index].blocks).iter().next() {
             Some(block_index) => {
@@ -330,21 +353,24 @@ impl<T: 'static> DynamicAllocator<T> {
             allocated,
         ))
     }
+
+    /// Dispose of the allocator. Panics if any chunk is still allocated.
+    pub fn dispose(self) {
+        for size in self.sizes {
+            assert_eq!(size.total_chunks, 0);
+        }
+    }
 }
 
-impl<T: 'static> Allocator for DynamicAllocator<T> {
-    type Memory = T;
-    type Block = DynamicBlock<T>;
+impl Allocator for DynamicAllocator {
+    type Block = DynamicBlock;
 
-    fn alloc<D>(
+    fn alloc(
         &mut self,
-        device: &D,
+        device: &impl DeviceV1_0,
         size: u64,
         align: u64,
-    ) -> Result<(DynamicBlock<T>, u64), MemoryError>
-    where
-        D: Device<Memory = T>,
-    {
+    ) -> Result<(DynamicBlock, u64), MemoryError> {
         use std::cmp::max;
         let size = max(size, align);
 
@@ -352,10 +378,8 @@ impl<T: 'static> Allocator for DynamicAllocator<T> {
         self.alloc_from_chunk(device, size)
     }
 
-    fn free<D>(&mut self, device: &D, block: DynamicBlock<T>) -> u64
-    where
-        D: Device<Memory = T>,
-    {
+    fn free(&mut self, device: &impl DeviceV1_0, block: DynamicBlock) -> u64 {
+        // trace!("Free block: {:#?}", block);
         let size_index = self.size_index(block.size());
         let block_index = block.index;
         block.dispose();
@@ -376,6 +400,7 @@ impl<T: 'static> Allocator for DynamicAllocator<T> {
                 .chunks
                 .pop(chunk_index as usize)
                 .expect("Chunk must exist");
+            self.sizes[size_index].total_chunks -= 1;
             self.free_chunk(device, chunk)
         } else {
             0
@@ -385,19 +410,19 @@ impl<T: 'static> Allocator for DynamicAllocator<T> {
 
 /// Block allocated for chunk.
 #[derive(Debug)]
-enum Chunk<T> {
+enum Chunk {
     /// Allocated from device.
-    Dedicated(Box<Memory<T>>, Option<NonNull<u8>>),
+    Dedicated(Box<Memory>, Option<NonNull<u8>>),
 
     /// Allocated from chunk of bigger blocks.
-    Dynamic(DynamicBlock<T>),
+    Dynamic(DynamicBlock),
 }
 
-unsafe impl<T: Send> Send for Chunk<T> {}
-unsafe impl<T: Sync> Sync for Chunk<T> {}
+unsafe impl Send for Chunk {}
+unsafe impl Sync for Chunk {}
 
-impl<T: 'static> Chunk<T> {
-    fn shared_memory(&self) -> &Memory<T> {
+impl Chunk {
+    fn shared_memory(&self) -> &Memory {
         match self {
             Chunk::Dedicated(boxed, _) => &*boxed,
             Chunk::Dynamic(chunk_block) => chunk_block.shared_memory(),
@@ -420,7 +445,7 @@ impl<T: 'static> Chunk<T> {
 }
 
 fn max_blocks_per_size() -> u32 {
-    let value = (::std::mem::size_of::<usize>() * 8).pow(4);
+    let value = (std::mem::size_of::<usize>() * 8).pow(4);
     assert!(fits_u32(value));
     value as u32
 }
@@ -428,7 +453,7 @@ fn max_blocks_per_size() -> u32 {
 fn check_bit_range_set(bitset: &BitSet, range: Range<u32>) -> bool {
     debug_assert!(range.start <= range.end);
     use hibitset::BitSetLike;
-    let layer_size = ::std::mem::size_of::<usize>() as u32 * 8;
+    let layer_size = std::mem::size_of::<usize>() as u32 * 8;
 
     assert_eq!(
         range.start % layer_size,
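
A similar sketch for the dynamic allocator, mainly to show where the new `dispose` and the `total_chunks` bookkeeping fix come into play. It assumes the same `rendy_memory` paths as above; `DynamicConfig` is taken as a parameter because its exact field set is defined elsewhere in this file.

```rust
use ash::{version::DeviceV1_0, vk};
use rendy_memory::allocator::{Allocator, DynamicAllocator, DynamicConfig};
use rendy_memory::MemoryError;

fn dynamic_round_trip(
    device: &impl DeviceV1_0,
    memory_type: u32,
    properties: vk::MemoryPropertyFlags,
    config: DynamicConfig,
) -> Result<(), MemoryError> {
    let mut allocator = DynamicAllocator::new(memory_type, properties, config);
    let (block, _) = allocator.alloc(device, 4096, 64)?;
    // Freeing the last block of a chunk now also decrements `total_chunks`,
    // which is what lets `dispose` check that every chunk was returned.
    allocator.free(device, block);
    allocator.dispose();
    Ok(())
}
```
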
diff --git a/memory/src/allocator/mod.rs b/memory/src/allocator/mod.rs
index 75d97e2d..54769655 100644
--- a/memory/src/allocator/mod.rs
+++ b/memory/src/allocator/mod.rs
@@ -1,16 +1,13 @@
 //! This module provides `Allocator` trait and few allocators that implements the trait.
 
-use std::{any::Any, fmt};
+use ash::version::DeviceV1_0;
 
 mod arena;
 mod dedicated;
 mod dynamic;
-// mod chunk;
 
 use block::Block;
-use device::Device;
 use error::MemoryError;
-use memory::Memory;
 
 pub use self::{
     arena::{ArenaAllocator, ArenaBlock, ArenaConfig},
@@ -20,39 +17,19 @@ pub use self::{
 
 /// Allocator trait implemented for various allocators.
 pub trait Allocator {
-    /// Memory type.
-    type Memory: Any;
-
     /// Block type returned by allocator.
-    type Block: Block<Memory = Self::Memory>;
+    type Block: Block;
 
     /// Allocate block of memory.
     /// On success returns allocated block and amount of memory consumed from device.
-    fn alloc<D>(
+    fn alloc(
         &mut self,
-        device: &D,
+        device: &impl DeviceV1_0,
         size: u64,
         align: u64,
-    ) -> Result<(Self::Block, u64), MemoryError>
-    where
-        D: Device<Memory = Self::Memory>;
+    ) -> Result<(Self::Block, u64), MemoryError>;
 
     /// Free block of memory.
     /// Returns amount of memory returned to the device.
-    fn free<D>(&mut self, device: &D, block: Self::Block) -> u64
-    where
-        D: Device<Memory = Self::Memory>;
-}
-
-fn memory_ptr_fmt<T: fmt::Debug>(
-    memory: &*const Memory<T>,
-    fmt: &mut fmt::Formatter<'_>,
-) -> Result<(), fmt::Error> {
-    unsafe {
-        if fmt.alternate() {
-            write!(fmt, "*const {:#?}", **memory)
-        } else {
-            write!(fmt, "*const {:?}", **memory)
-        }
-    }
+    fn free(&mut self, device: &impl DeviceV1_0, block: Self::Block) -> u64;
 }
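
With the `Memory` associated type gone, generic code only needs the block type and an ash device handle. A hypothetical helper over any `Allocator` (paths assumed as above):

```rust
use ash::version::DeviceV1_0;
use rendy_memory::allocator::Allocator;
use rendy_memory::MemoryError;

/// Allocate a block, hand it to `f`, then return it to the allocator.
fn with_block<A, R>(
    allocator: &mut A,
    device: &impl DeviceV1_0,
    size: u64,
    align: u64,
    f: impl FnOnce(&mut A::Block) -> R,
) -> Result<R, MemoryError>
where
    A: Allocator,
{
    let (mut block, _allocated) = allocator.alloc(device, size, align)?;
    let result = f(&mut block);
    allocator.free(device, block);
    Ok(result)
}
```
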
diff --git a/memory/src/block.rs b/memory/src/block.rs
index 2e598c2b..2b4afda0 100644
--- a/memory/src/block.rs
+++ b/memory/src/block.rs
@@ -1,39 +1,33 @@
-use device::Device;
+use std::ops::Range;
+
+use ash::{version::DeviceV1_0, vk};
+
 use error::MappingError;
 use mapping::MappedRange;
-use memory::Properties;
-use std::{any::Any, ops::Range};
 
 /// Block that owns a `Range` of the `Memory`.
 /// Implementor must ensure that there can't be any other blocks
 /// with overlapping range (either through type system or safety notes for unsafe functions).
 /// Provides access to safe memory range mapping.
 pub trait Block {
-    /// Memory type.
-    type Memory: Any;
-
     /// Get memory properties of the block.
-    fn properties(&self) -> Properties;
+    fn properties(&self) -> vk::MemoryPropertyFlags;
 
     /// Get raw memory object.
-    fn memory(&self) -> &Self::Memory;
+    fn memory(&self) -> vk::DeviceMemory;
 
     /// Get memory range owned by this block.
     fn range(&self) -> Range<u64>;
 
     /// Get mapping for the buffer range.
     /// Memory writes to the region performed by device become available for the host.
-    fn map<'a, D>(
+    fn map<'a>(
         &'a mut self,
-        device: &D,
+        device: &impl DeviceV1_0,
         range: Range<u64>,
-    ) -> Result<MappedRange<'a, Self::Memory>, MappingError>
-    where
-        D: Device<Memory = Self::Memory>;
+    ) -> Result<MappedRange<'a>, MappingError>;
 
     /// Release memory mapping. Must be called after successful `map` call.
     /// No-op if block is not mapped.
-    fn unmap<D>(&mut self, device: &D)
-    where
-        D: Device<Memory = Self::Memory>;
+    fn unmap(&mut self, device: &impl DeviceV1_0);
 }
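
`Block` now speaks raw Vulkan types directly. A small sketch of code written against the new signatures, assuming the re-exports in `lib.rs`:

```rust
use std::ops::Range;

use ash::vk;
use rendy_memory::Block;

/// Report the raw handle, owned range and host visibility of any block.
fn describe_block<B: Block>(block: &B) -> (vk::DeviceMemory, Range<u64>, bool) {
    let host_visible = block
        .properties()
        .subset(vk::MemoryPropertyFlags::HOST_VISIBLE);
    (block.memory(), block.range(), host_visible)
}
```
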
diff --git a/memory/src/device.rs b/memory/src/device.rs
deleted file mode 100644
index 02ecbd70..00000000
--- a/memory/src/device.rs
+++ /dev/null
@@ -1,42 +0,0 @@
-use error::*;
-use std::{ops::Range, ptr::NonNull};
-
-/// Trait for memory allocation and mapping.
-pub trait Device: Sized {
-    /// Memory type that can be used with this device.
-    type Memory: 'static;
-
-    /// Allocate memory object.
-    ///
-    /// # Parameters
-    /// `size`  - size of the memory object to allocate.
-    /// `index` - memory type index.
-    unsafe fn allocate(&self, index: u32, size: u64) -> Result<Self::Memory, AllocationError>;
-
-    /// Free memory object.
-    unsafe fn free(&self, memory: Self::Memory);
-
-    /// Map memory range.
-    /// Only one range for the given memory object can be mapped.
-    unsafe fn map(
-        &self,
-        memory: &Self::Memory,
-        range: Range<u64>,
-    ) -> Result<NonNull<u8>, MappingError>;
-
-    /// Unmap memory.
-    unsafe fn unmap(&self, memory: &Self::Memory);
-
-    /// Invalidate mapped regions guaranteeing that device writes to the memory,
-    /// which have been made visible to the host-write and host-read access types, are made visible to the host
-    unsafe fn invalidate<'a>(
-        &self,
-        regions: impl IntoIterator<Item = (&'a Self::Memory, Range<u64>)>,
-    ) -> Result<(), OutOfMemoryError>;
-
-    /// Flush mapped regions guaranteeing that host writes to the memory can be made available to device access
-    unsafe fn flush<'a>(
-        &self,
-        regions: impl IntoIterator<Item = (&'a Self::Memory, Range<u64>)>,
-    ) -> Result<(), OutOfMemoryError>;
-}
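
With the `Device` trait removed, each of its operations is replaced by the corresponding `ash::version::DeviceV1_0` call, as the allocators above already do. For example, the old `allocate` roughly becomes the following hypothetical free function, mirroring the calls used elsewhere in this patch:

```rust
use ash::{version::DeviceV1_0, vk};

/// Raw replacement for the removed `Device::allocate`.
unsafe fn allocate_raw(
    device: &impl DeviceV1_0,
    memory_type_index: u32,
    size: u64,
) -> Result<vk::DeviceMemory, vk::Result> {
    device.allocate_memory(
        &vk::MemoryAllocateInfo::builder()
            .memory_type_index(memory_type_index)
            .allocation_size(size)
            .build(),
        None,
    )
}
```
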
diff --git a/memory/src/error.rs b/memory/src/error.rs
index f9b02567..c0000c4e 100644
--- a/memory/src/error.rs
+++ b/memory/src/error.rs
@@ -1,4 +1,5 @@
-use usage::UsageValue;
+use ash;
+use usage::MemoryUsageValue;
 
 /// Typical memory error - out of available memory.
 #[derive(Clone, Copy, Debug, Fail)]
@@ -79,7 +80,7 @@ pub enum AllocationError {
         _0,
         _1
     )]
-    NoSuitableMemory(u32, UsageValue),
+    NoSuitableMemory(u32, MemoryUsageValue),
 }
 
 impl From<OutOfMemoryError> for AllocationError {
@@ -121,3 +122,55 @@ impl From<MappingError> for MemoryError {
         MemoryError::MappingError(error)
     }
 }
+
+impl From<ash::vk::Result> for OutOfMemoryError {
+    fn from(result: ash::vk::Result) -> OutOfMemoryError {
+        match result {
+            ash::vk::Result::SUCCESS => panic!("Unexpected success"),
+            ash::vk::Result::ERROR_OUT_OF_HOST_MEMORY => OutOfMemoryError::OutOfHostMemory,
+            ash::vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => OutOfMemoryError::OutOfDeviceMemory,
+            _ => panic!("unexpected error"),
+        }
+    }
+}
+
+impl From<ash::vk::Result> for MappingError {
+    fn from(result: ash::vk::Result) -> MappingError {
+        match result {
+            ash::vk::Result::SUCCESS => panic!("Unexpected success"),
+            ash::vk::Result::ERROR_OUT_OF_HOST_MEMORY => OutOfMemoryError::OutOfHostMemory.into(),
+            ash::vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => {
+                OutOfMemoryError::OutOfDeviceMemory.into()
+            }
+            ash::vk::Result::ERROR_MEMORY_MAP_FAILED => MappingError::MappingFailed,
+            _ => panic!("unexpected error"),
+        }
+    }
+}
+
+impl From<ash::vk::Result> for AllocationError {
+    fn from(result: ash::vk::Result) -> AllocationError {
+        match result {
+            ash::vk::Result::SUCCESS => panic!("Unexpected success"),
+            ash::vk::Result::ERROR_OUT_OF_HOST_MEMORY => OutOfMemoryError::OutOfHostMemory.into(),
+            ash::vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => {
+                OutOfMemoryError::OutOfDeviceMemory.into()
+            }
+            _ => panic!("unexpected error"),
+        }
+    }
+}
+
+impl From<ash::vk::Result> for MemoryError {
+    fn from(result: ash::vk::Result) -> MemoryError {
+        match result {
+            ash::vk::Result::SUCCESS => panic!("Unexpected success"),
+            ash::vk::Result::ERROR_OUT_OF_HOST_MEMORY => OutOfMemoryError::OutOfHostMemory.into(),
+            ash::vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => {
+                OutOfMemoryError::OutOfDeviceMemory.into()
+            }
+            ash::vk::Result::ERROR_MEMORY_MAP_FAILED => MappingError::MappingFailed.into(),
+            _ => panic!("unexpected error"),
+        }
+    }
+}
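
These conversions are what let the allocators apply `?` directly to raw ash calls. A hedged sketch of the intended usage (names assumed):

```rust
use ash::{version::DeviceV1_0, vk};
use rendy_memory::AllocationError;

unsafe fn try_allocate(
    device: &impl DeviceV1_0,
    memory_type_index: u32,
    size: u64,
) -> Result<vk::DeviceMemory, AllocationError> {
    let info = vk::MemoryAllocateInfo::builder()
        .memory_type_index(memory_type_index)
        .allocation_size(size)
        .build();
    // `ERROR_OUT_OF_HOST_MEMORY` / `ERROR_OUT_OF_DEVICE_MEMORY` become
    // `OutOfMemoryError` variants and then `AllocationError` via the impls above.
    Ok(device.allocate_memory(&info, None)?)
}
```
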
diff --git a/memory/src/heaps.rs b/memory/src/heaps.rs
index 545d4ee6..b490e881 100644
--- a/memory/src/heaps.rs
+++ b/memory/src/heaps.rs
@@ -1,40 +1,39 @@
 use std::ops::Range;
 
+use ash::{version::DeviceV1_0, vk};
+
 use allocator::*;
 use smallvec::SmallVec;
 
 use block::Block;
-use device::Device;
 use error::*;
 use mapping::*;
-use memory::*;
-use usage::{Usage, UsageValue};
+use usage::{MemoryUsage, MemoryUsageValue};
 use util::*;
 
 /// Config for `Heaps` allocator.
 #[derive(Clone, Copy, Debug)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
-pub struct Config {
+pub struct HeapsConfig {
     /// Config for arena sub-allocator.
     pub arena: Option<ArenaConfig>,
 
     /// Config for dynamic sub-allocator.
     pub dynamic: Option<DynamicConfig>,
-    // chunk: Option<ChunkConfig>,
 }
 
 /// Heaps available on particular physical device.
 #[derive(Debug)]
-pub struct Heaps<T> {
-    types: Vec<MemoryType<T>>,
+pub struct Heaps {
+    types: Vec<MemoryType>,
     heaps: Vec<MemoryHeap>,
 }
 
-impl<T: 'static> Heaps<T> {
-    /// This must be called with `Properties` fetched from physical device.
+impl Heaps {
+    /// This must be called with `vk::MemoryPropertyFlags` fetched from the physical device.
     pub unsafe fn new<P, H>(types: P, heaps: H) -> Self
     where
-        P: IntoIterator<Item = (Properties, u32, Config)>,
+        P: IntoIterator<Item = (vk::MemoryPropertyFlags, u32, HeapsConfig)>,
         H: IntoIterator<Item = u64>,
     {
         let heaps = heaps
@@ -68,18 +67,14 @@ impl<T: 'static> Heaps<T> {
     /// for intended `usage`,
     /// with `size`
     /// and `align` requirements.
-    pub fn allocate<D, U>(
+    pub fn allocate(
         &mut self,
-        device: &D,
+        device: &impl DeviceV1_0,
         mask: u32,
-        usage: U,
+        usage: impl MemoryUsage,
         size: u64,
         align: u64,
-    ) -> Result<MemoryBlock<T>, MemoryError>
-    where
-        D: Device<Memory = T>,
-        U: Usage,
-    {
+    ) -> Result<MemoryBlock, MemoryError> {
         debug_assert!(fits_u32(self.types.len()));
 
         let (memory_index, _, _) = {
@@ -105,7 +100,7 @@ impl<T: 'static> Heaps<T> {
                 .ok_or(OutOfMemoryError::HeapsExhausted)?
         };
 
-        self.allocate_from::<D, U>(device, memory_index as u32, usage, size, align)
+        self.allocate_from(device, memory_index as u32, usage, size, align)
     }
 
     /// Allocate memory block
@@ -113,18 +108,15 @@ impl<T: 'static> Heaps<T> {
     /// for intended `usage`,
     /// with `size`
     /// and `align` requirements.
-    fn allocate_from<D, U>(
+    fn allocate_from(
         &mut self,
-        device: &D,
+        device: &impl DeviceV1_0,
         memory_index: u32,
-        usage: U,
+        usage: impl MemoryUsage,
         size: u64,
         align: u64,
-    ) -> Result<MemoryBlock<T>, MemoryError>
-    where
-        D: Device<Memory = T>,
-        U: Usage,
-    {
+    ) -> Result<MemoryBlock, MemoryError> {
+        // trace!("Alloc block: type '{}', usage '{:#?}', size: '{}', align: '{}'", memory_index, usage.value(), size, align);
         assert!(fits_usize(memory_index));
 
         let ref mut memory_type = self.types[memory_index as usize];
@@ -146,10 +138,8 @@ impl<T: 'static> Heaps<T> {
     /// Free memory block.
     ///
     /// Memory block must be allocated from this heap.
-    pub fn free<D>(&mut self, device: &D, block: MemoryBlock<T>)
-    where
-        D: Device<Memory = T>,
-    {
+    pub fn free(&mut self, device: &impl DeviceV1_0, block: MemoryBlock) {
+        // trace!("Free block '{:#?}'", block);
         let memory_index = block.memory_index;
         debug_assert!(fits_usize(memory_index));
 
@@ -162,10 +152,7 @@ impl<T: 'static> Heaps<T> {
     /// Dispose of allocator.
     /// Cleanup allocators before dropping.
     /// Will panic if memory instances are left allocated.
-    pub fn dispose<D>(self, device: &D)
-    where
-        D: Device<Memory = T>,
-    {
+    pub fn dispose(self, device: &impl DeviceV1_0) {
         for mt in self.types {
             mt.dispose(device)
         }
@@ -174,12 +161,12 @@ impl<T: 'static> Heaps<T> {
 
 /// Memory block allocated from `Heaps`.
 #[derive(Debug)]
-pub struct MemoryBlock<T> {
-    block: BlockFlavor<T>,
+pub struct MemoryBlock {
+    block: BlockFlavor,
     memory_index: u32,
 }
 
-impl<T> MemoryBlock<T> {
+impl MemoryBlock {
     /// Get memory type id.
     pub fn memory_type(&self) -> u32 {
         self.memory_index
@@ -187,11 +174,11 @@ impl<T> MemoryBlock<T> {
 }
 
 #[derive(Debug)]
-enum BlockFlavor<T> {
-    Dedicated(DedicatedBlock<T>),
-    Arena(ArenaBlock<T>),
-    Dynamic(DynamicBlock<T>),
-    // Chunk(ChunkBlock<T>),
+enum BlockFlavor {
+    Dedicated(DedicatedBlock),
+    Arena(ArenaBlock),
+    Dynamic(DynamicBlock),
+    // Chunk(ChunkBlock),
 }
 
 macro_rules! any_block {
@@ -224,16 +211,14 @@ macro_rules! any_block {
     }};
 }
 
-impl<T: 'static> Block for MemoryBlock<T> {
-    type Memory = T;
-
+impl Block for MemoryBlock {
     #[inline]
-    fn properties(&self) -> Properties {
+    fn properties(&self) -> vk::MemoryPropertyFlags {
         any_block!(&self.block => block.properties())
     }
 
     #[inline]
-    fn memory(&self) -> &T {
+    fn memory(&self) -> vk::DeviceMemory {
         any_block!(&self.block => block.memory())
     }
 
@@ -242,21 +227,15 @@ impl<T: 'static> Block for MemoryBlock<T> {
         any_block!(&self.block => block.range())
     }
 
-    fn map<'a, D>(
+    fn map<'a>(
         &'a mut self,
-        device: &D,
+        device: &impl DeviceV1_0,
         range: Range<u64>,
-    ) -> Result<MappedRange<'a, T>, MappingError>
-    where
-        D: Device<Memory = T>,
-    {
+    ) -> Result<MappedRange<'a>, MappingError> {
         any_block!(&mut self.block => block.map(device, range))
     }
 
-    fn unmap<D>(&mut self, device: &D)
-    where
-        D: Device<Memory = T>,
-    {
+    fn unmap(&mut self, device: &impl DeviceV1_0) {
         any_block!(&mut self.block => block.unmap(device))
     }
 }
@@ -278,69 +257,69 @@ impl MemoryHeap {
 }
 
 #[derive(Debug)]
-struct MemoryType<T> {
+struct MemoryType {
     heap_index: usize,
-    properties: Properties,
-    dedicated: DedicatedAllocator<T>,
-    arena: Option<ArenaAllocator<T>>,
-    dynamic: Option<DynamicAllocator<T>>,
-    // chunk: Option<ChunkAllocator<T>>,
+    properties: vk::MemoryPropertyFlags,
+    dedicated: DedicatedAllocator,
+    arena: Option<ArenaAllocator>,
+    dynamic: Option<DynamicAllocator>,
+    // chunk: Option<ChunkAllocator>,
 }
 
-impl<T: 'static> MemoryType<T> {
-    fn new(memory_type: u32, heap_index: usize, properties: Properties, config: Config) -> Self {
+impl MemoryType {
+    fn new(
+        memory_type: u32,
+        heap_index: usize,
+        properties: vk::MemoryPropertyFlags,
+        config: HeapsConfig,
+    ) -> Self {
         MemoryType {
             properties,
             heap_index,
             dedicated: DedicatedAllocator::new(memory_type, properties),
-            arena: if properties.contains(ArenaAllocator::<T>::properties_required()) {
+            arena: if properties.subset(ArenaAllocator::properties_required()) {
                 config
                     .arena
                     .map(|config| ArenaAllocator::new(memory_type, properties, config))
             } else {
                 None
             },
-            dynamic: if properties.contains(DynamicAllocator::<T>::properties_required()) {
+            dynamic: if properties.subset(DynamicAllocator::properties_required()) {
                 config
                     .dynamic
                     .map(|config| DynamicAllocator::new(memory_type, properties, config))
             } else {
                 None
             },
-            // chunk: if properties.contains(ChunkAllocator::<T>::properties_required()) {
-            //     config.chunk.map(|config| ChunkAllocator::new(memory_type, properties, config))
-            // } else {
-            //     None
-            // },
         }
     }
 
-    fn alloc<D, U>(
+    fn alloc(
         &mut self,
-        device: &D,
-        usage: U,
+        device: &impl DeviceV1_0,
+        usage: impl MemoryUsage,
         size: u64,
         align: u64,
-    ) -> Result<(BlockFlavor<T>, u64), MemoryError>
-    where
-        D: Device<Memory = T>,
-        U: Usage,
-    {
+    ) -> Result<(BlockFlavor, u64), MemoryError> {
         match (usage.value(), self.arena.as_mut(), self.dynamic.as_mut()) {
-            (UsageValue::Upload, Some(ref mut arena), _)
-            | (UsageValue::Download, Some(ref mut arena), _)
+            (MemoryUsageValue::Upload, Some(ref mut arena), _)
+            | (MemoryUsageValue::Download, Some(ref mut arena), _)
                 if size <= arena.max_allocation() =>
             {
                 arena
                     .alloc(device, size, align)
                     .map(|(block, allocated)| (BlockFlavor::Arena(block), allocated))
             }
-            (UsageValue::Dynamic, _, Some(ref mut dynamic)) if size <= dynamic.max_allocation() => {
+            (MemoryUsageValue::Dynamic, _, Some(ref mut dynamic))
+                if size <= dynamic.max_allocation() =>
+            {
                 dynamic
                     .alloc(device, size, align)
                     .map(|(block, allocated)| (BlockFlavor::Dynamic(block), allocated))
             }
-            (UsageValue::Data, _, Some(ref mut dynamic)) if size <= dynamic.max_allocation() => {
+            (MemoryUsageValue::Data, _, Some(ref mut dynamic))
+                if size <= dynamic.max_allocation() =>
+            {
                 dynamic
                     .alloc(device, size, align)
                     .map(|(block, allocated)| (BlockFlavor::Dynamic(block), allocated))
@@ -352,24 +331,20 @@ impl<T: 'static> MemoryType<T> {
         }
     }
 
-    fn free<D>(&mut self, device: &D, block: BlockFlavor<T>) -> u64
-    where
-        D: Device<Memory = T>,
-    {
+    fn free(&mut self, device: &impl DeviceV1_0, block: BlockFlavor) -> u64 {
         match block {
             BlockFlavor::Dedicated(block) => self.dedicated.free(device, block),
             BlockFlavor::Arena(block) => self.arena.as_mut().unwrap().free(device, block),
             BlockFlavor::Dynamic(block) => self.dynamic.as_mut().unwrap().free(device, block),
-            // BlockFlavor::Chunk(block) => self.chunk.free(device, block),
         }
     }
 
-    fn dispose<D>(self, device: &D)
-    where
-        D: Device<Memory = T>,
-    {
+    fn dispose(self, device: &impl DeviceV1_0) {
         if let Some(arena) = self.arena {
             arena.dispose(device);
         }
+        if let Some(dynamic) = self.dynamic {
+            dynamic.dispose();
+        }
     }
 }
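
An end-to-end sketch of the reworked `Heaps` API: build it from the physical device's memory properties, then allocate with a usage hint. Assumptions: the crate is imported as `rendy_memory`, and `MemoryUsageValue` itself implements `MemoryUsage` (as the match in `MemoryType::alloc` suggests); the caller supplies the ash handles.

```rust
use ash::{version::DeviceV1_0, vk};
use rendy_memory::usage::MemoryUsageValue;
use rendy_memory::{Heaps, HeapsConfig, MemoryBlock, MemoryError};

/// Build `Heaps` from `vkGetPhysicalDeviceMemoryProperties` output.
unsafe fn make_heaps(props: &vk::PhysicalDeviceMemoryProperties, config: HeapsConfig) -> Heaps {
    Heaps::new(
        props.memory_types[..props.memory_type_count as usize]
            .iter()
            .map(|mt| (mt.property_flags, mt.heap_index, config)),
        props.memory_heaps[..props.memory_heap_count as usize]
            .iter()
            .map(|heap| heap.size),
    )
}

/// Allocate a block for frequently updated data, constrained by `type_mask`
/// from `vkGetBufferMemoryRequirements`.
fn allocate_dynamic(
    heaps: &mut Heaps,
    device: &impl DeviceV1_0,
    type_mask: u32,
    size: u64,
) -> Result<MemoryBlock, MemoryError> {
    heaps.allocate(device, type_mask, MemoryUsageValue::Dynamic, size, 256)
}
```
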
diff --git a/memory/src/impls/ash.rs b/memory/src/impls/ash.rs
deleted file mode 100644
index 2e268046..00000000
--- a/memory/src/impls/ash.rs
+++ /dev/null
@@ -1,138 +0,0 @@
-use ash::{
-    self,
-    version::{DeviceV1_0, FunctionPointers},
-};
-use device::Device;
-use error::*;
-use smallvec::SmallVec;
-use std::{
-    ops::Range,
-    ptr::{null, null_mut, NonNull},
-};
-
-impl From<ash::vk::Result> for OutOfMemoryError {
-    fn from(result: ash::vk::Result) -> OutOfMemoryError {
-        match result {
-            ash::vk::Result::Success => panic!("Unexpected success"),
-            ash::vk::Result::ErrorOutOfHostMemory => OutOfMemoryError::OutOfHostMemory,
-            ash::vk::Result::ErrorOutOfDeviceMemory => OutOfMemoryError::OutOfDeviceMemory,
-            _ => panic!("unexpected error"),
-        }
-    }
-}
-
-impl From<ash::vk::Result> for MappingError {
-    fn from(result: ash::vk::Result) -> MappingError {
-        match result {
-            ash::vk::Result::Success => panic!("Unexpected success"),
-            ash::vk::Result::ErrorOutOfHostMemory => OutOfMemoryError::OutOfHostMemory.into(),
-            ash::vk::Result::ErrorOutOfDeviceMemory => OutOfMemoryError::OutOfDeviceMemory.into(),
-            ash::vk::Result::ErrorMemoryMapFailed => MappingError::MappingFailed,
-            _ => panic!("unexpected error"),
-        }
-    }
-}
-
-impl From<ash::vk::Result> for AllocationError {
-    fn from(result: ash::vk::Result) -> AllocationError {
-        match result {
-            ash::vk::Result::Success => panic!("Unexpected success"),
-            ash::vk::Result::ErrorOutOfHostMemory => OutOfMemoryError::OutOfHostMemory.into(),
-            ash::vk::Result::ErrorOutOfDeviceMemory => OutOfMemoryError::OutOfDeviceMemory.into(),
-            _ => panic!("unexpected error"),
-        }
-    }
-}
-
-impl From<ash::vk::Result> for MemoryError {
-    fn from(result: ash::vk::Result) -> MemoryError {
-        match result {
-            ash::vk::Result::Success => panic!("Unexpected success"),
-            ash::vk::Result::ErrorOutOfHostMemory => OutOfMemoryError::OutOfHostMemory.into(),
-            ash::vk::Result::ErrorOutOfDeviceMemory => OutOfMemoryError::OutOfDeviceMemory.into(),
-            ash::vk::Result::ErrorMemoryMapFailed => MappingError::MappingFailed.into(),
-            _ => panic!("unexpected error"),
-        }
-    }
-}
-
-impl<V> Device for ash::Device<V>
-where
-    V: FunctionPointers,
-    ash::Device<V>: DeviceV1_0,
-{
-    type Memory = ash::vk::DeviceMemory;
-
-    unsafe fn allocate(
-        &self,
-        index: u32,
-        size: u64,
-    ) -> Result<ash::vk::DeviceMemory, AllocationError> {
-        Ok(self.allocate_memory(
-            &ash::vk::MemoryAllocateInfo {
-                s_type: ash::vk::StructureType::MemoryAllocateInfo,
-                p_next: null(),
-                allocation_size: size,
-                memory_type_index: index,
-            },
-            None,
-        )?)
-    }
-
-    unsafe fn free(&self, memory: ash::vk::DeviceMemory) {
-        self.free_memory(memory, None);
-    }
-
-    unsafe fn map(
-        &self,
-        memory: &ash::vk::DeviceMemory,
-        range: Range<u64>,
-    ) -> Result<NonNull<u8>, MappingError> {
-        let ptr = self.map_memory(
-            *memory,
-            range.start,
-            range.end - range.start,
-            ash::vk::MemoryMapFlags::empty(),
-        )?;
-        debug_assert_ne!(ptr, null_mut());
-        Ok(NonNull::new_unchecked(ptr as *mut u8))
-    }
-
-    unsafe fn unmap(&self, memory: &ash::vk::DeviceMemory) {
-        self.unmap_memory(*memory)
-    }
-
-    unsafe fn invalidate<'a>(
-        &self,
-        regions: impl IntoIterator<Item = (&'a ash::vk::DeviceMemory, Range<u64>)>,
-    ) -> Result<(), OutOfMemoryError> {
-        let ranges = regions
-            .into_iter()
-            .map(|(memory, range)| ash::vk::MappedMemoryRange {
-                s_type: ash::vk::StructureType::MappedMemoryRange,
-                p_next: null(),
-                memory: *memory,
-                offset: range.start,
-                size: range.end - range.start,
-            }).collect::<SmallVec<[_; 32]>>();
-        self.invalidate_mapped_memory_ranges(&ranges)?;
-        Ok(())
-    }
-
-    unsafe fn flush<'a>(
-        &self,
-        regions: impl IntoIterator<Item = (&'a ash::vk::DeviceMemory, Range<u64>)>,
-    ) -> Result<(), OutOfMemoryError> {
-        let ranges = regions
-            .into_iter()
-            .map(|(memory, range)| ash::vk::MappedMemoryRange {
-                s_type: ash::vk::StructureType::MappedMemoryRange,
-                p_next: null(),
-                memory: *memory,
-                offset: range.start,
-                size: range.end - range.start,
-            }).collect::<SmallVec<[_; 32]>>();
-        self.flush_mapped_memory_ranges(&ranges)?;
-        Ok(())
-    }
-}
diff --git a/memory/src/impls/hal.rs b/memory/src/impls/hal.rs
deleted file mode 100644
index 0b3950bf..00000000
--- a/memory/src/impls/hal.rs
+++ /dev/null
@@ -1,149 +0,0 @@
-//! Adapter for gfx-hal
-
-use hal::{self, Device as HalDevice};
-use std::{borrow::Borrow, marker::PhantomData, ops::Range, ptr::NonNull};
-
-use device::Device;
-use error::*;
-use heaps::*;
-use memory::*;
-use util::*;
-
-impl From<hal::device::OutOfMemory> for OutOfMemoryError {
-    fn from(_: hal::device::OutOfMemory) -> OutOfMemoryError {
-        OutOfMemoryError::OutOfDeviceMemory
-    }
-}
-
-impl From<hal::device::OutOfMemory> for MappingError {
-    fn from(_: hal::device::OutOfMemory) -> MappingError {
-        OutOfMemoryError::OutOfDeviceMemory.into()
-    }
-}
-
-impl From<hal::device::OutOfMemory> for AllocationError {
-    fn from(_: hal::device::OutOfMemory) -> AllocationError {
-        OutOfMemoryError::OutOfDeviceMemory.into()
-    }
-}
-
-impl From<hal::device::OutOfMemory> for MemoryError {
-    fn from(_: hal::device::OutOfMemory) -> MemoryError {
-        OutOfMemoryError::OutOfDeviceMemory.into()
-    }
-}
-
-impl From<hal::mapping::Error> for MappingError {
-    fn from(error: hal::mapping::Error) -> MappingError {
-        match error {
-            hal::mapping::Error::InvalidAccess => MappingError::HostInvisible,
-            hal::mapping::Error::OutOfBounds => MappingError::OutOfBounds,
-            hal::mapping::Error::OutOfMemory => OutOfMemoryError::OutOfHostMemory.into(),
-        }
-    }
-}
-
-impl From<hal::mapping::Error> for MemoryError {
-    fn from(error: hal::mapping::Error) -> MemoryError {
-        match error {
-            hal::mapping::Error::InvalidAccess => MappingError::HostInvisible.into(),
-            hal::mapping::Error::OutOfBounds => MappingError::OutOfBounds.into(),
-            hal::mapping::Error::OutOfMemory => OutOfMemoryError::OutOfHostMemory.into(),
-        }
-    }
-}
-
-impl From<hal::memory::Properties> for Properties {
-    fn from(value: hal::memory::Properties) -> Self {
-        let mut result = Properties::empty();
-        if value.contains(hal::memory::Properties::DEVICE_LOCAL) {
-            result |= Properties::DEVICE_LOCAL;
-        }
-        if value.contains(hal::memory::Properties::COHERENT) {
-            result |= Properties::HOST_COHERENT;
-        }
-        if value.contains(hal::memory::Properties::CPU_CACHED) {
-            result |= Properties::HOST_CACHED;
-        }
-        if value.contains(hal::memory::Properties::CPU_VISIBLE) {
-            result |= Properties::HOST_VISIBLE;
-        }
-        if value.contains(hal::memory::Properties::LAZILY_ALLOCATED) {
-            result |= Properties::LAZILY_ALLOCATED;
-        }
-        result
-    }
-}
-
-impl<D, B> Device for (D, PhantomData<B>)
-where
-    B: hal::Backend,
-    D: Borrow<B::Device>,
-{
-    type Memory = B::Memory;
-
-    unsafe fn allocate(&self, index: u32, size: u64) -> Result<B::Memory, AllocationError> {
-        assert!(
-            fits_usize(index),
-            "Numbers of memory types can't exceed usize limit"
-        );
-        let index = index as usize;
-        Ok(self
-            .0
-            .borrow()
-            .allocate_memory(hal::MemoryTypeId(index), size)?)
-    }
-
-    unsafe fn free(&self, memory: B::Memory) {
-        self.0.borrow().free_memory(memory)
-    }
-
-    unsafe fn map(
-        &self,
-        memory: &B::Memory,
-        range: Range<u64>,
-    ) -> Result<NonNull<u8>, MappingError> {
-        let ptr = self.0.borrow().map_memory(memory, range)?;
-        debug_assert!(!ptr.is_null());
-        Ok(NonNull::new_unchecked(ptr))
-    }
-
-    unsafe fn unmap(&self, memory: &B::Memory) {
-        self.0.borrow().unmap_memory(memory)
-    }
-
-    unsafe fn invalidate<'a>(
-        &self,
-        regions: impl IntoIterator<Item = (&'a B::Memory, Range<u64>)>,
-    ) -> Result<(), OutOfMemoryError> {
-        self.0.borrow().invalidate_mapped_memory_ranges(regions);
-        Ok(())
-    }
-
-    unsafe fn flush<'a>(
-        &self,
-        regions: impl IntoIterator<Item = (&'a B::Memory, Range<u64>)>,
-    ) -> Result<(), OutOfMemoryError> {
-        self.0.borrow().flush_mapped_memory_ranges(regions);
-        Ok(())
-    }
-}
-
-/// Fetch data necessary from `Backend::PhysicalDevice`
-#[allow(unused)]
-unsafe fn heaps_from_physical_device<B>(
-    physical: &B::PhysicalDevice,
-    config: Config,
-) -> Heaps<B::Memory>
-where
-    B: hal::Backend,
-{
-    let memory_properties = ::hal::PhysicalDevice::memory_properties(physical);
-    Heaps::new(
-        memory_properties
-            .memory_types
-            .into_iter()
-            .map(|mt| (mt.properties.into(), mt.heap_index as u32, config)),
-        memory_properties.memory_heaps,
-    )
-}
diff --git a/memory/src/impls/mod.rs b/memory/src/impls/mod.rs
deleted file mode 100644
index 95e98725..00000000
--- a/memory/src/impls/mod.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-#[cfg(feature = "gfx-hal")]
-mod hal;
-
-#[cfg(feature = "ash")]
-mod ash;
diff --git a/memory/src/lib.rs b/memory/src/lib.rs
index 1128f92e..1d73ce89 100644
--- a/memory/src/lib.rs
+++ b/memory/src/lib.rs
@@ -17,35 +17,24 @@
 #![warn(rust_2018_compatibility)]
 #![warn(rust_2018_idioms)]
 
-#[macro_use]
-extern crate bitflags;
-
+extern crate ash;
 #[macro_use]
 extern crate derivative;
-
 #[macro_use]
 extern crate failure;
-extern crate veclist;
-
+extern crate hibitset;
+#[macro_use]
+extern crate log;
+extern crate relevant;
 #[cfg(feature = "serde")]
 #[macro_use]
 extern crate serde;
-
-extern crate hibitset;
-extern crate relevant;
 extern crate smallvec;
-
-#[cfg(test)]
-extern crate rand;
-
-#[cfg(test)]
-mod test;
+extern crate veclist;
 
 mod block;
-mod device;
 mod error;
 mod heaps;
-mod impls;
 mod mapping;
 mod memory;
 mod util;
@@ -54,15 +43,8 @@ pub mod allocator;
 pub mod usage;
 
 pub use block::Block;
-pub use device::Device;
 pub use error::{AllocationError, MappingError, MemoryError, OutOfMemoryError};
-pub use heaps::{Config, Heaps, MemoryBlock};
+pub use heaps::{Heaps, HeapsConfig, MemoryBlock};
 pub use mapping::{write::Write, Coherent, MappedRange, MaybeCoherent, NonCoherent};
-pub use memory::{Memory, Properties};
-pub use usage::Usage;
-
-#[cfg(feature = "gfx-hal")]
-extern crate gfx_hal as hal;
-
-#[cfg(feature = "ash")]
-extern crate ash;
+pub use memory::Memory;
+pub use usage::MemoryUsage;
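
For downstream crates the net effect of this file is a set of renames; imports now look roughly like this (crate name assumed to be `rendy_memory`, `DynamicConfig` assumed re-exported alongside `ArenaConfig`):

```rust
// Old: Config, Usage, Properties; new names after this patch:
use rendy_memory::allocator::{ArenaConfig, DynamicConfig};
use rendy_memory::{Block, Heaps, HeapsConfig, MemoryBlock, MemoryError, MemoryUsage, Write};
```
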
diff --git a/memory/src/mapping/mod.rs b/memory/src/mapping/mod.rs
index 4b1d8969..6ad4292d 100644
--- a/memory/src/mapping/mod.rs
+++ b/memory/src/mapping/mod.rs
@@ -1,9 +1,9 @@
 mod range;
 pub(crate) mod write;
 
-use std::{fmt::Debug, ops::Range, ptr::NonNull};
+use ash::{version::DeviceV1_0, vk};
+use std::{ops::Range, ptr::NonNull};
 
-use device::Device;
 use error::{MappingError, MemoryError};
 use memory::Memory;
 use util::fits_usize;
@@ -11,7 +11,7 @@ use util::fits_usize;
 pub(crate) use self::range::{
     mapped_fitting_range, mapped_slice, mapped_slice_mut, mapped_sub_range,
 };
-use self::write::{Write, WriteFlush};
+use self::write::{Write, WriteCoherent, WriteFlush};
 
 /// Non-coherent marker.
 #[derive(Clone, Copy, Debug)]
@@ -28,9 +28,9 @@ pub struct MaybeCoherent(bool);
 /// Represents range of the memory mapped to the host.
 /// Provides methods for safer host access to the memory.
 #[derive(Debug)]
-pub struct MappedRange<'a, T: 'static, C = MaybeCoherent> {
+pub struct MappedRange<'a, C = MaybeCoherent> {
     /// Memory object that is mapped.
-    memory: &'a T,
+    memory: &'a Memory,
 
     /// Pointer to range mapped memory.
     ptr: NonNull<u8>,
@@ -42,22 +42,19 @@ pub struct MappedRange<'a, T: 'static, C = MaybeCoherent> {
     coherent: C,
 }
 
-impl<'a, T: 'static> MappedRange<'a, T, MaybeCoherent> {
+impl<'a> MappedRange<'a> {
     /// Map range of memory.
     ///
     /// # Safety
     ///
-    /// Only one range for the given memory object can be mapped.
-    /// Memory object must be not mapped.
-    /// Memory object must be created with device specified.
-    pub unsafe fn new<D>(
-        memory: &'a Memory<T>,
-        device: &D,
+    /// * Only one range for the given memory object can be mapped.
+    /// * Memory object must not be mapped already.
+    /// * Memory object must have been allocated from the `device` specified.
+    pub unsafe fn new(
+        memory: &'a Memory,
+        device: &impl DeviceV1_0,
         range: Range<u64>,
-    ) -> Result<Self, MappingError>
-    where
-        D: Device<Memory = T>,
-    {
+    ) -> Result<Self, MappingError> {
         assert!(
             range.start <= range.end,
             "Memory mapping region must have valid size"
@@ -68,63 +65,78 @@ impl<'a, T: 'static> MappedRange<'a, T, MaybeCoherent> {
         );
         assert!(memory.host_visible());
 
-        let ptr = device.map(memory.raw(), range.clone())?;
+        let ptr = device.map_memory(
+            memory.raw(),
+            range.start,
+            range.end - range.start,
+            vk::MemoryMapFlags::empty(),
+        )?;
         assert!(
-            (ptr.as_ptr() as usize).wrapping_neg() <= (range.end - range.start) as usize,
+            (ptr as usize).wrapping_neg() <= (range.end - range.start) as usize,
             "Resulting pointer value + range length must fit in usize",
         );
 
-        Ok(Self::from_raw(memory, ptr, range))
+        Ok(Self::from_raw(
+            memory,
+            NonNull::new_unchecked(ptr as *mut u8),
+            range,
+        ))
     }
 
     /// Construct mapped range from raw mapping
-    pub unsafe fn from_raw(memory: &'a Memory<T>, ptr: NonNull<u8>, range: Range<u64>) -> Self {
+    ///
+    /// # Safety
+    ///
+    /// The `range` of `memory` must be mapped to the host memory region pointed to by `ptr`.
+    pub unsafe fn from_raw(memory: &'a Memory, ptr: NonNull<u8>, range: Range<u64>) -> Self {
         MappedRange {
             ptr,
             range,
-            memory: memory.raw(),
+            memory,
             coherent: MaybeCoherent(memory.host_coherent()),
         }
     }
 
-    /// Get raw mapping pointer
+    /// Get pointer to beginning of memory region.
     pub fn ptr(&self) -> NonNull<u8> {
         self.ptr
     }
 
-    /// Get raw mapping pointer
+    /// Get mapped range.
     pub fn range(&self) -> Range<u64> {
         self.range.clone()
     }
 
     /// Fetch readable slice of sub-range to be read.
     /// Invalidating range if memory is not coherent.
-    /// `range.end - range.start` must be multiple of `size_of::<T>()`.
-    /// `mapping offset + range.start` must be multiple of `align_of::<T>()`.
+    /// `range.end - range.start` must be a multiple of `size_of::<T>()`.
+    /// `mapping offset + range.start` must be a multiple of `align_of::<T>()`.
     ///
     /// # Safety
     ///
-    /// Caller must ensure that device won't write to the memory region for until the borrow ends.
-    /// `T` Must be plain-old-data type with memory layout compatible with data written by the device.
-    pub unsafe fn read<'b, D, U>(
+    /// * Caller must ensure that the device won't write to the memory region until the borrow ends.
+    /// * `T` must be a plain-old-data type with a memory layout compatible with the data written by the device.
+    pub unsafe fn read<'b, T>(
         &'b mut self,
-        device: &D,
+        device: &impl DeviceV1_0,
         range: Range<u64>,
-    ) -> Result<&'b [U], MemoryError>
+    ) -> Result<&'b [T], MemoryError>
     where
         'a: 'b,
-        D: Device<Memory = T>,
-        T: Debug + 'static,
-        U: Copy,
+        T: Copy,
     {
         let (ptr, range) = mapped_sub_range(self.ptr, self.range.clone(), range)
             .ok_or_else(|| MappingError::OutOfBounds)?;
 
         if self.coherent.0 {
-            device.invalidate(Some((self.memory, range.clone())))?;
+            device.invalidate_mapped_memory_ranges(&[vk::MappedMemoryRange::builder()
+                .memory(self.memory.raw())
+                .offset(self.range.start)
+                .size(self.range.end - self.range.start)
+                .build()])?;
         }
 
-        let slice = mapped_slice::<U>(ptr, range)?;
+        let slice = mapped_slice::<T>(ptr, range)?;
         Ok(slice)
     }
 
@@ -133,34 +145,99 @@ impl<'a, T: 'static> MappedRange<'a, T, MaybeCoherent> {
     ///
     /// # Safety
     ///
-    /// Caller must ensure that device won't write to or read from the memory region.
-    pub unsafe fn write<'b, D, U>(
+    /// * Caller must ensure that device won't write to or read from the memory region.
+    pub unsafe fn write<'b, T>(
         &'b mut self,
-        device: &'b D,
+        device: &'b impl DeviceV1_0,
         range: Range<u64>,
-    ) -> Result<impl Write<U> + 'b, MappingError>
+    ) -> Result<impl Write<T> + 'b, MappingError>
     where
         'a: 'b,
-        D: Device<Memory = T>,
-        T: Debug + 'static,
-        U: Copy,
+        T: Copy,
     {
         let (ptr, range) = mapped_sub_range(self.ptr, self.range.clone(), range)
             .ok_or_else(|| MappingError::OutOfBounds)?;
 
-        if self.coherent.0 {
-            device.invalidate(Some((self.memory, range.clone())))?;
+        if !self.coherent.0 {
+            device.invalidate_mapped_memory_ranges(&[vk::MappedMemoryRange::builder()
+                .memory(self.memory.raw())
+                .offset(self.range.start)
+                .size(self.range.end - self.range.start)
+                .build()])?;
         }
 
-        let slice = mapped_slice_mut::<U>(ptr, range.clone())?;
+        let slice = mapped_slice_mut::<T>(ptr, range.clone())?;
 
         Ok(WriteFlush {
             slice,
-            flush: if self.coherent.0 {
+            flush: if !self.coherent.0 {
                 Some((device, self.memory, range))
             } else {
                 None
             },
         })
     }
+
+    /// Convert into mapped range with statically known coherency.
+    pub fn coherent(self) -> Result<MappedRange<'a, Coherent>, MappedRange<'a, NonCoherent>> {
+        if self.coherent.0 {
+            Ok(MappedRange {
+                memory: self.memory,
+                ptr: self.ptr,
+                range: self.range,
+                coherent: Coherent,
+            })
+        } else {
+            Err(MappedRange {
+                memory: self.memory,
+                ptr: self.ptr,
+                range: self.range,
+                coherent: NonCoherent,
+            })
+        }
+    }
+}
+
+impl<'a> From<MappedRange<'a, Coherent>> for MappedRange<'a> {
+    fn from(range: MappedRange<'a, Coherent>) -> Self {
+        MappedRange {
+            memory: range.memory,
+            ptr: range.ptr,
+            range: range.range,
+            coherent: MaybeCoherent(true),
+        }
+    }
+}
+
+impl<'a> From<MappedRange<'a, NonCoherent>> for MappedRange<'a> {
+    fn from(range: MappedRange<'a, NonCoherent>) -> Self {
+        MappedRange {
+            memory: range.memory,
+            ptr: range.ptr,
+            range: range.range,
+            coherent: MaybeCoherent(false),
+        }
+    }
+}
+
+impl<'a> MappedRange<'a, Coherent> {
+    /// Fetch writer to the sub-region.
+    ///
+    /// # Safety
+    ///
+    /// * Caller must ensure that device won't write to or read from the memory region.
+    pub unsafe fn write<'b, U>(
+        &'b mut self,
+        range: Range<u64>,
+    ) -> Result<impl Write<U> + 'b, MappingError>
+    where
+        U: Copy,
+    {
+        let (ptr, range) = mapped_sub_range(self.ptr, self.range.clone(), range)
+            .ok_or_else(|| MappingError::OutOfBounds)?;
+
+        let slice = mapped_slice_mut::<U>(ptr, range.clone())?;
+
+        Ok(WriteCoherent { slice })
+    }
 }
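
A sketch of the host-write path after these changes, going through a `Block` as the rest of the crate does. It assumes a host-visible block whose size is a multiple of `size_of::<f32>()` and large enough for `data`, and that ranges passed to `write` use the same coordinates as `mapped.range()`.

```rust
use ash::version::DeviceV1_0;
use rendy_memory::{Block, MemoryError};

unsafe fn upload_floats<B: Block>(
    block: &mut B,
    device: &impl DeviceV1_0,
    data: &[f32],
) -> Result<(), MemoryError> {
    let range = block.range();
    let mut mapped = block.map(device, range.clone())?;
    let whole = mapped.range();
    // Writer for the whole mapped region; `T = f32` is inferred from `data`.
    let mut writer = mapped.write(device, whole)?;
    writer.write(data);
    // Dropping the writer flushes the range when the memory is not host-coherent.
    drop(writer);
    drop(mapped);
    block.unmap(device);
    Ok(())
}
```
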
diff --git a/memory/src/mapping/range.rs b/memory/src/mapping/range.rs
index eb7c29ab..3c65c370 100644
--- a/memory/src/mapping/range.rs
+++ b/memory/src/mapping/range.rs
@@ -53,48 +53,48 @@ pub(crate) fn mapped_sub_range(
 /// User must ensure that:
 /// * this function won't create aliasing slices.
 /// * returned slice doesn't outlive mapping.
-pub(crate) unsafe fn mapped_slice_mut<'a, U>(
+pub(crate) unsafe fn mapped_slice_mut<'a, T>(
     ptr: NonNull<u8>,
     range: Range<u64>,
-) -> Result<&'a mut [U], MappingError> {
+) -> Result<&'a mut [T], MappingError> {
     let size = (range.end - range.start) as usize;
     assert_eq!(
-        size % size_of::<U>(),
+        size % size_of::<T>(),
         0,
         "Range length must be multiple of element size"
     );
     let offset = ptr.as_ptr() as usize;
-    if offset % align_of::<U>() > 0 {
+    if offset % align_of::<T>() > 0 {
         return Err(MappingError::Unaligned {
-            align: align_of::<U>(),
+            align: align_of::<T>(),
             offset,
         });
     }
 
-    Ok(from_raw_parts_mut(ptr.as_ptr() as *mut U, size))
+    Ok(from_raw_parts_mut(ptr.as_ptr() as *mut T, size))
 }
 
 /// # Safety
 ///
 /// User must ensure that:
 /// * returned slice doesn't outlive mapping.
-pub(crate) unsafe fn mapped_slice<'a, U>(
+pub(crate) unsafe fn mapped_slice<'a, T>(
     ptr: NonNull<u8>,
     range: Range<u64>,
-) -> Result<&'a [U], MappingError> {
+) -> Result<&'a [T], MappingError> {
     let size = (range.end - range.start) as usize;
     assert_eq!(
-        size % size_of::<U>(),
+        size % size_of::<T>(),
         0,
         "Range length must be multiple of element size"
     );
     let offset = ptr.as_ptr() as usize;
-    if offset % align_of::<U>() > 0 {
+    if offset % align_of::<T>() > 0 {
         return Err(MappingError::Unaligned {
-            align: align_of::<U>(),
+            align: align_of::<T>(),
             offset,
         });
     }
 
-    Ok(from_raw_parts(ptr.as_ptr() as *const U, size))
+    Ok(from_raw_parts(ptr.as_ptr() as *const T, size))
 }
diff --git a/memory/src/mapping/write.rs b/memory/src/mapping/write.rs
index e68d2674..796d096f 100644
--- a/memory/src/mapping/write.rs
+++ b/memory/src/mapping/write.rs
@@ -1,23 +1,23 @@
-use device::Device;
 use std::{ops::Range, ptr::copy_nonoverlapping};
 
+use ash::{version::DeviceV1_0, vk::MappedMemoryRange};
+use memory::Memory;
+
 /// Trait for memory region suitable for host writes.
-pub trait Write<U: Copy> {
-    /// Get mutable slice of `U` bound to mapped range.
+pub trait Write<T: Copy> {
+    /// Get mutable slice of `T` bound to mapped range.
     ///
     /// # Safety
     ///
-    /// Slice returned by this function could be hazardous.
-    /// User must ensure that bit actual patterns represents valid values of `U`
-    /// or not attempt to read them.
-    unsafe fn slice(&mut self) -> &mut [U];
+    /// * Returned slice should not be read.
+    unsafe fn slice(&mut self) -> &mut [T];
 
     /// Write data into mapped memory sub-region.
     ///
     /// # Panic
     ///
     /// Panics if `data.len()` is greater than this sub-region len.
-    fn write(&mut self, data: &[U]) {
+    fn write(&mut self, data: &[T]) {
         unsafe {
             let slice = self.slice();
             assert!(data.len() <= slice.len());
@@ -27,50 +27,58 @@ pub trait Write<U: Copy> {
 }
 
 #[derive(Debug)]
-pub(super) struct WriteFlush<'a, U: 'a, T: 'static, D: Device<Memory = T> + 'a> {
-    pub(super) slice: &'a mut [U],
-    pub(super) flush: Option<(&'a D, &'a T, Range<u64>)>,
+pub(super) struct WriteFlush<'a, T: 'a, D: DeviceV1_0 + 'a> {
+    pub(super) slice: &'a mut [T],
+    pub(super) flush: Option<(&'a D, &'a Memory, Range<u64>)>,
 }
 
-impl<'a, U, T, D> Drop for WriteFlush<'a, U, T, D>
+impl<'a, T, D> Drop for WriteFlush<'a, T, D>
 where
-    U: 'a,
-    T: 'static,
-    D: Device<Memory = T> + 'a,
+    T: 'a,
+    D: DeviceV1_0 + 'a,
 {
     fn drop(&mut self) {
         if let Some((device, memory, range)) = self.flush.take() {
+            // trace!("Flush memory range {:#?} @ {} .. {}", memory, range.start, range.end);
             unsafe {
                 device
-                    .flush(Some((memory, range)))
-                    .expect("Should flush successfully");
+                    .flush_mapped_memory_ranges(&[MappedMemoryRange::builder()
+                        .memory(memory.raw())
+                        .offset(range.start)
+                        .size(range.end - range.start)
+                        .build()]).expect("Should flush successfully");
             }
         }
     }
 }
 
-impl<'a, U, T, D> Write<U> for WriteFlush<'a, U, T, D>
+impl<'a, T, D> Write<T> for WriteFlush<'a, T, D>
 where
-    U: Copy + 'a,
-    T: 'a,
-    D: Device<Memory = T> + 'a,
+    T: Copy + 'a,
+    D: DeviceV1_0 + 'a,
 {
-    unsafe fn slice(&mut self) -> &mut [U] {
+    /// # Safety
+    ///
+    /// [See doc comment for trait method](trait.Write#method.slice)
+    unsafe fn slice(&mut self) -> &mut [T] {
         self.slice
     }
 }
 
 #[warn(dead_code)]
 #[derive(Debug)]
-pub(super) struct WriteCoherent<'a, U: 'a> {
-    pub(super) slice: &'a mut [U],
+pub(super) struct WriteCoherent<'a, T: 'a> {
+    pub(super) slice: &'a mut [T],
 }
 
-impl<'a, U> Write<U> for WriteCoherent<'a, U>
+impl<'a, T> Write<T> for WriteCoherent<'a, T>
 where
-    U: Copy + 'a,
+    T: Copy + 'a,
 {
-    unsafe fn slice(&mut self) -> &mut [U] {
+    /// # Safety
+    ///
+    /// [See doc comment for trait method](trait.Write#method.slice)
+    unsafe fn slice(&mut self) -> &mut [T] {
         self.slice
     }
 }
diff --git a/memory/src/memory.rs b/memory/src/memory.rs
index 0769b214..f248066c 100644
--- a/memory/src/memory.rs
+++ b/memory/src/memory.rs
@@ -1,94 +1,20 @@
+// use std::fmt;
+use ash::vk;
 use relevant::Relevant;
 
-bitflags! {
-    /// Memory property flags.
-    /// Bitmask specifying properties for a memory type.
-    /// See Vulkan docs for detailed info:
-    /// <https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/VkMemoryPropertyFlagBits.html>
-    #[repr(transparent)]
-    pub struct Properties: u32 {
-        /// Specifies that memory allocated with this type is the most efficient for device access.
-        /// This property will be set if and only if the memory type belongs to a heap with the DEVICE_LOCAL bit set.
-        const DEVICE_LOCAL = 0x00000001;
-
-        /// Specifies that memory allocated with this type can be mapped for host access using `Device::map`.
-        const HOST_VISIBLE = 0x00000002;
-
-        /// Specifies that the host cache management commands
-        /// `Device::flush` and `Device::invalidate` are not needed
-        /// to flush host writes to the device or make device writes visible to the host, respectively.
-        const HOST_COHERENT = 0x00000004;
-
-        /// Specifies that memory allocated with this type is cached on the host.
-        /// Host memory accesses to uncached memory are slower than to cached memory,
-        /// however uncached memory is always host coherent.
-        const HOST_CACHED = 0x00000008;
-
-        /// Specifies that the memory type only allows device access to the memory.
-        /// Memory types must not have both `LAZILY_ALLOCATED` and `HOST_VISIBLE` set.
-        /// Additionally, the object’s backing memory may be provided by the implementation lazily as specified in [Lazily Allocated Memory](https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#memory-device-lazy_allocation).
-        const LAZILY_ALLOCATED = 0x00000010;
-
-        /// Specifies that the memory type only allows device access to the memory,
-        /// and allows protected queue operations to access the memory.
-        /// Memory types must not have `PROTECTED` bit set and any of `HOST_VISIBLE` bit set, or `HOST_COHERENT` bit set, or `HOST_CACHED` bit set.
-        const PROTECTED = 0x00000020;
-    }
-}
-
-impl Properties {
-    /// Check if memory with this properties local for device.
-    /// Implies fast access by the device.
-    pub fn device_local(self) -> bool {
-        self.contains(Self::DEVICE_LOCAL)
-    }
-
-    /// Check if memory with this properties visible to host.
-    /// Can be mapped to the host memory.
-    pub fn host_visible(self) -> bool {
-        self.contains(Self::HOST_VISIBLE)
-    }
-
-    /// Check if host access to the mapped range of the memory with this properties is coherent.
-    /// Mapped range of the non-coherent memory must be:
-    /// * invalidated to make device writes available to the host
-    /// * flushed to make host writes available to the device
-    pub fn host_coherent(self) -> bool {
-        self.contains(Self::HOST_COHERENT)
-    }
-
-    /// Check if host access to the mapped region of the memory with this properties is done through cache.
-    /// Cached read can be faster for the host to perform.
-    /// Prefer cached memory for 'device to host' data flow.
-    pub fn host_cached(self) -> bool {
-        self.contains(Self::HOST_CACHED)
-    }
-
-    /// Check if memory with this properties allow lazy allocation.
-    /// Lazy memory could be used for transient attachments.
-    pub fn lazily_allocated(self) -> bool {
-        self.contains(Self::LAZILY_ALLOCATED)
-    }
-
-    /// Check if protected queue operations allowed to access memory with this properties.
-    pub fn protected(self) -> bool {
-        self.contains(Self::PROTECTED)
-    }
-}
-
 /// Memory object wrapper.
 /// Contains size and properties of the memory.
 #[derive(Debug)]
-pub struct Memory<T> {
-    raw: T,
+pub struct Memory {
+    raw: vk::DeviceMemory,
     size: u64,
-    properties: Properties,
+    properties: vk::MemoryPropertyFlags,
     relevant: Relevant,
 }
 
-impl<T> Memory<T> {
+impl Memory {
     /// Get memory properties.
-    pub fn properties(&self) -> Properties {
+    pub fn properties(&self) -> vk::MemoryPropertyFlags {
         self.properties
     }
 
@@ -98,23 +24,20 @@ impl<T> Memory<T> {
     }
 
     /// Get raw memory.
-    pub fn raw(&self) -> &T {
-        &self.raw
-    }
-
-    /// Get raw memory.
-    pub fn raw_mut(&mut self) -> &mut T {
-        &mut self.raw
-    }
-
-    /// Convert into raw
-    pub fn into_raw(self) -> T {
-        self.relevant.dispose();
+    pub fn raw(&self) -> vk::DeviceMemory {
         self.raw
     }
 
     /// Create memory from raw object.
-    pub unsafe fn from_raw(raw: T, size: u64, properties: Properties) -> Self {
+    ///
+    /// # Safety
+    ///
+    /// * `raw` must be a valid `vk::DeviceMemory` handle.
+    /// * `size` and `properties` must match the size of the allocation behind `raw`
+    ///   and the property flags of the memory type it was allocated from.
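+    ///
+    /// A rough sketch (assumes an `ash` `device`, a chosen `memory_type` index and the
+    /// property `flags` of that memory type):
+    ///
+    /// ```ignore
+    /// let raw = unsafe {
+    ///     device.allocate_memory(
+    ///         &vk::MemoryAllocateInfo::builder()
+    ///             .allocation_size(1024)
+    ///             .memory_type_index(memory_type)
+    ///             .build(),
+    ///         None,
+    ///     )?
+    /// };
+    /// let memory = unsafe { Memory::from_raw(raw, 1024, flags) };
+    /// ```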
+    pub unsafe fn from_raw(
+        raw: vk::DeviceMemory,
+        size: u64,
+        properties: vk::MemoryPropertyFlags,
+    ) -> Self {
         Memory {
             properties,
             raw,
@@ -124,14 +47,34 @@ impl<T> Memory<T> {
     }
 
     /// Check if this memory is host-visible and can be mapped.
-    /// `memory.host_visible()` is equivalent to `memory.properties().contains(Properties::HOST_VISIBLE)`
+    /// `memory.host_visible()` is equivalent to `memory.properties().subset(vk::MemoryPropertyFlags::HOST_VISIBLE)`
     pub fn host_visible(&self) -> bool {
-        self.properties.contains(Properties::HOST_VISIBLE)
+        self.properties
+            .subset(vk::MemoryPropertyFlags::HOST_VISIBLE)
     }
 
     /// Check if this memory is host-coherent and doesn't require invalidating or flushing.
-    /// `memory.host_coherent()` is equivalent to `memory.properties().contains(Properties::HOST_COHERENT)`
+    /// `memory.host_coherent()` is equivalent to `memory.properties().subset(vk::MemoryPropertyFlags::HOST_COHERENT)`
     pub fn host_coherent(&self) -> bool {
-        self.properties.contains(Properties::HOST_COHERENT)
+        self.properties
+            .subset(vk::MemoryPropertyFlags::HOST_COHERENT)
+    }
+
+    /// Dispose of memory object.
+    pub(crate) fn dispose(self) {
+        self.relevant.dispose();
     }
 }
+
+// pub(crate) fn memory_ptr_fmt(
+//     memory: &*const Memory,
+//     fmt: &mut fmt::Formatter<'_>,
+// ) -> Result<(), fmt::Error> {
+//     unsafe {
+//         if fmt.alternate() {
+//             write!(fmt, "*const {:#?}", **memory)
+//         } else {
+//             write!(fmt, "*const {:?}", **memory)
+//         }
+//     }
+// }
diff --git a/memory/src/test/allocator.rs b/memory/src/test/allocator.rs
deleted file mode 100644
index 562ad174..00000000
--- a/memory/src/test/allocator.rs
+++ /dev/null
@@ -1,228 +0,0 @@
-#![allow(dead_code)]
-
-use std::{cell::RefCell, collections::HashSet, ops::Range, ptr::NonNull};
-
-use rand;
-use veclist::VecList;
-
-use allocator::{ArenaConfig, DynamicConfig};
-use block::Block;
-use device::Device;
-use error::{AllocationError, MappingError, MemoryError, OutOfMemoryError};
-use heaps::{Config, Heaps, MemoryBlock};
-use memory::Properties;
-use usage::*;
-
-struct Inner {
-    freed: HashSet<u64>,
-    next: u64,
-}
-
-struct MockDevice(RefCell<Inner>);
-
-impl MockDevice {
-    fn new() -> Self {
-        MockDevice(RefCell::new(Inner {
-            freed: HashSet::new(),
-            next: 0,
-        }))
-    }
-}
-
-impl Device for MockDevice {
-    type Memory = u64;
-
-    unsafe fn allocate(&self, _index: u32, _size: u64) -> Result<u64, AllocationError> {
-        let mut inner = self.0.borrow_mut();
-        let id = inner.next;
-        inner.next = id + 1;
-        Ok(id)
-    }
-
-    unsafe fn free(&self, memory: u64) {
-        assert!(self.0.borrow_mut().freed.insert(memory), "Double-free");
-    }
-
-    unsafe fn map(&self, _memory: &u64, _range: Range<u64>) -> Result<NonNull<u8>, MappingError> {
-        Ok(NonNull::dangling())
-    }
-
-    unsafe fn unmap(&self, _memory: &u64) {}
-
-    unsafe fn invalidate<'a>(
-        &self,
-        _regions: impl IntoIterator<Item = (&'a u64, Range<u64>)>,
-    ) -> Result<(), OutOfMemoryError> {
-        unimplemented!()
-    }
-    unsafe fn flush<'a>(
-        &self,
-        _regions: impl IntoIterator<Item = (&'a u64, Range<u64>)>,
-    ) -> Result<(), OutOfMemoryError> {
-        unimplemented!()
-    }
-}
-
-fn init() -> Heaps<u64> {
-    let arena_config = ArenaConfig {
-        arena_size: 32 * 1024,
-    };
-    let dynamic_config = DynamicConfig {
-        blocks_per_chunk: 64,
-        block_size_granularity: 256,
-        max_block_size: 32 * 1024,
-    };
-    let small_dynamic_config = DynamicConfig {
-        blocks_per_chunk: 64,
-        block_size_granularity: 32,
-        max_block_size: 1024,
-    };
-
-    unsafe {
-        Heaps::new(
-            vec![
-                (
-                    Properties::DEVICE_LOCAL,
-                    0,
-                    Config {
-                        arena: None,
-                        dynamic: Some(dynamic_config),
-                    },
-                ),
-                (
-                    Properties::HOST_VISIBLE | Properties::HOST_COHERENT | Properties::DEVICE_LOCAL,
-                    1,
-                    Config {
-                        arena: None,
-                        dynamic: Some(small_dynamic_config),
-                    },
-                ),
-                (
-                    Properties::HOST_VISIBLE | Properties::HOST_COHERENT,
-                    2,
-                    Config {
-                        arena: Some(arena_config),
-                        dynamic: Some(dynamic_config),
-                    },
-                ),
-                (
-                    Properties::HOST_VISIBLE | Properties::HOST_COHERENT | Properties::HOST_CACHED,
-                    2,
-                    Config {
-                        arena: Some(arena_config),
-                        dynamic: Some(dynamic_config),
-                    },
-                ),
-            ],
-            vec![16 * 1024 * 1024, 1 * 1024 * 1024, 32 * 1024 * 1024],
-        )
-    }
-}
-
-fn random_usage() -> UsageValue {
-    match rand::random::<u8>() % 4 {
-        0 => UsageValue::Data,
-        1 => UsageValue::Download,
-        2 => UsageValue::Upload,
-        3 => UsageValue::Dynamic,
-        _ => unreachable!(),
-    }
-}
-
-#[derive(Debug)]
-struct Allocation {
-    mask: u32,
-    usage: UsageValue,
-    size: u64,
-    align: u64,
-}
-
-impl Allocation {
-    fn random() -> Self {
-        let usage = random_usage();
-
-        let mask = (rand::random::<u32>() % 3) | (1 << rand::random::<u32>() % 2);
-
-        let mask = match usage {
-            UsageValue::Data => mask,
-            _ => mask << 1,
-        };
-
-        Allocation {
-            mask,
-            usage,
-            size: 1 << (rand::random::<u32>() % 10),
-            align: 1 << (rand::random::<u32>() % 10),
-        }
-    }
-
-    fn allocate(
-        &self,
-        heaps: &mut Heaps<u64>,
-        device: &MockDevice,
-    ) -> Result<MemoryBlock<u64>, MemoryError> {
-        let block = heaps.allocate(device, self.mask, self.usage, self.size, self.align)?;
-
-        assert!(block.range().end - block.range().start >= self.size);
-        assert_eq!(
-            block.range().start % self.align,
-            0,
-            "Block: {:#?} allocated without requested align {}",
-            block,
-            self.align
-        );
-        assert!(self.usage.memory_fitness(block.properties()).is_some());
-        assert_ne!((1 << block.memory_type()) & self.mask, 0);
-        Ok(block)
-    }
-}
-
-#[test]
-fn heaps_init() {
-    let heaps = init();
-    drop(heaps);
-}
-
-#[test]
-fn blocks_test() {
-    let mut heaps = init();
-    let ref device = MockDevice::new();
-    let mut blocks = VecList::new();
-
-    for _ in 0..32 {
-        match rand::random::<u8>() % 2 {
-            0 => {
-                let allocation = Allocation::random();
-                match allocation.allocate(&mut heaps, &device) {
-                    Ok(block) => {
-                        blocks.push(block);
-                    }
-                    Err(err) => {
-                        panic!(
-                            "Error({}) occurred for {:#?}. Blocks: {:#?}",
-                            err, allocation, blocks
-                        );
-                    }
-                }
-            }
-            _ if blocks.upper_bound() > 1 => {
-                let index = rand::random::<usize>() % blocks.upper_bound();
-                if let Some(block) = blocks.pop(index) {
-                    heaps.free(device, block);
-                }
-            }
-            _ => {}
-        }
-    }
-
-    for i in 0..blocks.upper_bound() {
-        if let Some(block) = blocks.pop(i) {
-            heaps.free(device, block);
-        }
-    }
-
-    drop(blocks);
-
-    println!("Dropping Heaps");
-    heaps.dispose(device);
-}
diff --git a/memory/src/test/mod.rs b/memory/src/test/mod.rs
deleted file mode 100644
index 7b06ae31..00000000
--- a/memory/src/test/mod.rs
+++ /dev/null
@@ -1 +0,0 @@
-mod allocator;
diff --git a/memory/src/usage.rs b/memory/src/usage.rs
index c93b94fb..8e7c42c4 100644
--- a/memory/src/usage.rs
+++ b/memory/src/usage.rs
@@ -1,19 +1,19 @@
-//! Defines usage types for memory bocks.
-//! See `Usage` and implementations for details.
+//! Defines usage types for memory blocks.
+//! See `MemoryUsage` and implementations for details.
 
-use memory::Properties;
+use ash::vk::MemoryPropertyFlags;
 
 /// Memory usage trait.
-pub trait Usage {
+pub trait MemoryUsage {
     /// Comparable fitness value.
     type Fitness: Copy + Ord;
 
     /// Get runtime usage value.
-    fn value(self) -> UsageValue;
+    fn value(&self) -> MemoryUsageValue;
 
     /// Get comparable fitness value for memory properties.
     /// Should return `None` if memory doesn't fit.
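+    ///
+    /// A rough sketch of how fitness behaves (using the `Data` usage defined below;
+    /// assumes `ash::vk::MemoryPropertyFlags` is in scope):
+    ///
+    /// ```ignore
+    /// // `Data` requires DEVICE_LOCAL memory and rejects anything without it.
+    /// assert!(Data.memory_fitness(MemoryPropertyFlags::DEVICE_LOCAL).is_some());
+    /// assert!(Data.memory_fitness(MemoryPropertyFlags::HOST_VISIBLE).is_none());
+    /// ```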
-    fn memory_fitness(&self, properties: Properties) -> Option<Self::Fitness>;
+    fn memory_fitness(&self, properties: MemoryPropertyFlags) -> Option<Self::Fitness>;
 }
 
 /// Full speed GPU access.
@@ -22,24 +22,24 @@ pub trait Usage {
 #[derive(Clone, Copy, Debug)]
 pub struct Data;
 
-impl Usage for Data {
+impl MemoryUsage for Data {
     type Fitness = u8;
 
     #[inline]
-    fn value(self) -> UsageValue {
-        UsageValue::Data
+    fn value(&self) -> MemoryUsageValue {
+        MemoryUsageValue::Data
     }
 
     #[inline]
-    fn memory_fitness(&self, properties: Properties) -> Option<u8> {
-        if !properties.contains(Properties::DEVICE_LOCAL) {
+    fn memory_fitness(&self, properties: MemoryPropertyFlags) -> Option<u8> {
+        if !properties.subset(MemoryPropertyFlags::DEVICE_LOCAL) {
             None
         } else {
             Some(
-                ((!properties.contains(Properties::HOST_VISIBLE)) as u8) << 3
-                    | ((!properties.contains(Properties::LAZILY_ALLOCATED)) as u8) << 2
-                    | ((!properties.contains(Properties::HOST_CACHED)) as u8) << 1
-                    | ((!properties.contains(Properties::HOST_COHERENT)) as u8) << 0
+                ((!properties.subset(MemoryPropertyFlags::HOST_VISIBLE)) as u8) << 3
+                    | ((!properties.subset(MemoryPropertyFlags::LAZILY_ALLOCATED)) as u8) << 2
+                    | ((!properties.subset(MemoryPropertyFlags::HOST_CACHED)) as u8) << 1
+                    | ((!properties.subset(MemoryPropertyFlags::HOST_COHERENT)) as u8) << 0
                     | 0,
             )
         }
@@ -53,24 +53,24 @@ impl Usage for Data {
 #[derive(Clone, Copy, Debug)]
 pub struct Dynamic;
 
-impl Usage for Dynamic {
+impl MemoryUsage for Dynamic {
     type Fitness = u8;
 
     #[inline]
-    fn value(self) -> UsageValue {
-        UsageValue::Dynamic
+    fn value(&self) -> MemoryUsageValue {
+        MemoryUsageValue::Dynamic
     }
 
     #[inline]
-    fn memory_fitness(&self, properties: Properties) -> Option<u8> {
-        if !properties.contains(Properties::HOST_VISIBLE) {
+    fn memory_fitness(&self, properties: MemoryPropertyFlags) -> Option<u8> {
+        if !properties.subset(MemoryPropertyFlags::HOST_VISIBLE) {
             None
         } else {
-            assert!(!properties.contains(Properties::LAZILY_ALLOCATED));
+            assert!(!properties.subset(MemoryPropertyFlags::LAZILY_ALLOCATED));
             Some(
-                (properties.contains(Properties::DEVICE_LOCAL) as u8) << 2
-                    | (properties.contains(Properties::HOST_COHERENT) as u8) << 1
-                    | ((!properties.contains(Properties::HOST_CACHED)) as u8) << 0
+                (properties.subset(MemoryPropertyFlags::DEVICE_LOCAL) as u8) << 2
+                    | (properties.subset(MemoryPropertyFlags::HOST_COHERENT) as u8) << 1
+                    | ((!properties.subset(MemoryPropertyFlags::HOST_CACHED)) as u8) << 0
                     | 0,
             )
         }
@@ -83,24 +83,24 @@ impl Usage for Dynamic {
 #[derive(Clone, Copy, Debug)]
 pub struct Upload;
 
-impl Usage for Upload {
+impl MemoryUsage for Upload {
     type Fitness = u8;
 
     #[inline]
-    fn value(self) -> UsageValue {
-        UsageValue::Upload
+    fn value(&self) -> MemoryUsageValue {
+        MemoryUsageValue::Upload
     }
 
     #[inline]
-    fn memory_fitness(&self, properties: Properties) -> Option<u8> {
-        if !properties.contains(Properties::HOST_VISIBLE) {
+    fn memory_fitness(&self, properties: MemoryPropertyFlags) -> Option<u8> {
+        if !properties.subset(MemoryPropertyFlags::HOST_VISIBLE) {
             None
         } else {
-            assert!(!properties.contains(Properties::LAZILY_ALLOCATED));
+            assert!(!properties.subset(MemoryPropertyFlags::LAZILY_ALLOCATED));
             Some(
-                ((!properties.contains(Properties::DEVICE_LOCAL)) as u8) << 2
-                    | ((!properties.contains(Properties::HOST_CACHED)) as u8) << 0
-                    | (properties.contains(Properties::HOST_COHERENT) as u8) << 1
+                ((!properties.subset(MemoryPropertyFlags::DEVICE_LOCAL)) as u8) << 2
+                    | ((!properties.subset(MemoryPropertyFlags::HOST_CACHED)) as u8) << 0
+                    | (properties.subset(MemoryPropertyFlags::HOST_COHERENT) as u8) << 1
                     | 0,
             )
         }
@@ -113,24 +113,24 @@ impl Usage for Upload {
 #[derive(Clone, Copy, Debug)]
 pub struct Download;
 
-impl Usage for Download {
+impl MemoryUsage for Download {
     type Fitness = u8;
 
     #[inline]
-    fn value(self) -> UsageValue {
-        UsageValue::Download
+    fn value(&self) -> MemoryUsageValue {
+        MemoryUsageValue::Download
     }
 
     #[inline]
-    fn memory_fitness(&self, properties: Properties) -> Option<u8> {
-        if !properties.contains(Properties::HOST_VISIBLE) {
+    fn memory_fitness(&self, properties: MemoryPropertyFlags) -> Option<u8> {
+        if !properties.subset(MemoryPropertyFlags::HOST_VISIBLE) {
             None
         } else {
-            assert!(!properties.contains(Properties::LAZILY_ALLOCATED));
+            assert!(!properties.subset(MemoryPropertyFlags::LAZILY_ALLOCATED));
             Some(
-                ((!properties.contains(Properties::DEVICE_LOCAL)) as u8) << 2
-                    | (properties.contains(Properties::HOST_CACHED) as u8) << 1
-                    | (properties.contains(Properties::HOST_COHERENT) as u8) << 0
+                ((!properties.subset(MemoryPropertyFlags::DEVICE_LOCAL)) as u8) << 2
+                    | (properties.subset(MemoryPropertyFlags::HOST_CACHED) as u8) << 1
+                    | (properties.subset(MemoryPropertyFlags::HOST_COHERENT) as u8) << 0
                     | 0,
             )
         }
@@ -139,7 +139,7 @@ impl Usage for Download {
 
-/// Dynamic value that specify memory usage flags.
+/// Dynamic value that specifies memory usage flags.
 #[derive(Clone, Copy, Debug)]
-pub enum UsageValue {
+pub enum MemoryUsageValue {
     /// Runtime counterpart for `Data`.
     Data,
     /// Runtime counterpart for `Dynamic`.
@@ -150,21 +150,21 @@ pub enum UsageValue {
     Download,
 }
 
-impl Usage for UsageValue {
+impl MemoryUsage for MemoryUsageValue {
     type Fitness = u8;
 
     #[inline]
-    fn value(self) -> UsageValue {
-        self
+    fn value(&self) -> MemoryUsageValue {
+        *self
     }
 
     #[inline]
-    fn memory_fitness(&self, properties: Properties) -> Option<u8> {
+    fn memory_fitness(&self, properties: MemoryPropertyFlags) -> Option<u8> {
         match self {
-            UsageValue::Data => Data.memory_fitness(properties),
-            UsageValue::Dynamic => Dynamic.memory_fitness(properties),
-            UsageValue::Upload => Upload.memory_fitness(properties),
-            UsageValue::Download => Download.memory_fitness(properties),
+            MemoryUsageValue::Data => Data.memory_fitness(properties),
+            MemoryUsageValue::Dynamic => Dynamic.memory_fitness(properties),
+            MemoryUsageValue::Upload => Upload.memory_fitness(properties),
+            MemoryUsageValue::Download => Download.memory_fitness(properties),
         }
     }
 }
diff --git a/memory/src/util.rs b/memory/src/util.rs
index 6663bd6f..8ce109bb 100644
--- a/memory/src/util.rs
+++ b/memory/src/util.rs
@@ -48,15 +48,11 @@ impl IntegerFitting for u64 {
     }
 }
 
-#[cfg(
-    not(
-        any(
-            target_pointer_width = "16",
-            target_pointer_width = "32",
-            target_pointer_width = "64"
-        )
-    )
-)]
+#[cfg(not(any(
+    target_pointer_width = "16",
+    target_pointer_width = "32",
+    target_pointer_width = "64"
+)))]
 impl IntegerFitting for u64 {
     fn fits_usize(self) -> bool {
         true
@@ -124,14 +120,6 @@ pub(crate) fn fits_usize<T: IntegerFitting>(value: T) -> bool {
     value.fits_usize()
 }
 
-// pub(crate) fn fits_isize<T: IntegerFitting>(value: T) -> bool {
-//     value.fits_isize()
-// }
-
-// pub(crate) fn fits_u64(value: usize) -> bool {
-//     u64::usize_fits(value)
-// }
-
 pub(crate) fn fits_u32(value: usize) -> bool {
     u32::usize_fits(value)
 }
diff --git a/mesh/Cargo.toml b/mesh/Cargo.toml
new file mode 100644
index 00000000..3285df79
--- /dev/null
+++ b/mesh/Cargo.toml
@@ -0,0 +1,25 @@
+[package]
+name = "rendy-mesh"
+version = "0.1.0"
+authors = ["omni-viral <scareaangel@gmail.com>"]
+description = "Helper crate for gfx-hal to create and use meshes with vertex semantic"
+keywords = ["gfx", "gfx-hal", "graphics"]
+readme = "README.md"
+license = "MIT/Apache-2.0"
+repository = "https://github.com/omni-viral/gfx-mesh.git"
+documentation = "https://docs.rs/crate/gfx-mesh/0.1.0/gfx-mesh"
+
+[dependencies]
+ash = { path = "../../ash/ash" }
+failure = "0.1"
+rendy-memory = { path = "../memory" }
+rendy-command = { path = "../command" }
+rendy-resource = { path = "../resource" }
+rendy-factory = { path = "../factory" }
+serde = { version = "1.0", optional = true }
+
+[target.'cfg(feature = "serde")'.dependencies]
+smallvec = { version = "0.6", features = ["serde"] }
+
+[target.'cfg(not(feature = "serde"))'.dependencies]
+smallvec = { version = "0.6" }
diff --git a/mesh/README.md b/mesh/README.md
new file mode 100644
index 00000000..e13e6dc6
--- /dev/null
+++ b/mesh/README.md
@@ -0,0 +1,38 @@
+# `rendy-mesh`
+
+Helper crate for `rendy` to create and use meshes with vertex semantics.
+
+# Vertex semantics
+
+Vertex formats usually have semantics attached to their field names.
+This crate provides traits and types to make those semantics explicit at the type level.
+
+`Position`, `Normal`, `TexCoord` etc. are attributes with unambiguous semantics.
+Users can define their own attribute types by implementing the `AsAttribute` trait.
+
+While an attribute type on its own is already a trivial vertex format (with a single attribute), complex vertex formats are created by composing attribute types.
+
+The `WithAttribute` trait gives access to the format info of an individual attribute defined in a vertex format.
+The `Query` trait gives access to the format info of several attributes at once.
+
+The `VertexFormat` queried from a vertex type can be used to build graphics pipelines and to bind the required vertex buffers of a mesh to a command buffer.
+
+To define a custom vertex format type, implement the `AsVertex` trait by providing the `VERTEX` associated constant of type `VertexFormat`, as in the sketch below.
+
+`WithAttribute` can also be implemented for each attribute, and the `VERTEX` constant can then be written more clearly in terms of those `WithAttribute` implementations.
+`Query` is implemented automatically.
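+
+A minimal sketch of a custom vertex format (the crate is used as `rendy_mesh`; the `Weight` and `PosWeight` types are made up for illustration):
+
+```rust
+use std::borrow::Cow;
+
+use ash::vk::Format;
+use rendy_mesh::{AsAttribute, AsVertex, Attribute, Position, VertexFormat, WithAttribute};
+
+/// Hypothetical per-vertex weight attribute.
+#[repr(C)]
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub struct Weight(pub f32);
+
+impl AsAttribute for Weight {
+    const NAME: &'static str = "weight";
+    const SIZE: u32 = 4;
+    const FORMAT: Format = Format::R32_SFLOAT;
+}
+
+/// Custom vertex format: a position followed by a weight.
+#[repr(C)]
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub struct PosWeight {
+    pub position: Position,
+    pub weight: Weight,
+}
+
+impl AsVertex for PosWeight {
+    const VERTEX: VertexFormat<'static> = VertexFormat {
+        attributes: Cow::Borrowed(&[
+            <Self as WithAttribute<Position>>::ATTRIBUTE,
+            <Self as WithAttribute<Weight>>::ATTRIBUTE,
+        ]),
+        stride: Position::SIZE + Weight::SIZE,
+    };
+}
+
+impl WithAttribute<Position> for PosWeight {
+    const ATTRIBUTE: Attribute = Attribute {
+        offset: 0,
+        format: Position::FORMAT,
+    };
+}
+
+impl WithAttribute<Weight> for PosWeight {
+    const ATTRIBUTE: Attribute = Attribute {
+        offset: Position::SIZE,
+        format: Weight::FORMAT,
+    };
+}
+```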
+
+# Mesh
+
+`Mesh` is a collection of vertex buffers and, optionally, an index buffer, together with the vertex formats of the buffers and the index type. A primitive topology is also specified, which defines how vertices form primitives (lines, triangles etc.).
+To create instances of `Mesh`, use `MeshBuilder`:
+
+1. Fill the `MeshBuilder` with typed vertex data.
+1. Provide the index data.
+1. Set the primitive type (triangle list by default).
+1. Call `MeshBuilder::build`. It uses the `Factory` from `rendy-factory` to create buffers and upload the data.
+
+The result is your fresh new `Mesh`, or an `Error` if buffer creation or the upload failed. See the sketch after this section.
+
+To bind vertex buffers to a command buffer, use `Mesh::bind` with a sorted array of `VertexFormat`s (the same ones that were used to set up the graphics pipeline).
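+
+A rough sketch of building a mesh with the current `MeshBuilder::build` signature (assumes `FamilyIndex` and `Factory` are re-exported at the roots of the `rendy-command` and `rendy-factory` crates, and that the caller already has a `family` and a `factory`):
+
+```rust
+use failure::Error;
+use rendy_command::FamilyIndex;
+use rendy_factory::Factory;
+use rendy_mesh::{Mesh, PosColor};
+
+fn make_mesh(
+    family: FamilyIndex,
+    factory: &mut Factory,
+    vertices: Vec<PosColor>,
+    indices: Vec<u32>,
+) -> Result<Mesh, Error> {
+    Mesh::new()                  // returns a `MeshBuilder`
+        .with_vertices(vertices) // typed vertex data; the format is taken from `PosColor`
+        .with_indices(indices)   // `u16` or `u32` index data
+        .build(family, factory)  // creates the buffers and uploads the data
+}
+```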
diff --git a/mesh/src/lib.rs b/mesh/src/lib.rs
new file mode 100644
index 00000000..4beb4a08
--- /dev/null
+++ b/mesh/src/lib.rs
@@ -0,0 +1,30 @@
+//!
+//! This crate provides means to deal with vertex buffers and meshes.
+//!
+//! `Attribute` and `VertexFormat` allow vertex structures to declare their semantics.
+//! `Mesh` can be created from typed vertex structures and provides a mechanism to bind
+//! the vertex attributes required by a shader interface.
+//!
+
+extern crate ash;
+extern crate failure;
+extern crate rendy_command as command;
+extern crate rendy_factory as factory;
+extern crate rendy_memory as memory;
+extern crate rendy_resource as resource;
+
+#[cfg(feature = "serde")]
+#[macro_use]
+extern crate serde;
+
+extern crate smallvec;
+
+mod mesh;
+mod utils;
+mod vertex;
+
+pub use mesh::{Bind, Incompatible, IndexBuffer, Indices, Mesh, MeshBuilder, VertexBuffer};
+pub use vertex::{
+    AsAttribute, AsVertex, Attribute, Color, Normal, PosColor, PosNorm, PosNormTangTex, PosNormTex,
+    PosTex, Position, Query, Tangent, TexCoord, VertexFormat, WithAttribute,
+};
diff --git a/mesh/src/mesh.rs b/mesh/src/mesh.rs
new file mode 100644
index 00000000..fa5a17cd
--- /dev/null
+++ b/mesh/src/mesh.rs
@@ -0,0 +1,386 @@
+//!
+//! Manage vertex and index buffers of single objects with ease.
+//!
+
+use std::borrow::Cow;
+use std::mem::size_of;
+
+use failure::Error;
+
+use ash::vk;
+use smallvec::SmallVec;
+
+use command::FamilyIndex;
+use factory::Factory;
+use memory::usage::Dynamic;
+use resource::Buffer;
+
+use utils::{cast_cow, is_slice_sorted, is_slice_sorted_by_key};
+use vertex::{AsVertex, VertexFormat};
+
+/// Vertex buffer with its format
+#[derive(Debug)]
+pub struct VertexBuffer {
+    buffer: Buffer,
+    format: VertexFormat<'static>,
+    len: u32,
+}
+
+/// Index buffer with its index type
+#[derive(Debug)]
+pub struct IndexBuffer {
+    buffer: Buffer,
+    index_type: vk::IndexType,
+    len: u32,
+}
+
+/// Abstracts over two types of indices and their absence.
+#[derive(Debug)]
+pub enum Indices<'a> {
+    /// No indices.
+    None,
+
+    /// `u16` per index.
+    U16(Cow<'a, [u16]>),
+
+    /// `u32` per index.
+    U32(Cow<'a, [u32]>),
+}
+
+impl From<Vec<u16>> for Indices<'static> {
+    fn from(vec: Vec<u16>) -> Self {
+        Indices::U16(vec.into())
+    }
+}
+
+impl<'a> From<&'a [u16]> for Indices<'a> {
+    fn from(slice: &'a [u16]) -> Self {
+        Indices::U16(slice.into())
+    }
+}
+
+impl<'a> From<Cow<'a, [u16]>> for Indices<'a> {
+    fn from(cow: Cow<'a, [u16]>) -> Self {
+        Indices::U16(cow)
+    }
+}
+
+impl From<Vec<u32>> for Indices<'static> {
+    fn from(vec: Vec<u32>) -> Self {
+        Indices::U32(vec.into())
+    }
+}
+
+impl<'a> From<&'a [u32]> for Indices<'a> {
+    fn from(slice: &'a [u32]) -> Self {
+        Indices::U32(slice.into())
+    }
+}
+
+impl<'a> From<Cow<'a, [u32]>> for Indices<'a> {
+    fn from(cow: Cow<'a, [u32]>) -> Self {
+        Indices::U32(cow)
+    }
+}
+
+/// Generics-free mesh builder.
+/// Useful for creating a mesh from a non-predefined set of data,
+/// e.g. loaded from glTF.
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct MeshBuilder<'a> {
+    vertices: SmallVec<[(Cow<'a, [u8]>, VertexFormat<'static>); 16]>,
+    indices: Option<(Cow<'a, [u8]>, vk::IndexType)>,
+    prim: vk::PrimitiveTopology,
+}
+
+impl<'a> MeshBuilder<'a> {
+    /// Create empty builder.
+    pub fn new() -> Self {
+        MeshBuilder {
+            vertices: SmallVec::new(),
+            indices: None,
+            prim: vk::PrimitiveTopology::TRIANGLE_LIST,
+        }
+    }
+
+    /// Set the index buffer for the `MeshBuilder`
+    pub fn with_indices<I>(mut self, indices: I) -> Self
+    where
+        I: Into<Indices<'a>>,
+    {
+        self.set_indices(indices);
+        self
+    }
+
+    /// Set the index buffer for the `MeshBuilder`
+    pub fn set_indices<I>(&mut self, indices: I) -> &mut Self
+    where
+        I: Into<Indices<'a>>,
+    {
+        self.indices = match indices.into() {
+            Indices::None => None,
+            Indices::U16(i) => Some((cast_cow(i), vk::IndexType::UINT16)),
+            Indices::U32(i) => Some((cast_cow(i), vk::IndexType::UINT32)),
+        };
+        self
+    }
+
+    /// Add another set of vertices to the `MeshBuilder`
+    pub fn with_vertices<V, D>(mut self, vertices: D) -> Self
+    where
+        V: AsVertex + 'a,
+        D: Into<Cow<'a, [V]>>,
+    {
+        self.add_vertices(vertices);
+        self
+    }
+
+    /// Add another set of vertices to the `MeshBuilder`
+    pub fn add_vertices<V, D>(&mut self, vertices: D) -> &mut Self
+    where
+        V: AsVertex + 'a,
+        D: Into<Cow<'a, [V]>>,
+    {
+        self.vertices.push((cast_cow(vertices.into()), V::VERTEX));
+        self
+    }
+
+    /// Sets the primitive type of the mesh.
+    ///
+    /// By default, meshes are constructed as triangle lists.
+    pub fn with_prim_type(mut self, prim: vk::PrimitiveTopology) -> Self {
+        self.prim = prim;
+        self
+    }
+
+    /// Sets the primitive type of the mesh.
+    ///
+    /// By default, meshes are constructed as triangle lists.
+    pub fn set_prim_type(&mut self, prim: vk::PrimitiveTopology) -> &mut Self {
+        self.prim = prim;
+        self
+    }
+
+    /// Builds and returns the new mesh.
+    pub fn build(&self, family: FamilyIndex, factory: &mut Factory) -> Result<Mesh, Error> {
+        Ok(Mesh {
+            vbufs: self
+                .vertices
+                .iter()
+                .map(|(vertices, format)| {
+                    let len = vertices.len() as u32 / format.stride;
+                    Ok(VertexBuffer {
+                        buffer: {
+                            let mut buffer = factory.create_buffer(
+                                vk::BufferCreateInfo::builder()
+                                    .size(vertices.len() as _)
+                                    .usage(
+                                        vk::BufferUsageFlags::VERTEX_BUFFER
+                                            | vk::BufferUsageFlags::TRANSFER_DST,
+                                    ).build(),
+                                1,
+                                Dynamic,
+                            )?;
+                            unsafe {
+                                // New buffer can't be touched by device yet.
+                                factory.upload_buffer(
+                                    &mut buffer,
+                                    0,
+                                    vertices,
+                                    family,
+                                    vk::AccessFlags::VERTEX_ATTRIBUTE_READ,
+                                )?;
+                            }
+                            buffer
+                        },
+                        format: format.clone(),
+                        len,
+                    })
+                }).collect::<Result<_, Error>>()?,
+            ibuf: match self.indices {
+                None => None,
+                Some((ref indices, index_type)) => {
+                    let stride = match index_type {
+                        vk::IndexType::UINT16 => size_of::<u16>(),
+                        vk::IndexType::UINT32 => size_of::<u32>(),
+                        _ => unreachable!(),
+                    };
+                    let len = indices.len() as u32 / stride as u32;
+                    Some(IndexBuffer {
+                        buffer: {
+                            let mut buffer = factory.create_buffer(
+                                vk::BufferCreateInfo::builder()
+                                    .size(indices.len() as _)
+                                    .usage(
+                                        vk::BufferUsageFlags::INDEX_BUFFER
+                                            | vk::BufferUsageFlags::TRANSFER_DST,
+                                    ).build(),
+                                1,
+                                Dynamic,
+                            )?;
+                            unsafe {
+                                // New buffer can't be touched by device yet.
+                                factory.upload_buffer(
+                                    &mut buffer,
+                                    0,
+                                    indices,
+                                    family,
+                                    vk::AccessFlags::INDEX_READ,
+                                )?;
+                            }
+                            buffer
+                        },
+                        index_type,
+                        len,
+                    })
+                }
+            },
+            prim: self.prim,
+        })
+    }
+}
+
+/// A single mesh is a collection of buffers that provides the available attributes.
+/// Commonly, exactly one mesh is used per draw call.
+#[derive(Debug)]
+pub struct Mesh {
+    vbufs: Vec<VertexBuffer>,
+    ibuf: Option<IndexBuffer>,
+    prim: vk::PrimitiveTopology,
+}
+
+impl Mesh {
+    /// Build a new mesh using `MeshBuilder`
+    pub fn new<'a>() -> MeshBuilder<'a> {
+        MeshBuilder::new()
+    }
+
+    /// `vk::PrimitiveTopology` of the `Mesh`
+    pub fn primitive(&self) -> vk::PrimitiveTopology {
+        self.prim
+    }
+
+    // /// Bind buffers to specified attribute locations.
+    // pub fn bind<'a>(
+    //     &'a self,
+    //     formats: &[VertexFormat],
+    //     vertex: &mut VertexBufferSet<'a, B>,
+    // ) -> Result<Bind<'a, B>, Incompatible> {
+    //     debug_assert!(is_slice_sorted(formats));
+    //     debug_assert!(is_slice_sorted_by_key(&self.vbufs, |vbuf| &vbuf.format));
+    //     debug_assert!(vertex.0.is_empty());
+
+    //     let mut next = 0;
+    //     let mut vertex_count = None;
+    //     for format in formats {
+    //         if let Some(index) = find_compatible_buffer(&self.vbufs[next..], format) {
+    //             // Ensure buffer is valid
+    //             vertex.0.push((self.vbufs[index].buffer.raw(), 0));
+    //             next = index + 1;
+    //             assert!(vertex_count.is_none() || vertex_count == Some(self.vbufs[index].len));
+    //             vertex_count = Some(self.vbufs[index].len);
+    //         } else {
+    //             // Can't bind
+    //             return Err(Incompatible);
+    //         }
+    //     }
+    //     Ok(self
+    //         .ibuf
+    //         .as_ref()
+    //         .map(|ibuf| Bind::Indexed {
+    //             buffer: ibuf.buffer.raw(),
+    //             offset: 0,
+    //             index_type: ibuf.index_type,
+    //             count: ibuf.len,
+    //         })
+    //         .unwrap_or(Bind::Unindexed {
+    //             count: vertex_count.unwrap_or(0),
+    //         }))
+    // }
+}
+
+/// Error type returned by `Mesh::bind` in case the mesh's vertex buffers are incompatible with the requested vertex formats.
+#[derive(Clone, Copy, Debug)]
+pub struct Incompatible;
+
+/// Result of binding buffers.
+/// It only contains the index buffer parameters (if an index buffer exists)
+/// and the vertex count.
+/// Vertex buffers are kept in a separate `VertexBufferSet`.
+#[derive(Copy, Clone, Debug)]
+pub enum Bind<'a> {
+    /// Indexed binding.
+    Indexed {
+        /// The buffer to bind.
+        buffer: &'a Buffer,
+        /// The offset into the buffer to start at.
+        offset: u64,
+        /// The type of the table elements (`u16` or `u32`).
+        index_type: vk::IndexType,
+        /// Indices count to use in `draw_indexed` method.
+        count: u32,
+    },
+    /// Not indexed binding.
+    Unindexed {
+        /// Vertex count to use in `draw` method.
+        count: u32,
+    },
+}
+
+// impl<'a> Bind<'a> {
+//     /// Record drawing command for this binding.
+//     pub fn draw(&self, vertex: VertexBufferSet, encoder: &mut RenderSubpassCommon) {
+//         encoder.bind_vertex_buffers(0, vertex);
+//         match *self {
+//             Bind::Indexed {
+//                 buffer,
+//                 offset,
+//                 index_type,
+//                 count,
+//             } => {
+//                 encoder.bind_index_buffer(IndexBufferView {
+//                     buffer,
+//                     offset,
+//                     index_type,
+//                 });
+//                 encoder.draw_indexed(0..count, 0, 0..1);
+//             }
+//             Bind::Unindexed { count } => {
+//                 encoder.draw(0..count, 0..1);
+//             }
+//         }
+//     }
+// }
+
+/// Helper function to find a buffer with a compatible format.
+fn find_compatible_buffer(vbufs: &[VertexBuffer], format: &VertexFormat) -> Option<usize> {
+    debug_assert!(is_slice_sorted_by_key(&*format.attributes, |a| a.offset));
+    for (i, vbuf) in vbufs.iter().enumerate() {
+        debug_assert!(is_slice_sorted_by_key(&*vbuf.format.attributes, |a| a.offset));
+        if is_compatible(&vbuf.format, format) {
+            return Some(i);
+        }
+    }
+    None
+}
+
+/// Check if vertex format `left` is compatible with `right`.
+/// `left` must have the same `stride` and contain all attributes from `right`.
+fn is_compatible(left: &VertexFormat, right: &VertexFormat) -> bool {
+    if left.stride != right.stride {
+        return false;
+    }
+
+    // Don't start searching from index 0 because attributes are sorted
+    let mut skip = 0;
+    right.attributes.iter().all(|r| {
+        left.attributes[skip..]
+            .iter()
+            .position(|l| *l == *r)
+            .map_or(false, |p| {
+                skip += p;
+                true
+            })
+    })
+}
diff --git a/mesh/src/utils.rs b/mesh/src/utils.rs
new file mode 100644
index 00000000..75ed7c4b
--- /dev/null
+++ b/mesh/src/utils.rs
@@ -0,0 +1,53 @@
+use std::borrow::Cow;
+
+/// Check if a slice is sorted in ascending order.
+pub fn is_slice_sorted<T: Ord>(slice: &[T]) -> bool {
+    is_slice_sorted_by_key(slice, |i| i)
+}
+
+/// Check if a slice is sorted in ascending order of the keys produced by `f`.
+pub fn is_slice_sorted_by_key<'a, T, K: Ord, F: Fn(&'a T) -> K>(slice: &'a [T], f: F) -> bool {
+    if let Some((first, slice)) = slice.split_first() {
+        let mut cmp = f(first);
+        for item in slice {
+            let item = f(item);
+            if cmp > item {
+                return false;
+            }
+            cmp = item;
+        }
+    }
+    true
+}
+
+/// Cast a vector of `T` into a vector of its raw bytes.
+pub fn cast_vec<T>(mut vec: Vec<T>) -> Vec<u8> {
+    use std::mem;
+
+    let len = mem::size_of::<T>() * vec.len();
+    let cap = mem::size_of::<T>() * vec.capacity();
+
+    let ptr = vec.as_mut_ptr();
+    mem::forget(vec);
+    unsafe { Vec::from_raw_parts(ptr as _, len, cap) }
+}
+
+/// Cast a slice of `T` into a slice of its raw bytes.
+pub fn cast_slice<T>(slice: &[T]) -> &[u8] {
+    use std::{mem, slice::from_raw_parts};
+
+    let len = mem::size_of::<T>() * slice.len();
+    let ptr = slice.as_ptr();
+    unsafe { from_raw_parts(ptr as _, len) }
+}
+
+/// Cast a cow of `T` into a cow of its raw bytes.
+pub fn cast_cow<T>(cow: Cow<[T]>) -> Cow<[u8]>
+where
+    T: Clone,
+{
+    match cow {
+        Cow::Borrowed(slice) => Cow::Borrowed(cast_slice(slice)),
+        Cow::Owned(vec) => Cow::Owned(cast_vec(vec)),
+    }
+}
diff --git a/mesh/src/vertex.rs b/mesh/src/vertex.rs
new file mode 100644
index 00000000..513bdac2
--- /dev/null
+++ b/mesh/src/vertex.rs
@@ -0,0 +1,415 @@
+//! Built-in vertex formats.
+
+use std::{borrow::Cow, fmt::Debug};
+
+use ash::vk::Format;
+
+/// Single attribute of a vertex: its format and byte offset within the vertex.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub struct Attribute {
+    /// Attribute format.
+    pub format: Format,
+    /// Offset of the attribute within the vertex, in bytes.
+    pub offset: u32,
+}
+
+/// Trait for vertex attributes to implement
+pub trait AsAttribute: Debug + PartialEq + Copy + Send + Sync {
+    /// Name of the attribute
+    const NAME: &'static str;
+
+    /// Size of the attribute.
+    const SIZE: u32;
+
+    /// Attribute format.
+    const FORMAT: Format;
+}
+
+/// Type for position attribute of vertex.
+#[repr(C)]
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct Position(pub [f32; 3]);
+impl<T> From<T> for Position
+where
+    T: Into<[f32; 3]>,
+{
+    fn from(from: T) -> Self {
+        Position(from.into())
+    }
+}
+impl AsAttribute for Position {
+    const NAME: &'static str = "position";
+    const SIZE: u32 = 12;
+    const FORMAT: Format = Format::R32G32B32_SFLOAT;
+}
+
+/// Type for color attribute of vertex
+#[repr(C)]
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct Color(pub [f32; 4]);
+impl<T> From<T> for Color
+where
+    T: Into<[f32; 4]>,
+{
+    fn from(from: T) -> Self {
+        Color(from.into())
+    }
+}
+impl AsAttribute for Color {
+    const NAME: &'static str = "color";
+    const SIZE: u32 = 16;
+    const FORMAT: Format = Format::R32G32B32A32_SFLOAT;
+}
+
+/// Type for normal attribute of vertex
+#[repr(C)]
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct Normal(pub [f32; 3]);
+impl<T> From<T> for Normal
+where
+    T: Into<[f32; 3]>,
+{
+    fn from(from: T) -> Self {
+        Normal(from.into())
+    }
+}
+
+impl AsAttribute for Normal {
+    const NAME: &'static str = "normal";
+    const SIZE: u32 = 12;
+    const FORMAT: Format = Format::R32G32B32_SFLOAT;
+}
+
+/// Type for tangent attribute of vertex
+#[repr(C)]
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct Tangent(pub [f32; 3]);
+impl<T> From<T> for Tangent
+where
+    T: Into<[f32; 3]>,
+{
+    fn from(from: T) -> Self {
+        Tangent(from.into())
+    }
+}
+
+impl AsAttribute for Tangent {
+    const NAME: &'static str = "tangent";
+    const SIZE: u32 = 12;
+    const FORMAT: Format = Format::R32G32B32_SFLOAT;
+}
+
+/// Type for texture coord attribute of vertex
+#[repr(C)]
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct TexCoord(pub [f32; 2]);
+impl<T> From<T> for TexCoord
+where
+    T: Into<[f32; 2]>,
+{
+    fn from(from: T) -> Self {
+        TexCoord(from.into())
+    }
+}
+
+impl AsAttribute for TexCoord {
+    const NAME: &'static str = "tex_coord";
+    const SIZE: u32 = 8;
+    const FORMAT: Format = Format::R32G32_SFLOAT;
+}
+
+/// Vertex format contains information needed to initialize a graphics pipeline.
+/// Attributes must be sorted by offset.
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct VertexFormat<'a> {
+    /// Attributes for format.
+    pub attributes: Cow<'a, [Attribute]>,
+
+    /// Size of single vertex.
+    pub stride: u32,
+}
+
+/// Trait implemented by all valid vertex formats.
+pub trait AsVertex: Copy + Sized + Send + Sync {
+    /// List of all attributes formats with name and offset.
+    const VERTEX: VertexFormat<'static>;
+
+    /// Returns the attribute of the vertex by attribute type
+    #[inline]
+    fn attribute<F>() -> Attribute
+    where
+        F: AsAttribute,
+        Self: WithAttribute<F>,
+    {
+        <Self as WithAttribute<F>>::ATTRIBUTE
+    }
+}
+
+impl<T> AsVertex for T
+where
+    T: AsAttribute,
+{
+    const VERTEX: VertexFormat<'static> = VertexFormat {
+        attributes: Cow::Borrowed(&[Attribute {
+            format: T::FORMAT,
+            offset: 0,
+        }]),
+        stride: T::SIZE,
+    };
+}
+
+/// Trait implemented by all valid vertex formats for each field
+pub trait WithAttribute<F: AsAttribute>: AsVertex {
+    /// Individual format of the attribute for this vertex format
+    const ATTRIBUTE: Attribute;
+}
+
+impl<T> WithAttribute<T> for T
+where
+    T: AsAttribute,
+{
+    const ATTRIBUTE: Attribute = Attribute {
+        format: T::FORMAT,
+        offset: 0,
+    };
+}
+
+/// Vertex format with position and RGBA color attributes.
+#[repr(C)]
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct PosColor {
+    /// Position of the vertex in 3D space.
+    pub position: Position,
+    /// RGBA color value of the vertex.
+    pub color: Color,
+}
+
+impl AsVertex for PosColor {
+    const VERTEX: VertexFormat<'static> = VertexFormat {
+        attributes: Cow::Borrowed(&[
+            <Self as WithAttribute<Position>>::ATTRIBUTE,
+            <Self as WithAttribute<Color>>::ATTRIBUTE,
+        ]),
+        stride: Position::SIZE + Color::SIZE,
+    };
+}
+
+impl WithAttribute<Position> for PosColor {
+    const ATTRIBUTE: Attribute = Attribute {
+        offset: 0,
+        format: Position::FORMAT,
+    };
+}
+
+impl WithAttribute<Color> for PosColor {
+    const ATTRIBUTE: Attribute = Attribute {
+        offset: Position::SIZE,
+        format: Color::FORMAT,
+    };
+}
+
+/// Vertex format with position and normal attributes.
+#[repr(C)]
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct PosNorm {
+    /// Position of the vertex in 3D space.
+    pub position: Position,
+    /// Normal vector of the vertex.
+    pub normal: Normal,
+}
+
+impl AsVertex for PosNorm {
+    const VERTEX: VertexFormat<'static> = VertexFormat {
+        attributes: Cow::Borrowed(&[
+            <Self as WithAttribute<Position>>::ATTRIBUTE,
+            <Self as WithAttribute<Normal>>::ATTRIBUTE,
+        ]),
+        stride: Position::SIZE + Normal::SIZE,
+    };
+}
+
+impl WithAttribute<Position> for PosNorm {
+    const ATTRIBUTE: Attribute = Attribute {
+        offset: 0,
+        format: Position::FORMAT,
+    };
+}
+
+impl WithAttribute<Normal> for PosNorm {
+    const ATTRIBUTE: Attribute = Attribute {
+        offset: Position::SIZE,
+        format: Normal::FORMAT,
+    };
+}
+
+/// Vertex format with position and UV texture coordinate attributes.
+#[repr(C)]
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct PosTex {
+    /// Position of the vertex in 3D space.
+    pub position: [f32; 3],
+    /// UV texture coordinates used by the vertex.
+    pub tex_coord: [f32; 2],
+}
+
+impl AsVertex for PosTex {
+    const VERTEX: VertexFormat<'static> = VertexFormat {
+        attributes: Cow::Borrowed(&[
+            <Self as WithAttribute<Position>>::ATTRIBUTE,
+            <Self as WithAttribute<TexCoord>>::ATTRIBUTE,
+        ]),
+        stride: Position::SIZE + TexCoord::SIZE,
+    };
+}
+
+impl WithAttribute<Position> for PosTex {
+    const ATTRIBUTE: Attribute = Attribute {
+        offset: 0,
+        format: Position::FORMAT,
+    };
+}
+
+impl WithAttribute<TexCoord> for PosTex {
+    const ATTRIBUTE: Attribute = Attribute {
+        offset: Position::SIZE,
+        format: TexCoord::FORMAT,
+    };
+}
+
+/// Vertex format with position, normal, and UV texture coordinate attributes.
+#[repr(C)]
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct PosNormTex {
+    /// Position of the vertex in 3D space.
+    pub position: Position,
+    /// Normal vector of the vertex.
+    pub normal: Normal,
+    /// UV texture coordinates used by the vertex.
+    pub tex_coord: TexCoord,
+}
+
+impl AsVertex for PosNormTex {
+    const VERTEX: VertexFormat<'static> = VertexFormat {
+        attributes: Cow::Borrowed(&[
+            <Self as WithAttribute<Position>>::ATTRIBUTE,
+            <Self as WithAttribute<Normal>>::ATTRIBUTE,
+            <Self as WithAttribute<TexCoord>>::ATTRIBUTE,
+        ]),
+        stride: Position::SIZE + Normal::SIZE + TexCoord::SIZE,
+    };
+}
+
+impl WithAttribute<Position> for PosNormTex {
+    const ATTRIBUTE: Attribute = Attribute {
+        offset: 0,
+        format: Position::FORMAT,
+    };
+}
+
+impl WithAttribute<Normal> for PosNormTex {
+    const ATTRIBUTE: Attribute = Attribute {
+        offset: Position::SIZE,
+        format: Normal::FORMAT,
+    };
+}
+
+impl WithAttribute<TexCoord> for PosNormTex {
+    const ATTRIBUTE: Attribute = Attribute {
+        offset: Position::SIZE + Normal::SIZE,
+        format: TexCoord::FORMAT,
+    };
+}
+
+/// Vertex format with position, normal, tangent, and UV texture coordinate attributes.
+#[repr(C)]
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct PosNormTangTex {
+    /// Position of the vertex in 3D space.
+    pub position: Position,
+    /// Normal vector of the vertex.
+    pub normal: Normal,
+    /// Tangent vector of the vertex.
+    pub tangent: Tangent,
+    /// UV texture coordinates used by the vertex.
+    pub tex_coord: TexCoord,
+}
+
+impl AsVertex for PosNormTangTex {
+    const VERTEX: VertexFormat<'static> = VertexFormat {
+        attributes: Cow::Borrowed(&[
+            <Self as WithAttribute<Position>>::ATTRIBUTE,
+            <Self as WithAttribute<Normal>>::ATTRIBUTE,
+            <Self as WithAttribute<Tangent>>::ATTRIBUTE,
+            <Self as WithAttribute<TexCoord>>::ATTRIBUTE,
+        ]),
+        stride: Position::SIZE + Normal::SIZE + Tangent::SIZE + TexCoord::SIZE,
+    };
+}
+
+impl WithAttribute<Position> for PosNormTangTex {
+    const ATTRIBUTE: Attribute = Attribute {
+        offset: 0,
+        format: Position::FORMAT,
+    };
+}
+
+impl WithAttribute<Normal> for PosNormTangTex {
+    const ATTRIBUTE: Attribute = Attribute {
+        offset: Position::SIZE,
+        format: Normal::FORMAT,
+    };
+}
+
+impl WithAttribute<Tangent> for PosNormTangTex {
+    const ATTRIBUTE: Attribute = Attribute {
+        offset: Position::SIZE + Normal::SIZE,
+        format: Tangent::FORMAT,
+    };
+}
+
+impl WithAttribute<TexCoord> for PosNormTangTex {
+    const ATTRIBUTE: Attribute = Attribute {
+        offset: Position::SIZE + Normal::SIZE + Tangent::SIZE,
+        format: TexCoord::FORMAT,
+    };
+}
+
+/// Allows querying specific `Attribute`s of an `AsVertex` vertex format.
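+///
+/// A rough sketch (uses the built-in `PosNormTex` vertex and its `Position` and
+/// `TexCoord` attributes):
+///
+/// ```ignore
+/// let attrs = <PosNormTex as Query<(Position, TexCoord)>>::QUERIED_ATTRIBUTES;
+/// assert_eq!(attrs[0], ("position", PosNormTex::attribute::<Position>()));
+/// ```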
+pub trait Query<T>: AsVertex {
+    /// Attributes from tuple `T`
+    const QUERIED_ATTRIBUTES: &'static [(&'static str, Attribute)];
+}
+
+macro_rules! impl_query {
+    ($($a:ident),*) => {
+        impl<VF $(,$a)*> Query<($($a,)*)> for VF
+            where VF: AsVertex,
+            $(
+                $a: AsAttribute,
+                VF: WithAttribute<$a>,
+            )*
+        {
+            const QUERIED_ATTRIBUTES: &'static [(&'static str, Attribute)] = &[
+                $(
+                    ($a::NAME, <VF as WithAttribute<$a>>::ATTRIBUTE),
+                )*
+            ];
+        }
+
+        impl_query!(@ $($a),*);
+    };
+    (@) => {};
+    (@ $head:ident $(,$tail:ident)*) => {
+        impl_query!($($tail),*);
+    };
+}
+
+impl_query!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z);
diff --git a/renderer/Cargo.toml b/renderer/Cargo.toml
new file mode 100644
index 00000000..88e0fe9c
--- /dev/null
+++ b/renderer/Cargo.toml
@@ -0,0 +1,16 @@
+[package]
+name = "rendy-renderer"
+version = "0.1.0"
+authors = ["omni-viral <scareaangel@gmail.com>"]
+
+[dependencies]
+ash = { path = "../../ash/ash" }
+derivative = "1.0"
+failure = "0.1"
+log = "0.4"
+relevant = "0.2"
+winit = "0.17"
+
+rendy-wsi = { path = "../wsi" }
+rendy-factory = { path = "../factory" }
+rendy-frame = { path = "../frame" }
diff --git a/renderer/src/lib.rs b/renderer/src/lib.rs
new file mode 100644
index 00000000..36f95394
--- /dev/null
+++ b/renderer/src/lib.rs
@@ -0,0 +1,32 @@
+#[macro_use]
+extern crate derivative;
+extern crate failure;
+extern crate winit;
+
+extern crate rendy_factory as factory;
+extern crate rendy_frame as frame;
+extern crate rendy_wsi as wsi;
+
+use factory::Factory;
+use frame::Frames;
+
+/// Renderer that uses the `Factory` and external data `T` to draw.
+pub trait Renderer<T> {
+    /// Builder type that constructs this renderer.
+    type Desc: RendererBuilder<T>;
+
+    /// Get the default builder for this renderer.
+    fn builder() -> Self::Desc
+    where
+        Self::Desc: Default,
+    {
+        Self::Desc::default()
+    }
+
+    /// Run the renderer once.
+    fn run(&mut self, factory: &mut Factory, data: &mut T);
+
+    /// Dispose of the renderer.
+    fn dispose(self, factory: &mut Factory, data: &mut T);
+}
+
+/// Builder that constructs a `Renderer`.
+pub trait RendererBuilder<T> {
+    /// Error type returned when building fails.
+    type Error;
+
+    /// Renderer type this builder constructs.
+    type Renderer: Renderer<T>;
+
+    /// Build the renderer.
+    fn build(self, factory: &mut Factory, data: &mut T) -> Result<Self::Renderer, Self::Error>;
+}
diff --git a/rendy/Cargo.toml b/rendy/Cargo.toml
index 79e3dfd4..6124e580 100644
--- a/rendy/Cargo.toml
+++ b/rendy/Cargo.toml
@@ -4,18 +4,25 @@ version = "0.1.0"
 authors = ["omni-viral <scareaangel@gmail.com>"]
 
 [dependencies]
+
+rendy-command = { path = "../command" }
+rendy-factory = { path = "../factory" }
+rendy-frame = { path = "../frame" }
 rendy-memory = { path = "../memory" }
+rendy-mesh = { path = "../mesh" }
+rendy-shader = { path = "../shader" }
+rendy-renderer = { path = "../renderer" }
 rendy-resource = { path = "../resource" }
-rendy-command = { path = "../command" }
-derivative = "1.0"
-winit = "0.17"
-gfx-hal = { git = "https://github.com/gfx-rs/gfx", optional = true }
-ash = { version = "0.24", optional = true }
+rendy-wsi = { path = "../wsi" }
 
-[features]
-hal = ["gfx-hal", "rendy-memory/hal", "rendy-resource/hal", "rendy-command/hal"]
-vulkan = ["ash", "rendy-memory/vulkan", "rendy-resource/vulkan", "rendy-command/vulkan"]
+[dev-dependencies]
+ash = { path = "../../ash/ash" }
+env_logger = "0.5"
+# derivative = "1.0"
+failure = "0.1"
+log = "0.4"
+# relevant = "0.2"
+# serde = { version = "1.0", optional = true, features = ["derive"] }
+# smallvec = "0.6"
+winit = "0.17"
 
-[[example]]
-name = "basic"
-required-features = ["hal"]
\ No newline at end of file
diff --git a/rendy/examples/basic.rs b/rendy/examples/basic.rs
deleted file mode 100644
index 38138872..00000000
--- a/rendy/examples/basic.rs
+++ /dev/null
@@ -1,49 +0,0 @@
-extern crate gfx_hal as hal;
-extern crate rendy;
-extern crate winit;
-
-use hal::{Adapter, Backend, Instance};
-use rendy::{
-    command::{CapabilityFlags, Families, Family, FamilyId},
-    Config, Device, Factory, QueuesPicker, RenderBuilder,
-};
-use winit::{EventsLoop, WindowBuilder};
-
-use std::marker::PhantomData;
-
-fn main() -> Result<(), ()> {
-    // Create a window with winit.
-    let mut events_loop = EventsLoop::new();
-    let window = WindowBuilder::new()
-        .with_title("Part 00: Triangle")
-        .with_dimensions((848, 480).into())
-        .build(&events_loop)
-        .unwrap();
-
-    let render_config = RenderBuilder::new().with_window(window).build();
-    let config = Config::new(vec![render_config]);
-
-    // TODO: migrate example to `ash`
-    // let instance = backend::Instance::create("Rendy basic example", 1);
-
-    // let adapter = instance.enumerate_adapters().remove(0);
-
-    // type HalDevice = (
-    //     <backend::Backend as Backend>::Device,
-    //     PhantomData<backend::Backend>,
-    // );
-
-    //let _factory = rendy::init::<HalDevice, PickFirst, backend::Backend>(config);
-
-    Ok(())
-}
-
-struct PickFirst;
-impl QueuesPicker for PickFirst {
-    fn pick_queues<Q>(
-        &self,
-        families: Vec<Families<Q>>,
-    ) -> Result<(Family<Q, CapabilityFlags>, u32), ()> {
-        unimplemented!()
-    }
-}
diff --git a/rendy/examples/init.rs b/rendy/examples/init.rs
new file mode 100644
index 00000000..11b7f848
--- /dev/null
+++ b/rendy/examples/init.rs
@@ -0,0 +1,15 @@
+extern crate ash;
+extern crate env_logger;
+extern crate failure;
+extern crate rendy;
+
+use rendy::factory::{Config, Factory};
+fn main() -> Result<(), failure::Error> {
+    env_logger::init();
+
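+    // Create a factory from the default configuration and dispose of it right away.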
+    let config: Config = Default::default();
+
+    let factory: Factory = Factory::new(config)?;
+
+    factory.dispose();
+    Ok(())
+}
diff --git a/rendy/examples/simple.comp b/rendy/examples/simple.comp
new file mode 100644
index 00000000..90cbb5b7
--- /dev/null
+++ b/rendy/examples/simple.comp
@@ -0,0 +1,31 @@
+#version 450
+
+layout(local_size_x = 32, local_size_y = 32) in;
+layout(set = 0, binding = 1, rgba32f) uniform imageBuffer bunnies;
+
+layout(push_constant) uniform Push {
+  float delta_time;
+};
+
+void main() {
+    vec4 bunny = imageLoad(bunnies, int(gl_GlobalInvocationID.x));
+    vec2 pos = bunny.rg;
+    vec2 vel = bunny.ba;
+
+    pos += vel + (delta_time * delta_time) / 2;
+    vel += delta_time;
+
+    float l = sign(pos.x);
+    float r = sign(1.0 - pos.x);
+
+    pos.x = 2.0 + r * l * pos.x - 2.0 * r;
+    vel.x = 2.0 + r * l * vel.x - 2.0 * r;
+
+    float b = sign(pos.y);
+    float t = sign(1.0 - pos.y);
+
+    pos.y = 2.0 + t * b * pos.y - 2.0 * t;
+    vel.y = 2.0 + t * b * vel.y - 2.0 * t;
+
+    imageStore(bunnies, int(gl_GlobalInvocationID.x), vec4(pos, vel));
+}
diff --git a/rendy/examples/simple.frag b/rendy/examples/simple.frag
new file mode 100644
index 00000000..1b566a34
--- /dev/null
+++ b/rendy/examples/simple.frag
@@ -0,0 +1,12 @@
+#version 450
+#extension GL_ARB_separate_shader_objects : enable
+
+layout(early_fragment_tests) in;
+
+layout(location = 0) in vec2 uv;
+// layout(set = 0, binding = 0) uniform sampler2D bunny_image;
+layout(location = 0) out vec4 outColor;
+
+void main() {
+    outColor = vec4(gl_FragCoord.zzz, 1.0); //texture(bunny_image, uv);
+}
diff --git a/rendy/examples/simple.rs b/rendy/examples/simple.rs
new file mode 100644
index 00000000..e2b837a1
--- /dev/null
+++ b/rendy/examples/simple.rs
@@ -0,0 +1,634 @@
+extern crate ash;
+extern crate env_logger;
+#[macro_use]
+extern crate failure;
+#[macro_use]
+extern crate log;
+extern crate rendy;
+extern crate winit;
+
+use ash::{version::DeviceV1_0, vk};
+use std::{
+    ffi::CStr,
+    time::{Duration, Instant},
+};
+
+use failure::Error;
+
+use rendy::{
+    command::{
+        CommandBuffer, CommandPool, ExecutableState, FamilyIndex, Graphics, MultiShot,
+        NoIndividualReset, OneShot, OwningCommandPool, PendingState, PrimaryLevel,
+    },
+    factory::{Config, Factory},
+    memory::usage::{Data, Dynamic},
+    mesh::{AsVertex, Mesh, PosColor},
+    renderer::{Renderer, RendererBuilder},
+    resource::{Buffer, Image},
+    shader::compile_to_spirv,
+    wsi::Target,
+};
+
+use winit::{EventsLoop, Window, WindowBuilder};
+
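+/// Per-swapchain-image resources: depth image and attachment views, framebuffer,
+/// synchronization primitives and a pre-recorded command buffer with its pool.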
+struct FramebufferEtc {
+    depth: Image,
+    depth_view: vk::ImageView,
+    color_view: vk::ImageView,
+    framebuffer: vk::Framebuffer,
+    acquire: vk::Semaphore,
+    release: vk::Semaphore,
+    fence: vk::Fence,
+    command_pool: CommandPool<Graphics>,
+    command_buffer: Option<CommandBuffer<Graphics, PendingState<ExecutableState<MultiShot>>>>,
+    indirect_buffer: Buffer,
+    indirect_buffer_dirty: bool,
+}
+
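+/// Renderer state: the mesh, the swapchain target, render pass and pipeline
+/// objects, and one `FramebufferEtc` per swapchain image.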
+struct SimpleRenderer {
+    mesh: Mesh,
+    // texture: (Image, vk::ImageView),
+    target: Target,
+    family_index: FamilyIndex,
+    render_pass: vk::RenderPass,
+    layout: vk::PipelineLayout,
+    pipeline: vk::Pipeline,
+    framebuffers: Vec<FramebufferEtc>,
+    acquire: vk::Semaphore,
+    count: u32,
+}
+
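+/// Renderer description: the window to present to, the vertices to upload and
+/// the number of instances to draw indirectly.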
+struct SimpleRendererBuilder {
+    window: Window,
+    vertices: Vec<PosColor>,
+    count: u32,
+}
+
+impl Renderer<()> for SimpleRenderer {
+    type Desc = SimpleRendererBuilder;
+
+    fn run(&mut self, factory: &mut Factory, (): &mut ()) {
+        // trace!("Render frame");
+
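+        // Frame flow: acquire the next swapchain image, wait on that image's fence,
+        // resubmit its pre-recorded command buffer and queue the image for presentation.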
+        let next_image = self.target.next_image(self.acquire).unwrap();
+        // trace!("Next image acquired");
+
+        let index = next_image.indices()[0];
+        let framebuffer = &mut self.framebuffers[index as usize];
+        // trace!("Framebuffer picked");
+
+        let submit = unsafe {
+            // Waiting for fence before reset.
+            factory.wait_for_fence(framebuffer.fence);
+            // trace!("Fence got signaled");
+
+            let command_buffer = framebuffer.command_buffer.take().unwrap().complete();
+            let (submit, command_buffer) = command_buffer.submit();
+            framebuffer.command_buffer = Some(command_buffer);
+            // trace!("Command buffer ready to resubmit");
+            submit
+        };
+
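+        // Keep the semaphore signaled by this acquire with the framebuffer so the
+        // submit below can wait on it; its previous semaphore is reused for the next acquire.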
+        std::mem::swap(&mut self.acquire, &mut framebuffer.acquire);
+
+        unsafe {
+            if framebuffer.indirect_buffer_dirty {
+                let command = vk::DrawIndirectCommand::builder()
+                    .vertex_count(6)
+                    .instance_count(self.count)
+                    .build();
+
+                factory
+                    .upload_visible_buffer(&mut framebuffer.indirect_buffer, 0, unsafe {
+                        std::slice::from_raw_parts(
+                            &command as *const _ as *const u8,
+                            std::mem::size_of_val(&command),
+                        )
+                    }).unwrap();
+
+                framebuffer.indirect_buffer_dirty = false;
+                // trace!("Indirect command updated");
+            }
+
+            factory.reset_fence(framebuffer.fence);
+            // trace!("Fence reset");
+
+            let mut queue = factory.queue(self.family_index, 0);
+            queue.submit(
+                &[vk::SubmitInfo::builder()
+                    .wait_semaphores(&[framebuffer.acquire])
+                    .wait_dst_stage_mask(&[vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT])
+                    .signal_semaphores(&[framebuffer.release])
+                    .command_buffers(&[submit.raw()])
+                    .build()],
+                framebuffer.fence,
+            );
+            // trace!("Command buffer submitted");
+
+            next_image
+                .queue_present(queue.raw(), &[framebuffer.release])
+                .unwrap();
+            // trace!("Next image present queued");
+        }
+    }
+
+    fn dispose(self, factory: &mut Factory, (): &mut ()) {
+        factory.queue(self.family_index, 0).wait_idle();
+        drop(self.mesh);
+        // trace!("Mesh dropped");
+        unsafe {
+            for mut framebuffer in self.framebuffers {
+                factory
+                    .device()
+                    .destroy_framebuffer(framebuffer.framebuffer, None);
+                // trace!("Framebuffer destroyed");
+                factory
+                    .device()
+                    .destroy_image_view(framebuffer.color_view, None);
+                // trace!("Color view destroyed");
+                factory
+                    .device()
+                    .destroy_image_view(framebuffer.depth_view, None);
+                // trace!("Depth view destroyed");
+                drop(framebuffer.depth);
+                // trace!("Depth image destroyed");
+
+                framebuffer.command_pool.free_buffers(
+                    factory.device(),
+                    framebuffer.command_buffer.map(|cbuf| cbuf.complete()),
+                );
+                framebuffer.command_pool.dispose(factory.device());
+                // trace!("CommandPool destroyed");
+            }
+            factory.device().destroy_pipeline(self.pipeline, None);
+            // trace!("Pipeline destroyed");
+            factory.device().destroy_render_pass(self.render_pass, None);
+            // trace!("Render-pass destroyed");
+        }
+        factory.destroy_target(self.target);
+        // trace!("Target destroyed");
+    }
+}
+
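+// Compile the GLSL sources to SPIR-V; each generated struct exposes the resulting
+// words through its `SPIRV` constant, used below to create shader modules.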
+compile_to_spirv!(
+    struct ComputeShader {
+        kind: Compute,
+        lang: GLSL,
+        file: "examples/simple.comp",
+    }
+
+    struct VertexShader {
+        kind: Vertex,
+        lang: GLSL,
+        file: "examples/simple.vert",
+    }
+
+    struct FragmentShader {
+        kind: Fragment,
+        lang: GLSL,
+        file: "examples/simple.frag",
+    }
+);
+
+impl RendererBuilder<()> for SimpleRendererBuilder {
+    type Error = Error;
+    type Renderer = SimpleRenderer;
+
+    fn build(self, factory: &mut Factory, (): &mut ()) -> Result<SimpleRenderer, Error> {
+        let target = factory.create_target(self.window, 3)?;
+
+        let extent = target.extent();
+
+        let (index, _) = factory
+            .families()
+            .iter()
+            .enumerate()
+            .find(|(_, family)| {
+                let graphics = family.capability().subset(vk::QueueFlags::GRAPHICS);
+                let presentation = factory.target_support(family.index(), &target);
+                graphics && presentation
+            }).ok_or_else(|| format_err!("Can't find queue capable of graphics and presentation"))?;
+
+        let family_index = FamilyIndex(index as u32);
+
+        let mesh = Mesh::new()
+            .with_vertices(self.vertices)
+            .with_prim_type(vk::PrimitiveTopology::TRIANGLE_LIST)
+            .build(FamilyIndex(0), factory)?;
+
+        let render_pass = unsafe {
+            // Seems OK.
+            // TODO: Provide better safety explanation.
+            factory.device().create_render_pass(
+                &vk::RenderPassCreateInfo::builder()
+                    .attachments(&[
+                        vk::AttachmentDescription::builder()
+                            .format(target.format())
+                            .samples(vk::SampleCountFlags::TYPE_1)
+                            .load_op(vk::AttachmentLoadOp::CLEAR)
+                            .store_op(vk::AttachmentStoreOp::STORE)
+                            .stencil_load_op(vk::AttachmentLoadOp::DONT_CARE)
+                            .stencil_store_op(vk::AttachmentStoreOp::DONT_CARE)
+                            .initial_layout(vk::ImageLayout::UNDEFINED)
+                            .final_layout(vk::ImageLayout::PRESENT_SRC_KHR)
+                            .build(),
+                        vk::AttachmentDescription::builder()
+                            .format(vk::Format::D32_SFLOAT)
+                            .samples(vk::SampleCountFlags::TYPE_1)
+                            .load_op(vk::AttachmentLoadOp::CLEAR)
+                            .store_op(vk::AttachmentStoreOp::DONT_CARE)
+                            .stencil_load_op(vk::AttachmentLoadOp::DONT_CARE)
+                            .stencil_store_op(vk::AttachmentStoreOp::DONT_CARE)
+                            .initial_layout(vk::ImageLayout::UNDEFINED)
+                            .final_layout(vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
+                            .build(),
+                    ]).subpasses(&[vk::SubpassDescription::builder()
+                        .pipeline_bind_point(vk::PipelineBindPoint::GRAPHICS)
+                        .color_attachments(&[vk::AttachmentReference::builder()
+                            .attachment(0)
+                            .layout(vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL)
+                            .build()]).depth_stencil_attachment(
+                            &vk::AttachmentReference::builder()
+                                .attachment(1)
+                                .layout(vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
+                                .build(),
+                        ).build()]).dependencies(&[
+                        vk::SubpassDependency::builder()
+                            .src_subpass(!0)
+                            .src_stage_mask(vk::PipelineStageFlags::TOP_OF_PIPE)
+                            .src_access_mask(vk::AccessFlags::empty())
+                            .dst_subpass(0)
+                            .dst_access_mask(vk::AccessFlags::COLOR_ATTACHMENT_WRITE)
+                            .dst_stage_mask(vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT)
+                            .build(),
+                        vk::SubpassDependency::builder()
+                            .src_subpass(0)
+                            .src_access_mask(vk::AccessFlags::COLOR_ATTACHMENT_WRITE)
+                            .src_stage_mask(vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT)
+                            .dst_subpass(!0)
+                            .dst_stage_mask(vk::PipelineStageFlags::BOTTOM_OF_PIPE)
+                            .dst_access_mask(vk::AccessFlags::empty())
+                            .build(),
+                    ]).build(),
+                None,
+            )
+        }?;
+
+        let layout = unsafe {
+            factory
+                .device()
+                .create_pipeline_layout(&vk::PipelineLayoutCreateInfo::builder().build(), None)
+        }?;
+
+        let (vertex, fragment) = unsafe {
+            let vertex = factory.device().create_shader_module(
+                &vk::ShaderModuleCreateInfo::builder()
+                    .code(VertexShader::SPIRV)
+                    .build(),
+                None,
+            )?;
+
+            let fragment = factory.device().create_shader_module(
+                &vk::ShaderModuleCreateInfo::builder()
+                    .code(FragmentShader::SPIRV)
+                    .build(),
+                None,
+            )?;
+
+            (vertex, fragment)
+        };
+
+        let pipeline = unsafe {
+            let mut pipelines =
+                    factory
+                        .device()
+                        .create_graphics_pipelines(
+                            vk::PipelineCache::null(),
+                            &[
+                                vk::GraphicsPipelineCreateInfo::builder()
+                                    .stages(&[
+                                        vk::PipelineShaderStageCreateInfo::builder()
+                                            .stage(vk::ShaderStageFlags::VERTEX)
+                                            .module(vertex)
+                                            .name(CStr::from_bytes_with_nul_unchecked(b"main\0"))
+                                            .build(),
+                                        vk::PipelineShaderStageCreateInfo::builder()
+                                            .stage(vk::ShaderStageFlags::FRAGMENT)
+                                            .module(fragment)
+                                            .name(CStr::from_bytes_with_nul_unchecked(b"main\0"))
+                                            .build(),
+                                    ]).vertex_input_state(
+                                        &vk::PipelineVertexInputStateCreateInfo::builder()
+                                        // .vertex_binding_descriptions(&[
+                                        //     vk::VertexInputBindingDescription::builder()
+                                        //         .binding(0)
+                                        //         .stride(PosColor::VERTEX.stride)
+                                        //         .input_rate(vk::VertexInputRate::VERTEX)
+                                        //         .build(),
+                                        // ])
+                                        // .vertex_attribute_descriptions(
+                                        //     &PosColor::VERTEX.attributes.iter().enumerate().map(|(location, attribute)|
+                                        //         vk::VertexInputAttributeDescription::builder()
+                                        //             .location(location as u32)
+                                        //             .binding(0)
+                                        //             .format(attribute.format)
+                                        //             .offset(attribute.offset)
+                                        //             .build()
+                                        //     ).collect::<Vec<_>>()
+                                        // )
+                                        .build(),
+                                    ).input_assembly_state(
+                                        &vk::PipelineInputAssemblyStateCreateInfo::builder()
+                                            .topology(vk::PrimitiveTopology::TRIANGLE_LIST)
+                                            .build(),
+                                    ).viewport_state(
+                                        &vk::PipelineViewportStateCreateInfo::builder()
+                                            .viewports(&[vk::Viewport::builder()
+                                                .width(extent.width as f32)
+                                                .height(extent.height as f32)
+                                                .min_depth(0.0)
+                                                .max_depth(1.0)
+                                                .build()]).scissors(&[vk::Rect2D::builder()
+                                                .extent(extent)
+                                                .build()]).build(),
+                                    ).rasterization_state(
+                                        &vk::PipelineRasterizationStateCreateInfo::builder()
+                                            .build(),
+                                    ).multisample_state(
+                                        &vk::PipelineMultisampleStateCreateInfo::builder()
+                                            .rasterization_samples(vk::SampleCountFlags::TYPE_1)
+                                            .build(),
+                                    ).depth_stencil_state(
+                                        &vk::PipelineDepthStencilStateCreateInfo::builder()
+                                            .depth_test_enable(1)
+                                            .depth_write_enable(1)
+                                            .depth_compare_op(vk::CompareOp::LESS)
+                                            .depth_bounds_test_enable(1)
+                                            .min_depth_bounds(0.0)
+                                            .max_depth_bounds(1.0)
+                                            .build(),
+                                    ).color_blend_state(
+                                        &vk::PipelineColorBlendStateCreateInfo::builder()
+                                            .attachments(&[
+                                                vk::PipelineColorBlendAttachmentState::builder()
+                                                    .blend_enable(1)
+                                                    .src_color_blend_factor(
+                                                        vk::BlendFactor::ONE_MINUS_DST_ALPHA,
+                                                    ).dst_color_blend_factor(vk::BlendFactor::DST_ALPHA)
+                                                    .color_blend_op(vk::BlendOp::ADD)
+                                                    .src_alpha_blend_factor(
+                                                        vk::BlendFactor::ONE_MINUS_DST_ALPHA,
+                                                    ).dst_alpha_blend_factor(vk::BlendFactor::ONE)
+                                                    .alpha_blend_op(vk::BlendOp::ADD)
+                                                    .color_write_mask(vk::ColorComponentFlags::all())
+                                                    .build()
+                                                ],
+                                            )
+                                            .build(),
+                                    ).layout(layout)
+                                    .render_pass(render_pass)
+                                    .base_pipeline_index(-1)
+                                    .build(),
+                            ],
+                            None,
+                        ).map_err(|(_, error)| error)?;
+
+            pipelines.remove(0)
+        };
+
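+        // For every swapchain image: create a depth image with views, a framebuffer,
+        // a command pool and an indirect buffer, then record the draw commands once.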
+        let framebuffers = unsafe {
+            target
+                .images()
+                .iter()
+                .map(|&image| {
+                    let depth = factory.create_image(
+                        vk::ImageCreateInfo::builder()
+                            .image_type(vk::ImageType::TYPE_2D)
+                            .format(vk::Format::D32_SFLOAT)
+                            .extent(vk::Extent3D {
+                                width: target.extent().width,
+                                height: target.extent().height,
+                                depth: 1,
+                            }).mip_levels(1)
+                            .array_layers(1)
+                            .samples(vk::SampleCountFlags::TYPE_1)
+                            .tiling(vk::ImageTiling::OPTIMAL)
+                            .usage(vk::ImageUsageFlags::DEPTH_STENCIL_ATTACHMENT)
+                            .sharing_mode(vk::SharingMode::EXCLUSIVE)
+                            .initial_layout(vk::ImageLayout::UNDEFINED)
+                            .build(),
+                        1,
+                        Data,
+                    )?;
+                    let depth_view = factory.device().create_image_view(
+                        &vk::ImageViewCreateInfo::builder()
+                            .image(depth.raw())
+                            .view_type(vk::ImageViewType::TYPE_2D)
+                            .format(vk::Format::D32_SFLOAT)
+                            .subresource_range(
+                                vk::ImageSubresourceRange::builder()
+                                    .aspect_mask(vk::ImageAspectFlags::DEPTH)
+                                    .level_count(1)
+                                    .layer_count(1)
+                                    .build(),
+                            ).build(),
+                        None,
+                    )?;
+                    let color_view = factory.device().create_image_view(
+                        &vk::ImageViewCreateInfo::builder()
+                            .image(image)
+                            .view_type(vk::ImageViewType::TYPE_2D)
+                            .format(target.format())
+                            .subresource_range(
+                                vk::ImageSubresourceRange::builder()
+                                    .aspect_mask(vk::ImageAspectFlags::COLOR)
+                                    .level_count(1)
+                                    .layer_count(1)
+                                    .build(),
+                            ).build(),
+                        None,
+                    )?;
+                    let framebuffer = factory.device().create_framebuffer(
+                        &vk::FramebufferCreateInfo::builder()
+                            .render_pass(render_pass)
+                            .attachments(&[color_view, depth_view])
+                            .width(target.extent().width)
+                            .height(target.extent().height)
+                            .layers(1)
+                            .build(),
+                        None,
+                    )?;
+
+                    let mut command_pool = unsafe {
+                        let ref family = factory.families()[family_index.0 as usize];
+                        family
+                            .create_pool(factory.device(), NoIndividualReset)?
+                            .from_flags()
+                            .unwrap()
+                    };
+
+                    let indirect_buffer = factory.create_buffer(
+                        vk::BufferCreateInfo::builder()
+                            .size(std::mem::size_of::<vk::DrawIndirectCommand>() as u64)
+                            .usage(vk::BufferUsageFlags::INDIRECT_BUFFER)
+                            .build(),
+                        1,
+                        Dynamic,
+                    )?;
+
+                    let command_buffer = command_pool
+                        .allocate_buffers(factory.device(), PrimaryLevel, 1)
+                        .remove(0);
+                    let command_buffer = command_buffer.begin(factory.device(), MultiShot(()));
+
+                    unsafe {
+                        // Unsafe command recording.
+                        factory.device().cmd_begin_render_pass(
+                            command_buffer.raw(),
+                            &vk::RenderPassBeginInfo::builder()
+                                .render_pass(render_pass)
+                                .framebuffer(framebuffer)
+                                .render_area(vk::Rect2D::builder().extent(target.extent()).build())
+                                .clear_values(&[
+                                    vk::ClearValue {
+                                        color: vk::ClearColorValue {
+                                            uint32: [0, 0, 0, 255],
+                                        },
+                                    },
+                                    vk::ClearValue {
+                                        depth_stencil: vk::ClearDepthStencilValue {
+                                            depth: 1.0,
+                                            stencil: 0,
+                                        },
+                                    },
+                                ]).build(),
+                            vk::SubpassContents::INLINE,
+                        );
+
+                        factory.device().cmd_bind_pipeline(
+                            command_buffer.raw(),
+                            vk::PipelineBindPoint::GRAPHICS,
+                            pipeline,
+                        );
+
+                        factory.device().cmd_bind_descriptor_sets(
+                            command_buffer.raw(),
+                            vk::PipelineBindPoint::GRAPHICS,
+                            layout,
+                            0,
+                            &[], // &[descriptor_set],
+                            &[],
+                        );
+
+                        factory.device().cmd_draw_indirect(
+                            command_buffer.raw(),
+                            indirect_buffer.raw(),
+                            0,
+                            1,
+                            0,
+                        );
+
+                        factory.device().cmd_end_render_pass(command_buffer.raw());
+                    };
+
+                    let command_buffer = command_buffer.finish(factory.device());
+                    let command_buffer = Some(command_buffer.submit().1);
+
+                    Ok(FramebufferEtc {
+                        depth,
+                        depth_view,
+                        color_view,
+                        framebuffer,
+                        acquire: factory.create_semaphore(),
+                        release: factory.create_semaphore(),
+                        fence: factory.create_fence(true),
+                        command_buffer,
+                        command_pool,
+                        indirect_buffer,
+                        indirect_buffer_dirty: true,
+                    })
+                }).collect::<Result<Vec<_>, Error>>()
+        }?;
+
+        Ok(SimpleRenderer {
+            mesh,
+            // texture: unimplemented!(),
+            target,
+            family_index,
+            render_pass,
+            layout,
+            pipeline,
+            framebuffers,
+            acquire: factory.create_semaphore(),
+            count: self.count,
+        })
+    }
+}
+
+fn main() -> Result<(), failure::Error> {
+    env_logger::Builder::from_default_env()
+        .default_format_timestamp_nanos(true)
+        .init();
+
+    let config: Config = Default::default();
+
+    let mut factory: Factory = Factory::new(config)?;
+
+    let mut event_loop = EventsLoop::new();
+
+    let window = WindowBuilder::new()
+        .with_title("Rendy example")
+        .build(&event_loop)?;
+
+    event_loop.poll_events(|_| ());
+
+    let renderer_builder = SimpleRendererBuilder {
+        window,
+        vertices: vec![
+            PosColor {
+                position: [0.0, -0.5, 0.5].into(),
+                color: [1.0, 0.0, 0.0, 1.0].into(),
+            },
+            PosColor {
+                position: [-0.5, 0.5, 0.5].into(),
+                color: [0.0, 1.0, 0.0, 1.0].into(),
+            },
+            PosColor {
+                position: [0.5, 0.5, 0.5].into(),
+                color: [0.0, 0.0, 1.0, 1.0].into(),
+            },
+        ],
+        count: 300_000,
+    };
+
+    let mut renderer = renderer_builder.build(&mut factory, &mut ())?;
+
+    // trace!("Start rendering");
+    let mut counter = 0..;
+    let started = Instant::now();
+    let duration = Duration::new(10, 0);
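+    // Render as many frames as possible for `duration`, pumping window events
+    // before each frame.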
+    counter
+        .by_ref()
+        .take_while(|_| started.elapsed() < duration)
+        .for_each(|_| {
+            event_loop.poll_events(|_| ());
+            renderer.run(&mut factory, &mut ());
+            // std::thread::sleep(Duration::new(0, 1_000_000));
+        });
+
+    let total_micros = duration.as_secs() * 1_000_000 + duration.subsec_micros() as u64;
+
+    info!(
+        "Rendered {} frames for {}.{:03} secs. FPS: {}",
+        counter.start,
+        duration.as_secs(),
+        duration.subsec_millis(),
+        counter.start * 1_000_000 / total_micros
+    );
+    // trace!("Stop rendering");
+
+    renderer.dispose(&mut factory, &mut ());
+    // trace!("Render disposed");
+
+    factory.dispose();
+    // trace!("Factory disposed");
+    Ok(())
+}
diff --git a/rendy/examples/simple.vert b/rendy/examples/simple.vert
new file mode 100644
index 00000000..4d87a540
--- /dev/null
+++ b/rendy/examples/simple.vert
@@ -0,0 +1,26 @@
+#version 450
+#extension GL_ARB_separate_shader_objects : enable
+// layout(set = 0, binding = 1, rgba32f) uniform imageBuffer bunnies;
+
+vec2 positions[6] = vec2[](
+    vec2(0.0, 0.0),
+    vec2(1.0, 0.0),
+    vec2(1.0, 1.0),
+    vec2(1.0, 1.0),
+    vec2(0.0, 1.0),
+    vec2(0.0, 0.0)
+);
+
+layout(location = 0) out vec2 uv;
+
+void main() {
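+    // Derive a pseudo-random per-instance position from the instance index;
+    // the imageBuffer path is commented out.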
+    float isin = sin(gl_InstanceIndex * 3469);
+    float icos = cos(gl_InstanceIndex * 7901);
+    vec2 vertex = positions[gl_VertexIndex];
+    vec4 bunny = vec4(isin, icos, 0.0, 0.0);// imageLoad(bunnies, gl_InstanceIndex);
+    vec2 pos = bunny.rg;
+    pos += vertex * 0.003;
+    pos = pos / 1.003 * 2.0 - 1.0;
+    uv = vertex;
+    gl_Position = vec4(pos, isin * icos, 1.0);
+}
diff --git a/rendy/examples/window.rs b/rendy/examples/window.rs
new file mode 100644
index 00000000..dc5f7bed
--- /dev/null
+++ b/rendy/examples/window.rs
@@ -0,0 +1,40 @@
+extern crate ash;
+extern crate failure;
+extern crate rendy;
+
+extern crate env_logger;
+extern crate winit;
+
+use rendy::factory::{Config, Factory};
+use std::time::{Duration, Instant};
+use winit::{EventsLoop, WindowBuilder};
+
+fn main() -> Result<(), failure::Error> {
+    let started = Instant::now();
+
+    env_logger::init();
+
+    let config: Config = Default::default();
+
+    let factory: Factory = Factory::new(config)?;
+
+    let mut event_loop = EventsLoop::new();
+
+    let window = WindowBuilder::new()
+        .with_title("Rendy example")
+        .build(&event_loop)?;
+
+    event_loop.poll_events(|_| ());
+
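+    // Create a presentation target for the window, keep it alive for five seconds
+    // while pumping events, then destroy it before disposing of the factory.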
+    let target = factory.create_target(window, 3)?;
+
+    while started.elapsed() < Duration::new(5, 0) {
+        event_loop.poll_events(|_| ());
+        std::thread::sleep(Duration::new(0, 1_000_000));
+    }
+
+    factory.destroy_target(target);
+
+    factory.dispose();
+    Ok(())
+}
diff --git a/rendy/src/config.rs b/rendy/src/config.rs
deleted file mode 100644
index dc671a89..00000000
--- a/rendy/src/config.rs
+++ /dev/null
@@ -1,66 +0,0 @@
-use winit::Window;
-
-// TODO: figure out what these values should be
-#[derive(Debug, Clone, Default)]
-pub struct MemoryConfig {}
-
-#[derive(Debug, Clone, Default)]
-pub struct Config {
-    memory: MemoryConfig,
-    renders: Vec<RenderConfig>,
-}
-
-impl Config {
-    pub fn new(renders: Vec<RenderConfig>) -> Self {
-        Config {
-            memory: MemoryConfig {},
-            renders,
-        }
-    }
-}
-
-#[derive(Debug, Clone, Derivative)]
-#[derivative(Default)]
-pub struct RenderConfig {
-    // #[derivative(Debug = "ignore")]
-    // windows: Vec<Window>,
-    #[derivative(Default(value = "3"))]
-    image_count: u32,
-}
-
-impl RenderConfig {
-    pub fn new() -> RenderBuilder {
-        RenderBuilder::new()
-    }
-}
-
-pub struct RenderBuilder {
-    windows: Vec<Window>,
-    image_count: u32,
-}
-
-impl RenderBuilder {
-    pub fn new() -> Self {
-        RenderBuilder {
-            windows: Vec::new(),
-            image_count: 3,
-        }
-    }
-
-    pub fn with_window(mut self, window: Window) -> Self {
-        self.windows.push(window);
-        self
-    }
-
-    pub fn with_image_count(mut self, image_count: u32) -> Self {
-        self.image_count = image_count;
-        self
-    }
-
-    pub fn build(self) -> RenderConfig {
-        RenderConfig {
-            //windows: self.windows,
-            image_count: self.image_count,
-        }
-    }
-}
diff --git a/rendy/src/device.rs b/rendy/src/device.rs
deleted file mode 100644
index 827c26b0..00000000
--- a/rendy/src/device.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-use command::Device as CommandDevice;
-use memory::Device as MemoryDevice;
-use resource::Device as ResourceDevice;
-
-/// Collective trait that represents the capabilites a device used in
-/// `rendy` must have.
-pub trait Device: MemoryDevice + ResourceDevice + CommandDevice {
-    type Surface;
-}
diff --git a/rendy/src/factory.rs b/rendy/src/factory.rs
deleted file mode 100644
index 487a6d28..00000000
--- a/rendy/src/factory.rs
+++ /dev/null
@@ -1,114 +0,0 @@
-use command::Families;
-use memory::{Config as MemoryConfig, Heaps, MemoryError, Properties, Usage};
-use resource::{
-    buffer::{self, Buffer},
-    image::{self, Image},
-    ResourceError, Resources, SharingMode,
-};
-use winit::Window;
-
-use config::{Config, RenderConfig};
-use device::Device;
-use queue::QueuesPicker;
-use render::{Render, Target};
-
-/// The `Factory<D>` type represents the overall creation type for `rendy`.
-pub struct Factory<D: Device> {
-    pub device: D,
-    families: Families<D::CommandQueue>,
-    heaps: Heaps<D::Memory>,
-    resources: Resources<D::Memory, D::Buffer, D::Image>,
-}
-
-impl<D> Factory<D>
-where
-    D: Device,
-{
-    /// Creates a new `Factory` based off of a `Config<Q, W>` with some `QueuesPicker`
-    /// from the specified `PhysicalDevice`.
-    pub fn new<P, Q>(config: Config, queue_picker: Q) -> Result<Factory<D>, ()>
-    where
-        Q: QueuesPicker,
-    {
-        let heaps = unimplemented!();
-        let device = unimplemented!();
-        let families = unimplemented!();
-
-        Ok(Factory {
-            device,
-            families,
-            heaps,
-            resources: Resources::new(),
-        })
-    }
-
-    /// Creates a buffer that is managed with the specified properties.
-    pub fn create_buffer<U>(
-        &mut self,
-        size: u64,
-        usage: buffer::UsageFlags,
-        sharing: SharingMode,
-        align: u64,
-        memory_usage: U,
-    ) -> Result<Buffer<D::Memory, D::Buffer>, MemoryError>
-    where
-        U: Usage,
-    {
-        let info = buffer::CreateInfo {
-            size,
-            usage,
-            sharing,
-        };
-
-        self.resources
-            .create_buffer(&self.device, &mut self.heaps, info, align, memory_usage)
-    }
-
-    /// Creates an image that is mananged with the specified properties.
-    pub fn create_image<U>(
-        &mut self,
-        kind: image::Kind,
-        format: image::Format,
-        extent: image::Extent3D,
-        mips: u32,
-        array: u32,
-        samples: image::SampleCountFlags,
-        tiling: image::ImageTiling,
-        usage: image::UsageFlags,
-        flags: image::ImageCreateFlags,
-        sharing: SharingMode,
-        align: u64,
-        memory_usage: U,
-    ) -> Result<Image<D::Memory, D::Image>, ResourceError>
-    where
-        U: Usage,
-    {
-        let info = image::CreateInfo {
-            kind,
-            format,
-            extent,
-            mips,
-            array,
-            samples,
-            tiling,
-            usage,
-            flags,
-            sharing,
-        };
-
-        self.resources
-            .create_image(&self.device, &mut self.heaps, info, align, memory_usage)
-    }
-
-    // pub fn create_surface<R>(window: &Window) -> Target<D, R> {
-    //     unimplemented!()
-    // }
-
-    // /// Build a `Render<D, T>` from the `RenderBuilder` and a render info
-    // pub fn build_render<'a, R, T>(builder: RenderBuilder, render_config: RenderConfig) -> R
-    // where
-    //     R: Render<D, T>,
-    // {
-    //     unimplemented!()
-    // }
-}
diff --git a/rendy/src/impls/ash.rs b/rendy/src/impls/ash.rs
deleted file mode 100644
index 8b137891..00000000
--- a/rendy/src/impls/ash.rs
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/rendy/src/impls/hal.rs b/rendy/src/impls/hal.rs
deleted file mode 100644
index a1ed2278..00000000
--- a/rendy/src/impls/hal.rs
+++ /dev/null
@@ -1,71 +0,0 @@
-use command::{Families, FamilyId};
-use config::Config;
-use device::Device;
-use factory::Factory;
-use queue::QueuesPicker;
-
-use hal;
-use winit::Window;
-
-use std::borrow::Borrow;
-use std::marker::PhantomData;
-
-impl<B, D> Device for (D, PhantomData<B>)
-where
-    B: hal::Backend,
-    D: Borrow<B::Device>,
-{
-    type Surface = B::Surface;
-}
-
-/// Initalize rendy
-#[cfg(feature = "hal")]
-pub fn init<D, Q, B>(config: Config, queue_picker: Q) -> Result<(Factory<D>), ()>
-where
-    D: Device,
-    Q: QueuesPicker,
-    B: BackendEx,
-{
-    let instance = B::init();
-    unimplemented!()
-}
-
-/// Extend backend trait with initialization method and surface creation method.
-pub trait BackendEx: hal::Backend {
-    type Instance: hal::Instance<Backend = Self> + Send + Sync;
-    fn init() -> Self::Instance;
-    fn create_surface(instance: &Self::Instance, window: &Window) -> Self::Surface;
-}
-
-#[cfg(feature = "gfx-backend-vulkan")]
-impl BackendEx for vulkan::Backend {
-    type Instance = vulkan::Instance;
-    fn init() -> Self::Instance {
-        vulkan::Instance::create("gfx-render", 1)
-    }
-    fn create_surface(instance: &Self::Instance, window: &Window) -> Self::Surface {
-        instance.create_surface(window)
-    }
-}
-
-#[cfg(feature = "gfx-backend-metal")]
-impl BackendEx for metal::Backend {
-    type Instance = metal::Instance;
-    fn init() -> Self::Instance {
-        metal::Instance::create("gfx-render", 1)
-    }
-    fn create_surface(instance: &Self::Instance, window: &Window) -> Self::Surface {
-        instance.create_surface(window)
-    }
-}
-
-#[cfg(feature = "gfx-backend-dx12")]
-impl BackendEx for dx12::Backend {
-    type Instance = dx12::Instance;
-    fn init() -> Self::Instance {
-        dx12::Instance::create("gfx-render", 1)
-    }
-    fn create_surface(instance: &Self::Instance, window: &Window) -> Self::Surface {
-        instance.create_surface(window)
-    }
-}
diff --git a/rendy/src/impls/mod.rs b/rendy/src/impls/mod.rs
deleted file mode 100644
index b58bbfc5..00000000
--- a/rendy/src/impls/mod.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-#[cfg(feature = "hal")]
-pub mod hal;
-
-#[cfg(feature = "ash")]
-pub mod ash;
diff --git a/rendy/src/init.rs b/rendy/src/init.rs
deleted file mode 100644
index 4a7321b1..00000000
--- a/rendy/src/init.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-use config::Config;
-use device::Device;
-use factory::Factory;
-use queue::QueuesPicker;
-
-/// Initalize rendy
-#[cfg(feature = "hal")]
-pub use impls::hal::init;
-
-/// Initialize rendy
-#[cfg(not(feature = "hal"))]
-pub fn init<D, Q>(_config: Config, queue_picker: Q) -> Result<(Factory<D>), ()>
-where
-    D: Device,
-    Q: QueuesPicker,
-{
-    unimplemented!()
-}
diff --git a/rendy/src/lib.rs b/rendy/src/lib.rs
index 07b91155..68e72685 100644
--- a/rendy/src/lib.rs
+++ b/rendy/src/lib.rs
@@ -1,31 +1,9 @@
-// TODO: module docs
-
 pub extern crate rendy_command as command;
+pub extern crate rendy_factory as factory;
+pub extern crate rendy_frame as frame;
 pub extern crate rendy_memory as memory;
+pub extern crate rendy_mesh as mesh;
+pub extern crate rendy_renderer as renderer;
 pub extern crate rendy_resource as resource;
-
-extern crate winit;
-
-#[macro_use]
-extern crate derivative;
-
-#[cfg(feature = "hal")]
-pub extern crate gfx_hal as hal;
-
-#[cfg(feature = "ash")]
-pub extern crate ash;
-
-mod impls;
-
-mod config;
-mod device;
-mod factory;
-mod init;
-mod queue;
-mod render;
-
-pub use config::{Config, MemoryConfig, RenderBuilder, RenderConfig};
-pub use device::Device;
-pub use factory::Factory;
-pub use init::init;
-pub use queue::QueuesPicker;
+pub extern crate rendy_shader as shader;
+pub extern crate rendy_wsi as wsi;
diff --git a/rendy/src/queue.rs b/rendy/src/queue.rs
deleted file mode 100644
index c17cb91f..00000000
--- a/rendy/src/queue.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-use command::{CapabilityFlags, Families, Family};
-
-/// Trait that represents some method to select a queue family.
-pub trait QueuesPicker {
-    fn pick_queues<Q>(
-        &self,
-        families: Vec<Families<Q>>,
-    ) -> Result<(Family<Q, CapabilityFlags>, u32), ()>;
-}
diff --git a/rendy/src/render.rs b/rendy/src/render.rs
deleted file mode 100644
index 306e0d7c..00000000
--- a/rendy/src/render.rs
+++ /dev/null
@@ -1,20 +0,0 @@
-use command::Frame;
-use device::Device;
-use factory::Factory;
-use winit::Window;
-
-pub trait Render<D, T>
-where
-    D: Device,
-{
-    fn run(&mut self, data: &mut T, factory: &mut Factory<D>, frame: &mut Frame<D::Fence>);
-}
-
-pub struct Target<D, R>
-where
-    D: Device,
-{
-    surface: D::Surface,
-    render: R,
-    frame: Frame<D::Fence>,
-}
diff --git a/resource/Cargo.toml b/resource/Cargo.toml
index f9c4f8a2..7291f651 100644
--- a/resource/Cargo.toml
+++ b/resource/Cargo.toml
@@ -4,15 +4,8 @@ version = "0.1.0"
 authors = ["omni-viral <scareaangel@gmail.com>"]
 
 [dependencies]
-bitflags = "1.0"
+ash = { path = "../../ash/ash" }
+log = "0.4"
 crossbeam-channel = "0.2"
-rendy-memory = { path = "../memory" }
 relevant = "0.2"
-failure = "0.1"
-derivative = "1.0"
-gfx-hal = { git = "https://github.com/gfx-rs/gfx.git", optional = true }
-ash = { version = "0.24", optional = true }
-
-[features]
-hal = ["gfx-hal", "rendy-memory/hal"]
-vulkan = ["ash", "rendy-memory/vulkan"]
+rendy-memory = { path = "../memory" }
diff --git a/resource/src/buffer/mod.rs b/resource/src/buffer/mod.rs
index 4f1c1744..f98e14ef 100644
--- a/resource/src/buffer/mod.rs
+++ b/resource/src/buffer/mod.rs
@@ -2,25 +2,13 @@
 
 mod usage;
 
+use ash::vk;
+
 pub use self::usage::*;
-use memory::MemoryBlock;
+use memory::{Block, MemoryBlock};
 use relevant::Relevant;
 
 use escape::Escape;
-use SharingMode;
-
-/// Contains information required to create a buffer.
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub struct CreateInfo {
-    /// Size of the buffer required.
-    pub size: u64,
-
-    /// Intended usage flags. Limits memory types suitable for the buffer.
-    pub usage: UsageFlags,
-
-    /// Specifies command queues from which families can access the buffer.
-    pub sharing: SharingMode,
-}
 
 /// Generic buffer object wrapper.
 ///
@@ -29,14 +17,35 @@ pub struct CreateInfo {
-/// `T` - type of the memory object of memory block.
-/// `B` - raw buffer type.
 #[derive(Debug)]
-pub struct Buffer<M, B> {
-    pub(crate) inner: Escape<Inner<M, B>>,
-    pub(crate) info: CreateInfo,
+pub struct Buffer {
+    pub(crate) inner: Escape<Inner>,
+    pub(crate) info: vk::BufferCreateInfo,
 }
 
 #[derive(Debug)]
-pub(crate) struct Inner<M, B> {
-    pub(crate) block: MemoryBlock<M>,
-    pub(crate) raw: B,
+pub(crate) struct Inner {
+    pub(crate) block: MemoryBlock,
+    pub(crate) raw: vk::Buffer,
     pub(crate) relevant: Relevant,
 }
+
+impl Buffer {
+    /// Get the buffer's memory block.
+    pub fn block(&self) -> &impl Block {
+        &self.inner.block
+    }
+
+    /// Get the buffer's memory block mutably.
+    pub fn block_mut(&mut self) -> &mut impl Block {
+        &mut self.inner.block
+    }
+
+    /// Get raw buffer handle.
+    ///
+    /// # Safety
+    ///
+    /// The raw buffer handle must not be used to violate this object's valid usage.
+    pub unsafe fn raw(&self) -> vk::Buffer {
+        self.inner.raw
+    }
+}
diff --git a/resource/src/buffer/usage.rs b/resource/src/buffer/usage.rs
index 6a08fc90..09fda75a 100644
--- a/resource/src/buffer/usage.rs
+++ b/resource/src/buffer/usage.rs
@@ -1,41 +1,5 @@
-use memory::usage::{Data, Download, Dynamic, Upload, Usage as MemoryUsage, UsageValue};
-
-bitflags! {
-    /// Bitmask specifying allowed usage of a buffer.
-    /// See Vulkan docs for detailed info:
-    /// <https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/VkBufferUsageFlagBits.html>
-    #[repr(transparent)]
-    pub struct UsageFlags: u32 {
-        /// Specifies that the buffer can be used as the source of a transfer command.
-        const TRANSFER_SRC = 0x00000001;
-
-        /// Specifies that the buffer can be used as the destination of a transfer command.
-        const TRANSFER_DST = 0x00000002;
-
-        /// Specifies that the buffer can be used to create a `BufferView` suitable for occupying a descriptor set slot of type `UNIFORM_TEXEL_BUFFER`.
-        const UNIFORM_TEXEL_BUFFER = 0x00000004;
-
-        /// Specifies that the buffer can be used to create a `BufferView` suitable for occupying a descriptor set slot of type `STORAGE_TEXEL_BUFFER`.
-        const STORAGE_TEXEL_BUFFER = 0x00000008;
-
-        /// Specifies that the buffer can be used in a descriptor buffer info suitable for occupying a descriptor set slot either of
-        /// type `UNIFORM_BUFFER` or `UNIFORM_BUFFER_DYNAMIC`.
-        const UNIFORM_BUFFER = 0x00000010;
-
-        /// Specifies that the buffer can be used in a descriptor buffer info suitable for occupying a descriptor set slot either of
-        /// type `STORAGE_BUFFER` or `STORAGE_BUFFER_DYNAMIC`.
-        const STORAGE_BUFFER = 0x00000020;
-
-        /// Specifies that the buffer is suitable for vertex indices.
-        const INDEX_BUFFER = 0x00000040;
-
-        /// Specifies that the buffer is suitable for vertex attributes.
-        const VERTEX_BUFFER = 0x00000080;
-
-        /// Specifies that the buffer is suitable for indirect commands.
-        const INDIRECT_BUFFER = 0x00000100;
-    }
-}
+use ash::vk::BufferUsageFlags;
+use memory::usage::{Data, Download, Dynamic, MemoryUsage, MemoryUsageValue, Upload};
 
 /// Usage trait that must implemented by usage types.
 /// This trait provides a way to convert type-level usage to the value-level flags.
@@ -44,20 +8,20 @@ pub trait Usage {
     type MemoryUsage: MemoryUsage;
 
     /// Convert usage to the flags.
-    fn flags(&self) -> UsageFlags;
+    fn flags(&self) -> BufferUsageFlags;
 
     /// Get suggested memory usage.
     fn memory(&self) -> Self::MemoryUsage;
 }
 
-impl Usage for (UsageFlags, UsageValue) {
-    type MemoryUsage = UsageValue;
+impl Usage for (BufferUsageFlags, MemoryUsageValue) {
+    type MemoryUsage = MemoryUsageValue;
 
-    fn flags(&self) -> UsageFlags {
+    fn flags(&self) -> BufferUsageFlags {
         self.0
     }
 
-    fn memory(&self) -> UsageValue {
+    fn memory(&self) -> MemoryUsageValue {
         self.1
     }
 }
@@ -71,8 +35,8 @@ pub struct VertexBuffer;
 impl Usage for VertexBuffer {
     type MemoryUsage = Data;
 
-    fn flags(&self) -> UsageFlags {
-        UsageFlags::TRANSFER_DST | UsageFlags::VERTEX_BUFFER
+    fn flags(&self) -> BufferUsageFlags {
+        BufferUsageFlags::TRANSFER_DST | BufferUsageFlags::VERTEX_BUFFER
     }
 
     fn memory(&self) -> Data {
@@ -89,8 +53,8 @@ pub struct IndexBuffer;
 impl Usage for IndexBuffer {
     type MemoryUsage = Data;
 
-    fn flags(&self) -> UsageFlags {
-        UsageFlags::TRANSFER_DST | UsageFlags::INDEX_BUFFER
+    fn flags(&self) -> BufferUsageFlags {
+        BufferUsageFlags::TRANSFER_DST | BufferUsageFlags::INDEX_BUFFER
     }
 
     fn memory(&self) -> Data {
@@ -106,8 +70,8 @@ pub struct UniformBuffer;
 impl Usage for UniformBuffer {
     type MemoryUsage = Dynamic;
 
-    fn flags(&self) -> UsageFlags {
-        UsageFlags::UNIFORM_BUFFER
+    fn flags(&self) -> BufferUsageFlags {
+        BufferUsageFlags::UNIFORM_BUFFER
     }
 
     fn memory(&self) -> Dynamic {
@@ -122,8 +86,8 @@ pub struct UploadBuffer;
 impl Usage for UploadBuffer {
     type MemoryUsage = Upload;
 
-    fn flags(&self) -> UsageFlags {
-        UsageFlags::TRANSFER_SRC
+    fn flags(&self) -> BufferUsageFlags {
+        BufferUsageFlags::TRANSFER_SRC
     }
 
     fn memory(&self) -> Upload {
@@ -138,8 +102,8 @@ pub struct DownloadBuffer;
 impl Usage for DownloadBuffer {
     type MemoryUsage = Download;
 
-    fn flags(&self) -> UsageFlags {
-        UsageFlags::TRANSFER_DST
+    fn flags(&self) -> BufferUsageFlags {
+        BufferUsageFlags::TRANSFER_DST
     }
 
     fn memory(&self) -> Download {
diff --git a/resource/src/device.rs b/resource/src/device.rs
deleted file mode 100644
index 7a289db4..00000000
--- a/resource/src/device.rs
+++ /dev/null
@@ -1,87 +0,0 @@
-use buffer;
-use error;
-use image;
-use memory;
-use MemoryRequirements;
-
-/// Trait for resource creation, memory allocation and mapping.
-pub trait Device: memory::Device + Sized {
-    /// Image sampler.
-    type Sampler: 'static;
-
-    /// Buffer type that can be used with this device.
-    /// `UnboundedBuffer` can be converted to `Buffer` by `bind_buffer`.
-    type Buffer: 'static;
-
-    /// Unbounded buffer type that can be used with this device.
-    /// `UnboundBuffer` hasn't been associated with memory yet.
-    type UnboundBuffer: 'static;
-
-    /// View to the buffer.
-    type BufferView: 'static;
-
-    /// Memory type that can be used with this device.
-    /// `UnboundedImage` can be converted to `Image` by `bind_image`.
-    type Image: 'static;
-
-    /// Unbounded image type that can be used with this device.
-    /// `UnboundImage` hasn't been associated with memory yet.
-    type UnboundImage: 'static;
-
-    /// View to the image.
-    type ImageView: 'static;
-
-    /// Create new unbound buffer object.
-    fn create_buffer(
-        &self,
-        info: buffer::CreateInfo,
-    ) -> Result<Self::UnboundBuffer, memory::OutOfMemoryError>;
-
-    /// Fetch buffer memory requirements.
-    fn buffer_requirements(&self, buffer: &Self::UnboundBuffer) -> MemoryRequirements;
-
-    /// Bind memory range to the buffer.
-    ///
-    /// # Safety
-    ///
-    /// `offset` must be less than the size of memory.
-    /// memory must have been allocated using one of the memory types allowed in the `mask` member of the `MemoryRequirements` structure returned from a call to `buffer_requirements` with buffer.
-    /// `offset` must be an integer multiple of the alignment member of the `MemoryRequirements` structure returned from a call to `buffer_requirements` with buffer.
-    /// The size member of the `MemoryRequirements` structure returned from a call to `buffer_requirements` with buffer must be less than or equal to the size of memory minus `offset`.
-    unsafe fn bind_buffer(
-        &self,
-        buffer: Self::UnboundBuffer,
-        memory: &Self::Memory,
-        offset: u64,
-    ) -> Result<Self::Buffer, error::BindError>;
-
-    /// Destroy buffer object.
-    unsafe fn destroy_buffer(&self, buffer: Self::Buffer);
-
-    /// Create new unbound image object.
-    fn create_image(
-        &self,
-        info: image::CreateInfo,
-    ) -> Result<Self::UnboundImage, error::ImageCreationError>;
-
-    /// Fetch image memory requirements.
-    fn image_requirements(&self, image: &Self::UnboundImage) -> MemoryRequirements;
-
-    /// Bind memory to the image.
-    ///
-    /// # Safety
-    ///
-    /// `offset` must be less than the size of memory.
-    /// memory must have been allocated using one of the memory types allowed in the `mask` member of the `MemoryRequirements` structure returned from a call to `image_requirements` with image.
-    /// `offset` must be an integer multiple of the alignment member of the `MemoryRequirements` structure returned from a call to `image_requirements` with image.
-    /// The size member of the `MemoryRequirements` structure returned from a call to `image_requirements` with image must be less than or equal to the size of memory minus `offset`.
-    unsafe fn bind_image(
-        &self,
-        image: Self::UnboundImage,
-        memory: &Self::Memory,
-        offset: u64,
-    ) -> Result<Self::Image, error::BindError>;
-
-    /// Destroy image object.
-    unsafe fn destroy_image(&self, image: Self::Image);
-}
diff --git a/resource/src/error.rs b/resource/src/error.rs
deleted file mode 100644
index de9cd8f1..00000000
--- a/resource/src/error.rs
+++ /dev/null
@@ -1,84 +0,0 @@
-use image;
-use memory;
-
-/// Image creation error.
-#[derive(Clone, Copy, Debug, Fail)]
-pub enum ImageCreationError {
-    /// An unsupported format was attempted to be used.
-    #[fail(display = "Unsupported format")]
-    UnsupportedFormat(image::Format),
-
-    /// Multi-sampled array textures or cubes are not supported.
-    #[fail(display = "Unsupported kind")]
-    Kind,
-
-    /// Invalid samples for the device.
-    #[fail(display = "Unsupported amount of samples")]
-    Samples(image::SampleCountFlags),
-
-    /// Unsupported size in one of the dimensions.
-    #[fail(display = "Unsupported size")]
-    UnsupportedSize(u32),
-
-    /// The data size provided doesn't match the destination.
-    #[fail(display = "Data size mismatch")]
-    DataSizeMismatch,
-
-    /// The usage requested isn't supported.
-    #[fail(display = "Unsupported usage")]
-    UnsupportedUsage(image::UsageFlags),
-
-    /// The memory of the host or device is used up.
-    #[fail(display = "Out of memory")]
-    OutOfMemoryError(memory::OutOfMemoryError),
-}
-
-/// Resource binding error.
-#[derive(Clone, Copy, Debug, Fail)]
-pub enum BindError {
-    /// Requested binding to memory that doesn't support the required operations.
-    #[fail(display = "Binding to wrong memory")]
-    WrongMemory,
-
-    /// Requested binding to an invalid memory.
-    #[fail(display = "Binding to out of bounds memory")]
-    OutOfBounds,
-
-    /// The memory of the host or device is used up.
-    #[fail(display = "Out of memory")]
-    OutOfMemoryError(memory::OutOfMemoryError),
-}
-
-/// Generic resource error.
-#[derive(Clone, Copy, Debug, Fail)]
-pub enum ResourceError {
-    /// Image creation error.
-    #[fail(display = "Image creation error")]
-    ImageCreationError(ImageCreationError),
-
-    /// Memory error.
-    #[fail(display = "Memory error")]
-    MemoryError(memory::MemoryError),
-
-    /// Bind error.
-    #[fail(display = "Bind error")]
-    BindError(BindError),
-}
-
-impl From<ImageCreationError> for ResourceError {
-    fn from(error: ImageCreationError) -> Self {
-        ResourceError::ImageCreationError(error)
-    }
-}
-
-impl From<memory::MemoryError> for ResourceError {
-    fn from(error: memory::MemoryError) -> Self {
-        ResourceError::MemoryError(error)
-    }
-}
-
-impl From<BindError> for ResourceError {
-    fn from(error: BindError) -> Self {
-        ResourceError::BindError(error)
-    }
-}
diff --git a/resource/src/escape.rs b/resource/src/escape.rs
index 80e7f0d2..db5f9d46 100644
--- a/resource/src/escape.rs
+++ b/resource/src/escape.rs
@@ -22,10 +22,12 @@ pub(crate) struct Escape<T> {
 
 impl<T> Escape<T> {
     /// Unwrap the value.
+    #[allow(unused)]
     pub(crate) fn into_inner(escape: Self) -> T {
         Self::deconstruct(escape).0
     }
 
+    #[allow(unused)]
     fn deconstruct(mut escape: Self) -> (T, Sender<T>) {
         unsafe {
             let value = read(&mut *escape.value);
@@ -51,7 +53,10 @@ impl<T> DerefMut for Escape<T> {
 
 impl<T> Drop for Escape<T> {
     fn drop(&mut self) {
-        let value = unsafe { read(&mut *self.value) };
+        let value = unsafe {
+            // `ManuallyDrop` prevents `self.value` from being dropped again after this read.
+            read(&mut *self.value)
+        };
         self.sender.send(value)
     }
 }
@@ -95,7 +100,14 @@ impl<T> Terminal<T> {
 
     /// Get iterator over values from dropped `Escape` instances that was created by this `Terminal`.
     pub(crate) fn drain<'a>(&'a mut self) -> impl Iterator<Item = T> + 'a {
-        repeat(()).scan((), move |&mut (), ()| self.receiver.try_recv())
+        repeat(()).scan((), move |&mut (), ()| {
+            // trace!("Drain escape");
+            if !self.receiver.is_empty() {
+                self.receiver.recv()
+            } else {
+                None
+            }
+        })
     }
 }
 
@@ -105,8 +117,8 @@ impl<T> Drop for Terminal<T> {
             ManuallyDrop::drop(&mut self.sender);
             match self.receiver.recv() {
                 None => {}
-                _ => {
-                    panic!("Terminal must be dropped after all `Escape`s");
+                Some(_) => {
+                    error!("Terminal must be dropped after all `Escape`s");
                 }
             }
         }
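The `Escape`/`Terminal` pair implements deferred destruction: an `Escape` guard does not drop its value, it sends it back over a channel, and the owning `Terminal` drains those values later at a safe point. Below is a minimal, self-contained illustration of the same pattern using `std::sync::mpsc` instead of the channel type used here; the names are hypothetical.

    use std::sync::mpsc::{channel, Receiver, Sender};

    // Guard that returns its value to the terminal instead of dropping it.
    struct EscapeLike<T> {
        value: Option<T>,
        sender: Sender<T>,
    }

    impl<T> Drop for EscapeLike<T> {
        fn drop(&mut self) {
            if let Some(value) = self.value.take() {
                // If the terminal is already gone, the value is simply dropped.
                let _ = self.sender.send(value);
            }
        }
    }

    // Collector that hands out guards and later drains whatever they returned.
    struct TerminalLike<T> {
        sender: Sender<T>,
        receiver: Receiver<T>,
    }

    impl<T> TerminalLike<T> {
        fn new() -> Self {
            let (sender, receiver) = channel();
            TerminalLike { sender, receiver }
        }

        fn escape(&self, value: T) -> EscapeLike<T> {
            EscapeLike {
                value: Some(value),
                sender: self.sender.clone(),
            }
        }

        // Non-blocking drain, analogous to `Terminal::drain` above.
        fn drain(&mut self) -> impl Iterator<Item = T> + '_ {
            self.receiver.try_iter()
        }
    }

    fn main() {
        let mut terminal = TerminalLike::new();
        drop(terminal.escape(String::from("resource")));
        assert_eq!(terminal.drain().count(), 1);
    }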
diff --git a/resource/src/image/format.rs b/resource/src/image/format.rs
deleted file mode 100644
index b0b30468..00000000
--- a/resource/src/image/format.rs
+++ /dev/null
@@ -1,270 +0,0 @@
-//! This module defines formats of the image supported by the implementation.
-
-pub use self::Format::*;
-
-/// Image format.
-#[allow(bad_style, missing_docs)]
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub enum Format {
-    UNDEFINED = 0,
-    R4G4_UNORM_PACK8 = 1,
-    R4G4B4A4_UNORM_PACK16 = 2,
-    B4G4R4A4_UNORM_PACK16 = 3,
-    R5G6B5_UNORM_PACK16 = 4,
-    B5G6R5_UNORM_PACK16 = 5,
-    R5G5B5A1_UNORM_PACK16 = 6,
-    B5G5R5A1_UNORM_PACK16 = 7,
-    A1R5G5B5_UNORM_PACK16 = 8,
-    R8_UNORM = 9,
-    R8_SNORM = 10,
-    R8_USCALED = 11,
-    R8_SSCALED = 12,
-    R8_UINT = 13,
-    R8_SINT = 14,
-    R8_SRGB = 15,
-    R8G8_UNORM = 16,
-    R8G8_SNORM = 17,
-    R8G8_USCALED = 18,
-    R8G8_SSCALED = 19,
-    R8G8_UINT = 20,
-    R8G8_SINT = 21,
-    R8G8_SRGB = 22,
-    R8G8B8_UNORM = 23,
-    R8G8B8_SNORM = 24,
-    R8G8B8_USCALED = 25,
-    R8G8B8_SSCALED = 26,
-    R8G8B8_UINT = 27,
-    R8G8B8_SINT = 28,
-    R8G8B8_SRGB = 29,
-    B8G8R8_UNORM = 30,
-    B8G8R8_SNORM = 31,
-    B8G8R8_USCALED = 32,
-    B8G8R8_SSCALED = 33,
-    B8G8R8_UINT = 34,
-    B8G8R8_SINT = 35,
-    B8G8R8_SRGB = 36,
-    R8G8B8A8_UNORM = 37,
-    R8G8B8A8_SNORM = 38,
-    R8G8B8A8_USCALED = 39,
-    R8G8B8A8_SSCALED = 40,
-    R8G8B8A8_UINT = 41,
-    R8G8B8A8_SINT = 42,
-    R8G8B8A8_SRGB = 43,
-    B8G8R8A8_UNORM = 44,
-    B8G8R8A8_SNORM = 45,
-    B8G8R8A8_USCALED = 46,
-    B8G8R8A8_SSCALED = 47,
-    B8G8R8A8_UINT = 48,
-    B8G8R8A8_SINT = 49,
-    B8G8R8A8_SRGB = 50,
-    A8B8G8R8_UNORM_PACK32 = 51,
-    A8B8G8R8_SNORM_PACK32 = 52,
-    A8B8G8R8_USCALED_PACK32 = 53,
-    A8B8G8R8_SSCALED_PACK32 = 54,
-    A8B8G8R8_UINT_PACK32 = 55,
-    A8B8G8R8_SINT_PACK32 = 56,
-    A8B8G8R8_SRGB_PACK32 = 57,
-    A2R10G10B10_UNORM_PACK32 = 58,
-    A2R10G10B10_SNORM_PACK32 = 59,
-    A2R10G10B10_USCALED_PACK32 = 60,
-    A2R10G10B10_SSCALED_PACK32 = 61,
-    A2R10G10B10_UINT_PACK32 = 62,
-    A2R10G10B10_SINT_PACK32 = 63,
-    A2B10G10R10_UNORM_PACK32 = 64,
-    A2B10G10R10_SNORM_PACK32 = 65,
-    A2B10G10R10_USCALED_PACK32 = 66,
-    A2B10G10R10_SSCALED_PACK32 = 67,
-    A2B10G10R10_UINT_PACK32 = 68,
-    A2B10G10R10_SINT_PACK32 = 69,
-    R16_UNORM = 70,
-    R16_SNORM = 71,
-    R16_USCALED = 72,
-    R16_SSCALED = 73,
-    R16_UINT = 74,
-    R16_SINT = 75,
-    R16_SFLOAT = 76,
-    R16G16_UNORM = 77,
-    R16G16_SNORM = 78,
-    R16G16_USCALED = 79,
-    R16G16_SSCALED = 80,
-    R16G16_UINT = 81,
-    R16G16_SINT = 82,
-    R16G16_SFLOAT = 83,
-    R16G16B16_UNORM = 84,
-    R16G16B16_SNORM = 85,
-    R16G16B16_USCALED = 86,
-    R16G16B16_SSCALED = 87,
-    R16G16B16_UINT = 88,
-    R16G16B16_SINT = 89,
-    R16G16B16_SFLOAT = 90,
-    R16G16B16A16_UNORM = 91,
-    R16G16B16A16_SNORM = 92,
-    R16G16B16A16_USCALED = 93,
-    R16G16B16A16_SSCALED = 94,
-    R16G16B16A16_UINT = 95,
-    R16G16B16A16_SINT = 96,
-    R16G16B16A16_SFLOAT = 97,
-    R32_UINT = 98,
-    R32_SINT = 99,
-    R32_SFLOAT = 100,
-    R32G32_UINT = 101,
-    R32G32_SINT = 102,
-    R32G32_SFLOAT = 103,
-    R32G32B32_UINT = 104,
-    R32G32B32_SINT = 105,
-    R32G32B32_SFLOAT = 106,
-    R32G32B32A32_UINT = 107,
-    R32G32B32A32_SINT = 108,
-    R32G32B32A32_SFLOAT = 109,
-    R64_UINT = 110,
-    R64_SINT = 111,
-    R64_SFLOAT = 112,
-    R64G64_UINT = 113,
-    R64G64_SINT = 114,
-    R64G64_SFLOAT = 115,
-    R64G64B64_UINT = 116,
-    R64G64B64_SINT = 117,
-    R64G64B64_SFLOAT = 118,
-    R64G64B64A64_UINT = 119,
-    R64G64B64A64_SINT = 120,
-    R64G64B64A64_SFLOAT = 121,
-    B10G11R11_UFLOAT_PACK32 = 122,
-    E5B9G9R9_UFLOAT_PACK32 = 123,
-    D16_UNORM = 124,
-    X8_D24_UNORM_PACK32 = 125,
-    D32_SFLOAT = 126,
-    S8_UINT = 127,
-    D16_UNORM_S8_UINT = 128,
-    D24_UNORM_S8_UINT = 129,
-    D32_SFLOAT_S8_UINT = 130,
-    BC1_RGB_UNORM_BLOCK = 131,
-    BC1_RGB_SRGB_BLOCK = 132,
-    BC1_RGBA_UNORM_BLOCK = 133,
-    BC1_RGBA_SRGB_BLOCK = 134,
-    BC2_UNORM_BLOCK = 135,
-    BC2_SRGB_BLOCK = 136,
-    BC3_UNORM_BLOCK = 137,
-    BC3_SRGB_BLOCK = 138,
-    BC4_UNORM_BLOCK = 139,
-    BC4_SNORM_BLOCK = 140,
-    BC5_UNORM_BLOCK = 141,
-    BC5_SNORM_BLOCK = 142,
-    BC6H_UFLOAT_BLOCK = 143,
-    BC6H_SFLOAT_BLOCK = 144,
-    BC7_UNORM_BLOCK = 145,
-    BC7_SRGB_BLOCK = 146,
-    ETC2_R8G8B8_UNORM_BLOCK = 147,
-    ETC2_R8G8B8_SRGB_BLOCK = 148,
-    ETC2_R8G8B8A1_UNORM_BLOCK = 149,
-    ETC2_R8G8B8A1_SRGB_BLOCK = 150,
-    ETC2_R8G8B8A8_UNORM_BLOCK = 151,
-    ETC2_R8G8B8A8_SRGB_BLOCK = 152,
-    EAC_R11_UNORM_BLOCK = 153,
-    EAC_R11_SNORM_BLOCK = 154,
-    EAC_R11G11_UNORM_BLOCK = 155,
-    EAC_R11G11_SNORM_BLOCK = 156,
-    ASTC_4x4_UNORM_BLOCK = 157,
-    ASTC_4x4_SRGB_BLOCK = 158,
-    ASTC_5x4_UNORM_BLOCK = 159,
-    ASTC_5x4_SRGB_BLOCK = 160,
-    ASTC_5x5_UNORM_BLOCK = 161,
-    ASTC_5x5_SRGB_BLOCK = 162,
-    ASTC_6x5_UNORM_BLOCK = 163,
-    ASTC_6x5_SRGB_BLOCK = 164,
-    ASTC_6x6_UNORM_BLOCK = 165,
-    ASTC_6x6_SRGB_BLOCK = 166,
-    ASTC_8x5_UNORM_BLOCK = 167,
-    ASTC_8x5_SRGB_BLOCK = 168,
-    ASTC_8x6_UNORM_BLOCK = 169,
-    ASTC_8x6_SRGB_BLOCK = 170,
-    ASTC_8x8_UNORM_BLOCK = 171,
-    ASTC_8x8_SRGB_BLOCK = 172,
-    ASTC_10x5_UNORM_BLOCK = 173,
-    ASTC_10x5_SRGB_BLOCK = 174,
-    ASTC_10x6_UNORM_BLOCK = 175,
-    ASTC_10x6_SRGB_BLOCK = 176,
-    ASTC_10x8_UNORM_BLOCK = 177,
-    ASTC_10x8_SRGB_BLOCK = 178,
-    ASTC_10x10_UNORM_BLOCK = 179,
-    ASTC_10x10_SRGB_BLOCK = 180,
-    ASTC_12x10_UNORM_BLOCK = 181,
-    ASTC_12x10_SRGB_BLOCK = 182,
-    ASTC_12x12_UNORM_BLOCK = 183,
-    ASTC_12x12_SRGB_BLOCK = 184,
-    G8B8G8R8_422_UNORM = 1000156000,
-    B8G8R8G8_422_UNORM = 1000156001,
-    G8_B8_R8_3PLANE_420_UNORM = 1000156002,
-    G8_B8R8_2PLANE_420_UNORM = 1000156003,
-    G8_B8_R8_3PLANE_422_UNORM = 1000156004,
-    G8_B8R8_2PLANE_422_UNORM = 1000156005,
-    G8_B8_R8_3PLANE_444_UNORM = 1000156006,
-    R10X6_UNORM_PACK16 = 1000156007,
-    R10X6G10X6_UNORM_2PACK16 = 1000156008,
-    R10X6G10X6B10X6A10X6_UNORM_4PACK16 = 1000156009,
-    G10X6B10X6G10X6R10X6_422_UNORM_4PACK16 = 1000156010,
-    B10X6G10X6R10X6G10X6_422_UNORM_4PACK16 = 1000156011,
-    G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16 = 1000156012,
-    G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 = 1000156013,
-    G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16 = 1000156014,
-    G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16 = 1000156015,
-    G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16 = 1000156016,
-    R12X4_UNORM_PACK16 = 1000156017,
-    R12X4G12X4_UNORM_2PACK16 = 1000156018,
-    R12X4G12X4B12X4A12X4_UNORM_4PACK16 = 1000156019,
-    G12X4B12X4G12X4R12X4_422_UNORM_4PACK16 = 1000156020,
-    B12X4G12X4R12X4G12X4_422_UNORM_4PACK16 = 1000156021,
-    G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16 = 1000156022,
-    G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16 = 1000156023,
-    G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16 = 1000156024,
-    G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16 = 1000156025,
-    G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16 = 1000156026,
-    G16B16G16R16_422_UNORM = 1000156027,
-    B16G16R16G16_422_UNORM = 1000156028,
-    G16_B16_R16_3PLANE_420_UNORM = 1000156029,
-    G16_B16R16_2PLANE_420_UNORM = 1000156030,
-    G16_B16_R16_3PLANE_422_UNORM = 1000156031,
-    G16_B16R16_2PLANE_422_UNORM = 1000156032,
-    G16_B16_R16_3PLANE_444_UNORM = 1000156033,
-    PVRTC1_2BPP_UNORM_BLOCK_IMG = 1000054000,
-    PVRTC1_4BPP_UNORM_BLOCK_IMG = 1000054001,
-    PVRTC2_2BPP_UNORM_BLOCK_IMG = 1000054002,
-    PVRTC2_4BPP_UNORM_BLOCK_IMG = 1000054003,
-    PVRTC1_2BPP_SRGB_BLOCK_IMG = 1000054004,
-    PVRTC1_4BPP_SRGB_BLOCK_IMG = 1000054005,
-    PVRTC2_2BPP_SRGB_BLOCK_IMG = 1000054006,
-    PVRTC2_4BPP_SRGB_BLOCK_IMG = 1000054007,
-    // G8B8G8R8_422_UNORM_KHR = Format::G8B8G8R8_422_UNORM as isize,
-    // B8G8R8G8_422_UNORM_KHR = Format::B8G8R8G8_422_UNORM as isize,
-    // G8_B8_R8_3PLANE_420_UNORM_KHR = Format::G8_B8_R8_3PLANE_420_UNORM as isize,
-    // G8_B8R8_2PLANE_420_UNORM_KHR = Format::G8_B8R8_2PLANE_420_UNORM as isize,
-    // G8_B8_R8_3PLANE_422_UNORM_KHR = Format::G8_B8_R8_3PLANE_422_UNORM as isize,
-    // G8_B8R8_2PLANE_422_UNORM_KHR = Format::G8_B8R8_2PLANE_422_UNORM as isize,
-    // G8_B8_R8_3PLANE_444_UNORM_KHR = Format::G8_B8_R8_3PLANE_444_UNORM as isize,
-    // R10X6_UNORM_PACK16_KHR = Format::R10X6_UNORM_PACK16 as isize,
-    // R10X6G10X6_UNORM_2PACK16_KHR = Format::R10X6G10X6_UNORM_2PACK16 as isize,
-    // R10X6G10X6B10X6A10X6_UNORM_4PACK16_KHR = Format::R10X6G10X6B10X6A10X6_UNORM_4PACK16 as isize,
-    // G10X6B10X6G10X6R10X6_422_UNORM_4PACK16_KHR = Format::G10X6B10X6G10X6R10X6_422_UNORM_4PACK16 as isize,
-    // B10X6G10X6R10X6G10X6_422_UNORM_4PACK16_KHR = Format::B10X6G10X6R10X6G10X6_422_UNORM_4PACK16 as isize,
-    // G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR = Format::G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16 as isize,
-    // G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16_KHR = Format::G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 as isize,
-    // G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16_KHR = Format::G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16 as isize,
-    // G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16_KHR = Format::G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16 as isize,
-    // G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16_KHR = Format::G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16 as isize,
-    // R12X4_UNORM_PACK16_KHR = Format::R12X4_UNORM_PACK16 as isize,
-    // R12X4G12X4_UNORM_2PACK16_KHR = Format::R12X4G12X4_UNORM_2PACK16 as isize,
-    // R12X4G12X4B12X4A12X4_UNORM_4PACK16_KHR = Format::R12X4G12X4B12X4A12X4_UNORM_4PACK16 as isize,
-    // G12X4B12X4G12X4R12X4_422_UNORM_4PACK16_KHR = Format::G12X4B12X4G12X4R12X4_422_UNORM_4PACK16 as isize,
-    // B12X4G12X4R12X4G12X4_422_UNORM_4PACK16_KHR = Format::B12X4G12X4R12X4G12X4_422_UNORM_4PACK16 as isize,
-    // G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16_KHR = Format::G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16 as isize,
-    // G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16_KHR = Format::G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16 as isize,
-    // G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16_KHR = Format::G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16 as isize,
-    // G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16_KHR = Format::G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16 as isize,
-    // G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16_KHR = Format::G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16 as isize,
-    // G16B16G16R16_422_UNORM_KHR = Format::G16B16G16R16_422_UNORM as isize,
-    // B16G16R16G16_422_UNORM_KHR = Format::B16G16R16G16_422_UNORM as isize,
-    // G16_B16_R16_3PLANE_420_UNORM_KHR = Format::G16_B16_R16_3PLANE_420_UNORM as isize,
-    // G16_B16R16_2PLANE_420_UNORM_KHR = Format::G16_B16R16_2PLANE_420_UNORM as isize,
-    // G16_B16_R16_3PLANE_422_UNORM_KHR = Format::G16_B16_R16_3PLANE_422_UNORM as isize,
-    // G16_B16R16_2PLANE_422_UNORM_KHR = Format::G16_B16R16_2PLANE_422_UNORM as isize,
-    // G16_B16_R16_3PLANE_444_UNORM_KHR = Format::G16_B16_R16_3PLANE_444_UNORM as isize,
-}
diff --git a/resource/src/image/mod.rs b/resource/src/image/mod.rs
index 7ed5d0c3..5e10da6f 100644
--- a/resource/src/image/mod.rs
+++ b/resource/src/image/mod.rs
@@ -1,180 +1,15 @@
 //! Image usage, format, kind, extent, creation-info and wrappers.
 
-pub mod format;
 mod usage;
 
-pub use self::format::Format;
 pub use self::usage::*;
 
-use memory::MemoryBlock;
+use ash::vk;
+
+use memory::{Block, MemoryBlock};
 use relevant::Relevant;
 
 use escape::Escape;
-use SharingMode;
-
-/// Image dimensionality
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub enum Kind {
-    /// Image with single dimension. A line.
-    D1,
-
-    /// Two-dimensional image. Most widely used image kind.
-    D2,
-
-    /// Full 3D image. Can represent volumetric textures.
-    D3,
-}
-
-/// Image size. Unused dimensions must have size `1`.
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub struct Extent3D {
-    /// The width of the extent
-    pub width: u32,
-    /// The height of the extent
-    pub height: u32,
-    /// The depth of the extent
-    pub depth: u32,
-}
-
-bitflags! {
-    /// Bitmask specifying sample counts supported for an image used for storage operations.
-    /// See Vulkan docs for detailed info:
-    /// <https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/VkSampleCountFlagBits.html>
-    #[repr(transparent)]
-    pub struct SampleCountFlags: u32 {
-        /// Specifies an image with one sample per pixel.
-        const SAMPLE_COUNT_1 = 0x00000001;
-        /// Specifies an image with 2 sample per pixel.
-        const SAMPLE_COUNT_2 = 0x00000002;
-        /// Specifies an image with 4 sample per pixel.
-        const SAMPLE_COUNT_4 = 0x00000004;
-        /// Specifies an image with 8 sample per pixel.
-        const SAMPLE_COUNT_8 = 0x00000008;
-        /// Specifies an image with 16 sample per pixel.
-        const SAMPLE_COUNT_16 = 0x00000010;
-        /// Specifies an image with 32 sample per pixel.
-        const SAMPLE_COUNT_32 = 0x00000020;
-        /// Specifies an image with 64 sample per pixel.
-        const SAMPLE_COUNT_64 = 0x00000040;
-    }
-}
-
-/// Image tiling type.
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub enum ImageTiling {
-    /// Implementation-defined tiling mode. Texels are arranged for more optimal memory access.
-    Optimal = 0,
-
-    /// Texels are laid in row-major order.
-    Linear = 1,
-}
-
-/// Image layout.
-/// Different layouts support different sets of device accesses.
-/// See Vulkan docs for details:
-/// <https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#resources-image-layouts>
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub enum Layout {
-    /// Not an actual layout.
-    /// It can be used as source layout in layout transition
-    /// in which case transition is no-op and image is just
-    /// interpreted to have destination layout.
-    /// No other operations can be performed with this layout.
-    Undefined = 0,
-
-    /// Supports all types of device access.
-    /// But access could be sub-optimal.
-    General = 1,
-
-    /// Images with this layout can be used as color and resolve attachments.
-    ColorAttachmentOptimal = 2,
-
-    /// Images with this layout can be used as depth-stencil attachments.
-    DepthStencilAttachmentOptimal = 3,
-
-    /// Images with this layout can be used as read-only depth-stencil attachments
-    /// or as read-only image in shader.
-    DepthStencilReadOnlyOptimal = 4,
-
-    /// Images with this layout can be used as read-only shader image.
-    ShaderReadOnlyOptimal = 5,
-
-    /// Images with this layout can be used as source for transfer operations.
-    TransferSrcOptimal = 6,
-
-    /// Images with this layout can be used as destination for transfer operations.
-    TransferDstOptimal = 7,
-
-    /// Image in this layout can be transitioned to another layout while preserving content.
-    /// This layout usable as initial layout for image which content will be written by the host.
-    Preinitialized = 8,
-
-    /// Images with this layout can be used as depth-stencil attachments where
-    /// depth aspect is read-only and/or as read-only image in shader where only depth aspect is accessed.
-    DepthReadOnlyStencilAttachmentOptimal = 1000117000,
-
-    /// Images with this layout can be used as depth-stencil attachments where
-    /// stencil aspect is read-only and/or as read-only image in shader where only stencil aspect is accessed.
-    DepthAttachmentStencilReadOnlyOptimal = 1000117001,
-
-    /// Image with this layout can be presented to the surface.
-    /// Only images from swapchain are presentable.
-    /// Note: Images can't be presented in `General` layout.
-    PresentSrc = 1000001002,
-
-    /// This layout is only valid for shared presentable images.
-    /// They can be used for any operations such image supports.
-    SharedPresentSrc = 1000111000,
-}
-
-bitflags! {
-    /// Bitmask specifying capabilities to create views into an image.
-    /// See Vulkan docs for details:
-    /// <https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#VkImageCreateFlagBits>
-    #[repr(transparent)]
-    pub struct ImageCreateFlags: u32 {
-        /// Specifies that the image can be used to create a view with a different format from the image.
-        const IMAGE_CREATE_MUTABLE_FORMAT = 0x00000008;
-        /// Specifies that the image can be used to create a cube or cube array view.
-        const IMAGE_CREATE_CUBE_COMPATIBLE = 0x00000010;
-        /// Specifies that the image can be used to create a 2D array view.
-        const IMAGE_CREATE_2D_ARRAY_COMPATIBLE = 0x00000020;
-    }
-}
-
-/// Contains information required to create an image.
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub struct CreateInfo {
-    /// Image dimensionality.
-    pub kind: Kind,
-
-    /// Image format.
-    pub format: Format,
-
-    /// Image size.
-    pub extent: Extent3D,
-
-    /// Number of mip levels to generate.
-    pub mips: u32,
-
-    /// Number of image layers.
-    pub array: u32,
-
-    /// Number of samples per texel.
-    pub samples: SampleCountFlags,
-
-    /// Tiling of the image.
-    pub tiling: ImageTiling,
-
-    /// Intended usage flags. Limits memory types suitable for the image.
-    pub usage: UsageFlags,
-
-    /// Specifies command queues from which families can access the image.
-    pub sharing: SharingMode,
-
-    /// Specifies what kind of views can be created from the image.
-    pub flags: ImageCreateFlags,
-}
 
 /// Generic image object wrapper.
 ///
@@ -183,14 +18,40 @@ pub struct CreateInfo {
 /// `T` - type of the memory object of memory block.
 /// `B` - raw image type.
 #[derive(Debug)]
-pub struct Image<M, I> {
-    pub(super) inner: Escape<Inner<M, I>>,
-    pub(super) info: CreateInfo,
+pub struct Image {
+    pub(super) inner: Escape<Inner>,
+    pub(super) info: vk::ImageCreateInfo,
 }
 
 #[derive(Debug)]
-pub(super) struct Inner<M, I> {
-    pub(super) block: MemoryBlock<M>,
-    pub(super) raw: I,
+pub(super) struct Inner {
+    pub(super) block: MemoryBlock,
+    pub(super) raw: vk::Image,
     pub(super) relevant: Relevant,
 }
+
+impl Image {
+    /// Get the image's memory block.
+    pub fn block(&self) -> &impl Block {
+        &self.inner.block
+    }
+
+    /// Get a mutable reference to the image's memory block.
+    pub fn block_mut(&mut self) -> &mut impl Block {
+        &mut self.inner.block
+    }
+
+    /// Get raw image handle.
+    ///
+    /// # Safety
+    ///
+    /// The raw image handle must not be used to violate this object's valid usage.
+    pub unsafe fn raw(&self) -> vk::Image {
+        self.inner.raw
+    }
+
+    /// Get extent of the image.
+    pub fn extent(&self) -> vk::Extent3D {
+        self.info.extent
+    }
+}
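The new accessors are enough for simple bookkeeping without touching the raw handle. A hedged sketch of a hypothetical helper, assuming only the imports already in this module:

    // Rough texel count derived from the stored create info; the real allocation
    // size also depends on format, mip levels and array layers.
    fn texel_count(image: &Image) -> u64 {
        let e = image.extent();
        u64::from(e.width) * u64::from(e.height) * u64::from(e.depth)
    }

    // The raw handle is only needed when recording actual Vulkan commands:
    // let raw = unsafe { image.raw() };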
diff --git a/resource/src/image/usage.rs b/resource/src/image/usage.rs
index 0d06df4a..052843b6 100644
--- a/resource/src/image/usage.rs
+++ b/resource/src/image/usage.rs
@@ -1,35 +1,77 @@
-bitflags! {
-    /// Bitmask specifying intended usage of an image.
-    /// See Vulkan docs for detailed info:
-    /// <https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/VkImageUsageFlagBits.html>
-    #[repr(transparent)]
-    pub struct UsageFlags: u32 {
-        /// Specifies that the image can be used as the source of a transfer command.
-        const TRANSFER_SRC = 0x00000001;
-
-        /// Specifies that the image can be used as the destination of a transfer command.
-        const TRANSFER_DST = 0x00000002;
-
-        /// Specifies that the image can be used to create a `ImageView` suitable for occupying a descriptor set slot either of
-        /// type `SAMPLED_IMAGE` or `COMBINED_IMAGE_SAMPLER`, and be sampled by a shader.
-        const SAMPLED = 0x00000004;
-
-        /// Specifies that the image can be used to create a `ImageView` suitable for occupying a descriptor set slot of type `STORAGE_IMAGE`.
-        const STORAGE = 0x00000008;
-
-        /// Specifies that the image can be used to create a `ImageView` suitable for use as a color or resolve attachment in a `Framebuffer`.
-        const COLOR_ATTACHMENT = 0x00000010;
-
-        /// Specifies that the image can be used to create a `ImageView` suitable for use as a depth/stencil attachment in a `Framebuffer`.
-        const DEPTH_STENCIL_ATTACHMENT = 0x00000020;
-
-        /// Specifies that the memory bound to this image will have been allocated with the VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT
-        /// (see <https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#memory> for more detail).
-        /// This bit can be set for any image that can be used to create a `ImageView` suitable for use as a color, resolve, depth/stencil, or input attachment.
-        const TRANSIENT_ATTACHMENT = 0x00000040;
-
-        /// Specifies that the image can be used to create a `ImageView` suitable for occupying descriptor set slot of type `INPUT_ATTACHMENT`;
-        /// be read from a shader as an input attachment; and be used as an input attachment in a framebuffer.
-        const INPUT_ATTACHMENT = 0x00000080;
+use ash::vk::ImageUsageFlags;
+use memory::usage::{Data, MemoryUsage, MemoryUsageValue};
+
+/// Usage trait that must be implemented by usage types.
+/// This trait provides a way to convert type-level usage into value-level flags.
+pub trait Usage {
+    /// Suggested memory usage type.
+    type MemoryUsage: MemoryUsage;
+
+    /// Convert usage to the flags.
+    fn flags(&self) -> ImageUsageFlags;
+
+    /// Get suggested memory usage.
+    fn memory(&self) -> Self::MemoryUsage;
+}
+
+impl Usage for (ImageUsageFlags, MemoryUsageValue) {
+    type MemoryUsage = MemoryUsageValue;
+
+    fn flags(&self) -> ImageUsageFlags {
+        self.0
+    }
+
+    fn memory(&self) -> MemoryUsageValue {
+        self.1
+    }
+}
+
+/// Type that specifies that the image is intended to be used as a texture.
+/// It implies `TRANSFER_DST` because device-local, host-invisible memory should be used,
+/// leaving transfer as the only way to fill the image.
+#[derive(Clone, Copy, Debug)]
+pub struct Texture;
+
+impl Usage for Texture {
+    type MemoryUsage = Data;
+
+    fn flags(&self) -> ImageUsageFlags {
+        ImageUsageFlags::TRANSFER_DST | ImageUsageFlags::SAMPLED
+    }
+
+    fn memory(&self) -> Data {
+        Data
+    }
+}
+
+/// Type that specifies that the image is intended to be used as a render target and storage image.
+#[derive(Clone, Copy, Debug)]
+pub struct RenderTargetStorage;
+
+impl Usage for RenderTargetStorage {
+    type MemoryUsage = Data;
+
+    fn flags(&self) -> ImageUsageFlags {
+        ImageUsageFlags::COLOR_ATTACHMENT | ImageUsageFlags::STORAGE
+    }
+
+    fn memory(&self) -> Data {
+        Data
+    }
+}
+
+/// Type that specifies that the image is intended to be used as a render target and sampled image.
+#[derive(Clone, Copy, Debug)]
+pub struct RenderTargetSampled;
+
+impl Usage for RenderTargetSampled {
+    type MemoryUsage = Data;
+
+    fn flags(&self) -> ImageUsageFlags {
+        ImageUsageFlags::COLOR_ATTACHMENT | ImageUsageFlags::SAMPLED
+    }
+
+    fn memory(&self) -> Data {
+        Data
     }
 }
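Extending this scheme with another usage type only requires choosing a flag combination and a memory-usage hint. A sketch of a hypothetical depth-stencil usage in the same style (not part of this diff):

    /// Hypothetical usage type: depth-stencil attachment that is also sampled.
    #[derive(Clone, Copy, Debug)]
    pub struct DepthStencilSampled;

    impl Usage for DepthStencilSampled {
        type MemoryUsage = Data;

        fn flags(&self) -> ImageUsageFlags {
            ImageUsageFlags::DEPTH_STENCIL_ATTACHMENT | ImageUsageFlags::SAMPLED
        }

        fn memory(&self) -> Data {
            Data
        }
    }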
diff --git a/resource/src/impls/ash.rs b/resource/src/impls/ash.rs
deleted file mode 100644
index 476b5f00..00000000
--- a/resource/src/impls/ash.rs
+++ /dev/null
@@ -1,558 +0,0 @@
-use std::ptr;
-
-use ash::{
-    self,
-    version::{DeviceV1_0, FunctionPointers},
-    vk,
-};
-
-use buffer;
-use device::Device;
-use error;
-use image;
-use memory;
-use MemoryRequirements;
-use SharingMode;
-
-impl From<vk::Format> for image::Format {
-    fn from(f: vk::Format) -> Self {
-        use self::vk::Format::*;
-        use image::Format;
-        match f {
-            Undefined => Format::UNDEFINED,
-            R4g4UnormPack8 => Format::R4G4_UNORM_PACK8,
-            R4g4b4a4UnormPack16 => Format::R4G4B4A4_UNORM_PACK16,
-            B4g4r4a4UnormPack16 => Format::B4G4R4A4_UNORM_PACK16,
-            R5g6b5UnormPack16 => Format::R5G6B5_UNORM_PACK16,
-            B5g6r5UnormPack16 => Format::B5G6R5_UNORM_PACK16,
-            R5g5b5a1UnormPack16 => Format::R5G5B5A1_UNORM_PACK16,
-            B5g5r5a1UnormPack16 => Format::B5G5R5A1_UNORM_PACK16,
-            A1r5g5b5UnormPack16 => Format::A1R5G5B5_UNORM_PACK16,
-            R8Unorm => Format::R8_UNORM,
-            R8Snorm => Format::R8_SNORM,
-            R8Uscaled => Format::R8_USCALED,
-            R8Sscaled => Format::R8_SSCALED,
-            R8Uint => Format::R8_UINT,
-            R8Sint => Format::R8_SINT,
-            R8Srgb => Format::R8_SRGB,
-            R8g8Unorm => Format::R8G8_UNORM,
-            R8g8Snorm => Format::R8G8_SNORM,
-            R8g8Uscaled => Format::R8G8_USCALED,
-            R8g8Sscaled => Format::R8G8_SSCALED,
-            R8g8Uint => Format::R8G8_UINT,
-            R8g8Sint => Format::R8G8_SINT,
-            R8g8Srgb => Format::R8G8_SRGB,
-            R8g8b8Unorm => Format::R8G8B8_UNORM,
-            R8g8b8Snorm => Format::R8G8B8_SNORM,
-            R8g8b8Uscaled => Format::R8G8B8_USCALED,
-            R8g8b8Sscaled => Format::R8G8B8_SSCALED,
-            R8g8b8Uint => Format::R8G8B8_UINT,
-            R8g8b8Sint => Format::R8G8B8_SINT,
-            R8g8b8Srgb => Format::R8G8B8_SRGB,
-            B8g8r8Unorm => Format::B8G8R8_UNORM,
-            B8g8r8Snorm => Format::B8G8R8_SNORM,
-            B8g8r8Uscaled => Format::B8G8R8_USCALED,
-            B8g8r8Sscaled => Format::B8G8R8_SSCALED,
-            B8g8r8Uint => Format::B8G8R8_UINT,
-            B8g8r8Sint => Format::B8G8R8_SINT,
-            B8g8r8Srgb => Format::B8G8R8_SRGB,
-            R8g8b8a8Unorm => Format::R8G8B8A8_UNORM,
-            R8g8b8a8Snorm => Format::R8G8B8A8_SNORM,
-            R8g8b8a8Uscaled => Format::R8G8B8A8_USCALED,
-            R8g8b8a8Sscaled => Format::R8G8B8A8_SSCALED,
-            R8g8b8a8Uint => Format::R8G8B8A8_UINT,
-            R8g8b8a8Sint => Format::R8G8B8A8_SINT,
-            R8g8b8a8Srgb => Format::R8G8B8A8_SRGB,
-            B8g8r8a8Unorm => Format::B8G8R8A8_UNORM,
-            B8g8r8a8Snorm => Format::B8G8R8A8_SNORM,
-            B8g8r8a8Uscaled => Format::B8G8R8A8_USCALED,
-            B8g8r8a8Sscaled => Format::B8G8R8A8_SSCALED,
-            B8g8r8a8Uint => Format::B8G8R8A8_UINT,
-            B8g8r8a8Sint => Format::B8G8R8A8_SINT,
-            B8g8r8a8Srgb => Format::B8G8R8A8_SRGB,
-            A8b8g8r8UnormPack32 => Format::A8B8G8R8_UNORM_PACK32,
-            A8b8g8r8SnormPack32 => Format::A8B8G8R8_SNORM_PACK32,
-            A8b8g8r8UscaledPack32 => Format::A8B8G8R8_USCALED_PACK32,
-            A8b8g8r8SscaledPack32 => Format::A8B8G8R8_SSCALED_PACK32,
-            A8b8g8r8UintPack32 => Format::A8B8G8R8_UINT_PACK32,
-            A8b8g8r8SintPack32 => Format::A8B8G8R8_SINT_PACK32,
-            A8b8g8r8SrgbPack32 => Format::A8B8G8R8_SRGB_PACK32,
-            A2r10g10b10UnormPack32 => Format::A2R10G10B10_UNORM_PACK32,
-            A2r10g10b10SnormPack32 => Format::A2R10G10B10_SNORM_PACK32,
-            A2r10g10b10UscaledPack32 => Format::A2R10G10B10_USCALED_PACK32,
-            A2r10g10b10SscaledPack32 => Format::A2R10G10B10_SSCALED_PACK32,
-            A2r10g10b10UintPack32 => Format::A2R10G10B10_UINT_PACK32,
-            A2r10g10b10SintPack32 => Format::A2R10G10B10_SINT_PACK32,
-            A2b10g10r10UnormPack32 => Format::A2B10G10R10_UNORM_PACK32,
-            A2b10g10r10SnormPack32 => Format::A2B10G10R10_SNORM_PACK32,
-            A2b10g10r10UscaledPack32 => Format::A2B10G10R10_USCALED_PACK32,
-            A2b10g10r10SscaledPack32 => Format::A2B10G10R10_SSCALED_PACK32,
-            A2b10g10r10UintPack32 => Format::A2B10G10R10_UINT_PACK32,
-            A2b10g10r10SintPack32 => Format::A2B10G10R10_SINT_PACK32,
-            R16Unorm => Format::R16_UNORM,
-            R16Snorm => Format::R16_SNORM,
-            R16Uscaled => Format::R16_USCALED,
-            R16Sscaled => Format::R16_SSCALED,
-            R16Uint => Format::R16_UINT,
-            R16Sint => Format::R16_SINT,
-            R16Sfloat => Format::R16_SFLOAT,
-            R16g16Unorm => Format::R16G16_UNORM,
-            R16g16Snorm => Format::R16G16_SNORM,
-            R16g16Uscaled => Format::R16G16_USCALED,
-            R16g16Sscaled => Format::R16G16_SSCALED,
-            R16g16Uint => Format::R16G16_UINT,
-            R16g16Sint => Format::R16G16_SINT,
-            R16g16Sfloat => Format::R16G16_SFLOAT,
-            R16g16b16Unorm => Format::R16G16B16_UNORM,
-            R16g16b16Snorm => Format::R16G16B16_SNORM,
-            R16g16b16Uscaled => Format::R16G16B16_USCALED,
-            R16g16b16Sscaled => Format::R16G16B16_SSCALED,
-            R16g16b16Uint => Format::R16G16B16_UINT,
-            R16g16b16Sint => Format::R16G16B16_SINT,
-            R16g16b16Sfloat => Format::R16G16B16_SFLOAT,
-            R16g16b16a16Unorm => Format::R16G16B16A16_UNORM,
-            R16g16b16a16Snorm => Format::R16G16B16A16_SNORM,
-            R16g16b16a16Uscaled => Format::R16G16B16A16_USCALED,
-            R16g16b16a16Sscaled => Format::R16G16B16A16_SSCALED,
-            R16g16b16a16Uint => Format::R16G16B16A16_UINT,
-            R16g16b16a16Sint => Format::R16G16B16A16_SINT,
-            R16g16b16a16Sfloat => Format::R16G16B16A16_SFLOAT,
-            R32Uint => Format::R32_UINT,
-            R32Sint => Format::R32_SINT,
-            R32Sfloat => Format::R32_SFLOAT,
-            R32g32Uint => Format::R32G32_UINT,
-            R32g32Sint => Format::R32G32_SINT,
-            R32g32Sfloat => Format::R32G32_SFLOAT,
-            R32g32b32Uint => Format::R32G32B32_UINT,
-            R32g32b32Sint => Format::R32G32B32_SINT,
-            R32g32b32Sfloat => Format::R32G32B32_SFLOAT,
-            R32g32b32a32Uint => Format::R32G32B32A32_UINT,
-            R32g32b32a32Sint => Format::R32G32B32A32_SINT,
-            R32g32b32a32Sfloat => Format::R32G32B32A32_SFLOAT,
-            R64Uint => Format::R64_UINT,
-            R64Sint => Format::R64_SINT,
-            R64Sfloat => Format::R64_SFLOAT,
-            R64g64Uint => Format::R64G64_UINT,
-            R64g64Sint => Format::R64G64_SINT,
-            R64g64Sfloat => Format::R64G64_SFLOAT,
-            R64g64b64Uint => Format::R64G64B64_UINT,
-            R64g64b64Sint => Format::R64G64B64_SINT,
-            R64g64b64Sfloat => Format::R64G64B64_SFLOAT,
-            R64g64b64a64Uint => Format::R64G64B64A64_UINT,
-            R64g64b64a64Sint => Format::R64G64B64A64_SINT,
-            R64g64b64a64Sfloat => Format::R64G64B64A64_SFLOAT,
-            B10g11r11UfloatPack32 => Format::B10G11R11_UFLOAT_PACK32,
-            E5b9g9r9UfloatPack32 => Format::E5B9G9R9_UFLOAT_PACK32,
-            D16Unorm => Format::D16_UNORM,
-            X8D24UnormPack32 => Format::X8_D24_UNORM_PACK32,
-            D32Sfloat => Format::D32_SFLOAT,
-            S8Uint => Format::S8_UINT,
-            D16UnormS8Uint => Format::D16_UNORM_S8_UINT,
-            D24UnormS8Uint => Format::D24_UNORM_S8_UINT,
-            D32SfloatS8Uint => Format::D32_SFLOAT_S8_UINT,
-            Bc1RgbUnormBlock => Format::BC1_RGB_UNORM_BLOCK,
-            Bc1RgbSrgbBlock => Format::BC1_RGB_SRGB_BLOCK,
-            Bc1RgbaUnormBlock => Format::BC1_RGBA_UNORM_BLOCK,
-            Bc1RgbaSrgbBlock => Format::BC1_RGBA_SRGB_BLOCK,
-            Bc2UnormBlock => Format::BC2_UNORM_BLOCK,
-            Bc2SrgbBlock => Format::BC2_SRGB_BLOCK,
-            Bc3UnormBlock => Format::BC3_UNORM_BLOCK,
-            Bc3SrgbBlock => Format::BC3_SRGB_BLOCK,
-            Bc4UnormBlock => Format::BC4_UNORM_BLOCK,
-            Bc4SnormBlock => Format::BC4_SNORM_BLOCK,
-            Bc5UnormBlock => Format::BC5_UNORM_BLOCK,
-            Bc5SnormBlock => Format::BC5_SNORM_BLOCK,
-            Bc6hUfloatBlock => Format::BC6H_UFLOAT_BLOCK,
-            Bc6hSfloatBlock => Format::BC6H_SFLOAT_BLOCK,
-            Bc7UnormBlock => Format::BC7_UNORM_BLOCK,
-            Bc7SrgbBlock => Format::BC7_SRGB_BLOCK,
-            Etc2R8g8b8UnormBlock => Format::ETC2_R8G8B8_UNORM_BLOCK,
-            Etc2R8g8b8SrgbBlock => Format::ETC2_R8G8B8_SRGB_BLOCK,
-            Etc2R8g8b8a1UnormBlock => Format::ETC2_R8G8B8A1_UNORM_BLOCK,
-            Etc2R8g8b8a1SrgbBlock => Format::ETC2_R8G8B8A1_SRGB_BLOCK,
-            Etc2R8g8b8a8UnormBlock => Format::ETC2_R8G8B8A8_UNORM_BLOCK,
-            Etc2R8g8b8a8SrgbBlock => Format::ETC2_R8G8B8A8_SRGB_BLOCK,
-            EacR11UnormBlock => Format::EAC_R11_UNORM_BLOCK,
-            EacR11SnormBlock => Format::EAC_R11_SNORM_BLOCK,
-            EacR11g11UnormBlock => Format::EAC_R11G11_UNORM_BLOCK,
-            EacR11g11SnormBlock => Format::EAC_R11G11_SNORM_BLOCK,
-            Astc4x4UnormBlock => Format::ASTC_4x4_UNORM_BLOCK,
-            Astc4x4SrgbBlock => Format::ASTC_4x4_SRGB_BLOCK,
-            Astc5x4UnormBlock => Format::ASTC_5x4_UNORM_BLOCK,
-            Astc5x4SrgbBlock => Format::ASTC_5x4_SRGB_BLOCK,
-            Astc5x5UnormBlock => Format::ASTC_5x5_UNORM_BLOCK,
-            Astc5x5SrgbBlock => Format::ASTC_5x5_SRGB_BLOCK,
-            Astc6x5UnormBlock => Format::ASTC_6x5_UNORM_BLOCK,
-            Astc6x5SrgbBlock => Format::ASTC_6x5_SRGB_BLOCK,
-            Astc6x6UnormBlock => Format::ASTC_6x6_UNORM_BLOCK,
-            Astc6x6SrgbBlock => Format::ASTC_6x6_SRGB_BLOCK,
-            Astc8x5UnormBlock => Format::ASTC_8x5_UNORM_BLOCK,
-            Astc8x5SrgbBlock => Format::ASTC_8x5_SRGB_BLOCK,
-            Astc8x6UnormBlock => Format::ASTC_8x6_UNORM_BLOCK,
-            Astc8x6SrgbBlock => Format::ASTC_8x6_SRGB_BLOCK,
-            Astc8x8UnormBlock => Format::ASTC_8x8_UNORM_BLOCK,
-            Astc8x8SrgbBlock => Format::ASTC_8x8_SRGB_BLOCK,
-            Astc10x5UnormBlock => Format::ASTC_10x5_UNORM_BLOCK,
-            Astc10x5SrgbBlock => Format::ASTC_10x5_SRGB_BLOCK,
-            Astc10x6UnormBlock => Format::ASTC_10x6_UNORM_BLOCK,
-            Astc10x6SrgbBlock => Format::ASTC_10x6_SRGB_BLOCK,
-            Astc10x8UnormBlock => Format::ASTC_10x8_UNORM_BLOCK,
-            Astc10x8SrgbBlock => Format::ASTC_10x8_SRGB_BLOCK,
-            Astc10x10UnormBlock => Format::ASTC_10x10_UNORM_BLOCK,
-            Astc10x10SrgbBlock => Format::ASTC_10x10_SRGB_BLOCK,
-            Astc12x10UnormBlock => Format::ASTC_12x10_UNORM_BLOCK,
-            Astc12x10SrgbBlock => Format::ASTC_12x10_SRGB_BLOCK,
-            Astc12x12UnormBlock => Format::ASTC_12x12_UNORM_BLOCK,
-            Astc12x12SrgbBlock => Format::ASTC_12x12_SRGB_BLOCK,
-        }
-    }
-}
-
-impl From<image::Format> for vk::Format {
-    fn from(f: image::Format) -> Self {
-        use self::vk::Format::*;
-        use image::Format;
-        match f {
-            Format::UNDEFINED => Undefined,
-            Format::R4G4_UNORM_PACK8 => R4g4UnormPack8,
-            Format::R4G4B4A4_UNORM_PACK16 => R4g4b4a4UnormPack16,
-            Format::B4G4R4A4_UNORM_PACK16 => B4g4r4a4UnormPack16,
-            Format::R5G6B5_UNORM_PACK16 => R5g6b5UnormPack16,
-            Format::B5G6R5_UNORM_PACK16 => B5g6r5UnormPack16,
-            Format::R5G5B5A1_UNORM_PACK16 => R5g5b5a1UnormPack16,
-            Format::B5G5R5A1_UNORM_PACK16 => B5g5r5a1UnormPack16,
-            Format::A1R5G5B5_UNORM_PACK16 => A1r5g5b5UnormPack16,
-            Format::R8_UNORM => R8Unorm,
-            Format::R8_SNORM => R8Snorm,
-            Format::R8_USCALED => R8Uscaled,
-            Format::R8_SSCALED => R8Sscaled,
-            Format::R8_UINT => R8Uint,
-            Format::R8_SINT => R8Sint,
-            Format::R8_SRGB => R8Srgb,
-            Format::R8G8_UNORM => R8g8Unorm,
-            Format::R8G8_SNORM => R8g8Snorm,
-            Format::R8G8_USCALED => R8g8Uscaled,
-            Format::R8G8_SSCALED => R8g8Sscaled,
-            Format::R8G8_UINT => R8g8Uint,
-            Format::R8G8_SINT => R8g8Sint,
-            Format::R8G8_SRGB => R8g8Srgb,
-            Format::R8G8B8_UNORM => R8g8b8Unorm,
-            Format::R8G8B8_SNORM => R8g8b8Snorm,
-            Format::R8G8B8_USCALED => R8g8b8Uscaled,
-            Format::R8G8B8_SSCALED => R8g8b8Sscaled,
-            Format::R8G8B8_UINT => R8g8b8Uint,
-            Format::R8G8B8_SINT => R8g8b8Sint,
-            Format::R8G8B8_SRGB => R8g8b8Srgb,
-            Format::B8G8R8_UNORM => B8g8r8Unorm,
-            Format::B8G8R8_SNORM => B8g8r8Snorm,
-            Format::B8G8R8_USCALED => B8g8r8Uscaled,
-            Format::B8G8R8_SSCALED => B8g8r8Sscaled,
-            Format::B8G8R8_UINT => B8g8r8Uint,
-            Format::B8G8R8_SINT => B8g8r8Sint,
-            Format::B8G8R8_SRGB => B8g8r8Srgb,
-            Format::R8G8B8A8_UNORM => R8g8b8a8Unorm,
-            Format::R8G8B8A8_SNORM => R8g8b8a8Snorm,
-            Format::R8G8B8A8_USCALED => R8g8b8a8Uscaled,
-            Format::R8G8B8A8_SSCALED => R8g8b8a8Sscaled,
-            Format::R8G8B8A8_UINT => R8g8b8a8Uint,
-            Format::R8G8B8A8_SINT => R8g8b8a8Sint,
-            Format::R8G8B8A8_SRGB => R8g8b8a8Srgb,
-            Format::B8G8R8A8_UNORM => B8g8r8a8Unorm,
-            Format::B8G8R8A8_SNORM => B8g8r8a8Snorm,
-            Format::B8G8R8A8_USCALED => B8g8r8a8Uscaled,
-            Format::B8G8R8A8_SSCALED => B8g8r8a8Sscaled,
-            Format::B8G8R8A8_UINT => B8g8r8a8Uint,
-            Format::B8G8R8A8_SINT => B8g8r8a8Sint,
-            Format::B8G8R8A8_SRGB => B8g8r8a8Srgb,
-            Format::A8B8G8R8_UNORM_PACK32 => A8b8g8r8UnormPack32,
-            Format::A8B8G8R8_SNORM_PACK32 => A8b8g8r8SnormPack32,
-            Format::A8B8G8R8_USCALED_PACK32 => A8b8g8r8UscaledPack32,
-            Format::A8B8G8R8_SSCALED_PACK32 => A8b8g8r8SscaledPack32,
-            Format::A8B8G8R8_UINT_PACK32 => A8b8g8r8UintPack32,
-            Format::A8B8G8R8_SINT_PACK32 => A8b8g8r8SintPack32,
-            Format::A8B8G8R8_SRGB_PACK32 => A8b8g8r8SrgbPack32,
-            Format::A2R10G10B10_UNORM_PACK32 => A2r10g10b10UnormPack32,
-            Format::A2R10G10B10_SNORM_PACK32 => A2r10g10b10SnormPack32,
-            Format::A2R10G10B10_USCALED_PACK32 => A2r10g10b10UscaledPack32,
-            Format::A2R10G10B10_SSCALED_PACK32 => A2r10g10b10SscaledPack32,
-            Format::A2R10G10B10_UINT_PACK32 => A2r10g10b10UintPack32,
-            Format::A2R10G10B10_SINT_PACK32 => A2r10g10b10SintPack32,
-            Format::A2B10G10R10_UNORM_PACK32 => A2b10g10r10UnormPack32,
-            Format::A2B10G10R10_SNORM_PACK32 => A2b10g10r10SnormPack32,
-            Format::A2B10G10R10_USCALED_PACK32 => A2b10g10r10UscaledPack32,
-            Format::A2B10G10R10_SSCALED_PACK32 => A2b10g10r10SscaledPack32,
-            Format::A2B10G10R10_UINT_PACK32 => A2b10g10r10UintPack32,
-            Format::A2B10G10R10_SINT_PACK32 => A2b10g10r10SintPack32,
-            Format::R16_UNORM => R16Unorm,
-            Format::R16_SNORM => R16Snorm,
-            Format::R16_USCALED => R16Uscaled,
-            Format::R16_SSCALED => R16Sscaled,
-            Format::R16_UINT => R16Uint,
-            Format::R16_SINT => R16Sint,
-            Format::R16_SFLOAT => R16Sfloat,
-            Format::R16G16_UNORM => R16g16Unorm,
-            Format::R16G16_SNORM => R16g16Snorm,
-            Format::R16G16_USCALED => R16g16Uscaled,
-            Format::R16G16_SSCALED => R16g16Sscaled,
-            Format::R16G16_UINT => R16g16Uint,
-            Format::R16G16_SINT => R16g16Sint,
-            Format::R16G16_SFLOAT => R16g16Sfloat,
-            Format::R16G16B16_UNORM => R16g16b16Unorm,
-            Format::R16G16B16_SNORM => R16g16b16Snorm,
-            Format::R16G16B16_USCALED => R16g16b16Uscaled,
-            Format::R16G16B16_SSCALED => R16g16b16Sscaled,
-            Format::R16G16B16_UINT => R16g16b16Uint,
-            Format::R16G16B16_SINT => R16g16b16Sint,
-            Format::R16G16B16_SFLOAT => R16g16b16Sfloat,
-            Format::R16G16B16A16_UNORM => R16g16b16a16Unorm,
-            Format::R16G16B16A16_SNORM => R16g16b16a16Snorm,
-            Format::R16G16B16A16_USCALED => R16g16b16a16Uscaled,
-            Format::R16G16B16A16_SSCALED => R16g16b16a16Sscaled,
-            Format::R16G16B16A16_UINT => R16g16b16a16Uint,
-            Format::R16G16B16A16_SINT => R16g16b16a16Sint,
-            Format::R16G16B16A16_SFLOAT => R16g16b16a16Sfloat,
-            Format::R32_UINT => R32Uint,
-            Format::R32_SINT => R32Sint,
-            Format::R32_SFLOAT => R32Sfloat,
-            Format::R32G32_UINT => R32g32Uint,
-            Format::R32G32_SINT => R32g32Sint,
-            Format::R32G32_SFLOAT => R32g32Sfloat,
-            Format::R32G32B32_UINT => R32g32b32Uint,
-            Format::R32G32B32_SINT => R32g32b32Sint,
-            Format::R32G32B32_SFLOAT => R32g32b32Sfloat,
-            Format::R32G32B32A32_UINT => R32g32b32a32Uint,
-            Format::R32G32B32A32_SINT => R32g32b32a32Sint,
-            Format::R32G32B32A32_SFLOAT => R32g32b32a32Sfloat,
-            Format::R64_UINT => R64Uint,
-            Format::R64_SINT => R64Sint,
-            Format::R64_SFLOAT => R64Sfloat,
-            Format::R64G64_UINT => R64g64Uint,
-            Format::R64G64_SINT => R64g64Sint,
-            Format::R64G64_SFLOAT => R64g64Sfloat,
-            Format::R64G64B64_UINT => R64g64b64Uint,
-            Format::R64G64B64_SINT => R64g64b64Sint,
-            Format::R64G64B64_SFLOAT => R64g64b64Sfloat,
-            Format::R64G64B64A64_UINT => R64g64b64a64Uint,
-            Format::R64G64B64A64_SINT => R64g64b64a64Sint,
-            Format::R64G64B64A64_SFLOAT => R64g64b64a64Sfloat,
-            Format::B10G11R11_UFLOAT_PACK32 => B10g11r11UfloatPack32,
-            Format::E5B9G9R9_UFLOAT_PACK32 => E5b9g9r9UfloatPack32,
-            Format::D16_UNORM => D16Unorm,
-            Format::X8_D24_UNORM_PACK32 => X8D24UnormPack32,
-            Format::D32_SFLOAT => D32Sfloat,
-            Format::S8_UINT => S8Uint,
-            Format::D16_UNORM_S8_UINT => D16UnormS8Uint,
-            Format::D24_UNORM_S8_UINT => D24UnormS8Uint,
-            Format::D32_SFLOAT_S8_UINT => D32SfloatS8Uint,
-            Format::BC1_RGB_UNORM_BLOCK => Bc1RgbUnormBlock,
-            Format::BC1_RGB_SRGB_BLOCK => Bc1RgbSrgbBlock,
-            Format::BC1_RGBA_UNORM_BLOCK => Bc1RgbaUnormBlock,
-            Format::BC1_RGBA_SRGB_BLOCK => Bc1RgbaSrgbBlock,
-            Format::BC2_UNORM_BLOCK => Bc2UnormBlock,
-            Format::BC2_SRGB_BLOCK => Bc2SrgbBlock,
-            Format::BC3_UNORM_BLOCK => Bc3UnormBlock,
-            Format::BC3_SRGB_BLOCK => Bc3SrgbBlock,
-            Format::BC4_UNORM_BLOCK => Bc4UnormBlock,
-            Format::BC4_SNORM_BLOCK => Bc4SnormBlock,
-            Format::BC5_UNORM_BLOCK => Bc5UnormBlock,
-            Format::BC5_SNORM_BLOCK => Bc5SnormBlock,
-            Format::BC6H_UFLOAT_BLOCK => Bc6hUfloatBlock,
-            Format::BC6H_SFLOAT_BLOCK => Bc6hSfloatBlock,
-            Format::BC7_UNORM_BLOCK => Bc7UnormBlock,
-            Format::BC7_SRGB_BLOCK => Bc7SrgbBlock,
-            Format::ETC2_R8G8B8_UNORM_BLOCK => Etc2R8g8b8UnormBlock,
-            Format::ETC2_R8G8B8_SRGB_BLOCK => Etc2R8g8b8SrgbBlock,
-            Format::ETC2_R8G8B8A1_UNORM_BLOCK => Etc2R8g8b8a1UnormBlock,
-            Format::ETC2_R8G8B8A1_SRGB_BLOCK => Etc2R8g8b8a1SrgbBlock,
-            Format::ETC2_R8G8B8A8_UNORM_BLOCK => Etc2R8g8b8a8UnormBlock,
-            Format::ETC2_R8G8B8A8_SRGB_BLOCK => Etc2R8g8b8a8SrgbBlock,
-            Format::EAC_R11_UNORM_BLOCK => EacR11UnormBlock,
-            Format::EAC_R11_SNORM_BLOCK => EacR11SnormBlock,
-            Format::EAC_R11G11_UNORM_BLOCK => EacR11g11UnormBlock,
-            Format::EAC_R11G11_SNORM_BLOCK => EacR11g11SnormBlock,
-            Format::ASTC_4x4_UNORM_BLOCK => Astc4x4UnormBlock,
-            Format::ASTC_4x4_SRGB_BLOCK => Astc4x4SrgbBlock,
-            Format::ASTC_5x4_UNORM_BLOCK => Astc5x4UnormBlock,
-            Format::ASTC_5x4_SRGB_BLOCK => Astc5x4SrgbBlock,
-            Format::ASTC_5x5_UNORM_BLOCK => Astc5x5UnormBlock,
-            Format::ASTC_5x5_SRGB_BLOCK => Astc5x5SrgbBlock,
-            Format::ASTC_6x5_UNORM_BLOCK => Astc6x5UnormBlock,
-            Format::ASTC_6x5_SRGB_BLOCK => Astc6x5SrgbBlock,
-            Format::ASTC_6x6_UNORM_BLOCK => Astc6x6UnormBlock,
-            Format::ASTC_6x6_SRGB_BLOCK => Astc6x6SrgbBlock,
-            Format::ASTC_8x5_UNORM_BLOCK => Astc8x5UnormBlock,
-            Format::ASTC_8x5_SRGB_BLOCK => Astc8x5SrgbBlock,
-            Format::ASTC_8x6_UNORM_BLOCK => Astc8x6UnormBlock,
-            Format::ASTC_8x6_SRGB_BLOCK => Astc8x6SrgbBlock,
-            Format::ASTC_8x8_UNORM_BLOCK => Astc8x8UnormBlock,
-            Format::ASTC_8x8_SRGB_BLOCK => Astc8x8SrgbBlock,
-            Format::ASTC_10x5_UNORM_BLOCK => Astc10x5UnormBlock,
-            Format::ASTC_10x5_SRGB_BLOCK => Astc10x5SrgbBlock,
-            Format::ASTC_10x6_UNORM_BLOCK => Astc10x6UnormBlock,
-            Format::ASTC_10x6_SRGB_BLOCK => Astc10x6SrgbBlock,
-            Format::ASTC_10x8_UNORM_BLOCK => Astc10x8UnormBlock,
-            Format::ASTC_10x8_SRGB_BLOCK => Astc10x8SrgbBlock,
-            Format::ASTC_10x10_UNORM_BLOCK => Astc10x10UnormBlock,
-            Format::ASTC_10x10_SRGB_BLOCK => Astc10x10SrgbBlock,
-            Format::ASTC_12x10_UNORM_BLOCK => Astc12x10UnormBlock,
-            Format::ASTC_12x10_SRGB_BLOCK => Astc12x10SrgbBlock,
-            Format::ASTC_12x12_UNORM_BLOCK => Astc12x12UnormBlock,
-            Format::ASTC_12x12_SRGB_BLOCK => Astc12x12SrgbBlock,
-            _ => panic!("Format {:?} isn't supported by the hal backend", f),
-        }
-    }
-}
-
-impl<V> Device for ash::Device<V>
-where
-    V: FunctionPointers,
-    ash::Device<V>: DeviceV1_0,
-{
-    type Sampler = vk::Sampler;
-    type Buffer = vk::Buffer;
-    type UnboundBuffer = vk::Buffer;
-    type BufferView = vk::BufferView;
-    type Image = vk::Image;
-    type UnboundImage = vk::Image;
-    type ImageView = vk::ImageView;
-
-    fn create_buffer(
-        &self,
-        info: buffer::CreateInfo,
-    ) -> Result<Self::UnboundBuffer, memory::OutOfMemoryError> {
-        let info = vk::BufferCreateInfo {
-            s_type: vk::StructureType::BufferCreateInfo,
-            p_next: ptr::null(),
-            flags: vk::BufferCreateFlags::empty(),
-            size: info.size,
-            usage: vk::BufferUsageFlags::from_flags(info.usage.bits()).unwrap(),
-            sharing_mode: match info.sharing {
-                SharingMode::Exclusive => vk::SharingMode::Exclusive,
-            },
-            queue_family_index_count: 0,
-            p_queue_family_indices: ptr::null(),
-        };
-
-        unsafe { DeviceV1_0::create_buffer(self, &info, None) }.map_err(|e| {
-            use self::vk::types::Result;
-            match e {
-                Result::ErrorOutOfHostMemory => memory::OutOfMemoryError::OutOfHostMemory,
-                Result::ErrorOutOfDeviceMemory => memory::OutOfMemoryError::OutOfDeviceMemory,
-                e => panic!("Unexpected error: {:?}", e),
-            }
-        })
-    }
-
-    fn buffer_requirements(&self, buffer: &Self::UnboundBuffer) -> MemoryRequirements {
-        let req = DeviceV1_0::get_buffer_memory_requirements(self, *buffer);
-
-        MemoryRequirements {
-            size: req.size,
-            align: req.alignment,
-            mask: req.memory_type_bits,
-        }
-    }
-
-    unsafe fn bind_buffer(
-        &self,
-        buffer: Self::UnboundBuffer,
-        memory: &Self::Memory,
-        offset: u64,
-    ) -> Result<Self::Buffer, error::BindError> {
-        DeviceV1_0::bind_buffer_memory(self, buffer, *memory, offset).map_err(|e| match e {
-            vk::Result::ErrorOutOfHostMemory => {
-                error::BindError::OutOfMemoryError(memory::OutOfMemoryError::OutOfHostMemory)
-            }
-            vk::Result::ErrorOutOfDeviceMemory => {
-                error::BindError::OutOfMemoryError(memory::OutOfMemoryError::OutOfDeviceMemory)
-            }
-            _ => unreachable!(),
-        })?;
-        Ok(buffer)
-    }
-
-    unsafe fn destroy_buffer(&self, buffer: Self::Buffer) {
-        DeviceV1_0::destroy_buffer(self, buffer, None);
-    }
-
-    fn create_image(
-        &self,
-        info: image::CreateInfo,
-    ) -> Result<Self::UnboundImage, error::ImageCreationError> {
-        let info = vk::ImageCreateInfo {
-            s_type: vk::StructureType::ImageCreateInfo,
-            p_next: ptr::null(),
-            flags: vk::ImageCreateFlags::from_flags(info.flags.bits()).unwrap(),
-            image_type: match info.kind {
-                image::Kind::D1 => vk::ImageType::Type1d,
-                image::Kind::D2 => vk::ImageType::Type2d,
-                image::Kind::D3 => vk::ImageType::Type3d,
-            },
-            format: info.format.into(),
-            extent: vk::Extent3D {
-                width: info.extent.width,
-                height: info.extent.height,
-                depth: info.extent.depth,
-            },
-            mip_levels: info.mips,
-            array_layers: info.array,
-            samples: vk::SampleCountFlags::from_flags(info.samples.bits()).unwrap(),
-            tiling: match info.tiling {
-                image::ImageTiling::Optimal => vk::ImageTiling::Optimal,
-                image::ImageTiling::Linear => vk::ImageTiling::Linear,
-            },
-            usage: vk::ImageUsageFlags::from_flags(info.usage.bits()).unwrap(),
-            sharing_mode: match info.sharing {
-                SharingMode::Exclusive => vk::SharingMode::Exclusive,
-            },
-            queue_family_index_count: 0,
-            p_queue_family_indices: ptr::null(),
-            initial_layout: vk::ImageLayout::Undefined,
-        };
-
-        Ok(
-            unsafe { DeviceV1_0::create_image(self, &info, None) }.map_err(|e| match e {
-                vk::Result::ErrorOutOfHostMemory => error::ImageCreationError::OutOfMemoryError(
-                    memory::OutOfMemoryError::OutOfHostMemory,
-                ),
-                vk::Result::ErrorOutOfDeviceMemory => error::ImageCreationError::OutOfMemoryError(
-                    memory::OutOfMemoryError::OutOfDeviceMemory,
-                ),
-                _ => unreachable!(),
-            })?,
-        )
-    }
-
-    fn image_requirements(&self, image: &Self::UnboundImage) -> MemoryRequirements {
-        let req = DeviceV1_0::get_image_memory_requirements(self, *image);
-
-        MemoryRequirements {
-            size: req.size,
-            align: req.alignment,
-            mask: req.memory_type_bits,
-        }
-    }
-
-    unsafe fn bind_image(
-        &self,
-        image: Self::UnboundImage,
-        memory: &Self::Memory,
-        offset: u64,
-    ) -> Result<Self::Image, error::BindError> {
-        DeviceV1_0::bind_image_memory(self, image, *memory, offset).map_err(|e| match e {
-            vk::Result::ErrorOutOfHostMemory => {
-                error::BindError::OutOfMemoryError(memory::OutOfMemoryError::OutOfHostMemory)
-            }
-            vk::Result::ErrorOutOfDeviceMemory => {
-                error::BindError::OutOfMemoryError(memory::OutOfMemoryError::OutOfDeviceMemory)
-            }
-            _ => unreachable!(),
-        })?;
-
-        Ok(image)
-    }
-
-    unsafe fn destroy_image(&self, image: Self::Image) {
-        DeviceV1_0::destroy_image(self, image, None);
-    }
-}
diff --git a/resource/src/impls/hal.rs b/resource/src/impls/hal.rs
deleted file mode 100644
index a64bf011..00000000
--- a/resource/src/impls/hal.rs
+++ /dev/null
@@ -1,537 +0,0 @@
-use std::borrow::Borrow;
-use std::marker::PhantomData;
-
-use hal;
-use hal::Device as HalDevice;
-
-use buffer;
-use device::Device;
-use error;
-use image;
-use memory;
-use MemoryRequirements;
-
-impl From<hal::device::BindError> for error::BindError {
-    fn from(e: hal::device::BindError) -> Self {
-        match e {
-            hal::device::BindError::WrongMemory => error::BindError::WrongMemory,
-            hal::device::BindError::OutOfBounds => error::BindError::OutOfBounds,
-        }
-    }
-}
-
-impl From<hal::image::CreationError> for error::ImageCreationError {
-    fn from(e: hal::image::CreationError) -> Self {
-        use error::ImageCreationError;
-        use hal::image::CreationError as HalCreationError;
-        match e {
-            HalCreationError::Format(f) => ImageCreationError::UnsupportedFormat(f.into()),
-            HalCreationError::Kind => ImageCreationError::Kind,
-            HalCreationError::Samples(n) => {
-                ImageCreationError::Samples(image::SampleCountFlags::from_bits(n.into()).unwrap())
-            }
-            HalCreationError::Size(s) => ImageCreationError::UnsupportedSize(s),
-            HalCreationError::Data(_) => ImageCreationError::DataSizeMismatch,
-            HalCreationError::Usage(u) => ImageCreationError::UnsupportedUsage(
-                image::UsageFlags::from_bits(u.bits()).unwrap(),
-            ),
-        }
-    }
-}
-
-impl From<hal::format::Format> for image::Format {
-    fn from(f: hal::format::Format) -> Self {
-        use hal::format::Format::*;
-        use image::Format;
-        match f {
-            __NumFormats => panic!(),
-            Rg4Unorm => Format::R4G4_UNORM_PACK8,
-            Rgba4Unorm => Format::R4G4B4A4_UNORM_PACK16,
-            Bgra4Unorm => Format::B4G4R4A4_UNORM_PACK16,
-            R5g6b5Unorm => Format::R5G6B5_UNORM_PACK16,
-            B5g6r5Unorm => Format::B5G6R5_UNORM_PACK16,
-            R5g5b5a1Unorm => Format::R5G5B5A1_UNORM_PACK16,
-            B5g5r5a1Unorm => Format::B5G5R5A1_UNORM_PACK16,
-            A1r5g5b5Unorm => Format::A1R5G5B5_UNORM_PACK16,
-            R8Unorm => Format::R8_UNORM,
-            R8Inorm => Format::R8_SNORM,
-            R8Uscaled => Format::R8_USCALED,
-            R8Iscaled => Format::R8_SSCALED,
-            R8Uint => Format::R8_UINT,
-            R8Int => Format::R8_SINT,
-            R8Srgb => Format::R8_SRGB,
-            Rg8Unorm => Format::R8G8_UNORM,
-            Rg8Inorm => Format::R8G8_SNORM,
-            Rg8Uscaled => Format::R8G8_USCALED,
-            Rg8Iscaled => Format::R8G8_SSCALED,
-            Rg8Uint => Format::R8G8_UINT,
-            Rg8Int => Format::R8G8_SINT,
-            Rg8Srgb => Format::R8G8_SRGB,
-            Rgb8Unorm => Format::R8G8B8_UNORM,
-            Rgb8Inorm => Format::R8G8B8_SNORM,
-            Rgb8Uscaled => Format::R8G8B8_USCALED,
-            Rgb8Iscaled => Format::R8G8B8_SSCALED,
-            Rgb8Uint => Format::R8G8B8_UINT,
-            Rgb8Int => Format::R8G8B8_SINT,
-            Rgb8Srgb => Format::R8G8B8_SRGB,
-            Bgr8Unorm => Format::B8G8R8_UNORM,
-            Bgr8Inorm => Format::B8G8R8_SNORM,
-            Bgr8Uscaled => Format::B8G8R8_USCALED,
-            Bgr8Iscaled => Format::B8G8R8_SSCALED,
-            Bgr8Uint => Format::B8G8R8_UINT,
-            Bgr8Int => Format::B8G8R8_SINT,
-            Bgr8Srgb => Format::B8G8R8_SRGB,
-            Rgba8Unorm => Format::R8G8B8A8_UNORM,
-            Rgba8Inorm => Format::R8G8B8A8_SNORM,
-            Rgba8Uscaled => Format::R8G8B8A8_USCALED,
-            Rgba8Iscaled => Format::R8G8B8A8_SSCALED,
-            Rgba8Uint => Format::R8G8B8A8_UINT,
-            Rgba8Int => Format::R8G8B8A8_SINT,
-            Rgba8Srgb => Format::R8G8B8A8_SRGB,
-            Bgra8Unorm => Format::B8G8R8A8_UNORM,
-            Bgra8Inorm => Format::B8G8R8A8_SNORM,
-            Bgra8Uscaled => Format::B8G8R8A8_USCALED,
-            Bgra8Iscaled => Format::B8G8R8A8_SSCALED,
-            Bgra8Uint => Format::B8G8R8A8_UINT,
-            Bgra8Int => Format::B8G8R8A8_SINT,
-            Bgra8Srgb => Format::B8G8R8A8_SRGB,
-            Abgr8Unorm => Format::A8B8G8R8_UNORM_PACK32,
-            Abgr8Inorm => Format::A8B8G8R8_SNORM_PACK32,
-            Abgr8Uscaled => Format::A8B8G8R8_USCALED_PACK32,
-            Abgr8Iscaled => Format::A8B8G8R8_SSCALED_PACK32,
-            Abgr8Uint => Format::A8B8G8R8_UINT_PACK32,
-            Abgr8Int => Format::A8B8G8R8_SINT_PACK32,
-            Abgr8Srgb => Format::A8B8G8R8_SRGB_PACK32,
-            A2r10g10b10Unorm => Format::A2R10G10B10_UNORM_PACK32,
-            A2r10g10b10Inorm => Format::A2R10G10B10_SNORM_PACK32,
-            A2r10g10b10Uscaled => Format::A2R10G10B10_USCALED_PACK32,
-            A2r10g10b10Iscaled => Format::A2R10G10B10_SSCALED_PACK32,
-            A2r10g10b10Uint => Format::A2R10G10B10_UINT_PACK32,
-            A2r10g10b10Int => Format::A2R10G10B10_SINT_PACK32,
-            A2b10g10r10Unorm => Format::A2B10G10R10_UNORM_PACK32,
-            A2b10g10r10Inorm => Format::A2B10G10R10_SNORM_PACK32,
-            A2b10g10r10Uscaled => Format::A2B10G10R10_USCALED_PACK32,
-            A2b10g10r10Iscaled => Format::A2B10G10R10_SSCALED_PACK32,
-            A2b10g10r10Uint => Format::A2B10G10R10_UINT_PACK32,
-            A2b10g10r10Int => Format::A2B10G10R10_SINT_PACK32,
-            R16Unorm => Format::R16_UNORM,
-            R16Inorm => Format::R16_SNORM,
-            R16Uscaled => Format::R16_USCALED,
-            R16Iscaled => Format::R16_SSCALED,
-            R16Uint => Format::R16_UINT,
-            R16Int => Format::R16_SINT,
-            R16Float => Format::R16_SFLOAT,
-            Rg16Unorm => Format::R16G16_UNORM,
-            Rg16Inorm => Format::R16G16_SNORM,
-            Rg16Uscaled => Format::R16G16_USCALED,
-            Rg16Iscaled => Format::R16G16_SSCALED,
-            Rg16Uint => Format::R16G16_UINT,
-            Rg16Int => Format::R16G16_SINT,
-            Rg16Float => Format::R16G16_SFLOAT,
-            Rgb16Unorm => Format::R16G16B16_UNORM,
-            Rgb16Inorm => Format::R16G16B16_SNORM,
-            Rgb16Uscaled => Format::R16G16B16_USCALED,
-            Rgb16Iscaled => Format::R16G16B16_SSCALED,
-            Rgb16Uint => Format::R16G16B16_UINT,
-            Rgb16Int => Format::R16G16B16_SINT,
-            Rgb16Float => Format::R16G16B16_SFLOAT,
-            Rgba16Unorm => Format::R16G16B16A16_UNORM,
-            Rgba16Inorm => Format::R16G16B16A16_SNORM,
-            Rgba16Uscaled => Format::R16G16B16A16_USCALED,
-            Rgba16Iscaled => Format::R16G16B16A16_SSCALED,
-            Rgba16Uint => Format::R16G16B16A16_UINT,
-            Rgba16Int => Format::R16G16B16A16_SINT,
-            Rgba16Float => Format::R16G16B16A16_SFLOAT,
-            R32Uint => Format::R32_UINT,
-            R32Int => Format::R32_SINT,
-            R32Float => Format::R32_SFLOAT,
-            Rg32Uint => Format::R32G32_UINT,
-            Rg32Int => Format::R32G32_SINT,
-            Rg32Float => Format::R32G32_SFLOAT,
-            Rgb32Uint => Format::R32G32B32_UINT,
-            Rgb32Int => Format::R32G32B32_SINT,
-            Rgb32Float => Format::R32G32B32_SFLOAT,
-            Rgba32Uint => Format::R32G32B32A32_UINT,
-            Rgba32Int => Format::R32G32B32A32_SINT,
-            Rgba32Float => Format::R32G32B32A32_SFLOAT,
-            R64Uint => Format::R64_UINT,
-            R64Int => Format::R64_SINT,
-            R64Float => Format::R64_SFLOAT,
-            Rg64Uint => Format::R64G64_UINT,
-            Rg64Int => Format::R64G64_SINT,
-            Rg64Float => Format::R64G64_SFLOAT,
-            Rgb64Uint => Format::R64G64B64_UINT,
-            Rgb64Int => Format::R64G64B64_SINT,
-            Rgb64Float => Format::R64G64B64_SFLOAT,
-            Rgba64Uint => Format::R64G64B64A64_UINT,
-            Rgba64Int => Format::R64G64B64A64_SINT,
-            Rgba64Float => Format::R64G64B64A64_SFLOAT,
-            B10g11r11Ufloat => Format::B10G11R11_UFLOAT_PACK32,
-            E5b9g9r9Ufloat => Format::E5B9G9R9_UFLOAT_PACK32,
-            D16Unorm => Format::D16_UNORM,
-            X8D24Unorm => Format::X8_D24_UNORM_PACK32,
-            D32Float => Format::D32_SFLOAT,
-            S8Uint => Format::S8_UINT,
-            D16UnormS8Uint => Format::D16_UNORM_S8_UINT,
-            D24UnormS8Uint => Format::D24_UNORM_S8_UINT,
-            D32FloatS8Uint => Format::D32_SFLOAT_S8_UINT,
-            Bc1RgbUnorm => Format::BC1_RGB_UNORM_BLOCK,
-            Bc1RgbSrgb => Format::BC1_RGB_SRGB_BLOCK,
-            Bc1RgbaUnorm => Format::BC1_RGBA_UNORM_BLOCK,
-            Bc1RgbaSrgb => Format::BC1_RGBA_SRGB_BLOCK,
-            Bc2Unorm => Format::BC2_UNORM_BLOCK,
-            Bc2Srgb => Format::BC2_SRGB_BLOCK,
-            Bc3Unorm => Format::BC3_UNORM_BLOCK,
-            Bc3Srgb => Format::BC3_SRGB_BLOCK,
-            Bc4Unorm => Format::BC4_UNORM_BLOCK,
-            Bc4Inorm => Format::BC4_SNORM_BLOCK,
-            Bc5Unorm => Format::BC5_UNORM_BLOCK,
-            Bc5Inorm => Format::BC5_SNORM_BLOCK,
-            Bc6hUfloat => Format::BC6H_UFLOAT_BLOCK,
-            Bc6hFloat => Format::BC6H_SFLOAT_BLOCK,
-            Bc7Unorm => Format::BC7_UNORM_BLOCK,
-            Bc7Srgb => Format::BC7_SRGB_BLOCK,
-            Etc2R8g8b8Unorm => Format::ETC2_R8G8B8_UNORM_BLOCK,
-            Etc2R8g8b8Srgb => Format::ETC2_R8G8B8_SRGB_BLOCK,
-            Etc2R8g8b8a1Unorm => Format::ETC2_R8G8B8A1_UNORM_BLOCK,
-            Etc2R8g8b8a1Srgb => Format::ETC2_R8G8B8A1_SRGB_BLOCK,
-            Etc2R8g8b8a8Unorm => Format::ETC2_R8G8B8A8_UNORM_BLOCK,
-            Etc2R8g8b8a8Srgb => Format::ETC2_R8G8B8A8_SRGB_BLOCK,
-            EacR11Unorm => Format::EAC_R11_UNORM_BLOCK,
-            EacR11Inorm => Format::EAC_R11_SNORM_BLOCK,
-            EacR11g11Unorm => Format::EAC_R11G11_UNORM_BLOCK,
-            EacR11g11Inorm => Format::EAC_R11G11_SNORM_BLOCK,
-            Astc4x4Unorm => Format::ASTC_4x4_UNORM_BLOCK,
-            Astc4x4Srgb => Format::ASTC_4x4_SRGB_BLOCK,
-            Astc5x4Unorm => Format::ASTC_5x4_UNORM_BLOCK,
-            Astc5x4Srgb => Format::ASTC_5x4_SRGB_BLOCK,
-            Astc5x5Unorm => Format::ASTC_5x5_UNORM_BLOCK,
-            Astc5x5Srgb => Format::ASTC_5x5_SRGB_BLOCK,
-            Astc6x5Unorm => Format::ASTC_6x5_UNORM_BLOCK,
-            Astc6x5Srgb => Format::ASTC_6x5_SRGB_BLOCK,
-            Astc6x6Unorm => Format::ASTC_6x6_UNORM_BLOCK,
-            Astc6x6Srgb => Format::ASTC_6x6_SRGB_BLOCK,
-            Astc8x5Unorm => Format::ASTC_8x5_UNORM_BLOCK,
-            Astc8x5Srgb => Format::ASTC_8x5_SRGB_BLOCK,
-            Astc8x6Unorm => Format::ASTC_8x6_UNORM_BLOCK,
-            Astc8x6Srgb => Format::ASTC_8x6_SRGB_BLOCK,
-            Astc8x8Unorm => Format::ASTC_8x8_UNORM_BLOCK,
-            Astc8x8Srgb => Format::ASTC_8x8_SRGB_BLOCK,
-            Astc10x5Unorm => Format::ASTC_10x5_UNORM_BLOCK,
-            Astc10x5Srgb => Format::ASTC_10x5_SRGB_BLOCK,
-            Astc10x6Unorm => Format::ASTC_10x6_UNORM_BLOCK,
-            Astc10x6Srgb => Format::ASTC_10x6_SRGB_BLOCK,
-            Astc10x8Unorm => Format::ASTC_10x8_UNORM_BLOCK,
-            Astc10x8Srgb => Format::ASTC_10x8_SRGB_BLOCK,
-            Astc10x10Unorm => Format::ASTC_10x10_UNORM_BLOCK,
-            Astc10x10Srgb => Format::ASTC_10x10_SRGB_BLOCK,
-            Astc12x10Unorm => Format::ASTC_12x10_UNORM_BLOCK,
-            Astc12x10Srgb => Format::ASTC_12x10_SRGB_BLOCK,
-            Astc12x12Unorm => Format::ASTC_12x12_UNORM_BLOCK,
-            Astc12x12Srgb => Format::ASTC_12x12_SRGB_BLOCK,
-        }
-    }
-}
-
-impl From<image::Format> for hal::format::Format {
-    fn from(f: image::Format) -> Self {
-        use hal::format::Format::*;
-        use image::Format;
-        match f {
-            Format::UNDEFINED => panic!("Attempt to use undefined format"),
-            Format::R4G4_UNORM_PACK8 => Rg4Unorm,
-            Format::R4G4B4A4_UNORM_PACK16 => Rgba4Unorm,
-            Format::B4G4R4A4_UNORM_PACK16 => Bgra4Unorm,
-            Format::R5G6B5_UNORM_PACK16 => R5g6b5Unorm,
-            Format::B5G6R5_UNORM_PACK16 => B5g6r5Unorm,
-            Format::R5G5B5A1_UNORM_PACK16 => R5g5b5a1Unorm,
-            Format::B5G5R5A1_UNORM_PACK16 => B5g5r5a1Unorm,
-            Format::A1R5G5B5_UNORM_PACK16 => A1r5g5b5Unorm,
-            Format::R8_UNORM => R8Unorm,
-            Format::R8_SNORM => R8Inorm,
-            Format::R8_USCALED => R8Uscaled,
-            Format::R8_SSCALED => R8Iscaled,
-            Format::R8_UINT => R8Uint,
-            Format::R8_SINT => R8Int,
-            Format::R8_SRGB => R8Srgb,
-            Format::R8G8_UNORM => Rg8Unorm,
-            Format::R8G8_SNORM => Rg8Inorm,
-            Format::R8G8_USCALED => Rg8Uscaled,
-            Format::R8G8_SSCALED => Rg8Iscaled,
-            Format::R8G8_UINT => Rg8Uint,
-            Format::R8G8_SINT => Rg8Int,
-            Format::R8G8_SRGB => Rg8Srgb,
-            Format::R8G8B8_UNORM => Rgb8Unorm,
-            Format::R8G8B8_SNORM => Rgb8Inorm,
-            Format::R8G8B8_USCALED => Rgb8Uscaled,
-            Format::R8G8B8_SSCALED => Rgb8Iscaled,
-            Format::R8G8B8_UINT => Rgb8Uint,
-            Format::R8G8B8_SINT => Rgb8Int,
-            Format::R8G8B8_SRGB => Rgb8Srgb,
-            Format::B8G8R8_UNORM => Bgr8Unorm,
-            Format::B8G8R8_SNORM => Bgr8Inorm,
-            Format::B8G8R8_USCALED => Bgr8Uscaled,
-            Format::B8G8R8_SSCALED => Bgr8Iscaled,
-            Format::B8G8R8_UINT => Bgr8Uint,
-            Format::B8G8R8_SINT => Bgr8Int,
-            Format::B8G8R8_SRGB => Bgr8Srgb,
-            Format::R8G8B8A8_UNORM => Rgba8Unorm,
-            Format::R8G8B8A8_SNORM => Rgba8Inorm,
-            Format::R8G8B8A8_USCALED => Rgba8Uscaled,
-            Format::R8G8B8A8_SSCALED => Rgba8Iscaled,
-            Format::R8G8B8A8_UINT => Rgba8Uint,
-            Format::R8G8B8A8_SINT => Rgba8Int,
-            Format::R8G8B8A8_SRGB => Rgba8Srgb,
-            Format::B8G8R8A8_UNORM => Bgra8Unorm,
-            Format::B8G8R8A8_SNORM => Bgra8Inorm,
-            Format::B8G8R8A8_USCALED => Bgra8Uscaled,
-            Format::B8G8R8A8_SSCALED => Bgra8Iscaled,
-            Format::B8G8R8A8_UINT => Bgra8Uint,
-            Format::B8G8R8A8_SINT => Bgra8Int,
-            Format::B8G8R8A8_SRGB => Bgra8Srgb,
-            Format::A8B8G8R8_UNORM_PACK32 => Abgr8Unorm,
-            Format::A8B8G8R8_SNORM_PACK32 => Abgr8Inorm,
-            Format::A8B8G8R8_USCALED_PACK32 => Abgr8Uscaled,
-            Format::A8B8G8R8_SSCALED_PACK32 => Abgr8Iscaled,
-            Format::A8B8G8R8_UINT_PACK32 => Abgr8Uint,
-            Format::A8B8G8R8_SINT_PACK32 => Abgr8Int,
-            Format::A8B8G8R8_SRGB_PACK32 => Abgr8Srgb,
-            Format::A2R10G10B10_UNORM_PACK32 => A2r10g10b10Unorm,
-            Format::A2R10G10B10_SNORM_PACK32 => A2r10g10b10Inorm,
-            Format::A2R10G10B10_USCALED_PACK32 => A2r10g10b10Uscaled,
-            Format::A2R10G10B10_SSCALED_PACK32 => A2r10g10b10Iscaled,
-            Format::A2R10G10B10_UINT_PACK32 => A2r10g10b10Uint,
-            Format::A2R10G10B10_SINT_PACK32 => A2r10g10b10Int,
-            Format::A2B10G10R10_UNORM_PACK32 => A2b10g10r10Unorm,
-            Format::A2B10G10R10_SNORM_PACK32 => A2b10g10r10Inorm,
-            Format::A2B10G10R10_USCALED_PACK32 => A2b10g10r10Uscaled,
-            Format::A2B10G10R10_SSCALED_PACK32 => A2b10g10r10Iscaled,
-            Format::A2B10G10R10_UINT_PACK32 => A2b10g10r10Uint,
-            Format::A2B10G10R10_SINT_PACK32 => A2b10g10r10Int,
-            Format::R16_UNORM => R16Unorm,
-            Format::R16_SNORM => R16Inorm,
-            Format::R16_USCALED => R16Uscaled,
-            Format::R16_SSCALED => R16Iscaled,
-            Format::R16_UINT => R16Uint,
-            Format::R16_SINT => R16Int,
-            Format::R16_SFLOAT => R16Float,
-            Format::R16G16_UNORM => Rg16Unorm,
-            Format::R16G16_SNORM => Rg16Inorm,
-            Format::R16G16_USCALED => Rg16Uscaled,
-            Format::R16G16_SSCALED => Rg16Iscaled,
-            Format::R16G16_UINT => Rg16Uint,
-            Format::R16G16_SINT => Rg16Int,
-            Format::R16G16_SFLOAT => Rg16Float,
-            Format::R16G16B16_UNORM => Rgb16Unorm,
-            Format::R16G16B16_SNORM => Rgb16Inorm,
-            Format::R16G16B16_USCALED => Rgb16Uscaled,
-            Format::R16G16B16_SSCALED => Rgb16Iscaled,
-            Format::R16G16B16_UINT => Rgb16Uint,
-            Format::R16G16B16_SINT => Rgb16Int,
-            Format::R16G16B16_SFLOAT => Rgb16Float,
-            Format::R16G16B16A16_UNORM => Rgba16Unorm,
-            Format::R16G16B16A16_SNORM => Rgba16Inorm,
-            Format::R16G16B16A16_USCALED => Rgba16Uscaled,
-            Format::R16G16B16A16_SSCALED => Rgba16Iscaled,
-            Format::R16G16B16A16_UINT => Rgba16Uint,
-            Format::R16G16B16A16_SINT => Rgba16Int,
-            Format::R16G16B16A16_SFLOAT => Rgba16Float,
-            Format::R32_UINT => R32Uint,
-            Format::R32_SINT => R32Int,
-            Format::R32_SFLOAT => R32Float,
-            Format::R32G32_UINT => Rg32Uint,
-            Format::R32G32_SINT => Rg32Int,
-            Format::R32G32_SFLOAT => Rg32Float,
-            Format::R32G32B32_UINT => Rgb32Uint,
-            Format::R32G32B32_SINT => Rgb32Int,
-            Format::R32G32B32_SFLOAT => Rgb32Float,
-            Format::R32G32B32A32_UINT => Rgba32Uint,
-            Format::R32G32B32A32_SINT => Rgba32Int,
-            Format::R32G32B32A32_SFLOAT => Rgba32Float,
-            Format::R64_UINT => R64Uint,
-            Format::R64_SINT => R64Int,
-            Format::R64_SFLOAT => R64Float,
-            Format::R64G64_UINT => Rg64Uint,
-            Format::R64G64_SINT => Rg64Int,
-            Format::R64G64_SFLOAT => Rg64Float,
-            Format::R64G64B64_UINT => Rgb64Uint,
-            Format::R64G64B64_SINT => Rgb64Int,
-            Format::R64G64B64_SFLOAT => Rgb64Float,
-            Format::R64G64B64A64_UINT => Rgba64Uint,
-            Format::R64G64B64A64_SINT => Rgba64Int,
-            Format::R64G64B64A64_SFLOAT => Rgba64Float,
-            Format::B10G11R11_UFLOAT_PACK32 => B10g11r11Ufloat,
-            Format::E5B9G9R9_UFLOAT_PACK32 => E5b9g9r9Ufloat,
-            Format::D16_UNORM => D16Unorm,
-            Format::X8_D24_UNORM_PACK32 => X8D24Unorm,
-            Format::D32_SFLOAT => D32Float,
-            Format::S8_UINT => S8Uint,
-            Format::D16_UNORM_S8_UINT => D16UnormS8Uint,
-            Format::D24_UNORM_S8_UINT => D24UnormS8Uint,
-            Format::D32_SFLOAT_S8_UINT => D32FloatS8Uint,
-            Format::BC1_RGB_UNORM_BLOCK => Bc1RgbUnorm,
-            Format::BC1_RGB_SRGB_BLOCK => Bc1RgbSrgb,
-            Format::BC1_RGBA_UNORM_BLOCK => Bc1RgbaUnorm,
-            Format::BC1_RGBA_SRGB_BLOCK => Bc1RgbaSrgb,
-            Format::BC2_UNORM_BLOCK => Bc2Unorm,
-            Format::BC2_SRGB_BLOCK => Bc2Srgb,
-            Format::BC3_UNORM_BLOCK => Bc3Unorm,
-            Format::BC3_SRGB_BLOCK => Bc3Srgb,
-            Format::BC4_UNORM_BLOCK => Bc4Unorm,
-            Format::BC4_SNORM_BLOCK => Bc4Inorm,
-            Format::BC5_UNORM_BLOCK => Bc5Unorm,
-            Format::BC5_SNORM_BLOCK => Bc5Inorm,
-            Format::BC6H_UFLOAT_BLOCK => Bc6hUfloat,
-            Format::BC6H_SFLOAT_BLOCK => Bc6hFloat,
-            Format::BC7_UNORM_BLOCK => Bc7Unorm,
-            Format::BC7_SRGB_BLOCK => Bc7Srgb,
-            Format::ETC2_R8G8B8_UNORM_BLOCK => Etc2R8g8b8Unorm,
-            Format::ETC2_R8G8B8_SRGB_BLOCK => Etc2R8g8b8Srgb,
-            Format::ETC2_R8G8B8A1_UNORM_BLOCK => Etc2R8g8b8a1Unorm,
-            Format::ETC2_R8G8B8A1_SRGB_BLOCK => Etc2R8g8b8a1Srgb,
-            Format::ETC2_R8G8B8A8_UNORM_BLOCK => Etc2R8g8b8a8Unorm,
-            Format::ETC2_R8G8B8A8_SRGB_BLOCK => Etc2R8g8b8a8Srgb,
-            Format::EAC_R11_UNORM_BLOCK => EacR11Unorm,
-            Format::EAC_R11_SNORM_BLOCK => EacR11Inorm,
-            Format::EAC_R11G11_UNORM_BLOCK => EacR11g11Unorm,
-            Format::EAC_R11G11_SNORM_BLOCK => EacR11g11Inorm,
-            Format::ASTC_4x4_UNORM_BLOCK => Astc4x4Unorm,
-            Format::ASTC_4x4_SRGB_BLOCK => Astc4x4Srgb,
-            Format::ASTC_5x4_UNORM_BLOCK => Astc5x4Unorm,
-            Format::ASTC_5x4_SRGB_BLOCK => Astc5x4Srgb,
-            Format::ASTC_5x5_UNORM_BLOCK => Astc5x5Unorm,
-            Format::ASTC_5x5_SRGB_BLOCK => Astc5x5Srgb,
-            Format::ASTC_6x5_UNORM_BLOCK => Astc6x5Unorm,
-            Format::ASTC_6x5_SRGB_BLOCK => Astc6x5Srgb,
-            Format::ASTC_6x6_UNORM_BLOCK => Astc6x6Unorm,
-            Format::ASTC_6x6_SRGB_BLOCK => Astc6x6Srgb,
-            Format::ASTC_8x5_UNORM_BLOCK => Astc8x5Unorm,
-            Format::ASTC_8x5_SRGB_BLOCK => Astc8x5Srgb,
-            Format::ASTC_8x6_UNORM_BLOCK => Astc8x6Unorm,
-            Format::ASTC_8x6_SRGB_BLOCK => Astc8x6Srgb,
-            Format::ASTC_8x8_UNORM_BLOCK => Astc8x8Unorm,
-            Format::ASTC_8x8_SRGB_BLOCK => Astc8x8Srgb,
-            Format::ASTC_10x5_UNORM_BLOCK => Astc10x5Unorm,
-            Format::ASTC_10x5_SRGB_BLOCK => Astc10x5Srgb,
-            Format::ASTC_10x6_UNORM_BLOCK => Astc10x6Unorm,
-            Format::ASTC_10x6_SRGB_BLOCK => Astc10x6Srgb,
-            Format::ASTC_10x8_UNORM_BLOCK => Astc10x8Unorm,
-            Format::ASTC_10x8_SRGB_BLOCK => Astc10x8Srgb,
-            Format::ASTC_10x10_UNORM_BLOCK => Astc10x10Unorm,
-            Format::ASTC_10x10_SRGB_BLOCK => Astc10x10Srgb,
-            Format::ASTC_12x10_UNORM_BLOCK => Astc12x10Unorm,
-            Format::ASTC_12x10_SRGB_BLOCK => Astc12x10Srgb,
-            Format::ASTC_12x12_UNORM_BLOCK => Astc12x12Unorm,
-            Format::ASTC_12x12_SRGB_BLOCK => Astc12x12Srgb,
-            _ => panic!("Format {:?} isn't supported by the hal backend", f),
-        }
-    }
-}
-
-impl<D, B> Device for (D, PhantomData<B>)
-where
-    B: hal::Backend,
-    D: Borrow<B::Device>,
-{
-    type Sampler = B::Sampler;
-    type Buffer = B::Buffer;
-    type UnboundBuffer = B::UnboundBuffer;
-    type BufferView = B::BufferView;
-    type Image = B::Image;
-    type UnboundImage = B::UnboundImage;
-    type ImageView = B::ImageView;
-
-    fn create_buffer(
-        &self,
-        info: buffer::CreateInfo,
-    ) -> Result<Self::UnboundBuffer, memory::OutOfMemoryError> {
-        let usage = hal::buffer::Usage::from_bits(info.usage.bits()).unwrap();
-        self.0
-            .borrow()
-            .create_buffer(info.size, usage)
-            .map_err(|e| {
-                use hal::buffer::CreationError;
-                match e {
-                    CreationError::OutOfHostMemory => memory::OutOfMemoryError::OutOfHostMemory,
-                    CreationError::OutOfDeviceMemory => memory::OutOfMemoryError::OutOfDeviceMemory,
-                    CreationError::UnsupportedUsage { .. } => {
-                        panic!("Backend doesn't support this usage")
-                    }
-                }
-            })
-    }
-
-    fn buffer_requirements(&self, buffer: &Self::UnboundBuffer) -> MemoryRequirements {
-        let req = self.0.borrow().get_buffer_requirements(buffer);
-        MemoryRequirements {
-            size: req.size,
-            align: req.alignment,
-            mask: req.type_mask as u32,
-        }
-    }
-
-    unsafe fn bind_buffer(
-        &self,
-        buffer: Self::UnboundBuffer,
-        memory: &Self::Memory,
-        offset: u64,
-    ) -> Result<Self::Buffer, error::BindError> {
-        Ok(self.0.borrow().bind_buffer_memory(memory, offset, buffer)?)
-    }
-
-    unsafe fn destroy_buffer(&self, buffer: Self::Buffer) {
-        self.0.borrow().destroy_buffer(buffer);
-    }
-
-    fn create_image(
-        &self,
-        info: image::CreateInfo,
-    ) -> Result<Self::UnboundImage, error::ImageCreationError> {
-        let kind = match info.kind {
-            image::Kind::D1 => hal::image::Kind::D1(info.extent.width, info.array as u16),
-            image::Kind::D2 => hal::image::Kind::D2(
-                info.extent.width,
-                info.extent.height,
-                info.array as u16,
-                info.samples.bits() as u8,
-            ),
-            image::Kind::D3 => {
-                hal::image::Kind::D3(info.extent.width, info.extent.height, info.extent.depth)
-            }
-        };
-        let format = info.format.into();
-        let tiling = match info.tiling {
-            image::ImageTiling::Optimal => hal::image::Tiling::Optimal,
-            image::ImageTiling::Linear => hal::image::Tiling::Linear,
-        };
-        let usage = hal::image::Usage::from_bits(info.usage.bits()).unwrap();
-        let view_caps = hal::image::ViewCapabilities::from_bits(info.flags.bits()).unwrap();
-
-        Ok(self
-            .0
-            .borrow()
-            .create_image(kind, info.mips as u8, format, tiling, usage, view_caps)?)
-    }
-
-    fn image_requirements(&self, image: &Self::UnboundImage) -> MemoryRequirements {
-        let req = self.0.borrow().get_image_requirements(image);
-        MemoryRequirements {
-            size: req.size,
-            align: req.alignment,
-            mask: req.type_mask as u32,
-        }
-    }
-
-    unsafe fn bind_image(
-        &self,
-        image: Self::UnboundImage,
-        memory: &Self::Memory,
-        offset: u64,
-    ) -> Result<Self::Image, error::BindError> {
-        Ok(self.0.borrow().bind_image_memory(memory, offset, image)?)
-    }
-
-    unsafe fn destroy_image(&self, image: Self::Image) {
-        self.0.borrow().destroy_image(image);
-    }
-}
diff --git a/resource/src/impls/mod.rs b/resource/src/impls/mod.rs
deleted file mode 100644
index 0a3cc9cc..00000000
--- a/resource/src/impls/mod.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-#[cfg(feature = "hal")]
-mod hal;
-
-#[cfg(feature = "ash")]
-mod ash;
diff --git a/resource/src/lib.rs b/resource/src/lib.rs
index f62c5234..a2505d10 100644
--- a/resource/src/lib.rs
+++ b/resource/src/lib.rs
@@ -18,62 +18,19 @@
 #![warn(rust_2018_compatibility)]
 #![warn(rust_2018_idioms)]
 
-#[macro_use]
-extern crate failure;
-
-#[macro_use]
-extern crate bitflags;
-
-#[macro_use]
-extern crate derivative;
-
+extern crate ash;
 extern crate crossbeam_channel;
+#[macro_use]
+extern crate log;
 extern crate relevant;
 extern crate rendy_memory as memory;
 
-#[cfg(feature = "hal")]
-extern crate gfx_hal as hal;
-
-#[cfg(feature = "ash")]
-extern crate ash;
-
-mod device;
-mod error;
 mod escape;
-mod impls;
 mod resources;
 
 pub mod buffer;
 pub mod image;
 
-pub use device::Device;
-pub use error::{ImageCreationError, ResourceError};
+pub use buffer::Buffer;
+pub use image::Image;
 pub use resources::Resources;
-
-/// Sharing mode.
-/// Resources created with sharing mode `Exclusive`
-/// can be accessed only from queues of single family that owns resource.
-/// Resources created with sharing mode `Concurrent` can be accessed by queues
-/// from specified families.
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub enum SharingMode {
-    /// Sharing mode that denies sharing.
-    /// Resource created with this sharing mode can be accessed
-    /// only by queues of single family.
-    /// This generally results in faster access than concurrent sharing mode which is not implemented yet.
-    /// Ownership transfer is required in order to access resource by the queue from different family.Resources
-    /// See Vulkan docs for more detail:
-    /// <https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#synchronization-queue-transfers>
-    Exclusive,
-}
-
-/// Memory requirements for the resource.
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub struct MemoryRequirements {
-    /// Size of memory range required by the resource.
-    pub size: u64,
-    /// Minimal alignment required by the resource.
-    pub align: u64,
-    /// Memory type mask with bits set for memory types that support the resource.
-    pub mask: u32,
-}
diff --git a/resource/src/resources.rs b/resource/src/resources.rs
index 75e0b0f6..e9202b30 100644
--- a/resource/src/resources.rs
+++ b/resource/src/resources.rs
@@ -1,58 +1,51 @@
 use std::cmp::max;
-use std::default::Default;
 
-use memory::{Block, Heaps, MemoryError, Usage as MemoryUsage};
+use ash::{version::DeviceV1_0, vk};
+use memory::{Block, Heaps, MemoryError, MemoryUsage};
 use relevant::Relevant;
 
 use buffer;
-use device::Device;
-use error::ResourceError;
-use escape::{Escape, Terminal};
+use escape::Terminal;
 use image;
 
 /// Resource manager.
 /// It can be used to create and destroy resources such as buffers and images.
-#[derive(Debug, Derivative)]
-#[derivative(Default(bound = ""))]
-pub struct Resources<M, B, I> {
-    buffers: Terminal<buffer::Inner<M, B>>,
-    images: Terminal<image::Inner<M, I>>,
+#[derive(Debug, Default)]
+pub struct Resources {
+    buffers: Terminal<buffer::Inner>,
+    images: Terminal<image::Inner>,
 }
 
-impl<M: 'static, B: 'static, I: 'static> Resources<M, B, I> {
-    /// Create new Resource
+impl Resources {
+    /// Create a new `Resources` instance.
     pub fn new() -> Self {
         Self::default()
     }
 
     /// Create a buffer and bind it to memory that supports the intended usage.
-    pub fn create_buffer<D, U>(
+    pub fn create_buffer(
         &mut self,
-        device: &D,
-        heaps: &mut Heaps<M>,
-        info: buffer::CreateInfo,
+        device: &impl DeviceV1_0,
+        heaps: &mut Heaps,
+        info: vk::BufferCreateInfo,
         align: u64,
-        memory_usage: U,
-    ) -> Result<buffer::Buffer<M, B>, MemoryError>
-    where
-        D: Device<Memory = M, Buffer = B>,
-        U: MemoryUsage,
-    {
-        let ubuf = device.create_buffer(info)?;
-        let reqs = device.buffer_requirements(&ubuf);
+        memory_usage: impl MemoryUsage,
+    ) -> Result<buffer::Buffer, MemoryError> {
+        let buf = unsafe { device.create_buffer(&info, None)? };
+        let reqs = unsafe { device.get_buffer_memory_requirements(buf) };
         let block = heaps.allocate(
             device,
-            reqs.mask,
+            reqs.memory_type_bits,
             memory_usage,
             reqs.size,
-            max(reqs.align, align),
+            max(reqs.alignment, align),
         )?;
 
-        let buf = unsafe {
+        unsafe {
             device
-                .bind_buffer(ubuf, block.memory(), block.range().start)
+                .bind_buffer_memory(buf, block.memory(), block.range().start)
                 .unwrap()
-        };
+        }
 
         Ok(buffer::Buffer {
             inner: self.buffers.escape(buffer::Inner {
@@ -66,45 +59,49 @@ impl<M: 'static, B: 'static, I: 'static> Resources<M, B, I> {
 
     /// Destroy buffer.
     /// The buffer can be dropped instead, but this method reduces overhead.
-    pub unsafe fn destroy_buffer<D>(buffer: buffer::Buffer<M, B>, device: &D, heaps: &mut Heaps<M>)
-    where
-        D: Device<Memory = M, Buffer = B>,
-    {
-        Self::destroy_buffer_inner(Escape::into_inner(buffer.inner), device, heaps)
+    pub fn destroy_buffer(_buffer: buffer::Buffer, _device: &impl DeviceV1_0, _heaps: &mut Heaps) {
+        unimplemented!()
     }
 
-    unsafe fn destroy_buffer_inner<D>(inner: buffer::Inner<M, B>, device: &D, heaps: &mut Heaps<M>)
-    where
-        D: Device<Memory = M, Buffer = B>,
-    {
-        device.destroy_buffer(inner.raw);
+    /// Drop inner buffer representation.
+    ///
+    /// # Safety
+    ///
+    /// Device must not attempt to use the buffer.
+    unsafe fn destroy_buffer_inner(
+        inner: buffer::Inner,
+        device: &impl DeviceV1_0,
+        heaps: &mut Heaps,
+    ) {
+        device.destroy_buffer(inner.raw, None);
         heaps.free(device, inner.block);
+        inner.relevant.dispose();
     }
 
     /// Create an image and bind it to memory that supports the intended usage.
-    pub fn create_image<D, U>(
+    pub fn create_image(
         &mut self,
-        device: &D,
-        heaps: &mut Heaps<M>,
-        info: image::CreateInfo,
+        device: &impl DeviceV1_0,
+        heaps: &mut Heaps,
+        info: vk::ImageCreateInfo,
         align: u64,
-        memory_usage: U,
-    ) -> Result<image::Image<M, I>, ResourceError>
-    where
-        D: Device<Memory = M, Image = I>,
-        U: MemoryUsage,
-    {
-        let uimg = device.create_image(info)?;
-        let reqs = device.image_requirements(&uimg);
+        memory_usage: impl MemoryUsage,
+    ) -> Result<image::Image, MemoryError> {
+        let img = unsafe { device.create_image(&info, None)? };
+        let reqs = unsafe { device.get_image_memory_requirements(img) };
         let block = heaps.allocate(
             device,
-            reqs.mask,
+            reqs.memory_type_bits,
             memory_usage,
             reqs.size,
-            max(reqs.align, align),
+            max(reqs.alignment, align),
         )?;
 
-        let img = unsafe { device.bind_image(uimg, block.memory(), block.range().start)? };
+        unsafe {
+            device
+                .bind_image_memory(img, block.memory(), block.range().start)
+                .unwrap()
+        }
 
         Ok(image::Image {
             inner: self.images.escape(image::Inner {
@@ -117,35 +114,44 @@ impl<M: 'static, B: 'static, I: 'static> Resources<M, B, I> {
     }
 
     /// Destroy image.
-    /// Buffer can be dropped but this method reduces overhead.
-    pub unsafe fn destroy_image<D>(image: image::Image<M, I>, device: &D, heaps: &mut Heaps<M>)
-    where
-        D: Device<Memory = M, Image = I>,
-    {
-        Self::destroy_image_inner(Escape::into_inner(image.inner), device, heaps)
+    /// The image can be dropped instead, but this method reduces overhead.
+    pub unsafe fn destroy_image(
+        _image: image::Image,
+        _device: &impl DeviceV1_0,
+        _heaps: &mut Heaps,
+    ) {
+        unimplemented!()
     }
 
-    unsafe fn destroy_image_inner<D>(inner: image::Inner<M, I>, device: &D, heaps: &mut Heaps<M>)
-    where
-        D: Device<Memory = M, Image = I>,
-    {
-        device.destroy_image(inner.raw);
+    /// Drop inner image representation.
+    ///
+    /// # Safety
+    ///
+    /// Device must not attempt to use the image.
+    unsafe fn destroy_image_inner(
+        inner: image::Inner,
+        device: &impl DeviceV1_0,
+        heaps: &mut Heaps,
+    ) {
+        device.destroy_image(inner.raw, None);
         heaps.free(device, inner.block);
+        inner.relevant.dispose();
     }
 
     /// Recycle dropped resources.
-    pub unsafe fn cleanup<D>(&mut self, device: &D, heaps: &mut Heaps<M>)
-    where
-        D: Device<Memory = M, Buffer = B, Image = I>,
-    {
+    ///
+    /// # Safety
+    ///
+    /// Device must not attempt to use previously dropped buffers and images.
+    pub unsafe fn cleanup(&mut self, device: &impl DeviceV1_0, heaps: &mut Heaps) {
+        // trace!("Cleanup buffers");
         for buffer in self.buffers.drain() {
-            device.destroy_buffer(buffer.raw);
-            heaps.free(device, buffer.block);
+            Self::destroy_buffer_inner(buffer, device, heaps);
         }
 
+        // trace!("Cleanup images");
         for image in self.images.drain() {
-            device.destroy_image(image.raw);
-            heaps.free(device, image.block);
+            Self::destroy_image_inner(image, device, heaps);
         }
     }
 }
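+
+// A minimal usage sketch; `device`, `heaps` and the `memory::Dynamic` usage
+// marker are assumed to be provided by the surrounding code and `rendy-memory`:
+//
+// let mut resources = Resources::new();
+// let buffer = resources.create_buffer(
+//     &device,
+//     &mut heaps,
+//     vk::BufferCreateInfo::builder()
+//         .size(1024)
+//         .usage(vk::BufferUsageFlags::VERTEX_BUFFER)
+//         .build(),
+//     1,
+//     memory::Dynamic,
+// )?;
+//
+// // Dropped resources are reclaimed later:
+// unsafe { resources.cleanup(&device, &mut heaps) };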
diff --git a/shader/Cargo.toml b/shader/Cargo.toml
new file mode 100644
index 00000000..85e363c3
--- /dev/null
+++ b/shader/Cargo.toml
@@ -0,0 +1,8 @@
+[package]
+name = "rendy-shader"
+version = "0.1.0"
+authors = ["omni-viral <scareaangel@gmail.com>"]
+
+[dependencies]
+shaderc = "0.3"
+rendy-shader-proc = { path = "proc" }
diff --git a/shader/proc/Cargo.toml b/shader/proc/Cargo.toml
new file mode 100644
index 00000000..0b052384
--- /dev/null
+++ b/shader/proc/Cargo.toml
@@ -0,0 +1,13 @@
+[package]
+name = "rendy-shader-proc"
+version = "0.1.0"
+authors = ["omni-viral <scareaangel@gmail.com>"]
+
+[lib]
+proc-macro = true
+
+[dependencies]
+ash = { path = "../../../ash/ash" }
+shaderc = "0.3"
+syn = "0.15"
+quote = "0.6"
diff --git a/shader/proc/src/lib.rs b/shader/proc/src/lib.rs
new file mode 100644
index 00000000..a2d06532
--- /dev/null
+++ b/shader/proc/src/lib.rs
@@ -0,0 +1,104 @@
+extern crate ash;
+extern crate proc_macro;
+extern crate quote;
+extern crate shaderc;
+extern crate syn;
+
+use proc_macro::TokenStream;
+use std::path::PathBuf;
+
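+/// Macro input parsed as four tokens: struct name, shader kind, source language
+/// and a file path literal.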
+struct Input {
+    name_ident: syn::Ident,
+    kind_ident: syn::Ident,
+    lang_ident: syn::Ident,
+    file_lit: syn::LitStr,
+}
+
+impl syn::parse::Parse for Input {
+    fn parse(stream: syn::parse::ParseStream) -> Result<Self, syn::parse::Error> {
+        let name_ident: syn::Ident = stream.parse()?;
+        let kind_ident: syn::Ident = stream.parse()?;
+        let lang_ident: syn::Ident = stream.parse()?;
+        let file_lit: syn::LitStr = stream.parse()?;
+
+        Ok(Input {
+            name_ident,
+            kind_ident,
+            lang_ident,
+            file_lit,
+        })
+    }
+}
+
+fn kind(ident: &str) -> shaderc::ShaderKind {
+    match ident {
+        "Vertex" => shaderc::ShaderKind::Vertex,
+        "Fragment" => shaderc::ShaderKind::Fragment,
+        "Compute" => shaderc::ShaderKind::Compute,
+        "Geometry" => shaderc::ShaderKind::Geometry,
+        "TessControl" => shaderc::ShaderKind::TessControl,
+        "TessEvaluation" => shaderc::ShaderKind::TessEvaluation,
+        "InferFromSource" => shaderc::ShaderKind::InferFromSource,
+        "DefaultVertex" => shaderc::ShaderKind::DefaultVertex,
+        "DefaultFragment" => shaderc::ShaderKind::DefaultFragment,
+        "DefaultCompute" => shaderc::ShaderKind::DefaultCompute,
+        "DefaultGeometry" => shaderc::ShaderKind::DefaultGeometry,
+        "DefaultTessControl" => shaderc::ShaderKind::DefaultTessControl,
+        "DefaultTessEvaluation" => shaderc::ShaderKind::DefaultTessEvaluation,
+        "SpirvAssembly" => shaderc::ShaderKind::SpirvAssembly,
+        _ => panic!("Unknown shader kind"),
+    }
+}
+
+fn lang(ident: &str) -> shaderc::SourceLanguage {
+    match ident {
+        "GLSL" => shaderc::SourceLanguage::GLSL,
+        "HLSL" => shaderc::SourceLanguage::HLSL,
+        _ => panic!("Unknown shader lang"),
+    }
+}
+
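+/// Read the shader source relative to `CARGO_MANIFEST_DIR`, compile it to SPIR-V
+/// with `shaderc` at build time, and emit a unit struct exposing `FILE`, `GLSL`
+/// and `SPIRV` constants.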
+#[proc_macro]
+pub fn compile_to_spirv_proc(input: TokenStream) -> TokenStream {
+    let Input {
+        name_ident,
+        kind_ident,
+        lang_ident,
+        file_lit,
+    } = syn::parse_macro_input!(input);
+
+    let file = file_lit.value();
+    let glsl = PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap()).join(&file);
+
+    let glsl_code = std::fs::read_to_string(&glsl).unwrap();
+    let glsl_code_lit = syn::LitStr::new(&glsl_code, file_lit.span());
+
+    let spirv = shaderc::Compiler::new()
+        .unwrap()
+        .compile_into_spirv(
+            &glsl_code,
+            kind(&kind_ident.to_string()),
+            &glsl.to_string_lossy(),
+            "main",
+            Some({
+                let mut ops = shaderc::CompileOptions::new().unwrap();
+                ops.set_target_env(shaderc::TargetEnv::Vulkan, ash::vk_make_version!(1, 0, 0));
+                ops.set_source_language(lang(&lang_ident.to_string()));
+                ops
+            }).as_ref(),
+        ).unwrap();
+
+    let spirv_code = spirv.as_binary_u8();
+    let spirv_code_lit = syn::LitByteStr::new(spirv_code, file_lit.span());
+
+    let tokens = quote::quote! {
+        struct #name_ident;
+
+        impl #name_ident {
+            const FILE: &'static str = #file;
+            const GLSL: &'static str = #glsl_code_lit;
+            const SPIRV: &'static [u8] = #spirv_code_lit;
+        }
+    };
+
+    tokens.into()
+}
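+
+// Invocation sketch (hypothetical file name): the macro expects four
+// space-separated tokens (name, kind, lang and a path literal relative to
+// `CARGO_MANIFEST_DIR`) and is normally reached through the `compile_to_spirv!`
+// wrapper in `rendy-shader`:
+//
+// compile_to_spirv_proc!(VertexShader Vertex GLSL "shader.vert");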
diff --git a/shader/src/lib.rs b/shader/src/lib.rs
new file mode 100644
index 00000000..311755f7
--- /dev/null
+++ b/shader/src/lib.rs
@@ -0,0 +1,11 @@
+extern crate rendy_shader_proc;
+pub use rendy_shader_proc::compile_to_spirv_proc;
+
+#[macro_export]
+macro_rules! compile_to_spirv {
+    ($(struct $name:ident { kind: $kind:ident, lang: $lang:ident, file: $file:tt, })*) => {
+        $(
+            $crate::compile_to_spirv_proc!($name $kind $lang $file);
+        )*
+    };
+}
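+
+// A usage sketch (hypothetical shader file): each `struct` item expands to a
+// unit struct whose `FILE`, `GLSL` and `SPIRV` constants are filled in at
+// compile time.
+//
+// compile_to_spirv!(
+//     struct VertexShader {
+//         kind: Vertex,
+//         lang: GLSL,
+//         file: "shader.vert",
+//     }
+// );
+//
+// // VertexShader::SPIRV can then be passed to shader module creation.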
diff --git a/wsi/Cargo.toml b/wsi/Cargo.toml
new file mode 100644
index 00000000..aa4fe22a
--- /dev/null
+++ b/wsi/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+name = "rendy-wsi"
+version = "0.1.0"
+authors = ["omni-viral <scareaangel@gmail.com>"]
+
+[dependencies]
+ash = { path = "../../ash/ash" }
+derivative = "1.0"
+failure = "0.1"
+log = "0.4"
+relevant = "0.2"
+smallvec = "0.6"
+winit = "0.17"
+
+[target.'cfg(target_os = "macos")'.dependencies]
+objc = "0.2"
+cocoa = "0.18"
diff --git a/wsi/src/lib.rs b/wsi/src/lib.rs
new file mode 100644
index 00000000..90bf0c4e
--- /dev/null
+++ b/wsi/src/lib.rs
@@ -0,0 +1,29 @@
+pub extern crate ash;
+#[macro_use]
+extern crate derivative;
+#[macro_use]
+extern crate failure;
+
+#[macro_use]
+extern crate log;
+extern crate relevant;
+#[macro_use]
+extern crate smallvec;
+extern crate winit;
+
+mod target;
+
+#[cfg(target_os = "macos")]
+#[macro_use]
+extern crate objc;
+
+#[cfg(target_os = "macos")]
+extern crate cocoa;
+
+#[cfg(target_os = "macos")]
+mod macos;
+
+#[cfg(target_os = "macos")]
+pub use macos::NativeSurface;
+
+pub use target::Target;
diff --git a/wsi/src/macos.rs b/wsi/src/macos.rs
new file mode 100644
index 00000000..bc28ba3a
--- /dev/null
+++ b/wsi/src/macos.rs
@@ -0,0 +1,70 @@
+use std::ffi::{c_void, CStr};
+
+use ash::{
+    extensions::MacOSSurface,
+    version::{EntryV1_0, InstanceV1_0},
+    vk,
+};
+
+use failure::Error;
+use objc::runtime::{Object, BOOL, NO, YES};
+use winit::{os::macos::WindowExt, Window};
+
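+/// Thin wrapper around ash's `MacOSSurface` extension loader.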
+pub struct NativeSurface(MacOSSurface);
+
+impl NativeSurface {
+    pub fn name() -> &'static CStr {
+        MacOSSurface::name()
+    }
+
+    pub fn new(
+        entry: &impl EntryV1_0,
+        instance: &impl InstanceV1_0,
+    ) -> Result<Self, Vec<&'static str>> {
+        MacOSSurface::new(entry, instance).map(NativeSurface)
+    }
+
+    pub fn create_surface(&self, window: &Window) -> Result<vk::SurfaceKHR, Error> {
+        let surface = unsafe {
+            let nsview = window.get_nsview();
+
+            if nsview.is_null() {
+                bail!("Window does not have a valid contentView");
+            }
+
+            put_metal_layer(nsview);
+
+            self.0.create_mac_os_surface_mvk(
+                &vk::MacOSSurfaceCreateInfoMVK::builder()
+                    .view(&*nsview)
+                    .build(),
+                None,
+            )
+        }?;
+
+        // trace!("Surface {:p} created", surface);
+        Ok(surface)
+    }
+}
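+
+// A minimal sketch; `entry`, `instance` and the winit `window` are assumed to
+// exist already:
+//
+// let native = NativeSurface::new(&entry, &instance).expect("VK_MVK_macos_surface not available");
+// let surface_khr = native.create_surface(&window)?;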
+
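+/// Ensure the view is backed by a `CAMetalLayer`, creating and attaching one if
+/// necessary.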
+unsafe fn put_metal_layer(nsview: *mut c_void) {
+    let class = class!(CAMetalLayer);
+    let view: cocoa::base::id = std::mem::transmute(nsview);
+
+    let is_layer: BOOL = msg_send![view, isKindOfClass: class];
+    if is_layer == YES {
+        // msg_send![view, displaySyncEnabled: NO];
+        return;
+    }
+
+    let layer: *mut Object = msg_send![view, layer];
+    if !layer.is_null() {
+        let is_layer: BOOL = msg_send![layer, isKindOfClass: class];
+        if is_layer == YES {
+            // msg_send![layer, displaySyncEnabled: NO];
+            return;
+        }
+    }
+
+    let layer: *mut Object = msg_send![class, new];
+    // msg_send![layer, displaySyncEnabled: NO];
+    let () = msg_send![view, setLayer: layer];
+    let _: *mut Object = msg_send![view, retain];
+}
diff --git a/wsi/src/target.rs b/wsi/src/target.rs
new file mode 100644
index 00000000..0dd13d2e
--- /dev/null
+++ b/wsi/src/target.rs
@@ -0,0 +1,182 @@
+use std::cmp::{max, min};
+
+use ash::{
+    extensions::{Surface, Swapchain},
+    version::{DeviceV1_0, EntryV1_0, InstanceV1_0},
+    vk,
+};
+
+use failure::Error;
+use relevant::Relevant;
+use smallvec::SmallVec;
+use winit::Window;
+
+use NativeSurface;
+
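+/// Rendering target tied to a window: owns the surface, the swapchain and its
+/// images.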
+pub struct Target {
+    fp: Swapchain,
+    window: Window,
+    surface: vk::SurfaceKHR,
+    swapchain: vk::SwapchainKHR,
+    images: Vec<vk::Image>,
+    format: vk::Format,
+    extent: vk::Extent2D,
+    relevant: Relevant,
+}
+
+impl Target {
+    pub fn new(
+        window: Window,
+        image_count: u32,
+        physical: vk::PhysicalDevice,
+        native_surface: &NativeSurface,
+        surface: &Surface,
+        swapchain: &Swapchain,
+    ) -> Result<Self, Error> {
+        let surface_khr = native_surface.create_surface(&window)?;
+
+        let present_modes = unsafe {
+            surface.get_physical_device_surface_present_modes_khr(physical, surface_khr)
+        }?;
+        info!("Present modes: {:#?}", present_modes);
+
+        let formats =
+            unsafe { surface.get_physical_device_surface_formats_khr(physical, surface_khr) }?;
+        info!("Formats: {:#?}", formats);
+
+        let capabilities =
+            unsafe { surface.get_physical_device_surface_capabilities_khr(physical, surface_khr) }?;
+        info!("Capabilities: {:#?}", capabilities);
+
+        let image_count = max(
+            min(image_count, capabilities.max_image_count),
+            capabilities.min_image_count,
+        );
+
+        let swapchain_khr = unsafe {
+            swapchain.create_swapchain_khr(
+                &vk::SwapchainCreateInfoKHR::builder()
+                    .surface(surface_khr)
+                    .min_image_count(image_count)
+                    .image_format(formats[0].format)
+                    .image_extent(capabilities.current_extent)
+                    .image_array_layers(1)
+                    .image_usage(capabilities.supported_usage_flags)
+                    .present_mode(*present_modes.first().unwrap())
+                    .build(),
+                None,
+            )
+        }?;
+
+        let images =
+            unsafe { swapchain.get_swapchain_images_khr(swapchain_khr) }.map_err(Error::from)?;
+
+        // trace!("Target created");
+
+        Ok(Target {
+            fp: swapchain.clone(),
+            window,
+            surface: surface_khr,
+            swapchain: swapchain_khr,
+            images,
+            format: formats[0].format,
+            extent: capabilities.current_extent,
+            relevant: Relevant,
+        })
+    }
+
+    /// Strip the target down to its internal parts.
+    ///
+    /// # Safety
+    ///
+    /// Surface and swapchain must be destroyed immediately.
+    pub unsafe fn dispose(self) -> (Window, vk::SurfaceKHR, vk::SwapchainKHR) {
+        self.relevant.dispose();
+        (self.window, self.surface, self.swapchain)
+    }
+
+    /// Get raw surface handle.
+    ///
+    /// # Safety
+    ///
+    /// Raw handle usage must not violate this type's valid usage.
+    pub unsafe fn surface(&self) -> vk::SurfaceKHR {
+        self.surface
+    }
+
+    /// Get raw swapchain handle.
+    ///
+    /// # Safety
+    ///
+    /// Raw handle usage must not violate this type's valid usage.
+    pub unsafe fn swapchain(&self) -> vk::SwapchainKHR {
+        self.swapchain
+    }
+
+    /// Get target current extent.
+    pub fn extent(&self) -> vk::Extent2D {
+        self.extent
+    }
+
+    /// Get target current format.
+    pub fn format(&self) -> vk::Format {
+        self.format
+    }
+
+    /// Get raw handles to the swapchain images.
+    pub fn images(&self) -> &[vk::Image] {
+        &self.images
+    }
+
+    /// Acquire next image.
+    pub fn next_image(&mut self, signal: vk::Semaphore) -> Result<NextImages<'_>, Error> {
+        let index = unsafe {
+            self.fp
+                .acquire_next_image_khr(self.swapchain, !0, signal, vk::Fence::null())
+                .map_err(Error::from)
+        }?;
+
+        Ok(NextImages {
+            fp: &self.fp,
+            swapchains: smallvec![self.swapchain],
+            indices: smallvec![index],
+        })
+    }
+}
+
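+/// Images acquired from one or more swapchains, ready to be presented with
+/// `queue_present`.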
+#[derive(Derivative)]
+#[derivative(Debug)]
+pub struct NextImages<'a> {
+    #[derivative(Debug = "ignore")]
+    fp: &'a Swapchain,
+    swapchains: SmallVec<[vk::SwapchainKHR; 4]>,
+    indices: SmallVec<[u32; 4]>,
+}
+
+impl<'a> NextImages<'a> {
+    /// Get indices.
+    pub fn indices(&self) -> &[u32] {
+        &self.indices
+    }
+
+    /// Present the acquired images on the given queue.
+    pub fn queue_present(self, queue: vk::Queue, wait: &[vk::Semaphore]) -> Result<(), Error> {
+        assert_eq!(self.swapchains.len(), self.indices.len());
+        unsafe {
+            // TODO: ???
+            let mut results = std::iter::repeat(ash::vk::Result::SUCCESS)
+                .take(self.swapchains.len())
+                .collect::<SmallVec<[_; 4]>>();
+            self.fp
+                .queue_present_khr(
+                    queue,
+                    &vk::PresentInfoKHR::builder()
+                        .wait_semaphores(wait)
+                        .swapchains(&self.swapchains)
+                        .image_indices(&self.indices)
+                        .results(&mut results)
+                        .build(),
+                ).map_err(Error::from)
+        }
+    }
+}
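+
+// A minimal frame-loop sketch; `window`, `physical`, `native_surface`,
+// `surface_ext: Surface`, `swapchain_ext: Swapchain`, `queue` and `semaphore`
+// are assumed to be created elsewhere:
+//
+// let mut target = Target::new(window, 3, physical, &native_surface, &surface_ext, &swapchain_ext)?;
+// let next = target.next_image(semaphore)?;
+// // ... record and submit command buffers for next.indices()[0] ...
+// next.queue_present(queue, &[semaphore])?;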