Skip to content

MetalPerformanceShaders tvOS xcode9 beta1

Vincent Dondain edited this page Jun 5, 2017 · 1 revision

#MetalPerformanceShaders.framework

diff -ruN /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSCNN.h /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSCNN.h
--- /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSCNN.h	2016-08-01 16:43:15.000000000 -0400
+++ /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSCNN.h	1969-12-31 19:00:00.000000000 -0500
@@ -1,1049 +0,0 @@
-/*!
- *  @header MPSCNN.h
- *  @framework MetalPerformanceShaders.framework
- *
- *  @copyright Copyright (c) 2015 Apple Inc. All rights reserved.
- *  @discussion Metal Performance Shaders CNN kernels
- */
-
-#ifndef MPS_MPSCNN_h
-#define MPS_MPSCNN_h
-
-#include <MetalPerformanceShaders/MPSKernel.h>
-#include <MetalPerformanceShaders/MPSImage.h>
-#include <simd/simd.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-#pragma mark MPSCNNKernel
-
-
-/*!
- *  @class      MPSCNNKernel
- *  @dependency This depends on Metal.framework
- *  @abstract   Describes a convolution neural network kernel.
- *  @discussion A MPSCNNKernel consumes one MPSImage and produces one MPSImage.
- *
- *              The region overwritten in the destination MPSImage is described 
- *              by the clipRect.  The top left corner of the region consumed (ignoring
- *              adjustments for filter size -- e.g. convolution filter size) is given
- *              by the offset. The size of the region consumed is a function of the 
- *              clipRect size and any subsampling caused by pixel strides at work,
- *              e.g. MPSCNNPooling.strideInPixelsX/Y.  Where the offset + clipRect
- *              would cause a {x,y} pixel address not in the image to be read, the
- *              edgeMode is used to determine what value to read there.
- *
- *              The Z/depth component of the offset, clipRect.origin and clipRect.size
- *              indexes which images to use. If the MPSImage contains only a single image
- *              then these should be offset.z = 0, clipRect.origin.z = 0
- *              and clipRect.size.depth = 1. If the MPSImage contains multiple images,
- *              clipRect.size.depth refers to number of images to process. Both source 
- *              and destination MPSImages must have at least this many images. offset.z 
- *              refers to starting source image index. Thus offset.z + clipRect.size.depth must
- *              be <= source.numberOfImages. Similarly, clipRect.origin.z refers to starting 
- *              image index in destination. So clipRect.origin.z + clipRect.size.depth must be
- *              <= destination.numberOfImage.
- *
- *              destinationFeatureChannelOffset property can be used to control where the MPSKernel will
- *              start writing in feature channel dimension. For example, if the destination image has
- *              64 channels, and MPSKernel outputs 32 channels, by default channels 0-31 of destination
- *              will be populated by MPSKernel. But if we want this MPSKernel to populate channel 32-63
- *              of the destination, we can set destinationFeatureChannelOffset = 32.
- *              A good example of this is concat (concatenation) operation in Tensor Flow. Suppose
- *              we have a src = w x h x Ni which goes through CNNConvolution_0 which produces
- *              output O0 = w x h x N0 and CNNConvolution_1 which produces output O1 = w x h x N1 followed
- *              by concatenation which produces O = w x h x (N0 + N1). We can achieve this by creating
- *              an MPSImage with dimensions O = w x h x (N0 + N1) and using this as destination of
- *              both convolutions as follows
- *                  CNNConvolution0: destinationFeatureChannelOffset = 0, this will output N0 channels starting at
- *                                   channel 0 of destination thus populating [0,N0-1] channels.
- *                  CNNConvolution1: destinationFeatureChannelOffset = N0, this will output N1 channels starting at
- *                                   channel N0 of destination thus populating [N0,N0+N1-1] channels.
- *
- *
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface MPSCNNKernel : MPSKernel
-
-
-/*! @property   offset
- *  @abstract   The position of the destination clip rectangle origin relative to the source buffer.
- *  @discussion The offset is defined to be the position of clipRect.origin in source coordinates.
- *              Default: {0,0,0}, indicating that the top left corners of the clipRect and source image align.
- *              offset.z is the index of starting source image in batch processing mode.
- *
- *              See Also: @ref subsubsection_mpsoffset
- */
-@property (readwrite, nonatomic) MPSOffset                offset;
-
-/*! @property   clipRect
- *  @abstract   An optional clip rectangle to use when writing data. Only the pixels in the rectangle will be overwritten.
- *  @discussion A MTLRegion that indicates which part of the destination to overwrite. If the clipRect does not lie
- *              completely within the destination image, the intersection between clip rectangle and destination bounds is
- *              used.   Default: MPSRectNoClip (MPSKernel::MPSRectNoClip) indicating the entire image.
- *              clipRect.origin.z is the index of starting destination image in batch processing mode. clipRect.size.depth
- *              is the number of images to process in batch processing mode.
- *
- *              See Also: @ref subsubsection_clipRect
- */
-@property (readwrite, nonatomic) MTLRegion               clipRect;
-
-
-/*! @property   destinationFeatureChannelOffset
- *  @abstract   The number of channels in the destination MPSImage to skip before writing output.
- *  @discussion This is the starting offset into the destination image in the feature channel dimension
- *              at which destination data is written.
- *              This allows an application to pass a subset of all the channels in MPSImage as output of MPSKernel.
- *              E.g. Suppose MPSImage has 24 channels and a MPSKernel outputs 8 channels. If
- *              we want channels 8 to 15 of this MPSImage to be used as output, we can set destinationFeatureChannelOffset = 8.
- *              Note that this offset applies independently to each image when the MPSImage
- *              is a container for multiple images and the MPSCNNKernel is processing multiple images (clipRect.size.depth > 1).
- *              The default value is 0 and any value specifed shall be a multiple of 4. If MPSKernel outputs N channels,
- *              destination image MUST have at least destinationFeatureChannelOffset + N channels. Using a destination
- *              image with insufficient number of feature channels result in an error.
- *              E.g. if the MPSCNNConvolution outputs 32 channels, and destination has 64 channels, then it is an error to set
- *              destinationFeatureChannelOffset > 32.
- */
-@property (readwrite, nonatomic) NSUInteger              destinationFeatureChannelOffset;
-
-/*! @property   edgeMode
- *  @abstract   The MPSImageEdgeMode to use when texture reads stray off the edge of an image
- *  @discussion Most MPSKernel objects can read off the edge of the source image. This can happen 
- *              because of a negative offset property, because the offset + clipRect.size is larger 
- *              than the source image or because the filter looks at neighboring pixels, such as a 
- *              Convolution filter.   Default:  MPSImageEdgeModeZero. 
- *
- *              See Also: @ref subsubsection_edgemode
- *              Note: For @ref MPSCNNPoolingAverage specifying edge mode @ref MPSImageEdgeModeClamp
- *                      is interpreted as a "shrink-to-edge" operation, which shrinks the effective
- *                      filtering window to remain within the source image borders.
- */
-@property (readwrite, nonatomic) MPSImageEdgeMode        edgeMode;
-
-
-/*!
- *  @abstract   Encode a MPSCNNKernel into a command Buffer.  The operation shall proceed out-of-place.
- *  @param      commandBuffer       A valid MTLCommandBuffer to receive the encoded filter
- *  @param      sourceImage         A valid MPSImage object containing the source image.
- *  @param      destinationImage    A valid MPSImage to be overwritten by result image. destinationImage may not alias sourceImage.
- */
--(void) encodeToCommandBuffer: (nonnull id <MTLCommandBuffer>) commandBuffer
-                  sourceImage: (MPSImage * __nonnull) sourceImage
-             destinationImage: (MPSImage * __nonnull) destinationImage
-                MPS_SWIFT_NAME( encode(commandBuffer:sourceImage:destinationImage:));
-
-/*!
- *  sourceRegionForDestinationSize: is used to determine which region of the
- *  sourceTexture will be read by encodeToCommandBuffer:sourceImage:destinationImage
- *  (and similar) when the filter runs. This information may be needed if the
- *  source image is broken into multiple textures.  The size of the full
- *  (untiled) destination image is provided. The region of the full (untiled)
- *  source image that will be read is returned. You can then piece together an
- *  appropriate texture containing that information for use in your tiled context.
- *
- *  The function will consult the MPSCNNKernel offset and clipRect parameters,
- *  to determine the full region read by the function. Other parameters such as
- *  sourceClipRect, kernelHeight and kernelWidth will be consulted as necessary.
- *  All properties should be set to intended values prior to calling
- *  sourceRegionForDestinationSize:.
- *
- *      Caution: This function operates using global image coordinates, but
- *      -encodeToCommandBuffer:... uses coordinates local to the source and
- *      destination image textures. Consequently, the offset and clipRect
- *      attached to this object will need to be updated using a global to
- *      local coordinate transform before -encodeToCommandBuffer:... is
- *      called.
- *
- *  @abstract   Determine the region of the source texture that will be read for a encode operation
- *  @param      destinationSize The size of the full virtual destination image.
- *  @return     The area in the virtual source image that will be read.
- */
--(MPSRegion) sourceRegionForDestinationSize: (MTLSize) destinationSize
-                  MPS_SWIFT_NAME( sourceRegion(destinationSize:));
-@end
-
-
-#pragma mark MPSCNNNeuron
-
-/*!
- *  @class      MPSCNNNeuron
- *  @dependency This depends on Metal.framework
- *  @discussion This filter applies a neuron activation function.
-                You must use one of the sub-classes of MPSCNNNeuron
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface  MPSCNNNeuron : MPSCNNKernel
-
-@end    /* MPSCNNNeuron */
-
-
-/*!
- *  @class      MPSCNNNeuronLinear
- *  @dependency This depends on Metal.framework
- *  @discussion Specifies the linear neuron filter. For each pixel, applies the following function: f(x) = a * x + b
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface  MPSCNNNeuronLinear : MPSCNNNeuron
-
-@property (readonly, atomic) float a;
-@property (readonly, atomic) float b;
-
-/*!
- *  @abstract  Initialize the linear neuron filter
- *  @param     device   The device the filter will run on
- *  @param     a        Filter property "a". See lass discussion.
- *  @param     b        Filter property "b". See class discussion.
- *  @return    A valid MPSCNNNeuronLinear object or nil, if failure.
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                                     a: (float) a
-                                     b: (float) b NS_DESIGNATED_INITIALIZER;
-
-/*
- * You must use initWithDevice:a:b instead
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device NS_UNAVAILABLE;
-
-@end    /* MPSCNNNeuronLinear */
-
-
-/*!
- *  @class MPSCNNNeuronReLU
- *  @dependency This depends on Metal.framework
- *  @discussion Specifies the ReLU neuron filter.
- *              For each pixel, applies the following function: f(x) = x, if x >= 0
- *                                                                   = a * x if x < 0
- *              This is called Leaky ReLU in literature. Some literature defines
- *              classical ReLU as max(0, x). If you want this behavior, simply pass a = 0
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface  MPSCNNNeuronReLU : MPSCNNNeuron
-
-@property (readonly, atomic) float a;
-
-/*!
- *  @abstract  Initialize the ReLU neuron filter
- *  @param     device           The device the filter will run on
- *  @param     a                Filter property "a". See class discussion.
- *  @return    A valid MPSCNNNeuronReLU object or nil, if failure.
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                                     a: (float) a NS_DESIGNATED_INITIALIZER;
-
-/*
- * Use initWithDevice:a: instead
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device NS_UNAVAILABLE;
-
-@end    /* MPSCNNNeuronReLU */
-
-
-/*!
- *  @class MPSCNNNeuronSigmoid
- *  @dependency This depends on Metal.framework
- *  @discussion Specifies the sigmoid neuron filter.  For each pixel, applies the following function: f(x) = 1 / (1 + e^-x)
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface  MPSCNNNeuronSigmoid : MPSCNNNeuron
-
-/*!
- *  @abstract  Initialize a neuron filter
- *  @param      device          The device the filter will run on
- *  @return     A valid MPSCNNNeuronSigmoid object or nil, if failure.
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device NS_DESIGNATED_INITIALIZER;
-
-@end    /* MPSCNNNeuronSigmoid */
-
-
-/*!
- *  @class MPSCNNNeuronTanH
- *  @dependency This depends on Metal.framework
- *  @discussion Specifies the hyperbolic tangent neuron filter.
- *              For each pixel, applies the following function: f(x) = a * tanh(b * x)
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface  MPSCNNNeuronTanH : MPSCNNNeuron
-
-@property (readonly, atomic) float a;
-@property (readonly, atomic) float b;
-
-/*!
- *  @abstract  Initialize the hyperbolic tangent neuron filter
- *  @param     device           The device the filter will run on
- *  @param     a                Filter property "a". See class discussion.
- *  @param     b                Filter property "b". See class discussion.
- *  @return    A valid MPSCNNNeuronTanH object or nil, if failure.
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                                     a: (float) a
-                                     b: (float) b NS_DESIGNATED_INITIALIZER;
-/*
- * Use initWithDevice:a:b: instead
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device NS_UNAVAILABLE;
-
-@end    /* MPSCNNNeuronTanH */
-
-/*!
- *  @class MPSCNNNeuronAbsolute
- *  @dependency This depends on Metal.framework
- *  @discussion Specifies the absolute neuron filter.  For each pixel, applies the following function: f(x) = | x |
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface  MPSCNNNeuronAbsolute : MPSCNNNeuron
-
-/*!
- *  @abstract  Initialize a neuron filter
- *  @param      device          The device the filter will run on
- *  @return     A valid MPSCNNNeuronAbsolute object or nil, if failure.
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device NS_DESIGNATED_INITIALIZER;
-
-@end    /* MPSCNNNeuronAbsolute */
-
-
-#pragma mark MPSCNNConvolution
-
-
-/*!
- *  @class      MPSCNNConvolutionDescriptor
- *  @dependency This depends on Metal.framework
- *  @discussion The MPSCNNConvolutionDescriptor specifies a convolution descriptor
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface MPSCNNConvolutionDescriptor : NSObject <NSCopying>
-
-/*! @property   kernelWidth
- *  @abstract   The width of the filter window.  The default value is 3.
- *              Any positive non-zero value is valid, including even values. 
- *              The position of the left edge of the filter window is given
- *              by offset.x - (kernelWidth>>1)
- */
-@property(readwrite, nonatomic) NSUInteger       kernelWidth;
-
-/*! @property   kernelHeight
- *  @abstract   The height of the filter window.  The default value is 3.
- *              Any positive non-zero value is valid, including even values.
- *              The position of the top edge of the filter window is given
- *              by offset.y - (kernelHeight>>1)
- */
-@property(readwrite, nonatomic) NSUInteger       kernelHeight;
-
-/*! @property   inputFeatureChannels
- *  @abstract   The number of feature channels per pixel in the input image.
- */
-@property(readwrite, nonatomic) NSUInteger       inputFeatureChannels;
-
-/*! @property   outputFeatureChannels
- *  @abstract   The number of feature channels per pixel in the output image.
- */
-@property(readwrite, nonatomic) NSUInteger       outputFeatureChannels;
-
-/*! @property   strideInPixelsX
- *  @abstract   The output stride (downsampling factor) in the x dimension. The default value is 1.
- */
-@property(readwrite, nonatomic) NSUInteger      strideInPixelsX;
-
-/*! @property   strideInPixelsY
- *  @abstract   The output stride (downsampling factor) in the y dimension. The default value is 1.
- */
-@property(readwrite, nonatomic) NSUInteger      strideInPixelsY;
-
-/*! @property   groups
- *  @abstract   Number of groups input and output channels are divided into. The default value is 1.
- *              Groups lets you reduce the parametrization. If groups is set to n, input is divided into n
- *              groups with inputFeatureChannels/n channels in each group. Similarly output is divided into
- *              n groups with outputFeatureChannels/n channels in each group. ith group in input is only 
- *              connected to ith group in output so number of weights (parameters) needed is reduced by factor
- *              of n. Both inputFeatureChannels and outputFeatureChannels must be divisible by n and number of
- *              channels in each group must be multiple of 4.
- */
-@property(readwrite, nonatomic) NSUInteger      groups;
-
-/*! @property   neuron
- *  @abstract   MPSCNNNeuron filter to be applied as part of convolution.
- *              Default is nil.
- */
-@property(readwrite, nonatomic, retain) const MPSCNNNeuron * __nullable  neuron;
-
-/*!
- *  @abstract   Creates a convolution descriptor with a neuron filter
- *  @param      kernelWidth             The width of the filter window.  Must be > 0. Large values will take a long time.
- *  @param      kernelHeight            The height of the filter window.   Must be > 0. Large values will take a long time.
- *  @param      inputFeatureChannels    The number of feature channels in the input image. Must be >= 1.
- *  @param      outputFeatureChannels   The number of feature channels in the output image. Must be >= 1.
- *  @param      neuronFilter            An optional neuron filter that can be applied to the output of convolution.
- *  @return     A valid MPSCNNConvolutionDescriptor object or nil, if failure.
- */
-+(nonnull instancetype) cnnConvolutionDescriptorWithKernelWidth: (NSUInteger) kernelWidth
-                                                   kernelHeight: (NSUInteger) kernelHeight
-                                           inputFeatureChannels: (NSUInteger) inputFeatureChannels
-                                          outputFeatureChannels: (NSUInteger) outputFeatureChannels
-                                                   neuronFilter: (const MPSCNNNeuron * __nullable) neuronFilter;
-
-@end    /* MPSCNNConvolutionDescriptor */
-
-/*! @enum       MPSCNNConvolutionFlags
- *  @abstract   Options used to control how kernel weights are stored and used in the CNN kernels.
- *              For future expandability.
- */
-#if defined(DOXYGEN)
-typedef enum MPSCNNConvolutionFlags
-#else
-typedef NS_ENUM(NSUInteger, MPSCNNConvolutionFlags)
-#endif
-{
-    /*! Use default options */
-    MPSCNNConvolutionFlagsNone      MPS_ENUM_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)  = 0,
-};
-
-/*!
- *  @class      MPSCNNConvolution
- *  @dependency This depends on Metal.framework
- *  @discussion The MPSCNNConvolution specifies a convolution.
- *              The MPSCNNConvolution convolves the input image with a set of filters, each producing one feature map in the output image.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface  MPSCNNConvolution : MPSCNNKernel
-
-/*! @property   kernelWidth
- *  @abstract   The width of the filter window.
- *              The position of the left edge of the filter window is given
- *              by offset.x - (kernelWidth>>1)
- */
-@property(readonly, nonatomic) NSUInteger       kernelWidth;
-
-/*! @property   kernelHeight
- *  @abstract   The height of the filter window.
- *              Any positive non-zero value is valid, including even values.
- *              The position of the top edge of the filter window is given
- *              by offset.y - (kernelHeight>>1)
- */
-@property(readonly, nonatomic) NSUInteger       kernelHeight;
-
-/*! @property   inputFeatureChannels
- *  @abstract   The number of feature channels per pixel in the input image.
- */
-@property(readonly, nonatomic) NSUInteger       inputFeatureChannels;
-
-/*! @property   outputFeatureChannels
- *  @abstract   The number of feature channels per pixel in the output image.
- */
-@property(readonly, nonatomic) NSUInteger       outputFeatureChannels;
-
-/*! @property   strideInPixelsX
- *  @abstract   The output stride (downsampling factor) in the x dimension. The default value is 1.
- */
-@property(readonly, nonatomic) NSUInteger      strideInPixelsX;
-
-/*! @property   strideInPixelsY
- *  @abstract   The output stride (downsampling factor) in the y dimension. The default value is 1.
- */
-@property(readonly, nonatomic) NSUInteger      strideInPixelsY;
-
-/*! @property   groups
- *  @abstract   Number of groups input and output channels are divided into.
- */
-@property(readonly, nonatomic) NSUInteger      groups;
-
-/*! @property   neuron
- *  @abstract   MPSCNNNeuron filter to be applied as part of convolution.
- *              Can be nil in wich case no neuron activation fuction is applied.
- */
-@property(readonly, nonatomic) const MPSCNNNeuron * __nullable  neuron;
-
-/*!
- *  @abstract   Initializes a convolution kernel
- *  @param      device                          The MTLDevice on which this MPSCNNConvolution filter will be used
- *  @param      convolutionDescriptor           A pointer to a MPSCNNConvolutionDescriptor.
- *  @param      kernelWeights                   A pointer to a weights array.  Each entry is a float value. The number of entries is =
- *                                              inputFeatureChannels * outputFeatureChannels * kernelHeight * kernelWidth
- *                                              The layout of filter weight is so that it can be reinterpreted as 4D tensor (array)
- *                                              weight[ outputChannels ][ kernelHeight ][ kernelWidth ][ inputChannels / groups ]
- *                                              Weights are converted to half float (fp16) internally for best performance.
- *  @param      biasTerms                       A pointer to bias terms to be applied to the convolution output.  Each entry is a float value.
- *                                              The number of entries is = numberOfOutputFeatureMaps
- *  @param      flags                           Currently unused. Pass MPSCNNConvolutionFlagsNone
- *
- *  @return     A valid MPSCNNConvolution object or nil, if failure.
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                 convolutionDescriptor: (const MPSCNNConvolutionDescriptor * __nonnull) convolutionDescriptor
-                         kernelWeights: (const float * __nonnull) kernelWeights
-                             biasTerms: (const float * __nullable) biasTerms
-                                 flags: (MPSCNNConvolutionFlags) flags        NS_DESIGNATED_INITIALIZER;
-
-/*
- * Use initWithDevice:convolutionDescriptor:kernelWeights:biasTerms instead
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device NS_UNAVAILABLE;
-
-@end    /* MPSCNNConvolution */
-    
-/*!
- *  @class      MPSCNNFullyConnected
- *  @dependency This depends on Metal.framework
- *  @discussion The MPSCNNFullyConnected specifies a fully connected convolution layer a.k.a. Inner product 
- *              layer. A fully connected CNN layer is one where every input channel is connected
- *              to every output channel. The kernel width is equal to width of source image
- *              and the kernel height is equal to the height of source image. Width and height of the output 
- *              is 1x1. Thus, it takes a srcW x srcH x Ni MPSCNNImage, convolves it with Weights[No][SrcW][srcH][Ni]
- *              and produces a 1 x 1 x No output. The following must be true:
- *@code
- *                         kernelWidth  == source.width
- *                         kernelHeight == source.height
- *                         clipRect.size.width == 1
- *                         clipRect.size.height == 1
- *@endcode
- *              One can think of a fully connected layer as a matrix multiplication that flattens an image into a vector of length
- *              srcW*srcH*Ni. The weights are arragned in a matrix of dimension No x (srcW*srcH*Ni) for product output vectors
- *              of length No. The strideInPixelsX, strideInPixelsY, and group must be 1. Offset is not applicable and is ignored. 
- *              Since clipRect is clamped to the destination image bounds, if the destination is 1x1, one doesn't need to set the
- *              clipRect.
- *
- *              Note that one can implement an inner product using MPSCNNConvolution by setting
- *@code
- *                     offset = (kernelWidth/2,kernelHeight/2)
- *                     clipRect.origin = (ox,oy), clipRect.size = (1,1)
- *                     strideX = strideY = group = 1
- *@endcode
- *              However, using the MPSCNNFullyConnected for this is better for performance as it lets us choose the most 
- *              performant method which may not be possible when using a general convolution. For example,
- *              we may internally use matrix multiplication or special reduction kernels for a specific platform.
-*/
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface  MPSCNNFullyConnected : MPSCNNConvolution
-
-/*!
- *  @abstract   Initializes a fully connected kernel.
- *
- *  @param      device                          The MTLDevice on which this MPSCNNFullyConnected filter will be used
- *  @param      fullyConnectedDescriptor        A pointer to a MPSCNNConvolutionDescriptor. strideInPixelsX, strideInPixelsY and group
- *                                              properties of fullyConnectedDescriptor must be set to 1 (default).
- *  @param      kernelWeights                   A pointer to a weights array.  Each entry is a float value. The number of entries is =
- *                                              inputFeatureChannels * outputFeatureChannels * kernelHeight * kernelWidth
- *                                              The layout of filter weight is so that it can be reinterpreted as 4D tensor (array)
- *                                              weight[ outputChannels ][ kernelHeight ][ kernelWidth ][ inputChannels / groups ]
- *                                              Weights are converted to half float (fp16) internally for best performance.
- *  @param      biasTerms                       A pointer to bias terms to be applied to the convolution output.  Each entry is a float value.
- *                                              The number of entries is = numberOfOutputFeatureMaps
- *  @param      flags                           Currently unused. Pass MPSCNNConvolutionFlagsNone
- *
- *  @return     A valid MPSCNNConvolution object or nil, if failure.
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                 convolutionDescriptor: (const MPSCNNConvolutionDescriptor * __nonnull) fullyConnectedDescriptor
-                         kernelWeights: (const float * __nonnull) kernelWeights
-                             biasTerms: (const float * __nullable) biasTerms
-                                 flags: (MPSCNNConvolutionFlags) flags  NS_DESIGNATED_INITIALIZER;
-
-/*
- * Use initWithDevice:convolutionDescriptor:kernelWeights:biasTerms instead
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device NS_UNAVAILABLE;
-
-/*!
- * NOTE:    The encodeToCommandBuffer API in MPSCNNKernel can be used to encode a inner product kernel to a MTLCommandBuffer.
- *          The source and destination must be MPSImage.
- */
-
-@end    /* MPSCNNFullyConnected */
-    
-#pragma mark MPSCNNPooling
-
-/*!
- *  @class      MPSCNNPooling
- *  @dependency This depends on Metal.framework
- *  @discussion Pooling is a form of non-linear sub-sampling. Pooling partitions the input image into a set of
- *              rectangles (overlapping or non-overlapping) and, for each such sub-region, outputs a value.
- *              The pooling operation is used in computer vision to reduce the dimensionality of intermediate representations.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface  MPSCNNPooling : MPSCNNKernel
-
-/*! @property   kernelWidth
- *  @abstract   The width of the filter window
- */
-@property(readonly, nonatomic) NSUInteger       kernelWidth;
-
-/*! @property   kernelHeight
- *  @abstract   The height of the filter window
- */
-@property(readonly, nonatomic) NSUInteger       kernelHeight;
-
-/*! @property   strideInPixelsX
- *  @abstract   The output stride (downsampling factor) in the x dimension.  The default value is 1.
- */
-
-@property(readonly, nonatomic) NSUInteger      strideInPixelsX;
-
-/*! @property   strideInPixelsY
- *  @abstract   The output stride (downsampling factor) in the y dimension.  The default value is 1.
- */
-@property(readonly, nonatomic) NSUInteger      strideInPixelsY;
-
-/*!
- *  @abstract  Initialize a pooling filter
- *  @param      device              The device the filter will run on
- *  @param      kernelWidth         The width of the kernel.  Can be an odd or even value.
- *  @param      kernelHeight        The height of the kernel.  Can be an odd or even value.
- *  @return     A valid MPSCNNPooling object or nil, if failure.
- *
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                           kernelWidth: (NSUInteger) kernelWidth
-                          kernelHeight: (NSUInteger) kernelHeight;
-
-/*!
- *  @abstract  Initialize a pooling filter
- *  @param      device              The device the filter will run on
- *  @param      kernelWidth         The width of the kernel.  Can be an odd or even value.
- *  @param      kernelHeight        The height of the kernel.  Can be an odd or even value.
- *  @param      strideInPixelsX     The output stride (downsampling factor) in the x dimension.
- *  @param      strideInPixelsY     The output stride (downsampling factor) in the y dimension.
- *  @return     A valid MPSCNNPooling object or nil, if failure.
- *
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                           kernelWidth: (NSUInteger) kernelWidth
-                          kernelHeight: (NSUInteger) kernelHeight
-                       strideInPixelsX: (NSUInteger) strideInPixelsX
-                       strideInPixelsY: (NSUInteger) strideInPixelsY NS_DESIGNATED_INITIALIZER;
-
-/*
- * Use initWithDevice:kernelWidth:kernelHeight:strideInPixelsX:strideInPixelsY: instead
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device NS_UNAVAILABLE;
-
-
-/*!
- *          Pooling window notes
- *          ====================
- *          The encodeToCommandBuffer API in MPSCNNKernel can be used to encode a MPSCNNPooling kernel
- *          to a MTLCommandBuffer. The exact location of the pooling window for each output value is determined
- *          as follows: The pooling window center for the first (top left) output pixel of the clipping
- *          rectangle is at spatial coordinates (offset.x, offset.y) in the input image. From this
- *          the top left corner of the pooling window is at
- *              (offset.x - floor(kernelWidth/2), offset.y - floor(kernelHeight/2)) and
- *          extends of course (kernelWidth, kernelHeight) pixels to the right and down direction, which means
- *          that the last pixel to be included into the pooling window is at:
- *              (offset.x + floor((kernelWidth-1)/2), offset.y + floor((kernelHeight-1)/2)),
- *          so that for even kernel sizes the pooling window is extends one pixel more into the left and up
- *          directions.
- *          The following pooling windows can be then easily deduced from the first one by simple shifting
- *          the source coordinates according to values of @ref strideInPixelsX and @ref strideInPixelsY.
- *          For example the pooling window center w(x,y) for the output value at coordinate (x,y) of the
- *          destination clip rectangle ((x,y) computed wrt. clipping rectangle origin) is at:
- *              w(x,y) = ( offset.x + strideInPixelsX * x , offset.y + strideInPixelsY * y ).
- *
- *          Quite often it is desirable to distribute the pooling windows as evenly as possible in the
- *          input image. As explained above, if offset is zero, then the center of the first pooling
- *          window is at the top left corner of the input image, which means that the left and top stripes
- *          of the pooling window are read from outside the input image boundaries (when filter size is
- *          larger than unity). Also it may mean that some values from the bottom and right stripes are
- *          not included at all in the pooling resulting in loss of valuable information.
- *
- *          A scheme used in some common libraries is to shift the source offset according to the following
- *          formula:
- *              offset.xy += { (int)ceil( ((L.xy - 1) % s.xy) / 2 ), for odd f.xy
- *                           { (int)floor( ((L.xy - 1) % s.xy) / 2 ) + 1, for even f.xy, where
- *          L is the size of the input image (or more accurately the size corresponding to the scaled cliprect
- *          in source coordinates, which commonly coincides with the source image itself),
- *          s.xy is (strideInPixelsX, strideInPixelsY) and f.xy is (kernelWidth, kernelHeight).
- *          This offset distributes the pooling window centers evenly in the effective source cliprect,
- *          when the output size is rounded up wrt. stride ( output size = ceil( input size / stride ) )
- *          and is commonly used in CNN libraries (for example TensorFlow uses this offset scheme
- *          in its maximum pooling implementation tf.nn.max_pool with 'SAME' - padding, for 'VALID'
- *          padding one can simply set offset.xy += floor(f.xy/2) to get the first pooling window
- *          inside the source image completely).
- *          For @ref MPSCNNPoolingMax the way the input image borders are handled (see @ref edgeMode)
- *          can become important: if there are negative values in the source image near the borders of the
- *          image and the pooling window crosses the borders, then using @ref MPSImageEdgeModeZero may
- *          cause the maximum pooling operation to override the negative input data values with zeros
- *          coming from outside the source image borders, resulting in large boundary effects. A simple
- *          way to avoid this is to use @ref MPSImageEdgeModeClamp for @ref edgeMode, which for
- *          @ref MPSCNNPoolingMax effectively causes all pooling windows to remain within the source image.
- *
- *          Below are a couple of examples that can be used to visualize the effects of different
- *          offset values for pooling. The illustrations show the pooling window placements inside a
- *          single feature channel of a source image. In the first examples we use strides that are
- *          larger than the pooling window sizes in order to clarify the placement of each
- *          individual pooling window.
- *@code
- *              Source image: width = 8, height = 5
- *              Destination cliprect: width = 3, height = 2
- *              o - source pixel center, one for each destination cliprect pixel
- *              x - filter taps in the pooling windows
- *@endcode
- *          1) Filter size = 2x2, stride = 3x3, offset = (0,0)
- *@code
- *              x  x     x  x     x  x
- *                |-----------------------|
- *              x |xo|  |x |xo|  |x |xo|  |
- *                |-----------------------|
- *                |  |  |  |  |  |  |  |  |
- *                |-----------------------|
- *              x |x |  |x |x |  |x |x |  |
- *                |-----------------------|
- *              x |xo|  |x |xo|  |x |xo|  |
- *                |-----------------------|
- *                |  |  |  |  |  |  |  |  |
- *                |-----------------------|
- *@endcode
- *          One can use @ref offset to move the pooling windows within the source image:
- *          Using the formula offset.xy += (int)floor( ((L.xy - 1) % s.xy) / 2 ) + 1 from above
- *          for even filter sizes gives:
- *@code
- *              offset.x = floor( (7 % 3) / 2) + 1 = 0 + 1 = 1 and
- *              offset.y = floor( (4 % 3) / 2) + 1 = 0 + 1 = 1.
- *@endcode
- *          2) Filter size = 2x2, stride = 3x3, offset = (1,1)
- *@code
- *                |-----------------------|
- *                |x |x |  |x |x |  |x |x |
- *                |-----------------------|
- *                |x |xo|  |x |xo|  |x |xo|
- *                |-----------------------|
- *                |  |  |  |  |  |  |  |  |
- *                |-----------------------|
- *                |x |x |  |x |x |  |x |x |
- *                |-----------------------|
- *                |x |xo|  |x |xo|  |x |xo|
- *                |-----------------------|
- *@endcode
- *
- *          Our third example shows the placement of additional taps when we increase
- *          the size of the pooling window to 3x3.
- *          In this case the recommended formula for offsets with odd filter sizes gives:
- *@code
- *              offset.x = ceil( (7 % 3) / 2) = 1 and
- *              offset.y = ceil( (4 % 3) / 2) = 1.
- *@endcode
- *          3) Filter size = 3x3, stride = 3x3, offset = (1,1)
- *@code
- *                |-----------------------|
- *                |x |x |x |x |x |x |x |x |x
- *                |-----------------------|
- *                |x |xo|x |x |xo|x |x |xo|x
- *                |-----------------------|
- *                |x |x |x |x |x |x |x |x |x
- *                |-----------------------|
- *                |x |x |x |x |x |x |x |x |x
- *                |-----------------------|
- *                |x |xo|x |x |xo|x |x |xo|x
- *                |-----------------------|
- *                 x  x  x  x  x  x  x  x  x
- *@endcode
- *          In order to avoid large boundary effects with max pooling in examples 1) and 3) the user can use
- *          @ref MPSImageEdgeModeClamp for @ref edgeMode, which has the same effect as constraining the pooling
- *          windows to be confined completely within the source image.
- *
- */
-
-@end    /* MPSCNNPooling */
-
-
-/*!
- *  @class MPSCNNPoolingMax
- *  @dependency This depends on Metal.framework
- *  @discussion Specifies the max pooling filter.  For each pixel, returns the maximum value of pixels
- *              in the kernelWidth x kernelHeight filter region.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface  MPSCNNPoolingMax : MPSCNNPooling
-@end    /* MPSCNNPoolingMax */
-
-
-/*!
- *  @class MPSCNNPoolingAverage
- *  @dependency This depends on Metal.framework
- *  @discussion Specifies the average pooling filter.  For each pixel, returns the mean value of pixels
- *              in the kernelWidth x kernelHeight filter region.
- *              When @ref edgeMode is @ref MPSImageEdgeModeClamp the filtering window is shrunk to remain
- #              within the source image borders. What this means is that close to image borders the filtering window
- *              will be smaller in order to fit inside the source image and less values will be used to compute the
- *              average. In case the filtering window is entirely outside the source image border the
- *              outputted value will be zero.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface  MPSCNNPoolingAverage : MPSCNNPooling
-@end    /* MPSCNNPoolingAverage */
-
-
-
-#pragma mark MPSCNNNormalization
-
-/*!
- *  @class MPSCNNSpatialNormalization
- *  @dependency This depends on Metal.framework
- *  @discussion Specifies the spatial normalization filter.
- *              The spatial normalization for a feature channel applies the filter over local regions which extend
- *              spatially, but are in separate feature channels (i.e., they have shape 1 x kernelWidth x kernelHeight).
- *              For each feature channel, the function computes the sum of squares of X inside each rectangle, N2(i,j).
- *              It then divides each element of X as follows:
- *                  Y(i,j) = X(i,j) / (delta + alpha/(kw*kh) * N2(i,j))^beta,
- *              where kw and kh are the kernelWidth and the kernelHeight.
- *              It is the end-users responsibility to ensure that the combination of the
- *              parameters delta and alpha does not result in a situation where the denominator
- *              becomes zero - in such situations the resulting pixel-value is undefined.
- */
-NS_CLASS_AVAILABLE( 10_12, 10_0 )
-@interface MPSCNNSpatialNormalization : MPSCNNKernel
-
-/*! @property   alpha
- *  @abstract   The value of alpha.  Default is 1.0. Must be non-negative.
- */
-@property (readwrite, nonatomic) float   alpha;
-
-/*! @property   beta
- *  @abstract   The value of beta.  Default is 5.0
- */
-@property (readwrite, nonatomic) float   beta;
-
-/*! @property   delta
- *  @abstract   The value of delta.  Default is 1.0
- */
-@property (readwrite, nonatomic) float   delta;
-
-/*! @property   kernelWidth
- *  @abstract   The width of the filter window
- */
-@property(readonly, nonatomic) NSUInteger       kernelWidth;
-
-/*! @property   kernelHeight
- *  @abstract   The height of the filter window
- */
-@property(readonly, nonatomic) NSUInteger       kernelHeight;
-
-/*!
- *  @abstract  Initialize a spatial normalization filter
- *  @param      device              The device the filter will run on
- *  @param      kernelWidth         The width of the kernel
- *  @param      kernelHeight        The height of the kernel
- *  @return     A valid MPSCNNSpatialNormalization object or nil, if failure.
- *
- *  NOTE:  For now, kernelWidth must be equal to kernelHeight
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                           kernelWidth: (NSUInteger) kernelWidth
-                          kernelHeight: (NSUInteger) kernelHeight NS_DESIGNATED_INITIALIZER;
-
-/*
- * Use initWithDevice:kernelWidth:kernelHeight instead
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device NS_UNAVAILABLE;
-
-/*!
- * NOTE:    The encodeToCommandBuffer API in MPSUnaryImageKernel can be used to encode a
- *          MPSCNNSpatialNormalization filter to a MTLCommandBuffer.
- */
-@end    /* MPSCNNSpatialNormalization */
-
-/*!
- *  @class MPSCNNLocalContrastNormalization
- *  @dependency This depends on Metal.framework
- *  @discussion Specifies the local contrast normalization filter.
- *              The local contrast normalization is quite similar to spatial normalization
- *              (see @ref MPSCNNSpatialNormalization) in that it applies the filter over local regions which extend
- *              spatially, but are in separate feature channels (i.e., they have shape 1 x kernelWidth x kernelHeight),
- *              but instead of dividing by the local "energy" of the feature, the denominator uses the local variance
- *              of the feature - effectively the mean value of the feature is subtracted from the signal.
- *              For each feature channel, the function computes the variance VAR(i,j) and
- *              mean M(i,j) of X(i,j) inside each rectangle around the spatial point (i,j).
- *
- *              Then the result is computed for each element of X as follows:
- *
- *                  Y(i,j) = pm + ps * ( X(i,j) - p0 * M(i,j)) / (delta + alpha * VAR(i,j))^beta,
- *
- *              where kw and kh are the kernelWidth and the kernelHeight and pm, ps and p0 are parameters that
- *              can be used to offset and scale the result in various ways. For example setting
- *              pm=0, ps=1, p0=1, delta=0, alpha=1.0 and beta=0.5 scales input data so that the result has
- *              unit variance and zero mean, provided that input variance is positive.
- *              It is the end-users responsibility to ensure that the combination of the
- *              parameters delta and alpha does not result in a situation where the denominator
- *              becomes zero - in such situations the resulting pixel-value is undefined. A good way to guard
- *              against tiny variances is to regulate the expression with a small value for delta, for example
- *              delta = 1/1024 = 0.0009765625.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface MPSCNNLocalContrastNormalization : MPSCNNKernel
-
-/*! @property   alpha
- *  @abstract   The value of alpha.  Default is 1.0
- */
-@property (readwrite, nonatomic) float   alpha;
-
-/*! @property   beta
- *  @abstract   The value of beta.  Default is 0.5
- */
-@property (readwrite, nonatomic) float   beta;
-
-/*! @property   delta
- *  @abstract   The value of delta.  Default is 1/1024
- */
-@property (readwrite, nonatomic) float   delta;
-
-/*! @property   p0
- *  @abstract   The value of p0.  Default is 1.0
- */
-@property (readwrite, nonatomic) float   p0;
-
-/*! @property   pm
- *  @abstract   The value of pm.  Default is 0.0
- */
-@property (readwrite, nonatomic) float   pm;
-
-/*! @property   ps
- *  @abstract   The value of ps.  Default is 1.0
- */
-@property (readwrite, nonatomic) float   ps;
-
-
-/*! @property   kernelWidth
- *  @abstract   The width of the filter window
- */
-@property(readonly, nonatomic) NSUInteger       kernelWidth;
-
-/*! @property   kernelHeight
- *  @abstract   The height of the filter window
- */
-@property(readonly, nonatomic) NSUInteger       kernelHeight;
-
-/*!
- *  @abstract  Initialize a local contrast normalization filter
- *  @param      device              The device the filter will run on
- *  @param      kernelWidth         The width of the kernel
- *  @param      kernelHeight        The height of the kernel
- *  @return     A valid MPSCNNLocalContrastNormalization object or nil, if failure.
- *
- *  NOTE:  For now, kernelWidth must be equal to kernelHeight
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                           kernelWidth: (NSUInteger) kernelWidth
-                          kernelHeight: (NSUInteger) kernelHeight NS_DESIGNATED_INITIALIZER;
-
-/*
- * Use initWithDevice:kernelWidth:kernelHeight instead
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device NS_UNAVAILABLE;
-
-/*!
- * NOTE:    The encodeToCommandBuffer API in MPSUnaryImageKernel can be used to encode a
- *          MPSCNNLocalContrastNormalization filter to a MTLCommandBuffer.
- */
-@end    /* MPSCNNLocalContrastNormalization */
-
-
-/*!
- *  @class MPSCNNCrossChannelNormalization
- *  @dependency This depends on Metal.framework
- *  @discussion Specifies the normalization filter across feature channels.
- *               This normalization filter applies the filter to a local region across nearby feature channels,
- *              but with no spatial extent (i.e., they have shape kernelSize x 1 x 1).
- *              The normalized output is given by:
- *                  Y(i,j,k) = X(i,j,k) / L(i,j,k)^beta,
- *              where the normalizing factor is:
- *                  L(i,j,k) = delta + alpha/N * (sum_{q in Q(k)} X(i,j,q)^2, where
- *              N is the kernel size. The window Q(k) itself is defined as:
- *                  Q(k) = [max(0, k-floor(N/2)), min(D-1, k+floor((N-1)/2)], where
- *
- *              k is the feature channel index (running from 0 to D-1) and
- *              D is the number of feature channels, and alpha, beta and delta are paremeters.
- *              It is the end-users responsibility to ensure that the combination of the
- *              parameters delta and alpha does not result in a situation where the denominator
- *              becomes zero - in such situations the resulting pixel-value is undefined.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface MPSCNNCrossChannelNormalization : MPSCNNKernel
-
-/*! @property   alpha
- *  @abstract   The value of alpha.  Default is 1.0. Must be non-negative.
- */
-@property (readwrite, nonatomic) float   alpha;
-
-/*! @property   beta
- *  @abstract   The value of beta.  Default is 5.0
- */
-@property (readwrite, nonatomic) float   beta;
-
-/*! @property   delta
- *  @abstract   The value of delta.  Default is 1.0
- */
-@property (readwrite, nonatomic) float   delta;
-
-/*! @property   kernelSize
- *  @abstract   The size of the square filter window.  Default is 5
- */
-@property(readonly, nonatomic) NSUInteger       kernelSize;
-
-/*!
- *  @abstract  Initialize a local response normalization filter in a channel
- *  @param      device              The device the filter will run on
- *  @param      kernelSize          The kernel filter size in each dimension.
- *  @return     A valid MPSCNNCrossFeatureMapNormalization object or nil, if failure.
- *
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                            kernelSize: (NSUInteger) kernelSize NS_DESIGNATED_INITIALIZER;
-
-/*
- * Use initWithDevice:kernelSize: instead
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device NS_UNAVAILABLE;
-
-/*!
- * NOTE:    The encodeToCommandBuffer API in MPSUnaryImageKernel can be used to encode a
- *          MPSCNNCrossChannelNormalization filter to a MTLCommandBuffer.
- */
-@end    /* MPSCNNCrossChannelNormalization */
-
-
-/*!
- *  @class      MPSCNNSoftMax
- *  @dependency This depends on Metal.framework
- *  @discussion The softmax filter is a neural transfer function and is useful for classification tasks.
- *              The softmax filter is applied across feature channels and in a convolutional manner at all
- *              spatial locations. The softmax filter can be seen as the combination of an
- *              activation function (exponential) and a normalization operator.
- *              For each feature channel per pixel in an image in a feature map, the softmax filter computes the following:
- *                  result channel in pixel = exp(pixel(x,y,k))/sum(exp(pixel(x,y,0)) ... exp(pixel(x,y,N-1))
- *                      where N is the number of feature channels
- *
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface MPSCNNSoftMax : MPSCNNKernel
-
-@end    /* MPSCNNSoftMax */
-
-/*!
- *  @class      MPSCNNLogSoftMax
- *  @dependency This depends on Metal.framework
- *  @discussion The logarithmic softmax filter can be achieved by taking the natural logarithm of the
- *              the result of the softmax filter. The results are often used to construct a loss function to be
- *              minimized when training neural networks.
- *              For each feature channel per pixel in an image in a feature map, the logarithmic softmax filter
- *              computes the following:
- *                  result channel in pixel = pixel(x,y,k)) - ln{sum(exp(pixel(x,y,0)) ... exp(pixel(x,y,N-1))}
- *                      where N is the number of feature channels and y = ln{x} satisfies e^y = x.
- *
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface MPSCNNLogSoftMax : MPSCNNKernel
-
-@end    /* MPSCNNLogSoftMax */
-
-
-
-
-#ifdef __cplusplus
-}       /* extern "C" */
-#endif
-
-
-#endif  /* MPS_MPSCNN_h */
-
-
diff -ruN /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImage.h /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImage.h
--- /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImage.h	2016-08-01 16:43:15.000000000 -0400
+++ /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImage.h	1969-12-31 19:00:00.000000000 -0500
@@ -1,476 +0,0 @@
-/*!
- *  @header MPSImage.h
- *  @framework MetalPerformanceShaders.framework
- *
- *  @copyright Copyright (c) 2015 Apple Inc. All rights reserved.
- *  @discussion A MPSImage is a MTLTexture abstraction that allows for more than 4 channels, and
- *              for temporary images.
- */
-
-#ifndef MPSImage_h
-#define MPSImage_h
-
-
-#include <MetalPerformanceShaders/MPSTypes.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#pragma mark MPSImage
-
-/*!
- *  @class      MPSImageDescriptor
- *  @dependency This depends on Metal.framework
- *  @abstract   A MPSImageDescriptor object describes a attributes of MPSImage and is used to
- *              create one (see MPSImage discussion below)
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface MPSImageDescriptor : NSObject
-/*! @property   width
- *  @abstract   The width of the CNN image.
- *  @discussion The formal width of the CNN image in pixels.  Default = 1.
- */
-@property (readwrite, nonatomic) NSUInteger width;
-
-/*! @property   height
- *  @abstract   The height of the CNN image.
- *  @discussion The formal height of the CNN image in pixels. Default = 1.
- */
-@property (readwrite, nonatomic) NSUInteger height;
-
-/*! @property   featureChannels
- *  @abstract   The number of feature channels per pixel.  Default = 1.
- */
-@property (readwrite, nonatomic) NSUInteger featureChannels;
-
-/*! @property   numberOfImages
- *  @abstract   The number of images for batch processing.   Default = 1.
- */
-@property (readwrite, nonatomic) NSUInteger numberOfImages;
-
-/*! @property   pixelFormat
- *  @abstract   The MTLPixelFormat expected for the underlying texture.
- */
-@property (readonly, nonatomic) MTLPixelFormat pixelFormat;
-
-
-/*! @property   channelFormat
- *  @abstract   The storage format to use for each channel in the image.
- */
-@property (readwrite, nonatomic) MPSImageFeatureChannelFormat channelFormat;
-
-/*!
- @property cpuCacheMode
- @abstract Options to specify CPU cache mode of texture resource. Default = MTLCPUCacheModeDefaultCache
- */
-@property (readwrite, nonatomic) MTLCPUCacheMode cpuCacheMode;
-
-/*!
- @property storageMode
- @abstract The storage mode of the texture resource.
-           Default = MTLStorageModeShared on iOS,
-                     MTLStorageModeManaged on Mac OSX.
-           MTLStorageModeShared is not supported on Mac OSX.
-           See Metal headers for synchronization requirements when using MTLStorageModeManaged.
- */
-@property (readwrite, nonatomic) MTLStorageMode storageMode;
-
-/*!
- *  @property   usage
- *  @abstract   Description of texture usage.  Default = MTLTextureUsageShaderRead/Write
- */
-@property (readwrite, nonatomic) MTLTextureUsage usage;
-
-/*! @abstract   Create a MPSImageDescriptor for a single read/write cnn image.
- */
-+(__nonnull instancetype) imageDescriptorWithChannelFormat: (MPSImageFeatureChannelFormat)channelFormat
-                                                     width: (NSUInteger)width
-                                                    height: (NSUInteger)height
-                                           featureChannels: (NSUInteger)featureChannels;
-
-/*! @abstract   Create a MPSImageDescriptor for a read/write cnn image with option to set usage and batch size (numberOfImages).
- */
-+(__nonnull instancetype) imageDescriptorWithChannelFormat: (MPSImageFeatureChannelFormat)channelFormat
-                                                     width: (NSUInteger)width
-                                                    height: (NSUInteger)height
-                                           featureChannels: (NSUInteger)featureChannels
-                                            numberOfImages: (NSUInteger)numberOfImages
-                                                     usage: (MTLTextureUsage)usage;
-
-@end
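
A minimal sketch of the batch-size variant above; the dimensions, channel count, format, and usage flags are illustrative choices, not values from the header:

```objc
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>

// Describe a batch of 4 images, 224x224, 64 feature channels each,
// stored as float16 and readable/writable by compute kernels.
MPSImageDescriptor *desc =
    [MPSImageDescriptor imageDescriptorWithChannelFormat: MPSImageFeatureChannelFormatFloat16
                                                   width: 224
                                                  height: 224
                                         featureChannels: 64
                                          numberOfImages: 4
                                                   usage: MTLTextureUsageShaderRead |
                                                          MTLTextureUsageShaderWrite];
```
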
-
-typedef NS_ENUM(NSUInteger, MPSPurgeableState)
-{
-    MPSPurgeableStateAllocationDeferred MPS_ENUM_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0) = 0,                // The buffer hasn't been allocated yet. Attempts to set purgeability will be ignored.
-    MPSPurgeableStateKeepCurrent        MPS_ENUM_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0) = MTLPurgeableStateKeepCurrent,
-    
-    MPSPurgeableStateNonVolatile        MPS_ENUM_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0) = MTLPurgeableStateNonVolatile,
-    MPSPurgeableStateVolatile           MPS_ENUM_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0) = MTLPurgeableStateVolatile,
-    MPSPurgeableStateEmpty              MPS_ENUM_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0) = MTLPurgeableStateEmpty,
-} NS_ENUM_AVAILABLE(10_11, 8_0);
-
-/*!
- *  @class      MPSImage
- *  @dependency This depends on Metal.framework
- *  @abstract   A MPSImage object describes a MTLTexture that may have more than 4 channels.
- *  @discussion Some image types, such as those found in convolutional neural networks (CNNs),
- *              differ from a standard texture in that they may have more than 4 channels
- *              per image. While the channels could hold RGBA data, they will more commonly
- *              hold a number of structural permutations of an RGBA image as the neural
- *              network progresses. It is not uncommon for each pixel to have 32 or 64 channels.
- *
- *              Since a standard MTLTexture may have no more than 4 channels, the additional
- *              channels are stored in slices of 2d texture array (i.e. texture type is MTLTextureType2DArray) 
- *              such that 4 consecutive channels are stored in each slice of this array.
- *              If the number of feature channels is N, number of array slices needed is (N+3)/4.
- *              E.g. a CNN image with width 3 and height 2 with 9 channels will be stored as
- * @code
- *              slice 2         R???   R???   R???
- *                              R???   R???   R???
- *
- *              slice 1      RGBA   RGBA   RGBA
- *                           RGBA   RGBA   RGBA         (ASCII art /diagonal offset/ intended to show a Z dimension)
- *
- *              slice 0   RGBA   RGBA  RGBA
- *                        RGBA   RGBA  RGBA
- *@endcode
- *              Thus the width and height of the underlying 2d texture array are the same as the width and height
- *              of the MPSImage, and the array length is equal to (featureChannels + 3) / 4. Channels marked with ? are
- *              padding and should not contain NaNs or Infs.
- *
- *              A MPSImage can be a container of multiple CNN images for batch processing. In order to create a
- *              MPSImage that contains N images, create a MPSImageDescriptor with numberOfImages set to N.
- *
- *              The length of the 2d texture array (i.e. the number of slices) will be equal to ((featureChannels+3)/4)*numberOfImages,
- *              where each consecutive run of (featureChannels+3)/4 slices of this array represents one image.
- *
- *              Although a MPSImage can contain numberOfImages > 1, the actual number of images among these processed by a MPSCNNKernel
- *              is controlled by the z-dimension of the clipRect. A MPSCNNKernel processes n=clipRect.size.depth images from this collection.
- *              The starting index of the image to process from the source MPSImage is given by offset.z. The starting index of the image in the
- *              destination MPSImage where the processed image is written is given by clipRect.origin.z. Thus a MPSCNNKernel takes n=clipRect.size.depth
- *              images from the source at indices [offset.z, offset.z+n-1], processes each independently and
- *              stores the results in the destination at indices [clipRect.origin.z, clipRect.origin.z+n-1] respectively.
- *              Thus offset.z+n must be <= [src numberOfImages], clipRect.origin.z+n must be <= [dest numberOfImages] and offset.z must
- *              be >= 0.
- *              Example: Suppose MPSCNNConvolution takes an input image with 8 channels and outputs an image with 16 channels. The number of
- *              slices needed in the source 2d texture array is 2 and the number of slices needed in the destination 2d array is 4. Suppose the source batch size is
- *              5 and the destination batch size is 4. Then the number of source slices will be 2*5=10 and the number of destination slices will be 4*4=16. If
- *              you want to process images 2 and 3 of the source and store the results at indices 1 and 2 in the destination, you can achieve this by setting
- *              offset.z=2, clipRect.origin.z=1 and clipRect.size.depth=2. MPSCNNConvolution will, in this case, take slices 4 and 5 of the source and
- *              produce slices 4 to 7 of the destination. Similarly, slices 6 and 7 will be used to produce slices 8 to 11 of the destination.
- *
- *              All MPSCNNKernels process images in the batch independently. That is, calling a MPSCNNKernel on a
- *              batch is formally the same as calling it on each image in the batch sequentially.
- *              Computational and GPU work submission overhead will be amortized over more work if batch processing is used. This is especially 
- *              important for better performance on small images.
- *
- *              If the number of feature channels is <= 4 and numberOfImages = 1, i.e. only one slice is needed to represent the MPSImage, the
- *              underlying metal texture type is chosen to be MTLTextureType2D rather than MTLTextureType2DArray as explained above.
- *
- *              There are also MPSTemporaryImages,
- *              intended for use for very short-lived image data that is produced and consumed
- *              immediately in the same MTLCommandBuffer. They are a useful way to minimize CPU-side
- *              texture allocation costs and greatly reduce the amount of memory used by your image
- *              pipeline.
- *
- *              Creation of the underlying texture may in some cases occur lazily. To avoid
- *              materializing memory for longer than necessary, you should in general avoid
- *              calling MPSImage.texture except when unavoidable. When possible, use the other MPSImage
- *              properties to get information about the MPSImage instead.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface MPSImage :  NSObject
-
-/*! @property device
- *  @abstract  The device on which the MPSImage will be used
- */
-@property (readonly, retain, nonatomic, nonnull)  id <MTLDevice>    device;
-
-/*! @property   width
- *  @abstract   The formal width of the image in pixels.
- */
-@property (readonly, nonatomic) NSUInteger width;
-
-/*! @property   height
- *  @abstract   The formal height of the image in pixels.
- */
-@property (readonly, nonatomic) NSUInteger height;
-
-/*! @property   featureChannels
- *  @abstract   The number of feature channels per pixel.
- */
-@property (readonly, nonatomic) NSUInteger featureChannels;
-
-/*! @property   numberOfImages
- *  @abstract   numberOfImages for batch processing
- */
-@property (readonly, nonatomic) NSUInteger numberOfImages;
-
-/*! @property   textureType
- *  @abstract   The type of the underlying texture, typically MTLTextureType2D
- *              or MTLTextureType2DArray
- */
-@property (readonly, nonatomic) MTLTextureType textureType;
-
-/*! @property   pixelFormat
- *  @abstract   The MTLPixelFormat of the underlying texture
- */
-@property (readonly, nonatomic) MTLPixelFormat pixelFormat;
-
-/*! @property   precision
- *  @abstract   The number of bits of numeric precision available for each feature channel.
- *  @discussion This is precision, not size.  That is, float is 24 bits, not 32. half
- *              precision floating-point is 11 bits, not 16. SNorm formats have one less
- *              bit of precision for the sign bit, etc. For formats like MTLPixelFormatB5G6R5Unorm
- *              it is the precision of the most precise channel, in this case 6.  When this
- *              information is unavailable, typically compressed formats, 0 will be returned.
- */
-@property (readonly, nonatomic) NSUInteger precision;
-
-/*!
- *  @property   usage
- *  @abstract   Description of texture usage.
- */
-@property (readonly, nonatomic) MTLTextureUsage usage;
-
-/*!
- *  @property   pixelSize
- *  @abstract   Number of bytes from the first byte of one pixel to the first byte of the next 
- *              pixel in storage order.  (Includes padding.)
- */
-@property (readonly, nonatomic) size_t  pixelSize;
-
-
-/*! @property   texture
- *  @abstract   The associated MTLTexture object.
- *              This is a 2D texture if numberOfImages is 1 and number of feature channels <= 4.
- *              It is a 2D texture array otherwise.
- *  @discussion To avoid the high cost of premature allocation of the underlying texture, avoid calling this
- *              property except when strictly necessary. [MPSCNNKernel encode...] calls typically cause
- *              their arguments to become allocated. Likewise, MPSImages initialized with -initWithTexture:
- *              featureChannels: have already been allocated.
- */
-@property (readonly, nonnull, nonatomic) id <MTLTexture> texture;
-
-/*!
- @property label
- @abstract A string to help identify this object.
- */
-@property (copy, atomic, nullable)  NSString *label;
-
-/*!
- *  @abstract   Initialize an empty image object
- *  @param      device              The device on which the image will be used. May not be NULL.
- *  @param      imageDescriptor     The MPSImageDescriptor. May not be NULL.
- *  @return     A valid MPSImage object or nil, if failure.
- *  @discussion Storage for the image data is allocated lazily on first use of the MPSImage or
- *              when the application calls MPSImage.texture
- *
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                       imageDescriptor: (const MPSImageDescriptor * __nonnull) imageDescriptor;
-
-/*!
- *  @abstract   Initialize an MPSImage object using a Metal texture created by the user
- *              for a specific number of feature channels and number of images.
- *  @param      texture          The MTLTexture allocated by the user to be used as backing for MPSImage.
- *  @param      featureChannels  Number of feature channels this texture contains.
- *  @return     A valid MPSImage object or nil, if failure.
- *  @discussion The application can let the MPS framework allocate the texture with the properties specified in the imageDescriptor
- *              using the initWithDevice:imageDescriptor: API above. However, in a memory intensive application,
- *              you can save memory (and allocation/deallocation time) by using MPSTemporaryImage, where the MPS
- *              framework aggressively reuses the memory underlying textures on the same command buffer. See the MPSTemporaryImage
- *              class below for details. However, in certain cases, an application developer may want more control
- *              over the allocation, placement and reuse/recycling of the memory backing textures, using the
- *              Metal Heaps API. In this case, the application can create a MPSImage from a pre-allocated texture using
- *              initWithTexture:featureChannels:.
- *
- *              The MTLTextureType of the texture can be MTLTextureType2D ONLY if featureChannels <= 4, in which case
- *              MPSImage.numberOfImages is set to 1. Otherwise it should be MTLTextureType2DArray with
- *              arrayLength == numberOfImages * ((featureChannels + 3)/4). MPSImage.numberOfImages is set to
- *              texture.arrayLength / ((featureChannels + 3)/4).
- *
- *              For MTLTextures containing typical image data, which the application may obtain from MetalKit or
- *              other libraries (such as data drawn from a JPEG or PNG), featureChannels should
- *              be set to the number of valid color channels. E.g. for RGB data, even though the MTLPixelFormat
- *              will be RGBA-based, featureChannels should be set to 3.
- *
- */
--(nonnull instancetype) initWithTexture: (nonnull id <MTLTexture>) texture
-                        featureChannels: (NSUInteger) featureChannels;
-
-
-/*
- * Use initWithDevice:texture: or initWithDevice:imageDescriptor: instead
- */
--(nonnull instancetype) init NS_UNAVAILABLE;
-
-/*!
- *  @method         setPurgeableState
- *  @abstract       Set (or query) the purgeability state of a MPSImage
- *  @discussion     Usage is per [MTLResource setPurgeableState:], except that the MTLTexture might be
- *                  MPSPurgeableStateAllocationDeferred, which means there is no texture to mark volatile / nonvolatile.
- *                  Attempts to set purgeability on MTLTextures that have not been allocated will be ignored.
- */
-- (MPSPurgeableState)setPurgeableState:(MPSPurgeableState)state;
-
-@end
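
A minimal sketch of both initializers, assuming `device`, a descriptor `desc` as in the earlier sketch, and a user-allocated `rgbaTexture`:

```objc
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>

// Let MPS allocate the backing texture lazily from a descriptor...
MPSImage *cnnImage = [[MPSImage alloc] initWithDevice:device
                                      imageDescriptor:desc];

// ...or wrap a user-allocated RGBA texture that carries only 3
// meaningful color channels (see the featureChannels note above).
MPSImage *photo = [[MPSImage alloc] initWithTexture:rgbaTexture
                                    featureChannels:3];
```
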
-
-/*!
- *  @class      MPSTemporaryImage
- *  @dependency MPSImage
- *  @abstract   MPSTemporaryImages are provided as a fast way to store transient data
- *              that will be used promptly and discarded.
- *
- *  @discussion MPSTemporaryImages can provide a profound reduction in the aggregate 
- *              texture memory and associated CPU-side allocation cost in your application. 
- *              MPS achieves this by automatically identifying MPSTemporaryImages that
- *              do not overlap in time over the course of a MTLCommandBuffer and so can 
- *              reuse the same memory. MPSTemporaryImages leverage MPS's internal cache
- *              of preallocated reusable memory to hold pixel data to avoid typical 
- *              memory allocation performance penalties common to ordinary MPSImages and 
- *              MTLTextures.
- *
- *              To avoid data corruption due to aliasing, MPSTemporaryImages impose some
- *              important restrictions:
- *
- *              - The textures are MTLStorageModePrivate. You can not, for example, use
- *                [MTLTexture getBytes...] or [MTLTexture replaceRegion...] with them. 
- *                MPSTemporaryImages are strictly read and written by the GPU.
- *
- *              - The temporary image may be used only on a single MTLCommandBuffer.
- *                This limits the chronology to a single linear time stream.
- *
- *              - The readCount property must be managed correctly. Please see
- *                the description of the readCount property for full details.
- *
- *              - see also pixel format restrictions for MPSImages in general.
- *
- *              Since MPSTemporaryImages can only be used with a single MTLCommandBuffer,
- *              and can not be used off the GPU, they generally should not be kept 
- *              around past the completion of the MTLCommandBuffer. The lifetime of
- *              MPSTemporaryImages is expected to be typically extremely short, perhaps 
- *              only a few lines of code.
- *
- *              To keep the lifetime of the underlying texture allocation as short as 
- *              possible, the underlying texture is not allocated until the first time
- *              the MPSTemporaryImage is used by a MPSCNNKernel or the .texture property
- *              is read. The readCount property serves to limit the lifetime on the
- *              other end.
- *
- *              You may use the MPSTemporaryImage.texture with MPSUnaryImageKernel -encode... methods,
- *              iff featureChannels <= 4 and the MTLTexture conforms to requirements of that MPSKernel.
- *              In such cases, the readCount is not modified, since the enclosing object
- *              is not available. There is no locking mechanism provided to prevent a MTLTexture
- *              returned from the .texture property from becoming invalid when the
- *              readCount reaches 0.
- *
- *              MPSTemporaryImages can otherwise be used wherever MPSImages are used.
- *
- *
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface  MPSTemporaryImage : MPSImage
-
-
-/*!
- *  @abstract   Initialize a MPSTemporaryImage for use on a MTLCommandBuffer
- *
- *  @param      commandBuffer   The MTLCommandBuffer on which the MPSTemporaryImage will be exclusively used
- *
- *  @param      imageDescriptor A valid imageDescriptor describing the MPSImage format to create.
- *
- *  @return     A valid MPSTemporaryImage.  The object will be released when the command buffer
- *              is committed. The underlying texture will become invalid before this time
- *              due to the action of the readCount property.
- *
- */
-+(nonnull instancetype) temporaryImageWithCommandBuffer: (nonnull id <MTLCommandBuffer>) commandBuffer
-                                        imageDescriptor: (const MPSImageDescriptor * __nonnull) imageDescriptor;
-
-
-/*!
- *  @abstract       Low level interface for creating a MPSTemporaryImage using a MTLTextureDescriptor
- *  @discussion     This function provides access to MTLPixelFormats not typically covered by +temporaryImageWithCommandBuffer:imageDescriptor:
- *                  The feature channels will be inferred from the MTLPixelFormat without changing the width. 
- *                  The following restrictions apply:
- *  
- *                      MTLTextureType must be MTLTextureType2D or MTLTextureType2DArray
- *                      MTLTextureUsage must contain at least one of MTLTextureUsageShaderRead, MTLTextureUsageShaderWrite
- *                      MTLStorageMode must be MTLStorageModePrivate
- *                      depth must be 1
- *
- *  @param commandBuffer        The command buffer on which the MPSTemporaryImage may be used
- *  @param textureDescriptor    A texture descriptor describing the MPSTemporaryImage texture
- *
- *  @return     A valid MPSTemporaryImage.  The object will be released when the command buffer
- *              is committed. The underlying texture will become invalid before this time
- *              due to the action of the readCount property.
- */
-+(nonnull instancetype) temporaryImageWithCommandBuffer: (nonnull id <MTLCommandBuffer>) commandBuffer
-                                      textureDescriptor: (const MTLTextureDescriptor * __nonnull) textureDescriptor;
-
-
-/*!
- *  @abstract       Help MPS decide which allocations to make ahead of time
- *  @discussion     The texture cache that underlies the MPSTemporaryImage can automatically allocate new storage as
- *                  needed as you create new temporary images.  However, sometimes a more global view of what you
- *                  plan to make is useful for maximizing memory reuse to get the most efficient operation.
- *                  This class method hints to the cache what the list of images will be.
- *
- *                  It is never necessary to call this method. It is purely a performance and memory optimization.
- *
- *  @param commandBuffer        The command buffer on which the MPSTemporaryImages will be used
- *  @param descriptorList       A NSArray of MPSImageDescriptors, indicating images that will be created
- */
-+(void) prefetchStorageWithCommandBuffer: (nonnull id <MTLCommandBuffer>) commandBuffer
-                     imageDescriptorList: (NSArray <MPSImageDescriptor*> * __nonnull) descriptorList;
-
-/*! Unavailable. Use +temporaryImageWithCommandBuffer:textureDescriptor: or +temporaryImageWithCommandBuffer:imageDescriptor: instead. */
--(nonnull instancetype) initWithTexture: (nonnull id <MTLTexture>) texture
-                        featureChannels: (NSUInteger) featureChannels NS_UNAVAILABLE;
-
-/*! Unavailable. Use +temporaryImageWithCommandBuffer:imageDescriptor: instead. */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                       imageDescriptor: (const MPSImageDescriptor * __nonnull) imageDescriptor      NS_UNAVAILABLE;
-
-/*!
- *  @abstract       The number of times a temporary image may be read by a MPSCNNKernel
- *                  before its contents become undefined.
- *
- *  @discussion     MPSTemporaryImages must release their underlying textures for reuse
- *                  immediately after last use. So as to facilitate *prompt* convenient 
- *                  memory recycling, each time a MPSTemporaryImage is read by a 
- *                  MPSCNNKernel -encode... method, its readCount is automatically 
- *                  decremented. When the readCount reaches 0, the underlying texture is 
- *                  automatically made available for reuse to MPS for its own needs and for 
- *                  other MPSTemporaryImages prior to return from the -encode.. function.  
- *                  The contents of the texture become undefined at this time. 
- *
- *                  By default, the readCount is initialized to 1, indicating an image that
- *                  may be overwritten any number of times, but read only once.
- *
- *                  You may change the readCount as desired to allow MPSCNNKernels to read
- *                  the MPSTemporaryImage additional times. However, it is an error to change
- *                  the readCount once it is zero. It is an error to read or write to a
- *                  MPSTemporaryImage with a zero readCount. You may set the readCount to 0
- *                  yourself to cause the underlying texture to be returned to MPS. Writing
- *                  to a MPSTemporaryImage does not adjust the readCount.
- *
- *                  The Metal API Validation layer will assert if a MPSTemporaryImage is
- *                  deallocated with non-zero readCount to help identify cases when resources
- *                  are not returned promptly.
- */
-@property (readwrite, nonatomic)  NSUInteger  readCount;
-
-
-@end
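
A minimal sketch of the readCount contract described above; `commandBuffer`, a descriptor `desc`, the MPSImages `input`/`outB`/`outC`, and the MPSCNNKernels `kernelA`/`kernelB`/`kernelC` are all assumed to exist:

```objc
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>

// An intermediate image that two downstream kernels will read.
MPSTemporaryImage *tmp =
    [MPSTemporaryImage temporaryImageWithCommandBuffer:commandBuffer
                                       imageDescriptor:desc];
tmp.readCount = 2;    // default is 1; this image will be read twice

// Writing does not change readCount; each read decrements it.
[kernelA encodeToCommandBuffer:commandBuffer sourceImage:input destinationImage:tmp];
[kernelB encodeToCommandBuffer:commandBuffer sourceImage:tmp destinationImage:outB]; // readCount -> 1
[kernelC encodeToCommandBuffer:commandBuffer sourceImage:tmp destinationImage:outC]; // readCount -> 0; storage recycled
```
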
-
-#ifdef __cplusplus
-}   // extern "C"
-#endif
-
-
-#endif /* MPSImage_h */
diff -ruN /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageConversion.h /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageConversion.h
--- /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageConversion.h	2016-08-01 16:43:15.000000000 -0400
+++ /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageConversion.h	1969-12-31 19:00:00.000000000 -0500
@@ -1,69 +0,0 @@
-/*!
- *  @header MPSConversions.h
- *  @framework MetalPerformanceShaders.framework
- *
- *  @copyright Copyright (c) 2015 Apple Inc. All rights reserved.
- *  @abstract MetalPerformanceShaders conversion filters
- *  @ignorefuncmacro MPS_CLASS_AVAILABLE_STARTING
- */
-
-#ifndef MPS_Conversions_h
-#define MPS_Conversions_h
-
-#include <MetalPerformanceShaders/MPSImageKernel.h>
-#include <CoreGraphics/CGColorConversionInfo.h>
-
-
-/*!
- *  @class      MPSImageConversion
- *  @discussion The MPSImageConversion filter performs a conversion from source to destination
- *
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface  MPSImageConversion : MPSUnaryImageKernel
-
-/*! @property   sourceAlpha
- *  @abstract   Premultiplication description for the source texture
- *  @discussion Most colorspace conversion operations can not work directly on premultiplied data.
- *              Use this property to tag premultiplied data so that the source texture can
- *              be unpremultiplied prior to application of these transforms. 
- *              Default: MPSPixelAlpha_AlphaIsOne
- */
-@property (readonly, nonatomic) MPSAlphaType sourceAlpha;
-
-/*! @property   destinationAlpha
- *  @abstract   Premultiplication description for the destination texture
- *  @discussion Colorspace conversion operations produce non-premultiplied data.
- *              Use this property to tag cases where premultiplied results are required.
- *              If MPSPixelAlpha_AlphaIsOne is used, the alpha channel will be set to 1. 
- *              Default: MPSPixelAlpha_AlphaIsOne
- */
-@property (readonly, nonatomic) MPSAlphaType destinationAlpha;
-
-
-
-/*!
- *  @abstract   Create a converter that can convert texture colorspace, alpha and texture format
- *  @discussion Create a converter that can convert texture colorspace, alpha and MTLPixelFormat. 
- *              Optimized cases exist for NULL color space converter and no alpha conversion.
- *  @param      device              The device the filter will run on
- *  @param      srcAlpha            The alpha encoding for the source texture
- *  @param      destAlpha           The alpha encoding for the destination texture
- *  @param      backgroundColor     An array of CGFloats giving the background color to use when flattening an image.
- *                                  The color is in the source colorspace.  The length of the array is the number 
- *                                  of color channels in the src colorspace. If NULL, use {0}.
- *  @param      conversionInfo      The colorspace conversion to use. May be NULL, indicating no
- *                                  color space conversions need to be done.
- *
- *  @result     An initialized MPSImageConversion object.
- */
--(nonnull instancetype) initWithDevice:(nonnull id<MTLDevice>)device
-                              srcAlpha:(MPSAlphaType) srcAlpha
-                             destAlpha:(MPSAlphaType) destAlpha
-                       backgroundColor:(nullable CGFloat*) backgroundColor
-                        conversionInfo:(nullable CGColorConversionInfoRef) conversionInfo;
-
-
-@end  /* MPSImageConversion */
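
A minimal sketch, assuming `device`, `commandBuffer`, and source/destination textures exist; the alpha choices are illustrative, and no colorspace conversion is requested:

```objc
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>

// Unpremultiply the source and emit alpha = 1 in the destination.
// NULL conversionInfo means no colorspace conversion is performed.
MPSImageConversion *convert =
    [[MPSImageConversion alloc] initWithDevice:device
                                      srcAlpha:MPSAlphaTypePremultiplied
                                     destAlpha:MPSAlphaTypeAlphaIsOne
                               backgroundColor:NULL
                                conversionInfo:NULL];
[convert encodeToCommandBuffer:commandBuffer
                 sourceTexture:srcTexture
            destinationTexture:dstTexture];
```
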
-
-#endif
diff -ruN /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageConvolution.h /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageConvolution.h
--- /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageConvolution.h	2016-09-23 20:26:03.000000000 -0400
+++ /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageConvolution.h	1969-12-31 19:00:00.000000000 -0500
@@ -1,391 +0,0 @@
-/*!
- *  @header MPSImageConvolution.h
- *  @framework MetalPerformanceShaders
- *  @copyright Copyright (c) 2015 Apple Inc. All rights reserved.
- *
- *  @abstract MetalPerformanceShaders Convolution Filters
- */
-
-#ifndef MPS_MSImageConvolution_h
-#define MPS_MSImageConvolution_h
-
-#include <MetalPerformanceShaders/MPSImageKernel.h>
-
-/*!
- *  @class      MPSImageConvolution
- *  @discussion The MPSImageConvolution convolves an image with a given filter of odd width and height.
- *              The center of the kernel aligns with the MPSImageConvolution.offset. That is, the position 
- *              of the top left corner of the area covered by the kernel is given by 
- *              MPSImageConvolution.offset - {kernel_width>>1, kernel_height>>1, 0}
- *
- *              Optimized cases include 3x3,5x5,7x7,9x9,11x11, 1xN and Nx1. If a convolution kernel 
- *              does not fall into one of these cases but is a rank-1 matrix (a.k.a. separable)
- *              then it will fall on an optimized separable path. Other convolutions will execute with
- *              full MxN complexity.
- *
- *              If there are multiple channels in the source image, each channel is processed independently.
- *  
- *  @performance Separable convolution filters may perform better when done in two passes. A convolution filter
- *              is separable if every row is a scalar multiple of a single row vector, i.e. it is a rank-1 matrix. For
- *              example, this edge detection filter:
- *                  @code
- *                      -1      0       1
- *                      -2      0       2
- *                      -1      0       1
- *                  @endcode
- *              can be separated into the product of two vectors:
- *                  @code
- *                      1
- *                      2      x    [-1  0   1]
- *                      1
- *                  @endcode
- *              and consequently can be done as two, one-dimensional convolution passes back to back on the same image. 
- *              In this way, the number of multiplies (ignoring the fact that we could skip zeros here) is reduced from
- *              3*3=9 to 3+3 = 6. There are similar savings for addition. For large filters, the savings can be profound.
- *
- */
-
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageConvolution : MPSUnaryImageKernel
-
-/*! @property kernelHeight
- *  @abstract  The height of the filter window. Must be an odd number.
- */
-@property (readonly, nonatomic)   NSUInteger  kernelHeight;
-
-/*! @property kernelWidth
- *  @abstract  The width of the filter window. Must be an odd number.
- */
-@property (readonly, nonatomic)   NSUInteger  kernelWidth;
-
-
-/*! @property    bias
- *  @discussion  The bias is a value to be added to convolved pixel before it is converted back to the storage format.
- *               It can be used to convert negative values into a representable range for an unsigned MTLPixelFormat.
- *               For example, many edge detection filters produce results in the range [-k,k]. By scaling the filter
- *               weights by 0.5/k and adding 0.5, the results will be in range [0,1] suitable for use with unorm formats. 
- *               It can be used in combination with renormalization of the filter weights to do video ranging as part 
- *               of the convolution effect. It can also just be used to increase the brightness of the image.
- *
- *               Default value is 0.0f.
- */
-@property (readwrite, nonatomic) float bias;
-
-/*!
- *  @abstract  Initialize a convolution filter
- *  @param      device          The device the filter will run on
- *  @param      kernelWidth     the width of the kernel
- *  @param      kernelHeight    the height of the kernel
- *  @param      kernelWeights   A pointer to an array of kernelWidth * kernelHeight values to be used as the kernel.
- *                              These are in row major order.
- *  @return     A valid MPSImageConvolution object or nil, if failure.
- */
-
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                           kernelWidth: (NSUInteger)kernelWidth
-                          kernelHeight: (NSUInteger)kernelHeight
-                               weights: (const float*__nonnull)kernelWeights     NS_DESIGNATED_INITIALIZER;
-
-
-@end
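
A minimal sketch that wires up the edge-detection kernel from the discussion above, assuming `device`, `commandBuffer`, and textures exist:

```objc
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>

// The separable edge-detection kernel from the discussion above, in
// row-major order. Results land in [-4,4] for unorm inputs; for a unorm
// destination, the weights would also be scaled by 0.5/4 with bias set
// to 0.5, per the bias note above.
static const float kEdgeWeights[9] = {
    -1.f, 0.f, 1.f,
    -2.f, 0.f, 2.f,
    -1.f, 0.f, 1.f,
};

MPSImageConvolution *conv =
    [[MPSImageConvolution alloc] initWithDevice:device
                                    kernelWidth:3
                                   kernelHeight:3
                                        weights:kEdgeWeights];
[conv encodeToCommandBuffer:commandBuffer
              sourceTexture:srcTexture
         destinationTexture:dstTexture];
```
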
-
-
-/*!
- *  @class      MPSImageLaplacian
- *  @discussion The MPSImageLaplacian is an optimized variant of the MPSImageConvolution filter provided primarily for ease of use.
- *              This filter uses an optimized convolution filter with a 3 x 3 kernel with the following weights:
- *                  [ 0  1  0
- *                    1 -4  1
- *                    0  1  0 ]
- *
- *              The optimized convolution filter used by MPSImageLaplacian can also be used by creating a MPSImageConvolution
- *              object with kernelWidth = 3, kernelHeight = 3 and weights as specified above.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface  MPSImageLaplacian : MPSUnaryImageKernel
-
-/*! @property    bias
- *  @discussion  The bias is a value to be added to convolved pixel before it is converted back to the storage format.
- *               It can be used to convert negative values into a representable range for an unsigned MTLPixelFormat.
- *               For example, many edge detection filters produce results in the range [-k,k]. By scaling the filter
- *               weights by 0.5/k and adding 0.5, the results will be in range [0,1] suitable for use with unorm formats.
- *               It can be used in combination with renormalization of the filter weights to do video ranging as part
- *               of the convolution effect. It can also just be used to increase the brightness of the image.
- *
- *               Default value is 0.0f.
- */
-@property (readwrite, nonatomic) float bias;
-
-@end
-
-
-/*!
- *  @class      MPSImageBox
- *  @discussion The MPSImageBox convolves an image with given filter of odd width and height. The kernel elements
- *              all have equal weight, achieving a blur effect. (Each result is the unweighted average of the
- *              surrounding pixels.) This allows for much faster algorithms, especially for larger blur radii.
- *              The box height and width must be odd numbers. The box blur is a separable filter. The implementation 
- *              is aware of this and will act accordingly to give best performance for multi-dimensional blurs.
- */
-
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageBox : MPSUnaryImageKernel
-
-
-/*! @property kernelHeight
- *  @abstract  The height of the filter window.
- */
-@property (readonly, nonatomic)   NSUInteger  kernelHeight;
-
-/*! @property kernelWidth
- *  @abstract  The width of the filter window.
- */
-@property (readonly, nonatomic)   NSUInteger  kernelWidth;
-
-/*! @abstract   Initialize a filter for a particular kernel size and device
- *  @param      device  The device the filter will run on
- *  @param      kernelWidth  the width of the kernel.  Must be an odd number.
- *  @param      kernelHeight the height of the kernel. Must be an odd number.
- *  @return     A valid object or nil, if failure.
- */
-
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                           kernelWidth: (NSUInteger)kernelWidth
-                          kernelHeight: (NSUInteger)kernelHeight        NS_DESIGNATED_INITIALIZER;
-
-/* You must use initWithDevice:kernelWidth:kernelHeight: instead. */
--(nonnull instancetype) initWithDevice:(nonnull id<MTLDevice>)device    NS_UNAVAILABLE;
-@end
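
A minimal sketch, assuming the usual `device`, `commandBuffer`, and textures; the 9x9 size is an illustrative choice:

```objc
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>

// A 9x9 unweighted average; width and height must be odd.
MPSImageBox *box = [[MPSImageBox alloc] initWithDevice:device
                                           kernelWidth:9
                                          kernelHeight:9];
[box encodeToCommandBuffer:commandBuffer
             sourceTexture:srcTexture
        destinationTexture:dstTexture];
```
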
-
-/*!
- *  @class      MPSImageTent
- *  @discussion The box filter, while fast, may yield square-ish looking blur effects. However, multiple
- *              passes of the box filter tend to smooth out with each additional pass. For example, two 3-wide
- *              box blurs produce the same effective convolution as a 5-wide tent blur:
- *              @code
- *                      1   1   1
- *                          1   1   1
- *                      +       1   1   1
- *                      =================
- *                      1   2   3   2   1
- *              @endcode
- *              Additional passes tend to approximate a gaussian line shape.
- *
- *              The MPSImageTent convolves an image with a tent filter. These form a tent shape with incrementally
- *              increasing sides, for example:
- *
- *                  1   2   3   2   1
- *
- *
- *                  1   2   1
- *                  2   4   2
- *                  1   2   1
- *
- *              Like the box filter, this arrangement allows for much faster algorithms, especially for larger blur
- *              radii, but with a more pleasing appearance.
- *
- *              The tent blur is a separable filter. The implementation is aware of this and will act accordingly
- *              to give best performance for multi-dimensional blurs.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface MPSImageTent : MPSImageBox
-
-@end
-
-/*!
- *  @class      MPSImageGaussianBlur
- *  @discussion The MPSImageGaussianBlur convolves an image with a gaussian of a given sigma in both the x and y directions.
- *
- *                  The MPSImageGaussianBlur utilizes a very fast algorithm that typically runs at approximately
- *                  1/2 of copy speeds. Notably, it is faster than either the tent or box blur except perhaps
- *                  for very large filter windows. Mathematically, it is an approximate gaussian. Some
- *                  non-gaussian behavior may be detectable with advanced analytical methods such as FFT.  
- *                  If an analytically clean gaussian filter is required, please use the MPSImageConvolution 
- *                  filter instead with an appropriate set of weights. The MPSImageGaussianBlur is intended
- *                  to be suitable for all common image processing needs demanding ~10 bits of precision or
- *                  less.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageGaussianBlur : MPSUnaryImageKernel
-
-/*! @abstract   Initialize a gaussian blur filter for a particular sigma and device
- *  @param      device  The device the filter will run on
- *  @param      sigma   The standard deviation of the gaussian blur filter.
- *                      The gaussian weight, centered at 0, at integer grid point i is given as
- *                            w(i) = 1/sqrt(2*pi*sigma^2) * exp(-i^2/(2*sigma^2))
- *                      If we take the cut off at 1% of w(0) (the max weight), beyond which weights
- *                      are considered 0, we have
- *                              ceil( sqrt(-log(0.01)*2) * sigma ) ~ ceil(3.04*sigma)
- *                      as a rough estimate of the filter half-width
- *  @return     A valid object or nil, if failure.
- */
-
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                                 sigma: (float)sigma                   NS_DESIGNATED_INITIALIZER;
-
-/* You must use initWithDevice:sigma: instead. */
--(nonnull instancetype) initWithDevice:(nonnull id<MTLDevice>)device    NS_UNAVAILABLE;
-
-/*! @property sigma
- *  @abstract Read-only sigma value with which filter was created
- */
-@property (readonly, nonatomic) float sigma;
-
-@end
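
A minimal sketch, assuming `device`, `commandBuffer`, and textures; the sigma and edge mode are illustrative choices:

```objc
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>

// sigma = 2 implies an effective half-width of roughly
// ceil(3.04 * 2) = 7 texels, per the estimate above.
MPSImageGaussianBlur *blur =
    [[MPSImageGaussianBlur alloc] initWithDevice:device sigma:2.0f];
blur.edgeMode = MPSImageEdgeModeClamp;   // repeat edge texels instead of reading zeros
[blur encodeToCommandBuffer:commandBuffer
              sourceTexture:srcTexture
         destinationTexture:dstTexture];
```
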
-
-/*!
- *  @class      MPSImageSobel
- *  @discussion The MPSImageSobel implements the Sobel filter.
- *              When the color model (e.g. RGB, two-channel, grayscale, etc.) of the source
- *              and destination textures match, the filter is applied to each channel
- *              separately. If the destination is monochrome (single channel) but the source is
- *              multichannel, the pixel values are converted to grayscale before applying the Sobel
- *              operator, using the linear gray color transform vector (v).
- *
- *                  Luminance = v[0] * pixel.x + v[1] * pixel.y + v[2] * pixel.z;
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageSobel : MPSUnaryImageKernel
-
-/*! @abstract   Initialize a Sobel filter on a given device using the default color 
- *              transform. Default: BT.601/JPEG {0.299f, 0.587f, 0.114f}
- *
- *              For non-default conversion matrices, use -initWithDevice:linearGrayColorTransform:
- *
- *  @param      device  The device the filter will run on
- *  @return     A valid object or nil, if failure.
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device;
-
-/*! @abstract   Initialize a Sobel filter on a given device with a non-default color transform
- *  @param      device          The device the filter will run on
- *  @param      transform       Array of three floats describing the rgb to gray scale color transform.
- *
- *                          Luminance = transform[0] * pixel.x + transform[1] * pixel.y + transform[2] * pixel.z;
- *
- *  @return     A valid object or nil, if failure.
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-              linearGrayColorTransform: (const float * __nonnull) transform      NS_DESIGNATED_INITIALIZER;
-
-/*! @property    colorTransform
- *  @discussion  Returns a pointer to the array of three floats used to convert RGBA, RGB or RG images
- *               to the destination format when the destination is monochrome.
- */
-@property (readonly, nonatomic, nonnull) const float* colorTransform;
-
-@end  /* MPSImageSobel */
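
A minimal sketch of the custom-transform initializer, assuming `device`, `commandBuffer`, a multichannel `rgbaTexture`, and a single-channel `monoTexture`; the Rec.709 weights are an illustrative alternative to the BT.601 default:

```objc
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>

// Rec.709 luma weights instead of the default BT.601 set.
static const float kRec709[3] = { 0.2126f, 0.7152f, 0.0722f };

MPSImageSobel *sobel =
    [[MPSImageSobel alloc] initWithDevice:device
                 linearGrayColorTransform:kRec709];
[sobel encodeToCommandBuffer:commandBuffer
               sourceTexture:rgbaTexture
          destinationTexture:monoTexture];   // single-channel destination
```
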
-
-
-
-/*!
- *  @class      MPSImagePyramid
- *  @discussion The MPSImagePyramid is a base class for creating different kinds of pyramid images
- *
- *              Currently supported pyramid-types are:
- *              @ref MPSImageGaussianPyramid
- *
- *              The Gaussian image pyramid kernel is enqueued as an in-place operation using
- *              @ref MPSUnaryImageKernel::encodeToCommandBuffer:inPlaceTexture:fallbackCopyAllocator:
- *              and all mipmap levels after level=1, present in the provided image are filled using
- *              the provided filtering kernel. The fallbackCopyAllocator parameter is not used.
- *
- *              The Gaussian image pyramid filter ignores @ref clipRect and @ref offset and fills
- *              the mipmap levels in their entirety.
- *
- *  @note       Make sure your texture type is compatible with mipmapping and supports texture views
- *                  (see @ref MTLTextureUsagePixelFormatView).
- *  @note       Recall the size of the nth mipmap level:
- *              @code
- *                  w_n = max(1, floor(w_0 / 2^n))
- *                  h_n = max(1, floor(h_0 / 2^n)),
- *              @endcode
- *              where w_0, h_0 are the zeroth level width and height, i.e. the image dimensions themselves.
- */
-
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface  MPSImagePyramid : MPSUnaryImageKernel
-
-/*! @abstract   Initialize a downwards 5-tap image pyramid with the default filter kernel and device
- *  @param      device  The device the filter will run on
- *
- *  @discussion The filter kernel is the outer product of w = [ 1/16,  1/4,  3/8,  1/4,  1/16 ]^T, with itself
- *
- *  @return     A valid object or nil, if failure.
- */
-
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device;
-
-
-/*! @abstract   Initialize a downwards 5-tap image pyramid with a central weight parameter and device
- *  @param      device  The device the filter will run on
- *  @param      centerWeight Defines form of the filter-kernel  through the outer product ww^T, where
- *              w = [ (1/4 - a/2),  1/4,  a,  1/4,  (1/4 - a/2) ]^T and 'a' is centerWeight.
- *
- *  @return     A valid object or nil, if failure.
- */
-
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                          centerWeight: (float) centerWeight;
-
-
-/*! @abstract   Initialize a downwards n-tap pyramid with a custom filter kernel and device
- *  @param      device  The device the filter will run on
- *  @param      kernelWidth The width of the filtering kernel. See @ref MPSImageConvolution.
- *  @param      kernelHeight    The height of the filtering kernel. See @ref MPSImageConvolution.
- *  @param      kernelWeights   A pointer to an array of kernelWidth * kernelHeight values to be
- *                              used as the kernel.
- *                              These are in row major order. See @ref MPSImageConvolution.
- *
- *  @return     A valid object or nil, if failure.
- */
-
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                           kernelWidth: (NSUInteger)kernelWidth
-                          kernelHeight: (NSUInteger)kernelHeight
-                               weights: (const float*__nonnull)kernelWeights NS_DESIGNATED_INITIALIZER;
-
-
-/*! @property kernelHeight
- *  @abstract  The height of the filter window. Must be an odd number.
- */
-@property (readonly, nonatomic)   NSUInteger  kernelHeight;
-
-/*! @property kernelWidth
- *  @abstract  The width of the filter window. Must be an odd number.
- */
-@property (readonly, nonatomic)   NSUInteger  kernelWidth;
-
-
-
-@end
-
-/*!
- *  @class      MPSImageGaussianPyramid
- *  @discussion The Gaussian image pyramid is constructed as follows:
- *              First the zeroth level mipmap of the input image is filtered with the specified
- *              convolution kernel.
- *              The default convolution filter kernel is
- *              @code
- *                  k = w w^T, where w = [ 1/16,  1/4,  3/8,  1/4,  1/16 ]^T,
- *              @endcode
- *              but the user may also tweak this kernel with a @ref centerWeight parameter 'a':
- *              @code
- *                  k = w w^T, where w = [ (1/4 - a/2),  1/4,  a,  1/4,  (1/4 - a/2) ]^T
- *              @endcode
- *              or the user can provide a completely custom kernel. After this the image is downsampled by
- *              removing all odd rows and columns, which defines the next level in the Gaussian image pyramid.
- *              This procedure is continued until every mipmap level present in the image texture is
- *              filled with a pyramid level.
- *
- *              In the case of the Gaussian pyramid, the user must run the operation in-place using:
- *              @ref MPSUnaryImageKernel::encodeToCommandBuffer:inPlaceTexture:fallbackCopyAllocator:,
- *              where the fallback allocator is ignored.
- */
-
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface  MPSImageGaussianPyramid : MPSImagePyramid
-@end
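
A minimal sketch of the in-place encode described above, assuming `device`, `commandBuffer`, and a mipmapped `mipmappedTexture` created with MTLTextureUsagePixelFormatView:

```objc
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>

// The texture must be mipmapped and support texture views
// (MTLTextureUsagePixelFormatView), per the notes above.
MPSImageGaussianPyramid *pyramid =
    [[MPSImageGaussianPyramid alloc] initWithDevice:device
                                       centerWeight:0.375f]; // 3/8 matches the default kernel
__strong id<MTLTexture> tex = mipmappedTexture;
[pyramid encodeToCommandBuffer:commandBuffer
                inPlaceTexture:&tex
         fallbackCopyAllocator:nil];   // the allocator is ignored for pyramids
```
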
-
-
-#endif    /* MPS_MSImageConvolution_h */
diff -ruN /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageHistogram.h /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageHistogram.h
--- /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageHistogram.h	2016-09-23 20:26:03.000000000 -0400
+++ /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageHistogram.h	1969-12-31 19:00:00.000000000 -0500
@@ -1,273 +0,0 @@
-/*!
- *  @header MPSImageHistogram.h
- *  @framework MetalPerformanceShaders.framework
- *
- *  @copyright Copyright (c) 2015 Apple Inc. All rights reserved.
- *  @abstract MetalPerformanceShaders histogram filters
- */
-
-#ifndef MPS_MPSImageHistogram_h
-#define MPS_MPSImageHistogram_h
-
-#include <MetalPerformanceShaders/MPSImageKernel.h>
-#include <simd/simd.h>
-
-/*!
- *  @brief      Specifies information to compute the histogram for channels of an image.
- */
-typedef struct
-{
-    NSUInteger      numberOfHistogramEntries;   /**<  Specifies the number of histogram entries, or "bins", for each channel.  For example, if you want 256 histogram bins then numberOfHistogramEntries must be set to 256.  The value stored in each histogram bin is a 32-bit unsigned integer.  The size of the histogram buffer in which these bins will be stored should be >= numberOfHistogramEntries * sizeof(uint32_t) * number of channels in the image. numberOfHistogramEntries must be a power of 2, with a minimum of 256 bins.   */
-    BOOL            histogramForAlpha;          /**<  Specifies whether the histogram for the alpha channel should be computed or not. */
-    vector_float4   minPixelValue;              /**<  Specifies the minimum pixel value.  Any pixel value less than this will be clipped to this value (for the purposes of histogram calculation), and assigned to the first histogram entry. This minimum value is applied to each of the four channels separately. */
-    vector_float4   maxPixelValue;              /**<  Specifies the maximum pixel value.  Any pixel value greater than this will be clipped to this value (for the purposes of histogram calculation), and assigned to the last histogram entry. This maximum value is applied to each of the four channels separately. */
-} MPSImageHistogramInfo;
-
-/*!
- *  @class      MPSImageHistogram
- *  @discussion The MPSImageHistogram computes the histogram of an image.
- *              
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageHistogram : MPSKernel
-
-/*! @property   clipRectSource
- *  @abstract   The source rectangle to use when reading data.
- *  @discussion A MTLRegion that indicates which part of the source to read. If the clipRectSource does not lie
- *              completely within the source image, the intersection of the image bounds and clipRectSource will
- *              be used. The clipRectSource replaces the MPSUnaryImageKernel offset parameter for this filter.
- *              The latter is ignored.   Default: MPSRectNoClip, use the entire source texture.
- */
-@property (readwrite, nonatomic) MTLRegion clipRectSource;
-
-/*! @property   zeroHistogram
- *  @abstract   Zero-initialize the histogram results
- *  @discussion Indicates whether the memory region in which the histogram results are to be written in the
- *              histogram buffer is to be zero-initialized. Default: YES.
- */
-@property (readwrite, nonatomic) BOOL zeroHistogram;
-
-/*! @property   histogramInfo
- *  @abstract   Return a structure describing the histogram content
- *  @discussion Returns a MPSImageHistogramInfo structure describing the format of the
- *              histogram.
- */
-@property (readonly, nonatomic)  MPSImageHistogramInfo histogramInfo;
-
-/*!
- *  @abstract Specifies information to compute the histogram for channels of an image.
- *  @param    device            The device the filter will run on
- *  @param    histogramInfo     Pointer to the MPSHistogramInfo struct
- *  @return     A valid MPSImageHistogram object or nil, if failure.
- */
-
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                         histogramInfo: (const MPSImageHistogramInfo * __nonnull) histogramInfo     NS_DESIGNATED_INITIALIZER;
-
-
-/*!
- *  @abstract Encode the filter to a command buffer using a MTLComputeCommandEncoder.
- *  @discussion The filter will not begin to execute until after the command
- *  buffer has been enqueued and committed.
- *
- *
- *  @param  commandBuffer           A valid MTLCommandBuffer.
- *  @param  source                  A valid MTLTexture containing the source image for the filter
- *  @param  histogram               A valid MTLBuffer to receive the histogram results.
- *  @param  histogramOffset         Byte offset into histogram buffer at which to write the histogram results. Must be a multiple of 32 bytes.
- *                                  The histogram results / channel are stored together.  The number of channels for which
- *                                  histogram results are stored is determined by the number of channels in the image.
- *                                  If histogramInfo.histogramForAlpha is false and the source image is RGBA then only histogram
- *                                  results for RGB channels are stored.
- */
--(void) encodeToCommandBuffer: (nonnull id <MTLCommandBuffer>) commandBuffer
-                sourceTexture: (nonnull id <MTLTexture>) source
-                    histogram: (nonnull id <MTLBuffer>) histogram
-              histogramOffset: (NSUInteger) histogramOffset;
-
-
-/*!
- *  @abstract   The amount of space in the output MTLBuffer the histogram will take up.
- *  @discussion This convenience function calculates the minimum amount of space
- *              needed in the output histogram for the results.  The MTLBuffer should
- *              be at least this length, longer if histogramOffset is non-zero.
- *  @param      sourceFormat      The MTLPixelFormat of the source image. This is
- *                                the source parameter of -encodeToCommandBuffer:
- *                                sourceTexture:histogram:histogramOffset
- *  @return     The number of bytes needed to store the result histograms.
- */
--(size_t) histogramSizeForSourceFormat: (MTLPixelFormat) sourceFormat;
-
-
-@end  /* MPSImageHistogram */
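
A minimal sketch, assuming `device`, `commandBuffer`, and `srcTexture` exist; the bin count and value range are illustrative:

```objc
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>

// 256 bins per channel over the unorm [0,1] range, alpha excluded.
MPSImageHistogramInfo info = {
    .numberOfHistogramEntries = 256,
    .histogramForAlpha        = NO,
    .minPixelValue            = (vector_float4){ 0, 0, 0, 0 },
    .maxPixelValue            = (vector_float4){ 1, 1, 1, 1 },
};
MPSImageHistogram *histogram =
    [[MPSImageHistogram alloc] initWithDevice:device histogramInfo:&info];

// Size the result buffer with the convenience method above.
size_t length = [histogram histogramSizeForSourceFormat:srcTexture.pixelFormat];
id<MTLBuffer> results = [device newBufferWithLength:length
                                            options:MTLResourceStorageModePrivate];
[histogram encodeToCommandBuffer:commandBuffer
                   sourceTexture:srcTexture
                       histogram:results
                 histogramOffset:0];
```
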
-
-/*!
- *  @class      MPSImageHistogramEqualization
- *  @discussion The MPSImageHistogramEqualization equalizes the histogram of an image.
- *              The process is divided into three steps. 
- *
- *              -# Call -initWithDevice:histogramInfo:   This creates a MPSImageHistogramEqualization
- *              object.   It is done when the method returns.
- *  
- *              -# Call -encodeTransform:sourceTexture:histogram:histogramOffset:  This creates a privately held
- *              image transform (i.e. a cumulative distribution function of the histogram) which will be used to 
- *              equalize the distribution of the histogram of the source image. This process runs on a MTLCommandBuffer
- *              when it is committed to a MTLCommandQueue. It must complete before the next step can be run.
- *              It may be performed on the same MTLCommandBuffer.  The histogram argument specifies the histogram
- *              buffer which contains the histogram values for sourceTexture.  The sourceTexture argument is used by
- *              encodeTransform to determine the number of channels and therefore which histogram data in histogram 
- *              buffer to use. The histogram for sourceTexture must have been computed either on the CPU or using 
- *              the MPSImageHistogram kernel.
- *
- *              -# Call -encodeToCommandBuffer:sourceTexture:destinationTexture: to read data from
- *              sourceTexture, apply the equalization transform to it and write to destination texture.
- *              This step is also done on the GPU on a MTLCommandQueue.
- *
- *              You can reuse the same equalization transform on other images to perform the
- *              same transform on those images. (Since their distribution is probably different,
- *              they will probably not be equalized by it.) This filter usually will not be able 
- *              to work in place.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageHistogramEqualization : MPSUnaryImageKernel
-
-/*! @property   histogramInfo
- *  @abstract   Return a structure describing the histogram content
- *  @discussion Returns a MPSImageHistogramInfo structure describing the format of the
- *              histogram.
- */
-@property (readonly, nonatomic)  MPSImageHistogramInfo histogramInfo;
-
-/*!
- *  @abstract Specifies information about the histogram for the channels of an image.
- *  @param    device            The device the filter will run on
- *  @param    histogramInfo     Pointer to the MPSHistogramInfo struct
- *  @return     A valid MPSImageHistogramEqualization object or nil, if failure.
- */
-
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                         histogramInfo: (const MPSImageHistogramInfo * __nonnull) histogramInfo     NS_DESIGNATED_INITIALIZER;
-
-/*!
- *  @abstract Encode the transform function to a command buffer using a MTLComputeCommandEncoder.
- *            The transform function computes the equalization lookup table.
- *  @discussion The transform function will not begin to execute until after the command
- *              buffer has been enqueued and committed.  This step will need to be repeated
- *              with the new MPSKernel if -copyWithZone:device or -copyWithZone: is called.
- *              The transform is stored as internal state to the object. You still need to 
- *              call -encodeToCommandBuffer:sourceTexture:destinationTexture: afterward
- *              to apply the transform to produce a result texture.
- *
- *  @param  commandBuffer   A valid MTLCommandBuffer.
- *  @param  source          A valid MTLTexture containing the source image for the filter.
- *  @param  histogram       A valid MTLBuffer containing the histogram results for an image.  This filter
- *                          will use these histogram results to generate the cumulative histogram for equalizing
- *                          the image.  The histogram results / channel are stored together.  The number of channels
- *                          for which histogram results are stored is determined by the number of channels in the image.
- *                          If histogramInfo.histogramForAlpha is false and the source image is RGBA then only histogram
- *                          results for RGB channels are stored.
- *  @param  histogramOffset A byte offset into the histogram MTLBuffer where the histogram starts. Must conform to
- *                          alignment requirements for [MTLComputeCommandEncoder setBuffer:offset:atIndex:] offset
- *                          parameter.
- */
- -(void) encodeTransformToCommandBuffer: (nonnull id <MTLCommandBuffer>) commandBuffer
-                          sourceTexture: (nonnull id <MTLTexture>) source
-                              histogram: (nonnull id <MTLBuffer>) histogram
-                        histogramOffset: (NSUInteger) histogramOffset;
-
-
-
-@end  /* MPSImageHistogramEqualization */
-
-/*!
- *  @class      MPSImageHistogramSpecification
- *  @discussion The MPSImageHistogramSpecification performs a histogram specification operation on an image.
- *              It is a generalized version of histogram equalization operation.  The histogram specificaiton filter
- *              converts the image so that its histogram matches the desired histogram.
- *
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageHistogramSpecification : MPSUnaryImageKernel
-
-/*! @property   histogramInfo
- *  @abstract   Return a structure describing the histogram content
- *  @discussion Returns a MPSImageHistogramInfo structure describing the format of the
- *              histogram.
- */
-@property (readonly, nonatomic)  MPSImageHistogramInfo histogramInfo;
-
-/*!
- *  @abstract Specifies information about the histogram for the channels of an image.
- *  @discussion The MPSImageHistogramSpecification applies a transfor to convert the histogram 
- *              to a specified histogram. The process is divided into three steps:
- *
- *              -# Call -initWithDevice:histogramInfo:   This creates a MPSImageHistogramSpecification
- *              object.  It is done when the method returns.
- *
- *              -# Call -encodeTransform:sourceTexture:sourceHistogram:sourceHistogramOffset:desiredHistogram:
- *              desiredHistogramOffset: This creates a privately held image transform which will convert the
- *              the distribution of the source histogram to the desired histogram. This process runs on a 
- *              MTLCommandBuffer when it is committed to a MTLCommandQueue. It must complete before the next 
- *              step can be run. It may be performed on the same MTLCommandBuffer.  The sourceTexture argument 
- *              is used by encodeTransform to determine the number of channels and therefore which histogram data 
- *              in sourceHistogram buffer to use. The sourceHistogram and desiredHistogram must have been computed 
- *              either on the CPU or using the MPSImageHistogram kernel
- *
- *              -# Call -encodeToCommandBuffer:sourceTexture:destinationTexture: to read data from
- *              sourceTexture, apply the transform to it and write to destination texture.
- *              This step is also done on the GPU on a MTLCommandQueue.
- *
- *              You can reuse the same specification transform on other images to perform the
- *              same transform on those images. (Since their starting distribution is probably
- *              different, they will probably not arrive at the same distribution as the desired
- *              histogram.) This filter usually will not be able to work in place.
- *
- *  @param    device            The device the filter will run on
- *  @param    histogramInfo     Pointer to the MPSHistogramInfo struct
- *  @return     A valid MPSImageHistogramSpecification object or nil, if failure.
- */
-
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                         histogramInfo: (const MPSImageHistogramInfo * __nonnull) histogramInfo     NS_DESIGNATED_INITIALIZER;
-
-
-/*!
- *  @abstract Encode the transform function to a command buffer using a MTLComputeCommandEncoder.
- *            The transform function computes the specification lookup table.
- *  @discussion The transform function will not begin to execute until after the command
- *              buffer has been enqueued and committed. This step will need to be repeated
- *              with the new MPSKernel if -copyWithZone:device or -copyWithZone: is called.
- *
- *  @param  commandBuffer   A valid MTLCommandBuffer.
- *  @param  source          A valid MTLTexture containing the source image for the filter.
- *  @param  sourceHistogram A valid MTLBuffer containing the histogram results for the source image.  This filter
- *                          will use these histogram results to generate the cumulative histogram for equalizing
- *                          the image.  The histogram results / channel are stored together.  The number of channels
- *                          for which histogram results are stored is determined by the number of channels in the image.
- *                          If histogramInfo.histogramForAlpha is false and the source image is RGBA then only histogram
- *                          results for RGB channels are stored.
- *  @param  sourceHistogramOffset   A byte offset into the sourceHistogram MTLBuffer where the histogram starts. Must conform to
- *                                  alignment requirements for [MTLComputeCommandEncoder setBuffer:offset:atIndex:] offset
- *                                  parameter.
- *  @param  desiredHistogram    A valid MTLBuffer containing the desired histogram results for the source image.
- *                          The histogram results / channel are stored together.  The number of channels
- *                          for which histogram results are stored is determined by the number of channels in the image.
- *                          If histogramInfo.histogramForAlpha is false and the source image is RGBA then only histogram
- *                          results for RGB channels are stored.
- *  @param  desiredHistogramOffset  A byte offset into the desiredHistogram MTLBuffer where the histogram starts. Must conform to
- *                                  alignment requirements for [MTLComputeCommandEncoder setBuffer:offset:atIndex:] offset
- *                                  parameter.
- */
--(void) encodeTransformToCommandBuffer: (nonnull id <MTLCommandBuffer>) commandBuffer
-                         sourceTexture: (nonnull id <MTLTexture>) source
-                       sourceHistogram: (nonnull id <MTLBuffer>) sourceHistogram
-                 sourceHistogramOffset: (NSUInteger) sourceHistogramOffset
-                      desiredHistogram: (nonnull id <MTLBuffer>) desiredHistogram
-                desiredHistogramOffset: (NSUInteger) desiredHistogramOffset;
-
-
-
-@end  /* MPSImageHistogramSpecification */
-
-#endif  /* MPS_MSImageHistogram_h */
-
diff -ruN /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageIntegral.h /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageIntegral.h
--- /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageIntegral.h	2016-08-01 16:43:15.000000000 -0400
+++ /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageIntegral.h	1969-12-31 19:00:00.000000000 -0500
@@ -1,52 +0,0 @@
-/*!
- *  @header MPSImageIntegral.h
- *  @framework MetalPerformanceShaders.framework
- *
- *  @copyright Copyright (c) 2015 Apple Inc. All rights reserved.
- *  @abstract MetalPerformanceShaders integral filters
- */
-
-#ifndef MPS_MPSImageIntegral_h
-#define MPS_MPSImageIntegral_h
-
-#include <MetalPerformanceShaders/MPSImageKernel.h>
-
-/*!
- *  @class      MPSImageIntegral
- *  @discussion The MPSImageIntegral calculates the sum of pixels over a specified region in the image.
- *              The value at each position is the sum of all pixels in a source image rectangle, sumRect:
- *
- *                  sumRect.origin = MPSUnaryImageKernel.offset
- *                  sumRect.size = dest_position - MPSUnaryImageKernel.clipRect.origin
- *
- *              If the channels in the source image are normalized, half-float or floating values,
- *              the destination image is recommended to be a 32-bit floating-point image.
- *              If the channels in the source image are integer values, it is recommended that
- *              an appropriate 32-bit integer image destination format is used.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageIntegral : MPSUnaryImageKernel
-
-@end    /* MPSImageIntegral */
-
-
-/*!
- *  @class      MPSImageIntegralOfSquares
- *  @discussion The MPSImageIntegralOfSquares calculates the sum of squared pixels over a specified region in the image.
- *              The value at each position is the sum of all squared pixels in a source image rectangle, sumRect:
- *
- *                  sumRect.origin = MPSUnaryImageKernel.offset
- *                  sumRect.size = dest_position - MPSUnaryImageKernel.clipRect.origin
- *
- *              If the channels in the source image are normalized, half-float or floating values,
- *              the destination image is recommended to be a 32-bit floating-point image.
- *              If the channels in the source image are integer values, it is recommended that
- *              an appropriate 32-bit integer image destination format is used.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageIntegralOfSquares : MPSUnaryImageKernel
-
-@end    /* MPSImageIntegralOfSquares */
-
-#endif  /* MPS_MSImageIntegral_h */
-
diff -ruN /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageKernel.h /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageKernel.h
--- /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageKernel.h	2016-08-01 16:43:15.000000000 -0400
+++ /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageKernel.h	1969-12-31 19:00:00.000000000 -0500
@@ -1,442 +0,0 @@
-/*!
- *  @header MPSImageKernel.h
- *  @framework MetalPerformanceShaders.framework
- *
- *  @copyright Copyright (c) 2015 Apple Inc. All rights reserved.
- *  @abstract MetalPerformanceShaders filter base classes
- */
-
-#ifndef _MPS_MPSImageKernel_
-#define _MPS_MPSImageKernel_
-
-#import <MetalPerformanceShaders/MPSKernel.h>
-
-/*!
- *  @class      MPSUnaryImageKernel
- *  @dependency This depends on Metal.framework
- *  @discussion A MPSUnaryImageKernel consumes one MTLTexture and produces one MTLTexture.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface MPSUnaryImageKernel : MPSKernel
-
-
-/*! @property   offset
- *  @abstract   The position of the destination clip rectangle origin relative to the source buffer.
- *  @discussion The offset is defined to be the position of clipRect.origin in source coordinates.
- *              Default: {0,0,0}, indicating that the top left corners of the clipRect and source image align.
- *
- *              See Also: @ref subsubsection_mpsoffset
- */
-@property (readwrite, nonatomic) MPSOffset                offset;
-
-/*! @property   clipRect
- *  @abstract   An optional clip rectangle to use when writing data. Only the pixels in the rectangle will be overwritten.
- *  @discussion A MTLRegion that indicates which part of the destination to overwrite. If the clipRect does not lie
- *              completely within the destination image, the intersection between clip rectangle and destination bounds is
- *              used.   Default: MPSRectNoClip (MPSKernel::MPSRectNoClip) indicating the entire image.
- *
- *              See Also: @ref subsubsection_clipRect
- */
-@property (readwrite, nonatomic) MTLRegion               clipRect;
-
-
-/*! @property   edgeMode
- *  @abstract   The MPSImageEdgeMode to use when texture reads stray off the edge of an image
- *  @discussion Most MPSKernel objects can read off the edge of the source image. This can happen because of a
- *              negative offset property, because the offset + clipRect.size is larger than the
- *              source image or because the filter looks at neighboring pixels, such as a Convolution
- *              or morphology filter.   Default: usually MPSImageEdgeModeZero. (Some MPSKernel types default
- *              to MPSImageEdgeModeClamp, because MPSImageEdgeModeZero is either not supported or
- *              would produce unexpected results.)
- *
- *              See Also: @ref subsubsection_edgemode
- */
-@property (readwrite, nonatomic) MPSImageEdgeMode        edgeMode;
-
-
-
-/*!
- *  This method attempts to apply the MPSKernel in place on a texture.
- *
- *          In-place operation means that the same texture is used both to hold the input
- *          image and the results. Operating in-place can be an excellent way to reduce
- *          resource utilization, and save time and energy. While simple Metal kernels can
- *          not operate in place because textures can not be readable and writable at the
- *          same time, some MPSKernels can operate in place because they use
- *          multi-pass algorithms. Whether a MPSKernel can operate in-place can
- *          depend on current hardware, operating system revision and the parameters
- *          and properties passed to it. You should never assume that a MPSKernel will
- *          continue to work in place, even if you have observed it doing so before.
- *
- *  If the operation succeeds in-place, YES is returned.  If the in-place operation
- *  fails and no copyAllocator is provided, then NO is returned. Without a fallback
- *  MPSCopyAllocator, in neither case is the pointer held at *texture modified.
- *
- *  Failure during in-place operation is very common and will occur inconsistently across
- *  different hardware platforms and OS releases. Without a fallback MPSCopyAllocator,
- *  operating in place may require significant error handling code to accompany each
- *  call to -encodeToCommandBuffer:..., complicating your code.
- *
- *  You may find it simplifies your code to provide a fallback MPSCopyAllocator so
- *  that the operation can proceed reliably even when it can not proceed in-place.
- *  When an in-place filter fails, the MPSCopyAllocator (if any) will be
- *  invoked to create a new texture in which to write the results, allowing the
- *  filter to proceed reliably out-of-place. The original texture will be released,
- *  replaced with a pointer to the new texture and YES will be returned. If the
- *  allocator returns an invalid texture, it is released, *texture remains unmodified
- *  and NO is returned.  Please see the MPSCopyAllocator definition for a sample allocator
- *  implementation.
- *
- *  Sample usage with a copy allocator:
- *  @code
- *  id <MTLTexture> inPlaceTex = ...;
- *  MPSImageSobel *sobelFiler = [[MPSImageSobel alloc] initWithDevice: my_device];
- *
- *  // With a fallback MPSCopyAllocator, failure should only occur in exceptional
- *  // conditions such as MTLTexture allocation failure or programmer error.
- *  // That is, the operation is roughly as robust as the MPSCopyAllocator.
- *  // Depending on the quality of that, we might decide we are justified here
- *  // in not checking the return value.
- *  [sobelFilter encodeToCommandBuffer: my_command_buffer
- *                      inPlaceTexture: &inPlaceTex  // may be replaced!
- *               fallbackCopyAllocator: myAllocator];
- *
- *  // If myAllocator was not called:
- *  //
- *  //      inPlaceTex holds the original texture with the result pixels in it
- *  //
- *  // else,
- *  //
- *  //      1) myAllocator creates a new texture.
- *  //      2) The new texture pixel data is overwritten by MPSUnaryImageKernel.
- *  //      3) The old texture passed in *inPlaceTex is released once.
- *  //      4) *inPlaceTex = the new texture
- *  //
- *  // In either case, the caller should now hold one reference to the texture now held in
- *  // inPlaceTex, whether it was replaced or not. Most of the time that means that nothing
- *  // further needs to be done here, and you can proceed to the next image encoding operation.
- *  // However, if other agents held references to the original texture, they still hold them
- *  // and may need to be alerted that the texture has been replaced so that they can retain
- *  // the new texture and release the old one.
- *
- *  [sobelFilter release];  // if not ARC, clean up the MPSImageSobel object
- *  @endcode
- *
- *  Note: Image filters that look at neighboring pixel values may actually consume more
- *        memory when operating in place than out of place. Many such operations are
- *        tiled internally to save intermediate texture storage, but can not tile when
- *        operating in place. The memory savings for tiling is however very short term,
- *        typically the lifetime of the MTLCommandBuffer.
- *
- *  @abstract   Attempt to apply a MPSKernel to a texture in place.
- *  @param      commandBuffer       A valid MTLCommandBuffer to receive the encoded filter
- *  @param      texture             A pointer to a valid MTLTexture containing source image.
- *                                  On success, the image contents and possibly texture itself
- *                                  will be replaced with the result image.
- *  @param      copyAllocator       An optional block to allocate a new texture to hold the
- *                                  results, in case in-place operation is not possible. The
- *                                  allocator may use a different MTLPixelFormat or size than
- *                                  the original texture. You may enqueue operations on the
- *                                  provided MTLCommandBuffer using the provided
- *                                  MTLComputeCommandEncoder to initialize the texture contents.
- *  @return     On success, YES is returned. The texture may have been replaced with a new
- *              texture if a copyAllocator was provided.  On failure, NO is returned. The
- *              texture is unmodified.
- */
--(BOOL)    encodeToCommandBuffer: (nonnull id <MTLCommandBuffer>)commandBuffer
-                  inPlaceTexture: (__nonnull id <MTLTexture> __strong * __nonnull) texture
-           fallbackCopyAllocator: (nullable MPSCopyAllocator) copyAllocator
-                MPS_SWIFT_NAME(encode(commandBuffer:inPlaceTexture:fallbackCopyAllocator:));
-
-
-/*!
- *  @abstract   Encode a MPSKernel into a command Buffer.  The operation shall proceed out-of-place.
- *  @param      commandBuffer       A valid MTLCommandBuffer to receive the encoded filter
- *  @param      sourceTexture       A valid MTLTexture containing the source image.
- *  @param      destinationTexture  A valid MTLTexture to be overwritten by result image. DestinationTexture may not alias sourceTexture.
- */
--(void) encodeToCommandBuffer: (nonnull id <MTLCommandBuffer>) commandBuffer
-                sourceTexture: (nonnull id <MTLTexture>) sourceTexture
-           destinationTexture: (nonnull id <MTLTexture>) destinationTexture
-            MPS_SWIFT_NAME(encode(commandBuffer:sourceTexture:destinationTexture:));
-
-
-/*!
- *  sourceRegionForDestinationSize: is used to determine which region of the
- *  sourceTexture will be read by encodeToCommandBuffer:sourceTexture:destinationTexture
- *  (and similar) when the filter runs. This information may be needed if the
- *  source image is broken into multiple textures.  The size of the full
- *  (untiled) destination image is provided. The region of the full (untiled)
- *  source image that will be read is returned. You can then piece together an
- *  appropriate texture containing that information for use in your tiled context.
- *
- *  The function will consult the MPSUnaryImageKernel offset and clipRect parameters, 
- *  to determine the full region read by the function. Other parameters such as
- *  sourceClipRect, kernelHeight and kernelWidth will be consulted as necessary.
- *  All properties should be set to intended values prior to calling 
- *  sourceRegionForDestinationSize:.
- *
- *      Caution: This function operates using global image coordinates, but
- *      -encodeToCommandBuffer:... uses coordinates local to the source and
- *      destination image textures. Consequently, the offset and clipRect 
- *      attached to this object will need to be updated using a global to 
- *      local coordinate transform before -encodeToCommandBuffer:... is 
- *      called.
- *
- *  @abstract   Determine the region of the source texture that will be read for a encode operation 
- *  @param      destinationSize The size of the full virtual destination image.
- *  @return     The area in the virtual source image that will be read.
- */
--(MPSRegion) sourceRegionForDestinationSize: (MTLSize) destinationSize
-            MPS_SWIFT_NAME(sourceRegion(destinationSize:));
-
-@end
-
-
-
-/*!
- *  @class      MPSBinaryImageKernel
- *  @dependency This depends on Metal.framework
- *  @discussion A MPSBinaryImageKernel consumes two MTLTextures and produces one MTLTexture.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface MPSBinaryImageKernel : MPSKernel
-
-/*! @property   primaryOffset
- *  @abstract   The position of the destination clip rectangle origin relative to the primary source buffer.
- *  @discussion The offset is defined to be the position of clipRect.origin in source coordinates.
- *              Default: {0,0,0}, indicating that the top left corners of the clipRect and primary source image align.
- *
- *              See Also: @ref subsubsection_mpsoffset
- */
-@property (readwrite, nonatomic) MPSOffset                primaryOffset;
-
-/*! @property   secondaryOffset
- *  @abstract   The position of the destination clip rectangle origin relative to the secondary source buffer.
- *  @discussion The offset is defined to be the position of clipRect.origin in source coordinates.
- *              Default: {0,0,0}, indicating that the top left corners of the clipRect and primary source image align.
- *
- *              See Also: @ref subsubsection_mpsoffset
- */
-@property (readwrite, nonatomic) MPSOffset                secondaryOffset;
-
-
-/*! @property   primaryEdgeMode
- *  @abstract   The MPSImageEdgeMode to use when texture reads stray off the edge of the primary source image
- *  @discussion Most MPSKernel objects can read off the edge of a source image. This can happen because of a
- *              negative offset property, because the offset + clipRect.size is larger than the
- *              source image or because the filter looks at neighboring pixels, such as a Convolution
- *              or morphology filter.   Default: usually MPSImageEdgeModeZero. (Some MPSKernel types default
- *              to MPSImageEdgeModeClamp, because MPSImageEdgeModeZero is either not supported or
- *              would produce unexpected results.)
- *
- *              See Also: @ref subsubsection_edgemode
- */
-@property (readwrite, nonatomic) MPSImageEdgeMode        primaryEdgeMode;
-
-/*! @property   secondaryEdgeMode
- *  @abstract   The MPSImageEdgeMode to use when texture reads stray off the edge of the secondary source image
- *  @discussion Most MPSKernel objects can read off the edge of a source image. This can happen because of a
- *              negative offset property, because the offset + clipRect.size is larger than the
- *              source image or because the filter looks at neighboring pixels, such as a Convolution
- *              or morphology filter.   Default: usually MPSImageEdgeModeZero. (Some MPSKernel types default
- *              to MPSImageEdgeModeClamp, because MPSImageEdgeModeZero is either not supported or
- *              would produce unexpected results.)
- *
- *              See Also: @ref subsubsection_edgemode
- */
-@property (readwrite, nonatomic) MPSImageEdgeMode        secondaryEdgeMode;
-
-/*! @property   clipRect
- *  @abstract   An optional clip rectangle to use when writing data. Only the pixels in the rectangle will be overwritten.
- *  @discussion A MTLRegion that indicates which part of the destination to overwrite. If the clipRect does not lie
- *              completely within the destination image, the intersection between clip rectangle and destination bounds is
- *              used.   Default: MPSRectNoClip (MPSKernel::MPSRectNoClip) indicating the entire image.
- *
- *              See Also: @ref subsubsection_clipRect
- */
-@property (readwrite, nonatomic) MTLRegion               clipRect;
-
-
-
-
-/*!
- *  This method attempts to apply the MPSKernel in place on a texture.
- *
- *          In-place operation means that the same texture is used both to hold the input
- *          image and the results. Operating in-place can be an excellent way to reduce
- *          resource utilization, and save time and energy. While simple Metal kernels can
- *          not operate in place because textures can not be readable and writable at the
- *          same time, some MPSKernels can operate in place because they use
- *          multi-pass algorithms. Whether a MPSKernel can operate in-place can
- *          depend on current hardware, operating system revision and the parameters
- *          and properties passed to it. You should never assume that a MPSKernel will
- *          continue to work in place, even if you have observed it doing so before.
- *
- *  If the operation succeeds in-place, YES is returned.  If the in-place operation
- *  fails and no copyAllocator is provided, then NO is returned. In neither
- *  case is the pointer held at *texture modified.
- *
- *  Failure during in-place operation is common. You may find it simplifies your
- *  code to provide a copyAllocator. When an in-place filter fails, your
- *  copyAllocator will be invoked to create a new texture in which to write
- *  the results, allowing the filter to proceed reliably out-of-place. The
- *  original texture will be released, replaced with a pointer to the new texture
- *  and YES will be returned. If the allocator returns an invalid texture, it is
- *  released, *texture remains unmodified and NO is returned.  Please see the
- *  MPSCopyAllocator definition for a sample allocator implementation.
- *
- *  Note: Image filters that look at neighboring pixel values may actually consume more
- *        memory when operating in place than out of place. Many such operations are
- *        tiled internally to save intermediate texture storage, but can not tile when
- *        operating in place. The memory savings for tiling is however very short term,
- *        typically the lifetime of the MTLCommandBuffer.
- *
- *  @abstract   Attempt to apply a MPSKernel to a texture in place.
- *  @param      commandBuffer           A valid MTLCommandBuffer to receive the encoded filter
- *  @param      primaryTexture          A pointer to a valid MTLTexture containing the
- *                                      primary source image. It will not be overwritten.
- *  @param      inPlaceSecondaryTexture A pointer to a valid MTLTexture containing secondary image.
- *                                      On success, the image contents and possibly texture itself
- *                                      will be replaced with the result image.
- *  @param      copyAllocator           An optional block to allocate a new texture to hold the
- *                                      results, in case in-place operation is not possible. The
- *                                      allocator may use a different MTLPixelFormat or size than
- *                                      the original texture. You may enqueue operations on the
- *                                      provided MTLCommandBuffer using the provided
- *                                      MTLComputeCommandEncoder to initialize the texture contents.
- *  @return     On success, YES is returned. The texture may have been replaced with a new
- *              texture if a copyAllocator was provided.  On failure, NO is returned. The
- *              texture is unmodified.
- */
--(BOOL)    encodeToCommandBuffer: (nonnull id <MTLCommandBuffer>)commandBuffer
-                  primaryTexture: (nonnull id <MTLTexture>) primaryTexture
-         inPlaceSecondaryTexture: (__nonnull id <MTLTexture> __strong * __nonnull) inPlaceSecondaryTexture
-           fallbackCopyAllocator: (nullable MPSCopyAllocator) copyAllocator;
-
-/*!
- *  This method attempts to apply the MPSKernel in place on a texture.
- *
- *          In-place operation means that the same texture is used both to hold the input
- *          image and the results. Operating in-place can be an excellent way to reduce
- *          resource utilization, and save time and energy. While simple Metal kernels can
- *          not operate in place because textures can not be readable and writable at the
- *          same time, some MPSKernels can operate in place because they use
- *          multi-pass algorithms. Whether a MPSKernel can operate in-place can
- *          depend on current hardware, operating system revision and the parameters
- *          and properties passed to it. You should never assume that a MPSKernel will
- *          continue to work in place, even if you have observed it doing so before.
- *
- *  If the operation succeeds in-place, YES is returned.  If the in-place operation
- *  fails and no copyAllocator is provided, then NO is returned. In neither
- *  case is the pointer held at *texture modified.
- *
- *  Failure during in-place operation is common. You may find it simplifies your
- *  code to provide a copyAllocator. When an in-place filter fails, your
- *  copyAllocator will be invoked to create a new texture in which to write
- *  the results, allowing the filter to proceed reliably out-of-place. The
- *  original texture will be released, replaced with a pointer to the new texture
- *  and YES will be returned. If the allocator returns an invalid texture, it is
- *  released, *texture remains unmodified and NO is returned.  Please see the
- *  MPSCopyAllocator definition for a sample allocator implementation.
- *
- *  Note: Image filters that look at neighboring pixel values may actually consume more
- *        memory when operating in place than out of place. Many such operations are
- *        tiled internally to save intermediate texture storage, but can not tile when
- *        operating in place. The memory savings for tiling is however very short term,
- *        typically the lifetime of the MTLCommandBuffer.
- *
- *  @abstract   Attempt to apply a MPSKernel to a texture in place.
- *  @param      commandBuffer           A valid MTLCommandBuffer to receive the encoded filter
- *  @param      inPlacePrimaryTexture   A pointer to a valid MTLTexture containing secondary image.
- *                                      On success, the image contents and possibly texture itself
- *                                      will be replaced with the result image.
- *  @param      secondaryTexture        A pointer to a valid MTLTexture containing the
- *                                      primary source image. It will not be overwritten.
- *  @param      copyAllocator           An optional block to allocate a new texture to hold the
- *                                      results, in case in-place operation is not possible. The
- *                                      allocator may use a different MTLPixelFormat or size than
- *                                      the original texture. You may enqueue operations on the
- *                                      provided MTLCommandBuffer using the provided
- *                                      MTLComputeCommandEncoder to initialize the texture contents.
- *  @return     On success, YES is returned. The texture may have been replaced with a new
- *              texture if a copyAllocator was provided.  On failure, NO is returned. The
- *              texture is unmodified.
- */
--(BOOL)    encodeToCommandBuffer: (nonnull id <MTLCommandBuffer>)commandBuffer
-           inPlacePrimaryTexture: (__nonnull id <MTLTexture> __strong * __nonnull) inPlacePrimaryTexture
-                secondaryTexture: (nonnull id <MTLTexture>) secondaryTexture
-           fallbackCopyAllocator: (nullable MPSCopyAllocator) copyAllocator;
-
-
-/*!
- *  @abstract   Encode a MPSKernel into a command Buffer.  The operation shall proceed out-of-place.
- *  @param      commandBuffer       A valid MTLCommandBuffer to receive the encoded filter
- *  @param      primaryTexture      A valid MTLTexture containing the primary source image.
- *  @param      secondaryTexture    A valid MTLTexture containing the secondary source image.
- *  @param      destinationTexture  A valid MTLTexture to be overwritten by result image. destinationTexture may not alias sourceTexture.
- */
--(void) encodeToCommandBuffer: (nonnull id <MTLCommandBuffer>) commandBuffer
-               primaryTexture: (nonnull id <MTLTexture>) primaryTexture
-             secondaryTexture: (nonnull id <MTLTexture>) secondaryTexture
-           destinationTexture: (nonnull id <MTLTexture>) destinationTexture;
-
-/*!
- *  primarySourceRegionForDestinationSize: is used to determine which region of the
- *  primaryTexture will be read by encodeToCommandBuffer:primaryTexture:secondaryTexture:destinationTexture
- *  (and in-place variants) when the filter runs. This information may be needed if the
- *  primary source image is broken into multiple textures.  The size of the full
- *  (untiled) destination image is provided. The region of the full (untiled)
- *  source image that will be read is returned. You can then piece together an
- *  appropriate texture containing that information for use in your tiled context.
- *
- *  The function will consult the MPSBinaryImageKernel primaryOffset and clipRect parameters,
- *  to determine the full region read by the function. Other parameters such as
- *  kernelHeight and kernelWidth will be consulted as necessary. All properties
- *  should be set to intended values prior to calling primarySourceRegionForDestinationSize:.
- *
- *      Caution: This function operates using global image coordinates, but
- *      -encodeToCommandBuffer:... uses coordinates local to the source and
- *      destination image textures. Consequently, the primaryOffset and clipRect
- *      attached to this object will need to be updated using a global to
- *      local coordinate transform before -encodeToCommandBuffer:... is
- *      called.
- *
- *  @abstract   Determine the region of the source texture that will be read for a encode operation
- *  @param      destinationSize     The size of the full virtual destination image.
- *  @return     The area in the virtual source image that will be read.
- */
--(MPSRegion) primarySourceRegionForDestinationSize: (MTLSize) destinationSize;
-
-/*!
- *  secondarySourceRegionForDestinationSize: is used to determine which region of the
- *  sourceTexture will be read by encodeToCommandBuffer:primaryTexture:secondaryTexture:destinationTexture
- *  (and in-place variants) when the filter runs. This information may be needed if the
- *  secondary source image is broken into multiple textures.  The size of the full
- *  (untiled) destination image is provided. The region of the full (untiled)
- *  secondary source image that will be read is returned. You can then piece together an
- *  appropriate texture containing that information for use in your tiled context.
- *
- *  The function will consult the MPSBinaryImageKernel secondaryOffset and clipRect
- *  parameters, to determine the full region read by the function. Other parameters
- *  such as kernelHeight and kernelWidth will be consulted as necessary.  All properties
- *  should be set to intended values prior to calling secondarySourceRegionForDestinationSize:.
- *
- *      Caution: This function operates using global image coordinates, but
- *      -encodeToCommandBuffer:... uses coordinates local to the source and
- *      destination image textures. Consequently, the secondaryOffset and clipRect
- *      attached to this object will need to be updated using a global to
- *      local coordinate transform before -encodeToCommandBuffer:... is
- *      called.
- *
- *  @abstract   Determine the region of the source texture that will be read for a encode operation
- *  @param      destinationSize     The size of the full virtual destination image.
- *  @return     The area in the virtual source image that will be read.
- */
--(MPSRegion) secondarySourceRegionForDestinationSize: (MTLSize) destinationSize;
-
-
-
-@end
-
-#endif /* defined(_MetalPerformanceShaders_MSImageKernel_) */
diff -ruN /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageMedian.h /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageMedian.h
--- /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageMedian.h	2016-08-01 16:43:15.000000000 -0400
+++ /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageMedian.h	1969-12-31 19:00:00.000000000 -0500
@@ -1,61 +0,0 @@
-/*!
- *  @header MPSImageMedian.h
- *  @framework MetalPerformanceShaders.framework
- *
- *  @copyright Copyright (c) 2015 Apple Inc. All rights reserved.
- *  @abstract MetalPerformanceShaders median filters
- */
-
-#ifndef MPS_MPSImageMedian_h
-#define MPS_MPSImageMedian_h
-
-#include <MetalPerformanceShaders/MPSImageKernel.h>
-
-
-/*!
- *  @class      MPSImageMedian
- *  @discussion The MPSImageMedian applies a median filter to an image.  A median filter finds the 
- *              median color value for each channel within a kernelDiameter x kernelDiameter 
- *              window surrounding the pixel of interest.  It is a common means of noise reduction
- *              and also as a smoothing filter with edge preserving qualities.
- *
- *              NOTE: The MPSImageMedian filter currently only supports images with <= 8 bits/channel.
- *
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageMedian : MPSUnaryImageKernel
-
-/*! @property   kernelDiameter
- *  @abstract   The diameter in pixels of the filter window.
- *  @discussion The median filter is applied to a kernelDiameter x kernelDiameter window
- *              of pixels centered on the corresponding source pixel for each destination
- *              pixel.  The kernel diameter must be an odd number.
- */
-@property (readonly, nonatomic) NSUInteger kernelDiameter;
-
-
-/*! @abstract   Initialize a filter for a particular kernel size and device
- *  @param      device          The device the filter will run on
- *  @param      kernelDiameter  Diameter of the median filter. Must be an odd number.
- *  @return     A valid object or nil, if failure.
- */
-
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                        kernelDiameter: (NSUInteger)kernelDiameter   NS_DESIGNATED_INITIALIZER;
-
-/* You must use initWithDevice:kernelDiameter: instead. */
--(nonnull instancetype) initWithDevice:(nonnull id<MTLDevice>)device    NS_UNAVAILABLE;
-
-
-/*! @abstract   The maximum diameter in pixels of the filter window supported by the median filter.
- */
-+(NSUInteger) maxKernelDiameter;
-
-/*! @abstract   The minimum diameter in pixels of the filter window supported by the median filter.
- */
-+(NSUInteger) minKernelDiameter;
-
-@end  /* MPSImageMedian */
-
-#endif  /* MPS_MSImageMedian_h */
-
diff -ruN /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageMorphology.h /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageMorphology.h
--- /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageMorphology.h	2016-09-23 20:26:03.000000000 -0400
+++ /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageMorphology.h	1969-12-31 19:00:00.000000000 -0500
@@ -1,148 +0,0 @@
-/*!
- *  @header MPSImageMorphology.h
- *  @framework MetalPerformanceShaders
- *
- *  @copyright Copyright (c) 2015 Apple Inc. All rights reserved.
- *  @abstract MetalPerformanceShaders morphological operators
- */
-
-#ifndef MPS_MPSImageMorphology_h
-#define MPS_MPSImageMorphology_h
-
-#include <MetalPerformanceShaders/MPSImageKernel.h>
-
-
-/*!
- *  @class      MPSImageAreaMax
- *  @discussion The MPSImageAreaMax kernel finds the maximum pixel value in a rectangular region centered around each pixel
- *              in the source image. If there are multiple channels in the source image, each channel is processed independently.
- *              The edgeMode property is assumed to always be MPSImageEdgeModeClamp for this filter.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageAreaMax : MPSUnaryImageKernel
-
-/*! @property kernelHeight
- *  @abstract  The height of the filter window. Must be an odd number.
- */
-@property (readonly, nonatomic)   NSUInteger  kernelHeight;
-
-/*! @property kernelWidth
- *  @abstract  The width of the filter window. Must be an odd number.
- */
-@property (readonly, nonatomic)   NSUInteger  kernelWidth;
-
-/*!
- *  @abstract Set the kernel height and width
- *  @param      device              The device the filter will run on
- *  @param      kernelWidth         The width of the kernel. Must be an odd number.
- *  @param      kernelHeight        The height of the kernel. Must be an odd number.
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                           kernelWidth: (NSUInteger)kernelWidth
-                          kernelHeight: (NSUInteger)kernelHeight            NS_DESIGNATED_INITIALIZER;
-
-
-/* You must use initWithDevice:kernelWidth:kernelHeight: instead. */
--(nonnull instancetype) initWithDevice:(nonnull id<MTLDevice>)device        NS_UNAVAILABLE;
-
-@end  /* MPSImageAreaMax */
-
-/*!
- *  @class      MPSImageAreaMin
- *  @discussion The MPSImageAreaMin finds the minimum pixel value in a rectangular region centered around each pixel in the
- *               source image. If there are multiple channels in the source image, each channel is processed independently.
- *               It has the same methods as MPSImageAreaMax
- *               The edgeMode property is assumed to always be MPSImageEdgeModeClamp for this filter.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageAreaMin : MPSImageAreaMax
-
-@end  /* MPSImageAreaMin */
-
-/*!
- *  @class      MPSImageDilate
- *  @discussion The MPSImageDilate finds the maximum pixel value in a rectangular region centered around each pixel in the
- *              source image. It is like the MPSImageAreaMax, except that the intensity at each position is calculated relative
- *              to a different value before determining which is the maximum pixel value, allowing for shaped, non-rectangular
- *              morphological probes.
- *  @code
- *          for each pixel in the filter window:
- *              value =  pixel[filterY][filterX] - filter[filterY*filter_width+filterX]
- *              if( value > bestValue ){
- *                   result = value
- *                   bestValue = value;
- *              }
- *  @endcode
- *              A filter that contains all zeros and is identical to a MPSImageAreaMax filter.  The center filter element
- *              is assumed to be 0 to avoid causing a general darkening of the image.
- *
- *              The edgeMode property is assumed to always be MPSImageEdgeModeClamp for this filter.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageDilate : MPSUnaryImageKernel
-/*! @property kernelHeight
- *  @abstract  The height of the filter window. Must be an odd number.
- */
-@property (readonly, nonatomic)   NSUInteger  kernelHeight;
-
-/*! @property kernelWidth
- *  @abstract  The width of the filter window. Must be an odd number.
- */
-@property (readonly, nonatomic)   NSUInteger  kernelWidth;
-
-
-/*!
- *  @abstract   Init a object with kernel height, width and weight values.
- *  @discussion Each dilate shape probe defines a 3D surface of values.
- *              These are arranged in order left to right, then top to bottom
- *              in a 1D array. (values[kernelWidth*y+x] = probe[y][x])
- *              Values should be generally be in the range [0,1] with the center 
- *              pixel tending towards 0 and edges towards 1. However, any numerical
- *              value is allowed. Calculations are subject to the usual floating-point
- *              rounding error.
- *
- *  @param      device              The device the filter will run on
- *  @param      kernelWidth         The width of the kernel. Must be an odd number.
- *  @param      kernelHeight        The height of the kernel. Must be an odd number.
- *  @param      values              The set of values to use as the dilate probe.
- *                                  The values are copied into the filter. To avoid 
- *                                  image ligthening or darkening, the center value should
- *                                  be 0.0f.
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                           kernelWidth: (NSUInteger)kernelWidth
-                          kernelHeight: (NSUInteger)kernelHeight
-                                values: (const float* __nonnull) values       NS_DESIGNATED_INITIALIZER;
-
-/* You must use initWithDevice:kernelWidth:kernelHeight:values: instead. */
--(nonnull instancetype) initWithDevice:(nonnull id<MTLDevice>)device        NS_UNAVAILABLE;
-
-@end  /* MPSImageDilate */
-
-
-/*!
- *  @class      MPSImageErode
- *  @discussion The MPSImageErode filter finds the minimum pixel value in a rectangular region centered around each pixel in the
- *              source image. It is like the MPSImageAreaMin, except that the intensity at each position is calculated relative
- *              to a different value before determining which is the maximum pixel value, allowing for shaped, non-rectangular
- *              morphological probes.
- *  @code
- *          for each pixel in the filter window:
- *              value =  pixel[filterY][filterX] + filter[filterY*filter_width+filterX]
- *              if( value < bestValue ){
- *                   result = value
- *                   bestValue = value;
- *              }
- *  @endcode
- *              A filter that contains all zeros is identical to a MPSImageAreaMin filter. The center filter element
- *              is assumed to be 0, to avoid causing a general lightening of the image.
- *
- *              The definition of the filter for MPSImageErode is different from vImage. (MPSErode_filter_value = 1.0f-vImageErode_filter_value.)
- *              This allows MPSImageDilate and MPSImageErode to use the same filter, making open and close operators easier to write.
- *              The edgeMode property is assumed to always be MPSImageEdgeModeClamp for this filter.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageErode : MPSImageDilate
-@end
-
-#endif  /* MPS_MSImageMorphology_h */
diff -ruN /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageResampling.h /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageResampling.h
--- /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageResampling.h	2016-09-23 20:26:03.000000000 -0400
+++ /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageResampling.h	1969-12-31 19:00:00.000000000 -0500
@@ -1,89 +0,0 @@
-/*!
- *  @header MPSImageResampling.h
- *  @framework MetalPerformanceShaders
- *
- *  @copyright Copyright (c) 2015 Apple Inc. All rights reserved.
- *  @abstract  Resampling filters for MetalPerformanceShaders
- */
-
-#ifndef MPS_MPSImageResampling_h
-#define MPS_MPSImageResampling_h
-
-#include <MetalPerformanceShaders/MPSImageKernel.h>
-
-
-/*!
- *  @class      MPSImageLanczosScale
- *  @abstract   Resize an image and / or change its aspect ratio
- *  @discussion The MPSImageLanczosScale filter can be used to resample an existing image
- *              using a different sampling frequency in each dimension. This can be
- *              used to enlarge or reduce the size of an image, or change the aspect
- *              ratio of an image.  The filter uses a Lanczos resampling algorithm
- *              which typically produces better quality for photographs, but is slower
- *              than linear sampling using the GPU texture units. Lanczos downsampling 
- *              does not require a low pass filter to be applied before it is used. 
- *              Because the resampling function has negative lobes, Lanczos can result 
- *              in ringing near sharp edges, making it less suitable for vector art.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageLanczosScale : MPSUnaryImageKernel
-
-/*! @property   scaleTransform
- *  @abstract   An optional transform that describes how to scale and translate the source image
- *  @discussion If the scaleTransform is NULL, then the MPSImageLanczosScale filter will 
- *              rescale the image so that the source image fits exactly into the destination 
- *              texture.  If the transform is not NULL, then the transform is used for determining
- *              how to map the source image to the destination. Default: NULL
- *              
- *              When the scaleTransform is set to non-NULL, the values pointed to by the new 
- *              scaleTransform are copied to object storage, and the pointer is updated to point 
- *              to internal storage. Do not attempt to free it.  You may free your copy of 
- *              the MPSScaleTransform as soon as the property set operation is complete.
- *
- *              When calculating a scaleTransform, use the limits of the bounding box for the intended
- *              source region of interest and the destination clipRect. Adjustments for pixel center 
- *              coordinates are handled internally to the function.  For example,
- *              the scale transform to convert the entire source image to the entire destination image
- *              size (clipRect = MPSRectNoClip) would be:
- *
- *              @code
- *                  scaleTransform.scaleX = (double) dest.width / source.width;
- *                  scaleTransform.scaleY = (double) dest.height / source.height;
- *                  scaleTransform.translateX = scaleTransform.translateY = 0.0;
- *              @endcode
- *
- *              The translation parameters allow you to adjust the region of the source image used
- *              to create the destination image. They are in destination coordinates. To place the
- *              top left corner of the destination clipRect to represent the position {x,y} in source 
- *              coordinates, we solve for the translation based on the standard scale matrix operation
- *              for each axis:
- *
- *              @code
- *                  dest_position = source_position * scale + translation;
- *                  translation = dest_position - source_position * scale;
- *              @endcode
- *
- *              For the top left corner of the clipRect, the dest_position is considered to be {0,0}. 
- *              This gives us a translation of:
- *
- *              @code
- *                  scaleTransform.translateX = -source_origin.x * scaleTransform.scaleX;
- *                  scaleTransform.translateY = -source_origin.y * scaleTransform.scaleY;
- *              @endcode
- *
- *              One would typically use non-zero translations to do tiling, or provide a resized 
- *              view into a internal segment of an image.
- *
- *              Changing the Lanczos scale factor may trigger recalculation of signficant state internal
- *              to the object when the filter is encoded to the command buffer. The scale factor is
- *              scaleTransform->scaleX,Y, or the ratio of source and destination image sizes if
- *              scaleTransform is NULL. Reuse a MPSImageLancosScale object for frequently used scalings 
- *              to avoid redundantly recreating expensive resampling state.
- */
-@property (readwrite, nonatomic, nullable) const MPSScaleTransform *scaleTransform;
-
-
-
-@end
-
-#endif /* MPS_MSImageResampling_h */
diff -ruN /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageThreshold.h /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageThreshold.h
--- /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageThreshold.h	2016-09-23 20:26:03.000000000 -0400
+++ /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageThreshold.h	1969-12-31 19:00:00.000000000 -0500
@@ -1,220 +0,0 @@
-/*!
- *  @header MPSImageThreshold.h
- *  @framework MetalPerformanceShaders
- *
- *  @copyright Copyright (c) 2015 Apple Inc. All rights reserved.
- *  @abstract MetalPerformanceShaders thresholding filters
- */
-
-#ifndef MPS_MPSImageThreshold_h
-#define MPS_MPSImageThreshold_h
-
-#include <MetalPerformanceShaders/MPSImageKernel.h>
-
-/*!
- *  @class      MPSImageThresholdBinary
- *  @discussion The MPSThreshold filter applies a fixed-level threshold to each pixel in the image.
- *              The threshold functions convert a single channel image to a binary image.
- *              If the input image is not a single channel image, convert the inputimage to a single channel
- *              luminance image using the linearGrayColorTransform and then apply the threshold.
- *              The ThresholdBinary function is:
- *                  destinationPixelValue = sourcePixelValue > thresholdValue ? maximumValue : 0
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageThresholdBinary : MPSUnaryImageKernel
-
-/*!
- *  @abstract   initialize a MPSImageThresholdBinary filter
- *  @param      device          The device the filter will run on
- *  @param      thresholdValue  The threshold value to use
- *  @param      maximumValue    The maximum value to use
- *  @param      transform       This matrix is an array of 3 floats.
- *                              The default if no transform is specifed is BT.601/JPEG: {0.299f, 0.587f, 0.114f};
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                        thresholdValue: (float)thresholdValue
-                          maximumValue: (float)maximumValue
-              linearGrayColorTransform: (const float * __nullable)transform       NS_DESIGNATED_INITIALIZER;
-
-/* You must use initWithDevice:thresholdValue:maximumValue:linearGrayColorTransform: instead */
--(nonnull instancetype) initWithDevice:(nonnull id<MTLDevice>)device            NS_UNAVAILABLE;
-
-/*! @property thresholdValue
- *  @discussion The threshold value used to init the threshold filter
- */
-@property (readonly, nonatomic) float   thresholdValue;
-
-/*! @property maximumValue
- *  @discussion The maximum value used to init the threshold filter
- */
-@property (readonly, nonatomic) float   maximumValue;
-
-/*! @property transform
- *  @discussion The color transform used to init the threshold filter
- */
-@property (readonly, nonatomic, nonnull) const float *transform;
-
-
-@end  /* MPSImageThresholdBinary */
-
-/*!
- *  @class      MPSImageThresholdBinaryInverse
- *  @discussion The MPSImageThresholdBinaryInverse filter applies a fixed-level threshold to each pixel in the image.
- *              The threshold functions convert a single channel image to a binary image.
- *              If the input image is not a single channel image, convert the inputimage to a single channel
- *              luminance image using the linearGrayColorTransform and then apply the threshold.
- *              The ThresholdBinaryInverse function is:
- *                  destinationPixelValue = sourcePixelValue > thresholdValue ? 0 : maximumValue
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageThresholdBinaryInverse : MPSUnaryImageKernel
-
-/*!
- *  @abstract   initialize a MPSImageThresholdBinaryInverse filter
- *  @param      device          The device the filter will run on
- *  @param      thresholdValue  The threshold value to use
- *  @param      maximumValue    The maximum value to use
- *  @param      transform       This matrix is an array of 3 floats.
- *                              The default if no transform is specifed is BT.601/JPEG: {0.299f, 0.587f, 0.114f};
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                        thresholdValue: (float)thresholdValue
-                          maximumValue: (float)maximumValue
-              linearGrayColorTransform: (const float * __nullable)transform       NS_DESIGNATED_INITIALIZER;
-
-/* You must use initWithDevice:thresholdValue:maximumValue:linearGrayColorTransform: instead */
--(nonnull instancetype) initWithDevice:(nonnull id<MTLDevice>)device            NS_UNAVAILABLE;
-
-/*! @property thresholdValue
- *  @discussion The threshold value used to init the threshold filter
- */
-@property (readonly, nonatomic) float   thresholdValue;
-
-/*! @property maximumValue
- *  @discussion The maximum value used to init the threshold filter
- */
-@property (readonly, nonatomic) float   maximumValue;
-
-/*! @property transform
- *  @discussion The color transform used to init the threshold filter
- */
-@property (readonly, nonatomic, nonnull) const float *transform;
-
-@end  /* MPSImageThresholdBinaryInverse */
-
-/*!
- *  @class      MPSImageThresholdTruncate
- *  @discussion The MPSImageThresholdTruncate filter applies a fixed-level threshold to each pixel in the image:
- *              The threshold functions convert a single channel image to a binary image.
- *              If the input image is not a single channel image, convert the inputimage to a single channel
- *              luminance image using the linearGrayColorTransform and then apply the threshold.
- *              The ThresholdTruncate function is:
- *                  destinationPixelValue = sourcePixelValue > thresholdValue ? thresholdValue : sourcePixelValue
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageThresholdTruncate : MPSUnaryImageKernel
-
-/*! 
- *  @abstract   initialize a MPSImageThresholdTruncate filter
- *  @param      device          The device the filter will run on
- *  @param      thresholdValue The threshold value to use
- *  @param      transform       This matrix is an array of 3 floats.
- *                              The default if no transform is specifed is BT.601/JPEG: {0.299f, 0.587f, 0.114f};
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                        thresholdValue: (float)thresholdValue
-              linearGrayColorTransform: (const float * __nullable)transform       NS_DESIGNATED_INITIALIZER;
-
-/* You must use initWithDevice:thresholdValue:linearGrayColorTransform: instead */
--(nonnull instancetype) initWithDevice:(nonnull id<MTLDevice>)device            NS_UNAVAILABLE;
-
-/*! @property thresholdValue
- *  @discussion The threshold value used to init the threshold filter
- */
-@property (readonly, nonatomic) float   thresholdValue;
-
-/*! @property transform
- *  @discussion The color transform used to init the threshold filter
- */
-@property (readonly, nonatomic, nonnull) const float *transform;
-
-@end  /* MPSImageThresholdTruncate */
-
-
-/*!
- *  @class      MPSImageThresholdToZero
- *  @discussion The MPSImageThresholdToZero filter applies a fixed-level threshold to each pixel in the image.
- *              The threshold functions convert a single channel image to a binary image.
- *              If the input image is not a single channel image, convert the inputimage to a single channel
- *              luminance image using the linearGrayColorTransform and then apply the threshold.
- *              The ThresholdToZero function is:
- *                  destinationPixelValue = sourcePixelValue > thresholdValue ? sourcePixelValue : 0
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageThresholdToZero : MPSUnaryImageKernel
-
-/*!
- *  @abstract   initialize a MPSImageThresholdToZero filter
- *  @param      device          The device the filter will run on
- *  @param      thresholdValue  The threshold value to use
- *  @param      transform       This matrix is an array of 3 floats.
- *                              The default if no transform is specifed is BT.601/JPEG: {0.299f, 0.587f, 0.114f};
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                        thresholdValue: (float)thresholdValue
-              linearGrayColorTransform: (const float * __nullable)transform       NS_DESIGNATED_INITIALIZER;
-
-/* You must use initWithDevice:thresholdValue:linearGrayColorTransform: instead */
--(nonnull instancetype) initWithDevice:(nonnull id<MTLDevice>)device            NS_UNAVAILABLE;
-
-/*! @property thresholdValue
- *  @discussion The threshold value used to init the threshold filter
- */
-@property (readonly, nonatomic) float   thresholdValue;
-
-/*! @property transform
- *  @discussion The color transform used to init the threshold filter
- */
-@property (readonly, nonatomic, nonnull) const float *transform;
-
-@end  /* MPSImageThresholdToZero */
-
-/*!
- *  @class      MPSImageThresholdToZeroInverse
- *  @discussion The MPSImageThresholdToZeroInverse filter applies a fixed-level threshold to each pixel in the image.
- *              The threshold functions convert a single channel image to a binary image.
- *              If the input image is not a single channel image, convert the inputimage to a single channel
- *              luminance image using the linearGrayColorTransform and then apply the threshold.
- *              The ThresholdToZeroINverse function is:
- *                  destinationPixelValue = sourcePixelValue > thresholdValue ? 0 : sourcePixelValue
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageThresholdToZeroInverse : MPSUnaryImageKernel
-
-/*!
- *  @abstract  initialize a MPSImageThresholdToZeroInverse filter
- *  @param      device          The device the filter will run on
- *  @param      thresholdValue The threshold value to use
- *  @param      transform       This matrix is an array of 3 floats.
- *                              The default if no transform is specifed is BT.601/JPEG: {0.299f, 0.587f, 0.114f};
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                        thresholdValue: (float)thresholdValue
-              linearGrayColorTransform: (const float * __nullable)transform       NS_DESIGNATED_INITIALIZER;
-
-/* You must use initWithDevice:thresholdValue:linearGrayColorTransform: instead */
--(nonnull instancetype) initWithDevice:(nonnull id<MTLDevice>)device            NS_UNAVAILABLE;
-
-/*! @property thresholdValue
- *  @discussion The threshold value used to init the threshold filter
- */
-@property (readonly, nonatomic) float   thresholdValue;
-
-/*! @property transform
- *  @discussion The color transform used to init the threshold filter
- */
-@property (readonly, nonatomic, nonnull) const float *transform;
-
-@end  /* MPSImageThresholdToZeroInverse */
-
-#endif  /* MPS_MSImageThreshold_h */
diff -ruN /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageTranspose.h /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageTranspose.h
--- /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageTranspose.h	2016-08-01 16:43:15.000000000 -0400
+++ /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSImageTranspose.h	1969-12-31 19:00:00.000000000 -0500
@@ -1,26 +0,0 @@
-/*!
- *  @header MPSImageTranspose.h
- *  @framework MetalPerformanceShaders.framework
- *
- *  @copyright Copyright (c) 2015 Apple Inc. All rights reserved.
- *  @abstract MetalPerformanceShaders transpose filters
- */
-
-#ifndef MPS_MPSImageTranspose_h
-#define MPS_MPSImageTranspose_h
-
-#include <MetalPerformanceShaders/MPSImageKernel.h>
-
-/*!
- *  @class      MPSImageTranspose
- *  @discussion The MPSImageTranspose transposes an image
- *
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface  MPSImageTranspose : MPSUnaryImageKernel
-
-@end    /* MPSImageTranspose */
-
-
-#endif  /* MPS_MSImageTranspose_h */
-
diff -ruN /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSKernel.h /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSKernel.h
--- /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSKernel.h	2016-08-01 16:43:15.000000000 -0400
+++ /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSKernel.h	1969-12-31 19:00:00.000000000 -0500
@@ -1,156 +0,0 @@
-/*!
- *  @header MPSKernel.h
- *  @framework MetalPerformanceShaders.framework
- *  @copyright Copyright (c) 2015 Apple Inc. All rights reserved.
- *
- *  @discussion  MPSKernel objects encode tuned image processing operations into a MTLCommandBuffer.
- */
-
-
-#ifndef MPS_MPSKernel_h
-#define MPS_MPSKernel_h
-
-#include <MetalPerformanceShaders/MPSTypes.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*!
- *  MPSSupportsMTLDevice
- *  @abstract   Determine whether a MetalPerformanceShaders.framework  supports a MTLDevice.
- *  @discussion Use this function to determine whether a MTLDevice can be used with interfaces in MetalPerformanceShaders.framework.
- *  @param      device          A valid MTLDevice
- *  @return     YES             The device is supported.
- *              NO              The device is not supported
- */
-BOOL    MPSSupportsMTLDevice( __nullable id <MTLDevice> device )  MPS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0);
-
-
-/*!
- *  @class      MPSKernel
- *  @dependency This depends on Metal.framework
- *  @discussion The MPSKernel class is the base class for all MPS objects.  It defines a standard interface for
- *              MPS kernels.   You should not use the MPSKernel class directly. Instead, a  number of MPSKernel 
- *              subclasses are available in MetalPerformanceShaders.framework that define specific high-performance
- *              image processing operations.
- *
- *              The basic sequence for applying a MPSKernel to an image is as follows:
- *
- *              1.  Create a MPSKernel corresponding to the operation you wish to perform:
- *                  @code
- *                  MPSImageSobel *sobel = [[MPSImageSobel alloc] initWithDevice: mtlDevice];
- *                  @endcode
- *
- *              2.  Encode the filter into a command buffer:
- *                  @code
- *                  sobel.offset = ...;
- *                  sobel.clipRect = ...;
- *                  sobel.options = ...;
- *                  [sobel encodeToCommandBuffer: commandBuffer
- *                                 sourceTexture: inputImage
- *                            destinationTexture: resultImage ];
- *                  
- *                  if( returnVal < 0 )
- *                      MyShowError( returnVal );
- *                  @endcode
- *                  Encoding the kernel merely encodes the operation into a MTLCommandBuffer. It does not modify any pixels, yet.
- *                  All MPSKernel state has been copied to the command buffer. MPSKernels may be reused.  If the texture was previously
- *                  operated on by another command encoder (e.g. MTLRenderCommandEncoder), you should call -endEncoding on the other
- *                  encoder before encoding the filter.
- *
- *                  Some MPS filters work in place (inputImage = resultImage) even in situations where Metal might not
- *                  normally allow in place operation on textures. If in-place operation is desired, you may attempt to call
- *                  [MPSKernel encodeKernelInPlace...]. If the operation can not be completed in place, then
- *                  NO will be returned and you will have to create a new result texture and try again. To make an in-place
- *                  image filter reliable, pass a fallback MPSCopyAllocator to the method to create a new texture to write
- *                  to in the event that a filter can not operate in place.
- *
- *                  (Repeat steps 2 for more filters, as desired.)
- *
- *                      It should be self evident that step 2 may not be thread safe. That is, you can not have
- *                      multiple threads manipulating the same properties on the same MPSKernel object at the
- *                      same time and achieve coherent output. In common usage, the MPSKernel properties don't
- *                      often need to be changed from their default values, but if you need to apply the same
- *                      filter to multiple images on multiple threads with cropping / tiling, make additional
- *                      MPSKernel objects per thread. They are cheap. You can use multiple MPSKernel objects on
- *                      multiple threads, as long as only one thread is operating on any particular MPSKernel
- *                      object at a time.
- *
- *              3.  After encoding any additional work to the command buffer using other encoders, submit the MTLCommandBuffer
- *                  to your MTLCommandQueue, using:
- *                  @code
- *                  [mtlCommandBuffer commit];
- *                  @endcode
- */
-
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)
-@interface MPSKernel  : NSObject <NSCopying>
-
-/****************
- *  Properties  *
- ****************/
-
-/*! @property   options
- *  @abstract   The set of options used to run the kernel.
- *  @ref        subsubsection_options
- */
-@property (readwrite, nonatomic) MPSKernelOptions                   options;
-
-/*! @property device
- *  @abstract  The device on which the kernel will be used
- */
-@property (readonly, retain, nonatomic, nonnull)  id <MTLDevice>    device;
-
-/*!
- @property label
- @abstract A string to help identify this object.
- */
-@property (copy, atomic, nullable)  NSString *                      label;
-
-/*********************
- *  Object creation  *
- *********************/
-
-
-/*!
- *  @abstract   Standard init with default properties per filter type
- *  @param      device      The device that the filter will be used on. May not be NULL.
- *  @result     a pointer to the newly initialized object. This will fail, returning
- *              nil if the device is not supported. Devices must be 
- *              MTLFeatureSet_iOS_GPUFamily2_v1 or later.
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device
-                        NS_DESIGNATED_INITIALIZER;
-
-
-/*!
- *  @abstract   Make a copy of this MPSKernel for a new device
- *  @discussion -copyWithZone: will call this API to make a copy of the
- *              MPSKernel on the same device.  This interface may also be
- *              called directly to make a copy of the MPSKernel on a new
- *              device. Typically, the same MPSKernels should not be used
- *              to encode kernels on multiple command buffers from multiple
- *              threads. Many MPSKernels have mutable properties that might 
- *              be changed by the other thread while this one is trying to 
- *              encode. If you need to use a MPSKernel from multiple threads
- *              make a copy of it for each additional thread using -copyWithZone:
- *              or -copyWithZone:device:
- *  @param      zone        The NSZone in which to allocate the object
- *  @param      device      The device for the new MPSKernel. If nil, then use
- *                          self.device.
- *  @result     a pointer to a copy of this MPSKernel. This will fail, returning
- *              nil if the device is not supported. Devices must be
- *              MTLFeatureSet_iOS_GPUFamily2_v1 or later.
- */
-- (nonnull instancetype) copyWithZone:(nullable NSZone *)zone
-                               device:(nullable id <MTLDevice>) device;
-
-@end
-
-
-#ifdef __cplusplus
-    }       /* extern "C" */
-#endif
-
-#endif  /* MPS_MPSKernel_h */
diff -ruN /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSMatrix.h /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSMatrix.h
--- /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSMatrix.h	2016-08-01 16:43:15.000000000 -0400
+++ /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSMatrix.h	1969-12-31 19:00:00.000000000 -0500
@@ -1,165 +0,0 @@
-#ifndef MPSMatrix_h
-#define MPSMatrix_h
-
-#include <MetalPerformanceShaders/MPSTypes.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*!
- *  @class      MPSMatrixDescriptor
- *
- *  @dependency This depends on Metal.framework
- *
- *  @discussion A MPSMatrixDescriptor describes the sizes, strides, and data type of a
- *              2-dimensional array of data.  All storage is assumed to be in row-major
- *              order.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface MPSMatrixDescriptor: NSObject
-
-/*! @property   rows
- *  @discussion The number of rows in the matrix.
- */
-@property (readwrite, nonatomic) NSUInteger rows;
-
-/*! @property   columns
- *  @discussion The number of columns in the matrix.
- */
-@property (readwrite, nonatomic) NSUInteger columns;
-
-/*! @property   dataType
- *  @discussion The type of the data which makes up the values of the matrix.
- */
-@property (readwrite, nonatomic) MPSDataType dataType;
-
-/*! @property   rowBytes
- *  @discussion The stride, in bytes, between corresponding elements of
- *              consecutive rows.  Must be a multiple of the element size.
- */
-@property (readwrite, nonatomic) NSUInteger rowBytes;
-
-/*!
- *  @abstract   Create a MPSMatrixDescriptor with the specified dimensions and data type.
- *
- *  @param      rows                The number of rows of the matrix.
- *
- *  @param      columns             The number of columns of the matrix.
- *
- *  @param      rowBytes            The number of bytes between starting elements of consecutive
- *                                  rows.  Must be a multiple of the element size.
- *
- *  @param      dataType            The type of the data to be stored in the matrix.
- *
- *  @discussion For performance considerations the optimal row stride may not necessarily be equal
- *              to the number of columns in the matrix.  The MPSMatrix class provides a method which
- *              may be used to determine this value, see the rowBytesFromColumns API in the MPSMatrix
- *              class.
- */
-+(__nonnull instancetype) matrixDescriptorWithDimensions: (NSUInteger)              rows
-                                                 columns: (NSUInteger)              columns
-                                                rowBytes: (NSUInteger)              rowBytes
-                                                dataType: (MPSDataType)             dataType;
-
-/*!
- *  @abstract   Return the recommended row stride, in bytes, for a given number of
- *              columns.
- *
- *  @param      columns         The number of columns in the matrix for which the recommended
- *                              row stride, in bytes, is to be determined.
- *
- *  @param      dataType        The type of matrix data values.
- *
- *  @discussion To achieve best performance the optimal stride between rows of a matrix is not
- *              necessarily equivalent to the number of columns.  This method returns the row stride, in
- *              bytes, which gives best performance for a given number of columns.  Using this row stride
- *              to construct your array is recommended, but not required (provided that the stride
- *              used is still large enough to allocate a full row of data).
- */
-+(size_t) rowBytesFromColumns: (NSUInteger) columns
-                     dataType: (MPSDataType) dataType;
-
-@end // MPSMatrixDescriptor
-
-/*!
- *  @class      MPSMatrix
- *
- *  @dependency This depends on Metal.framework
- *
- *  @discussion A MPSMatrix object describes a 2-dimensional array of data and provides storage
- *              for its values.  MPSMatrix objects serve as inputs and outputs of MPSMatrixKernel
- *              objects.
- *
- *              Implementation note:
- *              A MPSMatrix object maintains its internal storage using a MTLBuffer object and thus
- *              the same rules for maintaining coherency of a MTLBuffer's data between CPU memory and GPU
- *              memory apply to a MPSMatrix.  Data is assumed to be stored in row-major layout.
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface MPSMatrix: NSObject
-
-/*! @property   device
- *  @discussion The device on which the MPSMatrix will be used.
- */
-@property (readonly, retain, nonatomic, nonnull) id<MTLDevice> device;
-
-/*! @property   rows
- *  @discussion The number of rows in the MPSMatrix.
- */
-@property (readonly, nonatomic) NSUInteger rows;
-
-/*! @property   columns
- *  @discussion The number of columns in the MPSMatrix.
- */
-@property (readonly, nonatomic) NSUInteger columns;
-
-/*! @property   dataType
- *  @discussion The type of the MPSMatrix data.
- */
-@property (readonly, nonatomic) MPSDataType dataType;
-
-/*! @property   rowBytes
- *  @discussion The stride, in bytes, between corresponding elements of
- *              consecutive rows.
- */
-@property (readonly, nonatomic) NSUInteger rowBytes;
-
-/*! @property   data
- *  @discussion An MTLBuffer to store the data.
- */
-@property (readonly, nonnull, nonatomic) id<MTLBuffer> data;
-
-/*!
- *  @abstract   Initialize a MPSMatrix object with a MTLBuffer.
- *
- *  @param      buffer          The MTLBuffer object which contains the data to use for the
- *                              MPSMatrix. May not be NULL.
- *
- *  @param      descriptor      The MPSMatrixDescriptor. May not be NULL.
- *
- *  @return     A valid MPSMatrix object or nil, if failure.
- *
- *  @discussion This function returns a MPSMatrix object which uses the supplied MTLBuffer.  The
- *              dimensions and stride of the matrix are specified by the MPSMatrixDescriptor object.
- *
- *              The provided MTLBuffer must have enough storage to hold
- *
- *                  (descriptor.rows-1) * descriptor.rowBytes + descriptor.columns * (element size) bytes.
- *
- */
--(nonnull instancetype) initWithBuffer: (nonnull id<MTLBuffer>) buffer
-                            descriptor: (nonnull MPSMatrixDescriptor*) descriptor;
-
-/*
- * Use one of the above initialization methods instead.
- */
--(nonnull instancetype) init NS_UNAVAILABLE;
-
-@end // MPSMatrix
-
-#ifdef __cplusplus
-}   // extern "C"
-#endif
-
-#endif /* MPSMatrix_h */
diff -ruN /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSMatrixMultiplication.h /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSMatrixMultiplication.h
--- /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSMatrixMultiplication.h	2016-09-23 20:26:03.000000000 -0400
+++ /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSMatrixMultiplication.h	1969-12-31 19:00:00.000000000 -0500
@@ -1,152 +0,0 @@
-/*!
- *  @header MPSMatrixMultiplication.h
- *  @framework MetalPerformanceShaders.framework
- *
- *  @copyright Copyright (c) 2016 Apple Inc. All rights reserved.
- *  @abstract MetalPerformanceShaders filter base classes
- */
-#ifndef MPSMatrixMultiplication_h
-#define MPSMatrixMultiplication_h
-
-#import <MetalPerformanceShaders/MPSKernel.h>
-#import <MetalPerformanceShaders/MPSMatrix.h>
-
-/*!
- *  @class      MPSMatrixMultiplication
- *
- *  @dependency This depends on Metal.framework.
- *
- *  @abstract   A matrix multiplication kernel.
- *
- *  @discussion A MPSMatrixMultiplication object computes:
- *
- *                  C = alpha * op(A) * op(B) + beta * C
- *
- *              A, B, and C are matrices which are represented by MPSMatrix
- *              objects. alpha and beta are scalar values (of the same data type
- *              as values of C) which are applied as shown above.  A and B may
- *              each have an optional transposition operation applied.
- *
- *              A, B, and C (also referred to in later discussions as the left input
- *              matrix, the right input matrix, and the result matrix respectively).
- *
- *              A MPSMatrixMultiplication object is initialized with the transpose
- *              operators to apply to A and B, sizes for the operation to perform,
- *              and the scalar values alpha and beta.
- *
- */
-MPS_CLASS_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0)
-@interface MPSMatrixMultiplication : MPSKernel
-/*! @property   resultMatrixOrigin
- *
- *  @discussion The origin, relative to [0, 0] in the result matrix, at which to
- *              start writing (and reading if necessary) results.  This property is
- *              modifiable and defaults to [0, 0] at initialization time.  If a
- *              different origin is desired then this should be modified prior to
- *              encoding the kernel.  The z value must be 0.
- */
-@property (readwrite, nonatomic) MTLOrigin resultMatrixOrigin;
-
-/*! @property   leftMatrixOrigin
- *
- *  @discussion The origin, relative to [0, 0] in the left input matrix, at which to
- *              start reading values.  This property is modifiable and defaults to
- *              [0, 0] at initialization time.  If a different origin is desired then
- *              this should be modified prior to encoding the kernel.  The z value
- *              must be 0.
- */
-@property (readwrite, nonatomic) MTLOrigin leftMatrixOrigin;
-
-/*! @property   rightMatrixOrigin
- *
- *  @discussion The origin, relative to [0, 0] in the right input matrix, at which to
- *              start reading values.  This property is modifiable and defaults to
- *              [0, 0] at initialization time.  If a different origin is desired then
- *              this should be modified prior to encoding the kernel.  The z value
- *              must be 0.
- */
-@property (readwrite, nonatomic) MTLOrigin rightMatrixOrigin;
-
-/*!
- *  @abstract   Initialize an MPSMatrixMultiplication object on a device for a given size
- *              and desired transpose and scale values.
- *
- *  @param      device          The device on which the kernel will execute.
- *
- *  @param      transposeLeft   A boolean value which indicates if the left input matrix should be
- *                              used in transposed form.  If 'YES' then op(A) = A**T, otherwise
- *                              op(A) = A.
- *
- *  @param      transposeRight  A boolean value which indicates if the right input matrix should be
- *                              used in transposed form.  If 'YES' then op(B) = B**T, otherwise
- *                              op(B) = B.
- *
- *  @param      resultRows      The number of rows in the result matrix, M in BLAS GEMM description.
- *
- *  @param      resultColumns   The number of columns in the result matrix, N in BLAS GEMM description.
- *
- *  @param      interiorColumns The number of columns of the left input matrix after the
- *                              appropriate transpose operation has been applied. K in BLAS
- *                              GEMM description.
- *
- *  @param      alpha           The scale factor to apply to the product.  Specified in double
- *                              precision.  Will be converted to the appropriate precision in the
- *                              implementation subject to rounding and/or clamping as necessary.
- *
- *  @param      beta            The scale factor to apply to the initial values of C.  Specified
- *                              in double precision.  Will be converted to the appropriate precision in the
- *                              implementation subject to rounding and/or clamping as necessary.
- *
- *  @return     A valid MPSMatrixMultiplication object or nil, if failure.
- */
--(nonnull instancetype) initWithDevice: (nonnull id<MTLDevice>) device
-                         transposeLeft: (BOOL) transposeLeft
-                        transposeRight: (BOOL) transposeRight
-                            resultRows: (NSUInteger) resultRows
-                         resultColumns: (NSUInteger) resultColumns
-                       interiorColumns: (NSUInteger) interiorColumns
-                                 alpha: (double) alpha
-                                  beta: (double) beta;
-
-/*!
- @discussion Use the above initialization method instead.
- */
--(nonnull instancetype) initWithDevice: (nonnull id <MTLDevice>) device NS_UNAVAILABLE;
-
-/*!
- *  @abstract   Encode a MPSMatrixMultiplication object to a command buffer.
- *
- *  @param      commandBuffer   A valid MTLCommandBuffer to receive the encoded kernel.
- *
- *  @param      leftMatrix      A valid MPSMatrix object which specifies the left input matrix.
- *
- *  @param      rightMatrix     A valid MPSMatrix object which specifies the right input matrix.
- *
- *  @param      resultMatrix    A valid MPSMatrix object which specifies the addend matrix which will
- *                              also be overwritten by the result.
- *
- *  @discussion Certain constraints apply to the sizes of the matrices depending on the transposition
- *              operations and sizes requested at initialization time as well as the origins at the time
- *              this routine is called:
- *
- *              The left input matrix must be large enough to hold an array of size resultRows x interiorColumns
- *              elements beginning at leftMatrixOrigin.
- *
- *              The right input matrix must be large enough to hold an array of size interiorColumns x resultColumns
- *              elements beginning at rightMatrixOrigin.
- *
- *              The result matrix must be large enough to hold an array of size resultRows x resultColumns
- *              elements beginning at resultMatrixOrigin.
- */
--(void) encodeToCommandBuffer: (nonnull id <MTLCommandBuffer>) commandBuffer
-                   leftMatrix: (MPSMatrix const* __nonnull) leftMatrix
-                  rightMatrix: (MPSMatrix const* __nonnull) rightMatrix
-                 resultMatrix: (MPSMatrix* __nonnull) resultMatrix
-                    MPS_SWIFT_NAME(encode(commandBuffer:leftMatrix:rightMatrix:resultMatrix:));
-
-
-
-@end // MPSMatrixMultiplication
-
-
-#endif /* MPSMatrixMultiplication_h */
diff -ruN /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSTypes.h /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSTypes.h
--- /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSTypes.h	2016-08-01 16:43:15.000000000 -0400
+++ /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MPSTypes.h	1969-12-31 19:00:00.000000000 -0500
@@ -1,401 +0,0 @@
-/*!
- *  @header     MPSTypes.h
- *  @framework  MetalPerformanceShaders
- *  @copyright  Copyright (c) 2015 Apple Inc. All rights reserved.
- *  @discussion Types common to MetalPerformanceShaders.framework
- */
-
-#ifndef MPS_Types_h
-#define MPS_Types_h
-
-#import <Foundation/NSObjCRuntime.h>
-#import <Metal/Metal.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-    
-#ifndef __has_attribute          /* clang will define this. Other compilers maybe not. */
-#    define __has_attribute(a)   0
-#endif
-#ifndef __has_feature           /* clang will define this. Other compilers maybe not. */
-#    define __has_feature(f)     0
-#endif
-#ifndef __has_extension         /* clang will define this. Other compilers maybe not. */
-#    define __has_extension(e)   0
-#endif
-    
-#ifdef MPS_HIDE_AVAILABILITY
-#    define MPS_ENUM_AVAILABLE_STARTING(_osx, _ios, _tvos)
-#    define MPS_CLASS_AVAILABLE_STARTING(_osx, _ios, _tvos)
-#    define MPS_AVAILABLE_STARTING(_osx, _ios, _tvos)
-#else
-#    ifdef __IPHONE_OS_VERSION_MIN_REQUIRED
-#        define MPS_CLASS_AVAILABLE_STARTING(_osx, _ios, _tvos)  __attribute__((visibility("default"))) __AVAILABILITY_INTERNAL##_ios
-#        define MPS_AVAILABLE_STARTING(_osx, _ios, _tvos)        __AVAILABILITY_INTERNAL##_ios
-#    elif defined(__MAC_OS_X_VERSION_MIN_REQUIRED)
-#        define MPS_CLASS_AVAILABLE_STARTING(_osx, _ios, _tvos)  __attribute__((visibility("default"))) __AVAILABILITY_INTERNAL##_osx
-#        define MPS_AVAILABLE_STARTING(_osx, _ios, _tvos)        __AVAILABILITY_INTERNAL##_osx
-#    elif __has_feature(attribute_availability_tvos)
-#        define MPS_CLASS_AVAILABLE_STARTING(_osx, _ios, _tvos)  __attribute__((visibility("default"))) __OS_AVAILABILITY(_tvos,introduced=_vers)
-#        define MPS_AVAILABLE_STARTING(_osx, _ios, _tvos)        __OS_AVAILABILITY(_tvos,introduced=_vers)
-#    elif __has_feature(attribute_availability_watchos)
-#        define MPS_CLASS_AVAILABLE_STARTING(_osx, _ios, _tvos)  __attribute__((visibility("default"))) __OS_AVAILABILITY(watchos,unavailable)
-#        define MPS_AVAILABLE_STARTING(_osx, _ios, _tvos)        __OS_AVAILABILITY(watchos,unavailable)
-#    else
-#        define MPS_CLASS_AVAILABLE_STARTING(_osx, _ios, _tvos)
-#        define MPS_AVAILABLE_STARTING(_osx, _ios, _tvos)
-#    endif
-#
-#    if __has_extension(enumerator_attributes)
-#       ifdef __IPHONE_OS_VERSION_MIN_REQUIRED
-#           define MPS_ENUM_AVAILABLE_STARTING(_osx, _ios, _tvos)   __AVAILABILITY_INTERNAL##_ios
-#       elif defined(__MAC_OS_X_VERSION_MIN_REQUIRED)
-#           define MPS_ENUM_AVAILABLE_STARTING(_osx, _ios, _tvos)   __AVAILABILITY_INTERNAL##_osx
-#       elif __has_feature(attribute_availability_tvos)
-#           define MPS_ENUM_AVAILABLE_STARTING(_osx, _ios, _tvos)   __OS_AVAILABILITY(_tvos,introduced=_vers)
-#       elif __has_feature(attribute_availability_watchos)
-#           define MPS_ENUM_AVAILABLE_STARTING(_osx, _ios, _tvos)   __OS_AVAILABILITY(watchos,unavailable)
-#       else
-#           define MPS_ENUM_AVAILABLE_STARTING(_osx, _ios, _tvos)
-#       endif
-#    else
-#       define MPS_ENUM_AVAILABLE_STARTING(_osx, _ios, _tvos)
-#    endif
-#endif
-
-#if __has_feature(objc_class_property)
-#   define  MPS_SWIFT_NAME(_name)    CF_SWIFT_NAME(_name)
-#else
-#   define  MPS_SWIFT_NAME(_name)
-#endif
-
-/*! @enum       MPSKernelOptions
- *  @memberof   MPSKernel
- *  @abstract   Options used when creating MPSKernel objects
- */
-#if defined(DOXYGEN)
-typedef enum MPSKernelOptions
-#else
-typedef NS_OPTIONS(NSUInteger, MPSKernelOptions)
-#endif
-{
-    /*! Use default options */
-    MPSKernelOptionsNone                         MPS_ENUM_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)  = 0,
-    
-    /*! Most MPS functions will sanity check their arguments. This has a small but
-     *  non-zero CPU cost. Setting the MPSKernelOptionsSkipAPIValidation will skip these checks.
-     *  MPSKernelOptionsSkipAPIValidation does not skip checks for memory allocation failure.
-     *  Caution:  turning on MPSKernelOptionsSkipAPIValidation can result in undefined behavior
-     *  if the requested operation can not be completed for some reason. Most error states
-     *  will be passed through to Metal which may do nothing or abort the program if Metal
-     *  API validation is turned on. */
-    MPSKernelOptionsSkipAPIValidation            MPS_ENUM_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)  = 1 << 0,
-    
-    /*! When possible, MPSKernels use a higher precision data representation internally than
-     *  the destination storage format to avoid excessive accumulation of computational
-     *  rounding error in the result. MPSKernelOptionsAllowReducedPrecision advises the
-     *  MPSKernel that the destination storage format already has too much precision for
-     *  what is ultimately required downstream, and the MPSKernel may use reduced precision
-     *  internally when it feels that a less precise result would yield better performance.
-     *  The expected performance win is often small, perhaps 0-20%. When enabled, the
-     *  precision of the result may vary by hardware and operating system. */
-    MPSKernelOptionsAllowReducedPrecision        MPS_ENUM_AVAILABLE_STARTING( __MAC_10_11, __IPHONE_9_0, __TVOS_9_0)  = 1 << 1,
-    
-    /*! Some MPSKernels may automatically split up the work internally into multiple tiles.
-     *  This improves performance on larger textures and reduces the amount of memory needed by
-     *  MPS for temporary storage. However, if you are using your own tiling scheme to achieve
-     *  similar results, your tile sizes and MPS's choice of tile sizes may interfere with
-     *  one another causing MPS to subdivide your tiles for its own use inefficiently. Pass
-     *  MPSKernelOptionsDisableInternalTiling to force MPS to process your data tile as a
-     *  single chunk.   */
-    MPSKernelOptionsDisableInternalTiling        MPS_ENUM_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0) = 1 << 2,
-    
-    /*! Enabling this bit will cause various -encode... methods to call MTLCommandEncoder
-     *  push/popDebugGroup.  The debug string will be drawn from MPSKernel.label, if any
-     *  or the name of the class otherwise. */
-    MPSKernelOptionsInsertDebugGroups            MPS_ENUM_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0) = 1 << 3,
-};
-    
-/*! @enum       MPSImageEdgeMode
- *  @memberof   MPSKernel
- *  @abstract   Options used to control edge behaviour of filter when filter reads beyond boundary of src image
- */
-#if defined(DOXYGEN)
-typedef enum MPSImageEdgeMode
-#else
-typedef NS_ENUM(NSUInteger, MPSImageEdgeMode)
-#endif
-{
-    /*! Out of bound pixels are clamped to nearest edge pixel */
-    MPSImageEdgeModeZero                MPS_ENUM_AVAILABLE_STARTING(__MAC_10_11, __IPHONE_9_0, __TVOS_9_0)  = 0,
-    
-    /*! Out of bound pixels are (0,0,0,1) for image with pixel format without alpha channel
-     *  and (0,0,0,0) for image with pixel format that has an alpha channel */
-    MPSImageEdgeModeClamp               MPS_ENUM_AVAILABLE_STARTING(__MAC_10_11, __IPHONE_9_0, __TVOS_9_0)  = 1,
-};
-    
-   
-/*! @typedef MPSAlphaType
- *  @abstract Premultiplication description for the color channels of a texture
- *  @discussion Some image data is premultiplied. That is to say that the color channels
- *              are stored instead as color * alpha. This is an optimization for image compositing
- *              (alpha blending), but it can get in the way of most other image filters,
- *              especially those that apply non-linear affects like the MPSImageParametricCurveTransform
- *              multidimensional lookup tables, and functions like convolution or resampling filters
- *              that look at adjacent pixels, where the alpha may not be the same.
- *  @code
- *              Some basic conversion cases:
- *                  source                              destination                         operation
- *                  ------                              -----------                         ---------
- *                  MPSAlphaTypeNonPremultiplied        MPSAlphaTypeNonPremultiplied        <none>
- *                  MPSAlphaTypeNonPremultiplied        MPSAlphaTypeAlphaIsOne              composite with opaque background color
- *                  MPSAlphaTypeNonPremultiplied        MPSAlphaTypePremultiplied           multiply color channels by alpha
- *                  MPSAlphaTypeAlphaIsOne              MPSAlphaTypeNonPremultiplied        set alpha to 1
- *                  MPSAlphaTypeAlphaIsOne              MPSAlphaTypeAlphaIsOne              set alpha to 1
- *                  MPSAlphaTypeAlphaIsOne              MPSAlphaTypePremultiplied           set alpha to 1
- *                  MPSAlphaTypePremultiplied           MPSAlphaTypeNonPremultiplied        divide color channels by alpha
- *                  MPSAlphaTypePremultiplied           MPSAlphaTypeAlphaIsOne              composite with opaque background color
- *                  MPSAlphaTypePremultiplied           MPSAlphaTypePremultiplied           <none>
- *  @endcode
- *
- *              Color space conversion operations require the format to be either MPSPixelAlpha_NonPremultiplied or
- *              MPSPixelAlpha_AlphaIsOne to work correctly. A number of MPSKernels have similar requirements. If
- *              premultiplied data is provided or requested, extra operations will be added to the conversion to
- *              ensure correct operation. Fully opaque images should use MPSAlphaTypeAlphaIsOne.
- *
- *  @constant   MPSAlphaTypeNonPremultiplied   Image is not premultiplied by alpha. Alpha is not guaranteed to be 1. (kCGImageAlphaFirst/Last)
- *  @constant   MPSAlphaTypeAlphaIsOne         Alpha is guaranteed to be 1, even if it is not encoded as 1 or not encoded at all. (kCGImageAlphaNoneSkipFirst/Last, kCGImageAlphaNone)
- *  @constant   MPSAlphaTypePremultiplied      Image is premultiplied by alpha. Alpha is not guaranteed to be 1. (kCGImageAlphaPremultipliedFirst/Last)
- */
-
-typedef NS_ENUM( NSUInteger, MPSAlphaType )
-{
-    MPSAlphaTypeNonPremultiplied   MPS_ENUM_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0, __TVOS_10_0)  = 0,
-    MPSAlphaTypeAlphaIsOne         MPS_ENUM_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0, __TVOS_10_0)  = 1,
-    MPSAlphaTypePremultiplied      MPS_ENUM_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0, __TVOS_10_0)  = 2
-};
-
-    
-/*! @enum       MPSImageFeatureChannelFormat
- *  @memberof   MPSImage
- *  @abstract   Encodes the representation of a single channel within a MPSImage.
- *  @discussion A MPSImage pixel may have many channels in it, sometimes many more than 4, the
- *              limit of what MTLPixelFormats encode. The storage format for a single channel 
- *              within a pixel can be given by the MPSImageFeatureChannelFormat. The number
- *              of channels is given by the featureChannels parameter of appropriate MPSImage
- *              APIs. The size of the pixel is size of the channel format multiplied by the
- *              number of feature channels. No padding is allowed, except to round out to a full
- *              byte.
- */
-typedef NS_ENUM(NSUInteger, MPSImageFeatureChannelFormat)
-{
-    /*! invalid format */
-    MPSImageFeatureChannelFormatInvalid     MPS_ENUM_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0, __TVOS_10_0)  = 0,
-
-    /*! uint8_t with value [0,255] encoding [0,1.0] */
-    MPSImageFeatureChannelFormatUnorm8     MPS_ENUM_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0, __TVOS_10_0)   = 1,
-    
-    /*! uint16_t with value [0,65535] encoding [0,1.0] */
-    MPSImageFeatureChannelFormatUnorm16     MPS_ENUM_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0, __TVOS_10_0)  = 2,
-    
-    /*! IEEE-754 16-bit floating-point value. "half precision" Representable normal range is +-[2**-14, 65504], 0, Infinity, NaN. 11 bits of precision + exponent. */
-    MPSImageFeatureChannelFormatFloat16     MPS_ENUM_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0, __TVOS_10_0)  = 3,
-    
-    /*! IEEE-754 32-bit floating-point value.  "single precision" (standard float type in C) 24 bits of precision + exponent */
-    MPSImageFeatureChannelFormatFloat32     MPS_ENUM_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0, __TVOS_10_0)  = 4,
-    
-};
-   
-/*! @typedef    MPSDataType
- *  @discussion A value to specify a type of data.
- *
- *  @constant   MPSDataTypeFloatBit     A common bit for all floating point data types.
- *  @constant   MSPDataTypeFloat32      32-bit floating point (single-precision).
- */
-typedef NS_ENUM(uint32_t, MPSDataType)
-{
-    MPSDataTypeFloatBit MPS_ENUM_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0) = 0x10000000,
-    MPSDataTypeFloat32  MPS_ENUM_AVAILABLE_STARTING( __MAC_10_12, __IPHONE_10_0, __TVOS_10_0) = MPSDataTypeFloatBit | 32
-};
-    
-/*!
- *  @struct     MPSOffset
- *  @memberof   MPSKernel
- *  @abstract   A signed coordinate with x, y and z components
- */
-typedef struct
-{
-    NSInteger x;    /**<    The horizontal component of the offset. Units: pixels   */
-    NSInteger y;    /**<    The vertical component of the offset. Units: pixels     */
-    NSInteger z;    /**<    The depth component of the offset. Units: pixels        */
-}MPSOffset;
-
-/*!
- *  @struct     MPSOrigin
- *  @memberof   MPSKernel
- *  @abstract   A position in an image
- */
-typedef struct MPSOrigin
-{
-    double  x;  /**< The x coordinate of the position       */
-    double  y;  /**< The y coordinate of the position       */
-    double  z;  /**< The z coordinate of the position       */
-}MPSOrigin;
-
-/*!
- *  @struct     MPSSize
- *  @memberof   MPSKernel
- *  @abstract   A size of a region in an image
- */
-typedef struct MPSSize
-{
-    double  width;      /**< The width of the region    */
-    double  height;     /**< The height of the region   */
-    double  depth;      /**< The depth of the region    */
-}MPSSize;
-
-/*!
- *  @struct     MPSRegion
- *  @memberof   MPSKernel
- *  @abstract   A region of an image
- */
-typedef struct MPSRegion
-{
-    MPSOrigin       origin;     /**< The top left corner of the region.  Units: pixels  */
-    MPSSize         size;       /**< The size of the region. Units: pixels              */
-}MPSRegion;
-    
-/*!
- *  @memberof   MPSKernel
- *  @constant   MPSRectNoClip
- *  @discussion This is a special constant to indicate no clipping is to be done.
- *              The entire image will be used.
- *              This is the default clipping rectangle or the input extent for MPSKernels.
- */
-extern const MTLRegion  MPSRectNoClip;
-    
-/*!
- *  @struct         MPSScaleTransform
- *  @abstract       Transform matrix for explict control over resampling in MPSImageLanczosScale.
- *  @discussion     The MPSScaleTransform is equivalent to:
- *       @code
- *          (CGAffineTransform) {
- *               .a = scaleX,        .b = 0,
- *               .c = 0,             .d = scaleY,
- *               .tx = translateX,   .ty = translateY
- *           }
- *       @endcode
- *
- *  @memberof       MPSImageLanczosScale
- */
-typedef struct MPSScaleTransform
-{
-    double  scaleX, scaleY;
-    double  translateX, translateY;
-}MPSScaleTransform;
-
-@class MPSKernel;
-    
-/*!
- *  @typedef    MPSCopyAllocator
- *  @memberof   MPSKernel
- *  @abstract   A block to make a copy of sourceTexture for MPSKernels that can only execute out of place.
- *  @discussion Some MPSKernel objects may not be able to operate in place. When that occurs, and in-place
- *              operation is requested, MPS will call back to this block to get a new texture
- *              to return instead. To avoid spending long periods of time allocating pages to back the
- *              MTLTexture, the block should attempt to reuse textures. The texture returned from the
- *              MPSCopyAllocator will be returned instead of the sourceTexture from the MPSKernel method 
- *              on return.
- *              @code
- *              // A MPSCopyAllocator to handle cases where in-place operation fails.
- *              MPSCopyAllocator myAllocator = ^id <MTLTexture>( MPSKernel * __nonnull filter,
- *                                                              __nonnull id <MTLCommandBuffer> cmdBuf,
- *                                                              __nonnull id <MTLTexture> sourceTexture)
- *              {
- *                  MTLPixelFormat format = sourceTexture.pixelFormat;  // FIXME: is this format writable?
- *                  MTLTextureDescriptor *d = [MTLTextureDescriptor texture2DDescriptorWithPixelFormat: format
- *                                               width: sourceTexture.width
- *                                              height: sourceTexture.height
- *                                           mipmapped: NO];
- *                  d.usage = MTLTextureUsageShaderRead | MTLTextureUsageShaderWrite;
- *
- *                  //FIXME: Allocating a new texture each time is slow. They take up to 1 ms each.
- *                  //       There are not too many milliseconds in a video frame! You can recycle
- *                  //       old textures (or MTLBuffers and make textures from them) and reuse
- *                  //       the memory here.
- *                  id <MTLTexture> result = [cmdBuf.device newTextureWithDescriptor: d];
- *
- *                  // FIXME: If there is any metadata associated with sourceTexture such as colorspace
- *                  //        information, MTLResource.label, MTLResource.cpuCacheMode mode,
- *                  //        MTLResource.MTLPurgeableState, etc., it may need to be similarly associated
- *                  //        with the new texture to avoid losing your metadata.
- *
- *                  // FIXME: If filter.clipRect doesn't cover the entire image, you may need to copy
- *                  //        pixels from sourceTexture to the new texture or regions of the new texture
- *                  //        will be uninitialized. You can make a MTLCommandEncoder to encode work on
- *                  //        the MTLCommandBuffer here to do that work, if necessary. It will be
- *                  //        scheduled to run immediately before the MPSKernel work. Do not call
- *                  //        [MTLCommandBuffer enqueue/commit/waitUntilCompleted/waitUntilScheduled]
- *                  //        in the MPSCopyAllocator block. Make sure to call -endEncoding on the
- *                  //        MTLCommandEncoder so that the MTLCommandBuffer has no active encoder
- *                  //        before returning.
- *
- *                  // CAUTION: The next command placed on the MTLCommandBuffer after the MPSCopyAllocator 
- *                  //          returns is almost assuredly going to be encoded with a MTLComputeCommandEncoder. 
- *                  //          Creating any other type of encoder in the MPSCopyAllocator will probably cost
- *                  //          an additional 0.5 ms of both CPU _AND_ GPU time (or more!) due to a double 
- *                  //          mode switch penalty.
- *
- *                  // CAUTION: If other objects (in addition to the caller of -encodeToCommandBuffer:inPlaceTexture:...)
- *                  //          own a reference to sourceTexture, they may need to be notified that
- *                  //          sourceTexture has been replaced so that they can release that resource
- *                  //          and adopt the new texture. 
- *
- *                  //          The reference to sourceTexture owned by the caller of
- *                  //          -encodeToCommandBuffer:inPlaceTexture... will be released by 
- *                  //          -encodeToCommandBuffer:inPlaceTexture:... after the kernel is encoded if 
- *                  //          and only if the MPSCopyAllocator is called, and the operation is successfully 
- *                  //          encoded out of place.
- *
- *                  return result;
- *                  // d is autoreleased
- *              };
- *              @endcode
- *              If nil is returned by the allocator, NO will be returned by the calling function.
- *
- *              When the MPSCopyAllocator is called, no MTLCommandEncoder is active on the commandBuffer.
- *              You may create a MTLCommandEncoder in the block to initialize the texture. Make sure
- *              to call -endEncoding on it before returning, if you do.
- *
- *  @param      filter          A valid pointer to the MPSKernel that is calling the MPSCopyAllocator. From
- *                              it you can get the clipRect of the intended operation.
- *  @param      commandBuffer   A valid MTLCommandBuffer. It can be used to obtain the device against
- *                              which to allocate the new texture. You may also enqueue operations on
- *                              the commandBuffer to initialize the texture on a encoder allocated in the
- *                              block. You may not submit, enqueue or wait for scheduling/completion of 
- *                              the command buffer.
- *  @param      sourceTexture   The texture that is providing the source image for the filter. You may
- *                              wish to use its size and MTLPixelFormat for the new texture, but it is
- *                              not requred.
- *
- *  @return     A new valid MTLTexture to use as the destination for the MPSKernel. If the calling function
- *              succeeds, its texture parameter will be overwritten with a pointer to this texture. If the
- *              calling function fails (highly unlikely, except for user error) then the texture
- *              will be released before the calling function returns.
- */
-    
-/* Warning here for ns_returns_retained is clang rdar://problem/20130079 */
-typedef id <MTLTexture> __nonnull NS_RETURNS_RETAINED (^MPSCopyAllocator)( MPSKernel * __nonnull filter,
-                                                                          id <MTLCommandBuffer> __nonnull commandBuffer,
-                                                                          id <MTLTexture> __nonnull sourceTexture);
-    
-
-#ifdef __cplusplus
-}
-#endif
-
-
-#endif /* MPS_Types_h */
-
diff -ruN /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MetalPerformanceShaders.h /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MetalPerformanceShaders.h
--- /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MetalPerformanceShaders.h	2016-09-23 20:26:03.000000000 -0400
+++ /Applications/Xcode9-beta1.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MetalPerformanceShaders.framework/Headers/MetalPerformanceShaders.h	2017-05-20 02:10:25.000000000 -0400
@@ -5,17 +5,25 @@
  *  @copyright Copyright (c) 2015 Apple Inc. All rights reserved.
  */
 
-#import <MetalPerformanceShaders/MPSCNN.h>
-#import <MetalPerformanceShaders/MPSImageConversion.h>
-#import <MetalPerformanceShaders/MPSImageConvolution.h>
-#import <MetalPerformanceShaders/MPSImageHistogram.h>
-#import <MetalPerformanceShaders/MPSImageIntegral.h>
-#import <MetalPerformanceShaders/MPSImageMedian.h>
-#import <MetalPerformanceShaders/MPSImageMorphology.h>
-#import <MetalPerformanceShaders/MPSImageResampling.h>
-#import <MetalPerformanceShaders/MPSImageThreshold.h>
-#import <MetalPerformanceShaders/MPSImageTranspose.h>
-#import <MetalPerformanceShaders/MPSMatrixMultiplication.h>
+#import <MPSCore/MPSCore.h>
+#import <MPSImage/MPSImage.h>
+#import <MPSMatrix/MPSMatrix.h>
+#import <MPSNeuralNetwork/MPSNeuralNetwork.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*!
+ *  MPSSupportsMTLDevice
+ *  @abstract   Determine whether a MetalPerformanceShaders.framework  supports a MTLDevice.
+ *  @discussion Use this function to determine whether a MTLDevice can be used with interfaces in MetalPerformanceShaders.framework.
+ *  @param      device          A valid MTLDevice
+ *  @return     YES             The device is supported.
+ *              NO              The device is not supported
+ */
+BOOL    MPSSupportsMTLDevice( __nullable id <MTLDevice> device )  MPS_AVAILABLE_STARTING( macos(10.13), ios(9.0), tvos(9.0));
+
 
 
 //
@@ -48,6 +56,21 @@
  *  -  collection of kernels to implement and run neural networks using previously obtained training data, on the GPU
  *  -  new image processing filters to perform color-conversion and for building a gaussian pyramid
  *
+ *  @subsection subsection_usingMPS  Using MPS
+ *  To use MPS:
+ *      link:     -framework MetalPerformanceShaders
+ *      include:  #include <MetalPerformanceShaders/MetalPerformanceShaders.h>
+ *
+ *      Advisory: MetalPerformanceShaders features are broken up into many subheaders which are
+ *                included by MetalPerformanceShaders.h.   The exact placement of interfaces in
+ *                headers is subject to change, as functionality in component sub-frameworks can
+ *                move into MPSCore.framework when the functionality needs to be shared by
+ *                multiple components when features are added. To avoid source level breakage,
+ *                #include the top level MetalPerformanceShaders.h header instead of lower
+ *                level headers.  iOS 11 already broke source compatibility for lower level headers
+ *                and future releases will probably do so again. The only supported method of
+ *                including MPS symbols is the top level framework header.
+ *
  *  @section section_data    Data containers
  *  @subsection subsection_metal_containers  MTLTextures and MTLBuffers
  *
@@ -56,6 +79,14 @@
  *  self-explanatory based on your previous experience with Metal.framework. MPS will use these
  *  directly when it can.
  *
+ *  Most MPSImage and MPSCNN filters operate only on floating-point or normalized texture formats.
+ *  If your data is in a UInteger or Integer MTLPixelFormat (e.g. MTLPixelFormatR8Uint as opposed
+ *  to MTLPixelFormatR8Unorm) then you may need to make a texture view of the texture to change
+ *  the type using [(id <MTLTexture>) newTextureViewWithPixelFormat:(MTLPixelFormat)pixelFormat],
+ *  to reinterpret the data to a normalized format of corresponding signedness and precision. In certain
+ *  cases such as thresholding corresponding adjustments (e.g. /255) may have to also be made to
+ *  parameters passed to the MPSKernel.
+ *
  *  @subsection subsection_mpsimage  MPSImages
  *  Convolutional neural networking (CNN) filters may need more than the four data channels that a
  *  MTLTexture can provide. In these cases, the MPSImage is used instead as an abstraction
@@ -325,10 +356,10 @@
  *  @subsubsection subsubsection_convolveAvailability   Convolutions in MPS
  *  Convolution filters provided by MPS include:
  *
- *      MPSImageConvolution       <MetalPerformanceShaders/MPSImageConvolution.h>        General convolution
- *      MPSImageGassianBlur       <MetalPerformanceShaders/MPSImageConvolution.h>        Gaussian blur
- *      MPSImageBox               <MetalPerformanceShaders/MPSImageConvolution.h>        Box blur
- *      MPSImageTent              <MetalPerformanceShaders/MPSImageConvolution.h>        Tent blur
+ *      MPSImageConvolution       <MPSImage/MPSImageConvolution.h>        General convolution
+ *      MPSImageGassianBlur       <MPSImage/MPSImageConvolution.h>        Gaussian blur
+ *      MPSImageBox               <MPSImage/MPSImageConvolution.h>        Box blur
+ *      MPSImageTent              <MPSImage/MPSImageConvolution.h>        Tent blur
  *
  * @subsection subsection_morphology  Morphology
  *  Morphological operators are similar to convolutions in that they find a result by looking at the nearest
@@ -359,10 +390,10 @@
  *
  *  Morphology filters provided by MPS include:
  *
- *      MPSImageAreaMax  <MetalPerformanceShaders/MPSImageMorphology.h>       Area Max
- *      MPSImageAreaMin  <MetalPerformanceShaders/MPSImageMorphology.h>       Area Min
- *      MPSImageDilate   <MetalPerformanceShaders/MPSImageMorphology.h>       Dilate
- *      MPSImageErode    <MetalPerformanceShaders/MPSImageMorphology.h>       Erode
+ *      MPSImageAreaMax  <MPSImage/MPSImageMorphology.h>       Area Max
+ *      MPSImageAreaMin  <MPSImage/MPSImageMorphology.h>       Area Min
+ *      MPSImageDilate   <MPSImage/MPSImageMorphology.h>       Dilate
+ *      MPSImageErode    <MPSImage/MPSImageMorphology.h>       Erode
  *
  *  @subsection subsection_histogram     Histogram
  *  A image may be examined by taking the histogram of its pixels. This gives the distribution of the various
@@ -376,10 +407,10 @@
  *
  *  Histogram filters provided by MPS include:
  *
- *      MPSImageHistogram              <MetalPerformanceShaders/MPSImageHistogram.h>     Calculate the histogram of an image
- *      MPSImageHistogramEqualization  <MetalPerformanceShaders/MPSImageHistogram.h>     Redistribute intensity in an image to equalize
+ *      MPSImageHistogram              <MPSImage/MPSImageHistogram.h>     Calculate the histogram of an image
+ *      MPSImageHistogramEqualization  <MPSImage/MPSImageHistogram.h>     Redistribute intensity in an image to equalize
  *                                                                          the histogram
- *      MPSImageHistogramSpecification <MetalPerformanceShaders/MPSImageHistogram.h>     A generalized version of histogram equalization
+ *      MPSImageHistogramSpecification <MPSImage/MPSImageHistogram.h>     A generalized version of histogram equalization
  *                                                                          operation. Convert the image so that its histogram
  *                                                                          matches the desired histogram provided to the
  *                                                                          histogram specification filter.
@@ -391,7 +422,7 @@
  *
  *  Median filters provided by MPS include:
  *
- *      MPSImageMedian                <MetalPerformanceShaders/MPSImageMedian.h>         Calculate the median of an image using a
+ *      MPSImageMedian                <MPSImage/MPSImageMedian.h>         Calculate the median of an image using a
  *                                                                     square filter window.
  *
  *  @subsection subsection_resampling  Image Resampling
@@ -415,7 +446,7 @@
  *  MetalPerformanceShaders.framework provides a MPSImageLanczosScale function to allow for simple resizing of images into the clipRect
  *  of the result image. It can operate with preservation of aspect ratio or not. 
  *
- *      MPSImageLanczosScale              <MetalPerformanceShaders/MPSResample.h>   Resize or adjust aspect ratio of an image.
+ *      MPSImageLanczosScale              <MPSImage/MPSResample.h>   Resize or adjust aspect ratio of an image.
  *
  *  @subsection subsection_threshold     Image Threshold
  *  Thresholding operations are commonly used to separate elements of image structure from the rest of an image. 
@@ -425,30 +456,58 @@
  *
  *  A variety of thresholding operators are supported:
  *
- *      MPSImageThresholdBinary           <MetalPerformanceShaders/MPSImageThreshold.h>  srcPixel > thresholdVal ? maxVal : 0
- *      MPSImageThresholdBinaryInverse    <MetalPerformanceShaders/MPSImageThreshold.h>  srcPixel > thresholdVal ? 0 : maxVal
- *      MPSImageThresholdTruncate         <MetalPerformanceShaders/MPSImageThreshold.h>  srcPixel > thresholdVal ? thresholdVal : srcPixel
- *      MPSImageThresholdToZero           <MetalPerformanceShaders/MPSImageThreshold.h>  srcPixel > thresholdVal ? srcPixel : 0
- *      MPSImageThresholdToZeroInverse    <MetalPerformanceShaders/MPSImageThreshold.h>  srcPixel > thresholdVal ? 0 : srcPixel
- *
+ *      MPSImageThresholdBinary           <MPSImage/MPSImageThreshold.h>  srcPixel > thresholdVal ? maxVal : 0
+ *      MPSImageThresholdBinaryInverse    <MPSImage/MPSImageThreshold.h>  srcPixel > thresholdVal ? 0 : maxVal
+ *      MPSImageThresholdTruncate         <MPSImage/MPSImageThreshold.h>  srcPixel > thresholdVal ? thresholdVal : srcPixel
+ *      MPSImageThresholdToZero           <MPSImage/MPSImageThreshold.h>  srcPixel > thresholdVal ? srcPixel : 0
+ *      MPSImageThresholdToZeroInverse    <MPSImage/MPSImageThreshold.h>  srcPixel > thresholdVal ? 0 : srcPixel
+ *
+ *
+ *  @subsection subsection_math     Math Filters
+ *  Arithmetic filters take two source images, a primary source image and a secondary source image, as input and
+ *  output a single destination image. The filters apply an element-wise arithmetic operator to each pixel in a primary source
+ *  image and a corresponding pixel in a secondary source image over a specified region. The supported arithmetic operators
+ *  are addition, subtraction, multiplication, and division.
+ *
+ *  These filters take additional parameters: primaryScale, secondaryScale, and bias and apply them to the primary source
+ *  pixel (x) and secondary source pixel (y) in the following way:
+ *
+ *      MPSImageAdd         <MPSImage/MPSImageMath.h>  Element-wise addition operator:      result = ((primaryScale * x) + (secondaryScale * y)) + bias
+ *      MPSImageSubtract    <MPSImage/MPSImageMath.h>  Element-wise subtraction operator    result = ((primaryScale * x) - (secondaryScale * y)) + bias
+ *      MPSImageMultiply    <MPSImage/MPSImageMath.h>  Element-wise multiplication operator result = ((primaryScale * x) * (secondaryScale * y)) + bias
+ *      MPSImageDivide      <MPSImage/MPSImageMath.h>  Element-wise division operator       result = ((primaryScale * x) / (secondaryScale * y)) + bias
+ *
+ *  These filters also take the following additional parameters: secondarySourceStrideInPixelsX and secondarySourceStrideInPixelsY.
+ *  The default value of these parameters is 1. Setting both of these parameters to 0 results in the secondarySource image being
+ *  handled as a single pixel.
  *
  *  @subsection subsection_CNN     Convolutional Neural Networks
  *  Convolutional Neural Networks (CNN) is a machine learning technique that attempts to model the visual cortex as a sequence 
  *  of convolution, rectification, pooling and normalization steps. Several CNN filters commonly derived from the MPSCNNKernel
  *  base class are provided to help you implement these steps as efficiently as possible.
  *
- *      MPSCNNNeuronLinear              <MetalPerformanceShaders/MPSCNN.h>      A linear neuron activation function
- *      MPSCNNNeuronReLU                <MetalPerformanceShaders/MPSCNN.h>      A neuron activation function with rectified linear units
- *      MPSCNNNeuronSigmoid             <MetalPerformanceShaders/MPSCNN.h>      A sigmoid neuron activation function 1/(1+e**-x)
- *      MPSCNNNeuronTanH                <MetalPerformanceShaders/MPSCNN.h>      A neuron activation function using hyperbolic tangent
- *      MPSCNNConvolution               <MetalPerformanceShaders/MPSCNN.h>      A 4D convolution tensor
- *      MPSCNNFullyConnected            <MetalPerformanceShaders/MPSCNN.h>      A fully connected CNN layer
- *      MPSCNNPoolingMax                <MetalPerformanceShaders/MPSCNN.h>      The maximum value in the pooling area
- *      MPSCNNPoolingAverage            <MetalPerformanceShaders/MPSCNN.h>      The average value in the pooling area
- *      MPSCNNSpatialNormalization      <MetalPerformanceShaders/MPSCNN.h>
- *      MPSCNNCrossChannelNormalization <MetalPerformanceShaders/MPSCNN.h>
- *      MPSCNNSoftmax                   <MetalPerformanceShaders/MPSCNN.h>      exp(pixel(x,y,k))/sum(exp(pixel(x,y,0)) ... exp(pixel(x,y,N-1))
- *      MPSCNNLogSoftmax                <MetalPerformanceShaders/MPSCNN.h>      pixel(x,y,k) - ln(sum(exp(pixel(x,y,0)) ... exp(pixel(x,y,N-1)))
+
+ *      MPSCNNNeuronLinear              <MPSNeuralNetwork/MPSCNNConvolution.h>       A linear neuron activation function
+ *      MPSCNNNeuronReLU                <MPSNeuralNetwork/MPSCNNConvolution.h>       A neuron activation function with rectified linear units
+ *      MPSCNNNeuronSigmoid             <MPSNeuralNetwork/MPSCNNConvolution.h>       A sigmoid neuron activation function 1/(1+e**-x)
+ *      MPSCNNNeuronHardSigmoid         <MPSNeuralNetwork/MPSCNNConvolution.h>       A hard sigmoid neuron activation function clamp((a*x)+b, 0, 1)
+ *      MPSCNNNeuronTanH                <MPSNeuralNetwork/MPSCNNConvolution.h>       A neuron activation function using hyperbolic tangent
+ *      MPSCNNNeuronAbsolute            <MPSNeuralNetwork/MPSCNNConvolution.h>       An absolute neuron activation function |x|
+ *      MPSCNNNeuronSoftPlus            <MPSNeuralNetwork/MPSCNNConvolution.h>       A parametric SoftPlus neuron activation function a*log(1+e**(b*x))
+ *      MPSCNNNeuronSoftSign            <MPSNeuralNetwork/MPSCNNConvolution.h>       A SoftSign neuron activation function x/(1+|x|)
+ *      MPSCNNNeuronELU                 <MPSNeuralNetwork/MPSCNNConvolution.h>       A parametric ELU neuron activation function x<0 ? (a*(e**x-1)) : x
+ *      MPSCNNConvolution               <MPSNeuralNetwork/MPSCNNConvolution.h>       A 4D convolution tensor
+ *      MPSCNNFullyConnected            <MPSNeuralNetwork/MPSCNNConvolution.h>       A fully connected CNN layer
+ *      MPSCNNPoolingMax                <MPSNeuralNetwork/MPSCNNPooling.h>           The maximum value in the pooling area
+ *      MPSCNNPoolingAverage            <MPSNeuralNetwork/MPSCNNPooling.h>           The average value in the pooling area
+ *      MPSCNNPoolingL2Norm             <MPSNeuralNetwork/MPSCNNPooling.h>           The L2-Norm value in the pooling area
+ *      MPSCNNDilatedPoolingMax         <MPSNeuralNetwork/MPSCNNPooling.h>           The maximum value in the dilated pooling area
+ *      MPSCNNSpatialNormalization      <MPSNeuralNetwork/MPSCNNNormalization.h>
+ *      MPSCNNCrossChannelNormalization <MPSNeuralNetwork/MPSCNNNormalization.h>
+ *      MPSCNNSoftmax                   <MPSNeuralNetwork/MPSCNNSoftMax.h>           exp(pixel(x,y,k))/sum(exp(pixel(x,y,0)) ... exp(pixel(x,y,N-1))
+ *      MPSCNNLogSoftmax                <MPSNeuralNetwork/MPSCNNSoftMax.h>           pixel(x,y,k) - ln(sum(exp(pixel(x,y,0)) ... exp(pixel(x,y,N-1)))
+ *      MPSCNNUpsamplingNearest         <MPSNeuralNetwork/MPSCNNUpsampling.h>        A nearest upsampling layer.
+ *      MPSCNNUpsamplingBilinear        <MPSNeuralNetwork/MPSCNNUpsampling.h>        A bilinear upsampling layer.
  *
  *  MPSCNNKernels operate on MPSImages.  MPSImages are at their core MTLTextures. However, whereas
  *  MTLTextures commonly represent image or texel data, a MPSImage is a more abstract representation
@@ -611,6 +670,170 @@
  *       return          A new valid MTLTexture to use as the destination for the MPSKernel.
  *                       The format of the returned texture does not need to match sourceTexture.
  *
+ *  @section  section_mpsnngraph   The MPSNNGraph
+ *  New for macOS 10.13, iOS/tvOS 11 is a higher level graph API, intended to simplify the creation of
+ *  neural networks. The graph is a network of MPSNNFilterNodes, MPSNNImageNodes and  MPSNNStateNodes. 
+ *  MPSNNImageNodes represent MPSImages or MPSTemporaryImages. MPSNNFilterNodes represent MPSCNNKernel
+ *  objects -- each of the lower level MPSCNNKernel subclasses has a sister object that is a
+ *  subclass of the MPSNNFilterNode. Finally, MPSStateNodes stand in for MPSState objects. 
+ *
+ *  MPSState objects are also new for macOS 10.13, iOS/tvOS 11. They stand in for bits of opaque state that
+ *  need to be handed  between filter nodes.  For example, a MPSCNNConvolutionTranspose filter may need to
+ *  know the original size of the filter passed into the corresponding MPSCNNConvolution node farther up the
+ *  tree. There is a corresponding MPSCNNConvolutionState object that tracks this information. You will
+ *  encounter state objects only infrequently. Most graphs are made up of images and filters.
+ *
+ *  To represent a graph, one usually first creates a MPSNNImageNode. This represents the input image or
+ *  tensor into the graph. Next one creates a the first filter node to process that input image. For example,
+ *  we may make a MPSCNNNeuronLinearNode to normalize the data before the rest of the graph sees it. (y = 2x-1)
+ *  Then, we can add our first convolution in the graph.
+ *  @code
+ *      //  Graph:   [Image] -> [Linear neuron filter] -> (result image) -> [convolution filter] -> (result image)...
+ *      MPSNNImageNode *startNode = [MPSImageNode nodeWithHandle: nil];
+ *      MPSCNNNeuronLinearNode *norm = [MPSCNNNeuronLinearNode nodeWithSource: startNode a: 2.0f b: -1.0f ];
+ *      MPSCNNConvolutionNode *c = [MPSCNNConvolutionNode nodeWithSource: norm.resultImage
+ *                                                               weights: [[MyWeights alloc] initWithPath: "weights1.dat"]];
+ *      ...
+ *  @endcode
+ *  There are some features to notice about each object. First of all, each image node can have a handle 
+ *  associated with it. The handle is your object that you write. It should conform to the <MPSHandle>
+ *  protocol, which specifies that the object should have a label and conform to NSSecureCoding. (The MTLTexture
+ *  does have a label property but doesn't conform to NSSecureCoding.) NSSecureCoding is used when you
+ *  save the graph to disk using a NSCoder. It isn't used otherwise. You can use a MTLResource here if 
+ *  you don't plan to save the graph to disk.  What is the handle for?  When the MPSNNGraph object is 
+ *  constructed -- the MPSNNGraph takes the network of filter, state and image nodes and rolls it into
+ *  an object that can actually encode work to a MTLCommandBuffer -- the graph object will traverse the
+ *  graph from back to front and determine which image nodes are not produced by filters in the graph.
+ *  These, it will inteprety to be graph input images.  There may be input states too. When it does so, 
+ *  it will represent these image and state nodes as the handles you attach to them. Therefore, the handles
+ *  probably should be objects of your own making that refer back to your own data structures that identify
+ *  various images that you know about. 
+ *
+ *  Continuing on to the neuron filter, which we are using to just take the usual image [0,1] range and stretch
+ *  to [-1,1] before the rest of the graph sees it, we see we can pass in the linear filter parameters when constructing
+ *  it here. All filter nodes also produce a result image. This is used as the argument when constructing the
+ *  convolution filter node next, to show that the product of the neuron filter is the input to the convolution
+ *  filter.
+ *
+ *  The convolution object constructor also takes a weights object. The weights object is an object that you write
+ *  that conforms to the MPSCNNConvolutionDataSource protocol. MPS does not provide an object that conforms
+ *  to this protocol, though you can see some examples in sample code that use this interface.  The convolution
+ *  data source object is designed to provide for deferred loading of convolution weights. Convolution weights
+ *  can be large. In aggregate, the storage for all the weights in the MPSNNGraph, plus the storage for your
+ *  copy of them might start to approach the storage limits of the machine for larger graphs. In order to lessen
+ *  this impact, the convolution weights are unpacked for each convolution in turn and then purged from memory
+ *  so that only the single MPSNNGraph copy remains.  This happens when the MPSCNNConvolutionDataSource load
+ *  and purge methods are called. You should not load the weights until -load is called. (You probably should
+ *  however verify that the file, if any, is there and is well formed in the object -init method.) When -purge 
+ *  is called, you should release any bulky storage that the object owns and and make the object as light weight
+ *  as is reasonable. The MPSCNNConvolutionDataSource.descriptor may include a neuron filter operation.
+ *
+ *  Other object types should be straightforward. 
+ *
+ *  @section  subsection_mpsnngraph_usage   MPSNNGraph usage
+ *  Once the network of MPSNNFilterNodes, MPSNNImageNodes and MPSNNStateNodes is created, the next
+ *  step is to identify the MPSNNImageNode that contains the result of your graph -- typically, this
+ *  is the last one you made -- and make a MPSNNGraph with it:
+ *  @code
+ *      MPSNNGraph *graph = [[MPSNNGraph alloc] initWithDevice: mtlDevice
+ *                                                 resultImage: resultImage];
+ *  @endcode
+ *  If graph creation fails, nil will be returned here. When it is constructed, the graph iterates over
+ *  the network of nodes, starting at the result image and working backwards. Any MPSNNImageNodes and states 
+ *  that are used that are not created by a MPSNNFilterNode are interpreted to be graph inputs. The
+ *  identity of these are given by the MPSNNGraph.sourceImageHandles and MPSNNGraph.sourceStateHandles. 
+ *  Each handle is your object that refers back to a particular image or state node. The order of the handles
+ *  matches the order of the images or states that should be passed to the [MPSNNGraph encodeToCommandBuffer:...]
+ *  call. Similarly, you can get the identity of any intermediate images that you requested to see (See
+ *  MPSNNImageNode.exportFromGraph property) and the identity of any result MPSStates that are produced
+ *  by the graph that are not used.   The graph has a destinationImageAllocator that overrides the 
+ *  MPSNNImageNode.destinationImageAllocator. (see subsection MPSNNGraph intermediate image allocation)
+ *  Typically, this serves to make a default temporary image into a normal image, as a convenience.
+ *
+ *  When you are ready to encode a graph to a command buffer, the operation follows as per much of the 
+ *  rest of MPS. 
+ *  @code
+ *      id <MTLDevice> mtlDevice = MTLCreateSystemDefaultDevice();
+ *      id <MTLCommandQueue> mtlCommandQueue.commandBuffer = mtlDevice.newCommandQueue;
+ *      id <MTLCommandBuffer> cmdBuf = mtlCommandQueue.commandBuffer;
+ *      MPSImage *inputImage = [[MPSImage alloc] initWithDevice: mtlDevice imageDescriptor: myDescriptor];
+ *      // put some data into the input image here. See MTLTexture.replaceBytes...
+ *      MPSImage * result = [myGraph encodeToCommandBuffer: cmdBuf sourceImages: @[inputImage] ];
+ *      [cmdBuf commit];
+ *      [cmdBuf waitForCompletion];
+ *  @endcode
+ *  Obviously, if you have more work to do before or after the graph, it might be better to add it to the 
+ *  command buffer before committing it, rather than paying for an extra synchronization from 
+ *  [id <MTLCommandBuffer> waitForCompletion].
+ *
+ *  @section  subsection_mpsnngraph_sizing   MPSNNGraph intermediate image sizing and centering
+ *  The MPSNNGraph will automatically size and center the intermediate images that appear in the graph.
+ *  However, different neural network frameworks do so differently. In addition, some filters may 
+ *  at times operate on only valid pixels in the source image, whereas others may "look beyond the
+ *  edges" so as to keep the result image size the same as the input. Occasionally some filters will
+ *  want to produce results for which any input is valid. Perhaps some want to behave in between. Torch
+ *  has some particularly inventive edging policies for pooling that have valid invalid regions and 
+ *  invalid invalid regions beyond the edges of the image.
+ *
+ *  Whatever the behavior, you will use the MPSNNFilter.paddingPolicy property to configure behavior.
+ *  In its simplest form, a paddingPolicy is a object (possibly written by you, though MPS provides some)
+ *  that conforms to the MPSNNPadding protocol. It should at minimum provide a padding method, which codes
+ *  for common methods to size the result image, how to center it on the input image and where to place
+ *  the remainder in cases where the image size isn't exactly divisible by the stride. This is a bitfield.
+ *  You can use:
+ *  @code
+ *      [MPSNNDefaultPadding paddingWithMethod: MPSNNPaddingMethodAlign... | MPSNNPaddingMethodAddRemainderTo...
+ *                                              MPSNNPaddingMethodSize... ];
+ *  @endcode
+ *  To quickly configure one of these. The filters also have a default padding policy, which may be
+ *  appropriate most of the time.  
+ *
+ *  Occasionally, something fancy needs to be done. In that case, the padding policy should set the 
+ *  MPSNNPaddingMethodCustom bit and implement the optional destinationImageDescriptorForSourceImages:
+ *  sourceStates:forKernel:suggestedDescriptor: method. The MPSNNGraph will use the MPSNNPadding.paddingMethod
+ *  to generate an initial guess for the configuration of the MPSCNNKernel.offset and the size and formatting
+ *  of the result image and hand that to you in the form of a MPSImageDescriptor. You can modify the descriptor
+ *  or the kernel (also passed to you) in your custom destinationImageDescriptorForSourceImages:sourceStates:
+ *  forKernel:suggestedDescriptor: method, or just ignore it and make a new descriptor.
+ *
+ *  @section  subsection_mpsnngraph_sizing   MPSNNGraph intermediate image allocation
+ *  Typically the graph will make MPSTemporaryImages for these, based on the MPSImageDescriptor obtained
+ *  from the padding policy. Temporary images alias one another and can be used to save a lot of memory,
+ *  in the same way that malloc saves memory in your application by allowing you to reserve memory for 
+ *  a time, use it, then free it for reuse for something else. Ideally, most of the storage in your graph
+ *  should be temporary images.
+ *
+ *  Because temporary images don't (shouldn't) last long, and can't be read by the CPU, some images
+ *  probably can't be temporary. By default, the final image returned from the graph is not temporary.
+ *  (See MPSNNGraph.destinationImageAllocator to adjust).  Also, you may request that certain intermediate
+ *  images be non-temporary so that you can access their contents from outside the graph using the
+ *  MPSNNImageNode.exportFromGraph property. 
+ *
+ *  Temporary images often take up almost no additional memory. Regular images always do.  Some large graphs will only
+ *  be able to run using temporary memory, as regular images would overwhelm the machine. Even if you allocate
+ *  all your images up front and reuse them over and over, you will still very likely use much more memory with
+ *  regular images, than if you just allocate temporary images as needed. Because temporary images do not
+ *  generally allocate large amounts of storage, they are much cheaper and faster to use.
+ *
+ *  What kind of image is created after each filter node can be adjusted using the MPSNNImageNode.imageAllocator 
+ *  property.  Two standard allocators are provided as MPSImage.defaultAllocator and MPSTemporaryImage.defaultAllocator.
+ *  You may of course write your own. This might be necessary for example if you wish to maintain your own 
+ *  MTLHeap and allocate from it.
+ *
+ *  @section  subsection_mpsnngraph_debugging   MPSNNGraph debugging tips
+ *  In typical usage, some refinement, especially of padding policies, may be required to get the expected answer
+ *  from MPS. If the result image is the wrong size, padding is typically the problem. When the answers are incorrect,
+ *  the MPSCNNKernel.offset or other property may be incorrectly configured at some stage.  As the graph is generated 
+ *  starting from an output image node, you may create other graphs starting at any image node within the graph. 
+ *  This will give you a view into the result produced from each intermediate layer with a minimum of fuss.  In 
+ *  addition, the usual NSObject -debugDescription method is available to inspect objects to make sure they conform 
+ *  to expectation.
+ *
+ *  Note that certain operations such as neuron filters that follow convolution filters and image concatenation
+ *  may be optimized away by the MPSNNGraph when it is constructed. The convolution can do neuron operations as
+ *  part of its operation.  Concatenation is best done by writing the result of earlier filter passes in the right
+ *  place using MPSCNNKernel.destinationFeatureChannelOffset rather than by adding an extra copy. Other optimizations 
+ *  may be added as framework capabilities improve.
  *
  *  @section  section_samplecode   Sample Code
  *      @code
@@ -702,4 +925,7 @@
  *  for each tile. 
  */
 
+#ifdef __cplusplus
+}
+#endif
 
Clone this wiki locally