%install-location $cwd/swift-install
%install '.package(path: "$cwd/FastaiNotebook_06_cuda")' FastaiNotebook_06_cuda

//export
import Path
import TensorFlow
import Python

import FastaiNotebook_06_cuda

%include "EnableIPythonDisplay.swift"
IPythonDisplay.shell.enable_matplotlib("inline")

let data = mnistDataBunch(flat: false, bs: 512)

func optFunc(_ model: CnnModel) -> SGD<CnnModel> { return SGD(for: model, learningRate: 0.4) }
func modelInit() -> CnnModel { return CnnModel(channelIn: 1, nOut: 10, filters: [8, 16, 32, 32]) }
let learner = Learner(data: data, lossFunc: softmaxCrossEntropy, optFunc: optFunc, modelInit: modelInit)
let recorder = learner.makeDefaultDelegates(metrics: [accuracy])
learner.addDelegates([learner.makeNormalize(mean: mnistStats.mean, std: mnistStats.std),
                      learner.makeAddChannel()])

time { try! learner.fit(1) }

struct AlmostBatchNorm<Scalar: TensorFlowFloatingPoint>: Differentiable {
    // Configuration hyperparameters
    @noDerivative let momentum, epsilon: Scalar
    // Running statistics
    @noDerivative var runningMean, runningVariance: Tensor<Scalar>
    // Trainable parameters
    var scale, offset: Tensor<Scalar>

    init(featureCount: Int, momentum: Scalar = 0.9, epsilon: Scalar = 1e-5) {
        self.momentum = momentum
        self.epsilon = epsilon
        self.scale = Tensor(ones: [featureCount])
        self.offset = Tensor(zeros: [featureCount])
        self.runningMean = Tensor(0)
        self.runningVariance = Tensor(1)
    }

    mutating func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        let mean: Tensor<Scalar>
        let variance: Tensor<Scalar>
        switch Context.local.learningPhase {
        case .training:
            mean = input.mean(alongAxes: [0, 1, 2])
            variance = input.variance(alongAxes: [0, 1, 2])
            runningMean += (mean - runningMean) * (1 - momentum)
            runningVariance += (variance - runningVariance) * (1 - momentum)
        case .inference:
            mean = runningMean
            variance = runningVariance
        }
        let normalizer = rsqrt(variance + epsilon) * scale
        return (input - mean) * normalizer + offset
    }
}

//export
public class Reference<T> {
    public var value: T
    public init(_ value: T) { self.value = value }
}

//export
public protocol LearningPhaseDependent: FALayer {
    associatedtype Input
    associatedtype Output
    @differentiable func forwardTraining(_ input: Input) -> Output
    @differentiable func forwardInference(_ input: Input) -> Output
}

extension LearningPhaseDependent {
    // This `@differentiable` attribute is necessary to tell the compiler that this satisfies the FALayer
    // protocol requirement, even though there is a `@differentiating(forward)` method below.
    // TODO: It seems nondeterministically necessary. Some subsequent notebooks import this successfully
    // without it, some require it. Investigate.
    @differentiable
    public func forward(_ input: Input) -> Output {
        switch Context.local.learningPhase {
        case .training:  return forwardTraining(input)
        case .inference: return forwardInference(input)
        }
    }

    @differentiating(forward)
    func gradForward(_ input: Input) ->
        (value: Output, pullback: (Self.Output.TangentVector) ->
            (Self.TangentVector, Self.Input.TangentVector)) {
        switch Context.local.learningPhase {
        case .training:  return valueWithPullback(at: input) { $0.forwardTraining($1) }
        case .inference: return valueWithPullback(at: input) { $0.forwardInference($1) }
        }
    }
}
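// A minimal sketch (not part of the original notebook) of how the phase that
// LearningPhaseDependent switches on is controlled: the Learner's delegates
// (or user code) set the thread-local `Context.local.learningPhase` before a
// forward pass, so the same layer uses batch statistics while training and
// running statistics at inference time.
Context.local.learningPhase = .training
print(Context.local.learningPhase)   // training: forwardTraining will be used
Context.local.learningPhase = .inference
print(Context.local.learningPhase)   // inference: forwardInference will be used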
//export
public protocol Norm: Layer where Input == Tensor<Scalar>, Output == Tensor<Scalar> {
    associatedtype Scalar: TensorFlowFloatingPoint
    init(featureCount: Int, epsilon: Scalar)
}

public struct FABatchNorm<Scalar: TensorFlowFloatingPoint>: LearningPhaseDependent, Norm {
    // TF-603 workaround.
    public typealias Input = Tensor<Scalar>
    public typealias Output = Tensor<Scalar>
    @noDerivative public var delegates: [(Self.Output) -> ()] = []

    // Configuration hyperparameters
    @noDerivative var momentum, epsilon: Scalar
    // Running statistics
    @noDerivative let runningMean, runningVariance: Reference<Tensor<Scalar>>
    // Trainable parameters
    public var scale, offset: Tensor<Scalar>

    public init(featureCount: Int, momentum: Scalar, epsilon: Scalar = 1e-5) {
        self.momentum = momentum
        self.epsilon = epsilon
        self.scale = Tensor(ones: [featureCount])
        self.offset = Tensor(zeros: [featureCount])
        self.runningMean = Reference(Tensor(0))
        self.runningVariance = Reference(Tensor(1))
    }

    public init(featureCount: Int, epsilon: Scalar = 1e-5) {
        self.init(featureCount: featureCount, momentum: 0.9, epsilon: epsilon)
    }

    @differentiable
    public func forwardTraining(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        let mean = input.mean(alongAxes: [0, 1, 2])
        let variance = input.variance(alongAxes: [0, 1, 2])
        runningMean.value += (mean - runningMean.value) * (1 - momentum)
        runningVariance.value += (variance - runningVariance.value) * (1 - momentum)
        let normalizer = rsqrt(variance + epsilon) * scale
        return (input - mean) * normalizer + offset
    }

    @differentiable
    public func forwardInference(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        let mean = runningMean.value
        let variance = runningVariance.value
        let normalizer = rsqrt(variance + epsilon) * scale
        return (input - mean) * normalizer + offset
    }
}

//export
struct BatchNormResult<Scalar: TensorFlowFloatingPoint>: Differentiable {
    var y, batchMean, batchVariance, reserveSpace1, reserveSpace2: Tensor<Scalar>
}

public struct TFBatchNorm<Scalar: TensorFlowFloatingPoint>: LearningPhaseDependent, Norm {
    // Configuration hyperparameters
    @noDerivative var momentum, epsilon: Scalar
    // Running statistics
    @noDerivative let runningMean, runningVariance: Reference<Tensor<Scalar>>
    // Trainable parameters
    public var scale, offset: Tensor<Scalar>
    @noDerivative public var delegates: [(Self.Output) -> ()] = []

    public init(featureCount: Int, momentum: Scalar, epsilon: Scalar = 1e-5) {
        self.momentum = momentum
        self.epsilon = epsilon
        self.scale = Tensor(ones: [featureCount])
        self.offset = Tensor(zeros: [featureCount])
        self.runningMean = Reference(Tensor(0))
        self.runningVariance = Reference(Tensor(1))
    }

    public init(featureCount: Int, epsilon: Scalar = 1e-5) {
        self.init(featureCount: featureCount, momentum: 0.9, epsilon: epsilon)
    }

    @differentiable
    public func forwardTraining(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        let res = TFBatchNorm.fusedBatchNorm(
            input, scale: scale, offset: offset, epsilon: epsilon)
        let (output, mean, variance) = (res.y, res.batchMean, res.batchVariance)
        runningMean.value += (mean - runningMean.value) * (1 - momentum)
        runningVariance.value += (variance - runningVariance.value) * (1 - momentum)
        return output
    }

    @differentiable
    public func forwardInference(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        let mean = runningMean.value
        let variance = runningVariance.value
        let normalizer = rsqrt(variance + epsilon) * scale
        return (input - mean) * normalizer + offset
    }

    @differentiable(wrt: (x, scale, offset), vjp: _vjpFusedBatchNorm)
    static func fusedBatchNorm(
        _ x: Tensor<Scalar>, scale: Tensor<Scalar>, offset: Tensor<Scalar>, epsilon: Scalar
    ) -> BatchNormResult<Scalar> {
        let ret = Raw.fusedBatchNormV2(
            x, scale: scale, offset: offset,
            mean: Tensor([] as [Scalar]), variance: Tensor([] as [Scalar]),
            epsilon: Double(epsilon))
        return BatchNormResult(
            y: ret.y, batchMean: ret.batchMean, batchVariance: ret.batchVariance,
            reserveSpace1: ret.reserveSpace1, reserveSpace2: ret.reserveSpace2
        )
    }

    static func _vjpFusedBatchNorm(
        _ x: Tensor<Scalar>, scale: Tensor<Scalar>, offset: Tensor<Scalar>, epsilon: Scalar
    ) -> (BatchNormResult<Scalar>,
          (BatchNormResult<Scalar>.TangentVector) ->
              (Tensor<Scalar>.TangentVector, Tensor<Scalar>.TangentVector, Tensor<Scalar>.TangentVector)) {
        let bnresult = fusedBatchNorm(x, scale: scale, offset: offset, epsilon: epsilon)
        return (
            bnresult,
            { v in
                let res = Raw.fusedBatchNormGradV2(
                    yBackprop: v.y, x, scale: Tensor(scale),
                    reserveSpace1: bnresult.reserveSpace1, reserveSpace2: bnresult.reserveSpace2,
                    epsilon: Double(epsilon))
                return (res.xBackprop, res.scaleBackprop, res.offsetBackprop)
            })
    }
}
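// A quick illustrative check (not in the original notebook): FABatchNorm can
// update its running statistics from a non-mutating, differentiable forward
// pass because they live behind Reference boxes (class instances), not in the
// struct's own stored properties. The names below are only for this demo.
Context.local.learningPhase = .training
let bnCheck = FABatchNorm<Float>(featureCount: 8)
let bnInput = Tensor<Float>(randomNormal: [2, 4, 4, 8])
let _ = bnCheck.forwardTraining(bnInput)
print(bnCheck.runningMean.value)      // no longer the initial Tensor(0)
print(bnCheck.runningVariance.value)  // no longer the initial Tensor(1)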
//export
public struct ConvBN<Scalar: TensorFlowFloatingPoint>: FALayer {
    // TF-603 workaround.
    public typealias Input = Tensor<Scalar>
    public typealias Output = Tensor<Scalar>
    @noDerivative public var delegates: [(Self.Output) -> ()] = []
    public var conv: FANoBiasConv2D<Scalar>
    public var norm: FABatchNorm<Scalar>

    public init(_ cIn: Int, _ cOut: Int, ks: Int = 3, stride: Int = 1) {
        // TODO (when control flow AD works): use Conv2D without bias
        self.conv = FANoBiasConv2D(cIn, cOut, ks: ks, stride: stride, activation: relu)
        self.norm = FABatchNorm(featureCount: cOut, epsilon: 1e-5)
    }

    @differentiable
    public func forward(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        return norm.forward(conv.forward(input))
    }
}

// Would be great if this generic could work:
// struct ConvNorm<NormType: Norm, Scalar: TensorFlowFloatingPoint>: Layer
//     where NormType.Scalar == Scalar {
//     var conv: Conv2D<Scalar>
//     var norm: NormType
//     init(
//         filterShape: (Int, Int, Int, Int),
//         strides: (Int, Int) = (1, 1),
//         padding: Padding = .valid,
//         activation: @escaping Conv2D<Scalar>.Activation = identity
//     ) {
//         // TODO (when control flow AD works): use Conv2D without bias
//         self.conv = Conv2D(
//             filterShape: filterShape,
//             strides: strides,
//             padding: padding,
//             activation: activation)
//         self.norm = NormType.init(featureCount: filterShape.3, epsilon: 1e-5)
//     }
//     @differentiable
//     func applied(to input: Tensor<Scalar>) -> Tensor<Scalar> {
//         return norm.applied(to: conv.applied(to: input))
//     }
// }
// typealias ConvBN = ConvNorm<FABatchNorm<Float>, Float>

//export
public struct CnnModelBN: Layer {
    public var convs: [ConvBN<Float>]
    public var pool = FAGlobalAvgPool2D<Float>()
    public var linear: FADense<Float>
    @noDerivative public var delegates: [(Self.Output) -> ()] = []

    public init(channelIn: Int, nOut: Int, filters: [Int]) {
        let allFilters = [channelIn] + filters
        convs = Array(0..<filters.count).map { i in
            return ConvBN(allFilters[i], allFilters[i+1], stride: 2)
        }
        linear = FADense<Float>(filters.last!, nOut)
    }

    @differentiable
    public func callAsFunction(_ input: TF) -> TF {
        // TODO: Work around https://bugs.swift.org/browse/TF-606
        return linear.forward(pool.forward(convs(input)))
    }
}

func optFunc(_ model: CnnModelBN) -> SGD<CnnModelBN> { return SGD(for: model, learningRate: 0.4) }
func modelInit() -> CnnModelBN { return CnnModelBN(channelIn: 1, nOut: 10, filters: [8, 16, 32, 32]) }
let learner = Learner(data: data, lossFunc: softmaxCrossEntropy, optFunc: optFunc, modelInit: modelInit)
let recorder = learner.makeDefaultDelegates(metrics: [accuracy])
learner.addDelegates([learner.makeNormalize(mean: mnistStats.mean, std: mnistStats.std),
                      learner.makeAddChannel()])

time { try! learner.fit(1) }
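// An illustrative sketch (not in the original notebook) of what distinguishes
// the normalization layers defined below: with NHWC activations of shape
// [batch, height, width, channels], they differ only in the axes the
// statistics are averaged over.
let probe = Tensor<Float>(randomNormal: [2, 4, 4, 3])
print(probe.mean(alongAxes: [0, 1, 2]).shape)  // BatchNorm: one statistic per channel
print(probe.mean(alongAxes: [1, 2, 3]).shape)  // LayerNorm2D: one statistic per sample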
struct LayerNorm2D<Scalar: TensorFlowFloatingPoint>: Norm {
    @noDerivative public var delegates: [(Self.Output) -> ()] = []
    // Configuration hyperparameters
    @noDerivative let epsilon: Scalar
    // Trainable parameters
    var scale: Tensor<Scalar>
    var offset: Tensor<Scalar>

    init(featureCount: Int, epsilon: Scalar = 1e-5) {
        self.epsilon = epsilon
        self.scale = Tensor(ones: [featureCount])
        self.offset = Tensor(zeros: [featureCount])
    }

    @differentiable
    func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        let mean = input.mean(alongAxes: [1, 2, 3])
        let variance = input.variance(alongAxes: [1, 2, 3])
        let normalizer = rsqrt(variance + epsilon) * scale
        return (input - mean) * normalizer + offset
    }
}

struct ConvLN<Scalar: TensorFlowFloatingPoint>: FALayer {
    @noDerivative public var delegates: [(Self.Output) -> ()] = []
    var conv: FANoBiasConv2D<Scalar>
    var norm: LayerNorm2D<Scalar>

    init(_ cIn: Int, _ cOut: Int, ks: Int = 3, stride: Int = 2) {
        // TODO (when control flow AD works): use Conv2D without bias
        self.conv = FANoBiasConv2D(cIn, cOut, ks: ks, stride: stride, activation: relu)
        self.norm = LayerNorm2D(featureCount: cOut, epsilon: 1e-5)
    }

    @differentiable
    func forward(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        return norm.callAsFunction(conv.forward(input))
    }
}

public struct CnnModelLN: Layer {
    public var convs: [ConvLN<Float>]
    public var pool = FAGlobalAvgPool2D<Float>()
    public var linear: FADense<Float>

    public init(channelIn: Int, nOut: Int, filters: [Int]) {
        let allFilters = [channelIn] + filters
        convs = Array(0..<filters.count).map { i in
            return ConvLN(allFilters[i], allFilters[i+1], stride: 2)
        }
        linear = FADense<Float>(filters.last!, nOut)
    }

    @differentiable
    public func callAsFunction(_ input: TF) -> TF {
        // TODO: Work around https://bugs.swift.org/browse/TF-606
        return linear.forward(pool.forward(convs(input)))
    }
}

struct InstanceNorm<Scalar: TensorFlowFloatingPoint>: Norm {
    @noDerivative public var delegates: [(Self.Output) -> ()] = []
    // Configuration hyperparameters
    @noDerivative let epsilon: Scalar
    // Trainable parameters
    var scale: Tensor<Scalar>
    var offset: Tensor<Scalar>

    init(featureCount: Int, epsilon: Scalar = 1e-5) {
        self.epsilon = epsilon
        self.scale = Tensor(ones: [featureCount])
        self.offset = Tensor(zeros: [featureCount])
    }

    @differentiable
    func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        let mean = input.mean(alongAxes: [2, 3])
        let variance = input.variance(alongAxes: [2, 3])
        let normalizer = rsqrt(variance + epsilon) * scale
        return (input - mean) * normalizer + offset
    }
}

struct ConvIN<Scalar: TensorFlowFloatingPoint>: FALayer {
    @noDerivative public var delegates: [(Self.Output) -> ()] = []
    var conv: FANoBiasConv2D<Scalar>
    var norm: InstanceNorm<Scalar>

    init(_ cIn: Int, _ cOut: Int, ks: Int = 3, stride: Int = 2) {
        // TODO (when control flow AD works): use Conv2D without bias
        self.conv = FANoBiasConv2D(cIn, cOut, ks: ks, stride: stride, activation: relu)
        self.norm = InstanceNorm(featureCount: cOut, epsilon: 1e-5)
    }

    @differentiable
    func forward(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        return norm.callAsFunction(conv.forward(input))
    }
}
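// A small numeric sketch (illustrative only, not from the notebook) of the
// bookkeeping RunningBatchNorm below does: it keeps running *sums* and
// *counts* rather than running means, so the statistics used for
// normalization are always sum / count. The variable names are only for this
// demo.
let demoMom: Float = 0.9
var demoSum: Float = 0
var demoCount: Float = 0
for (batchSum, batchCount) in [(Float(25), Float(100)), (18, 60)] {
    demoSum   = demoMom * demoSum   + (1 - demoMom) * batchSum
    demoCount = demoMom * demoCount + (1 - demoMom) * batchCount
}
print(demoSum / demoCount)  // running estimate of the per-element mean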
struct RunningBatchNorm<Scalar: TensorFlowFloatingPoint>: LearningPhaseDependent, Norm {
    @noDerivative public var delegates: [(Self.Output) -> ()] = []
    // Configuration hyperparameters
    @noDerivative let momentum: Scalar
    @noDerivative let epsilon: Scalar
    // Running statistics
    @noDerivative let runningSum: Reference<Tensor<Scalar>>
    @noDerivative let runningSumOfSquares: Reference<Tensor<Scalar>>
    @noDerivative let runningCount: Reference<Scalar>
    @noDerivative let samplesSeen: Reference<Int>
    // Trainable parameters
    var scale: Tensor<Scalar>
    var offset: Tensor<Scalar>

    init(featureCount: Int, momentum: Scalar, epsilon: Scalar = 1e-5) {
        self.momentum = momentum
        self.epsilon = epsilon
        self.scale = Tensor(ones: [featureCount])
        self.offset = Tensor(zeros: [featureCount])
        self.runningSum = Reference(Tensor(0))
        self.runningSumOfSquares = Reference(Tensor(0))
        self.runningCount = Reference(Scalar(0))
        self.samplesSeen = Reference(0)
    }

    init(featureCount: Int, epsilon: Scalar = 1e-5) {
        self.init(featureCount: featureCount, momentum: 0.9, epsilon: epsilon)
    }

    @differentiable
    func forwardTraining(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        let (batch, channels) = (input.shape[0], Scalar(input.shape[3]))
        let sum = input.sum(alongAxes: [0, 1, 2])
        let sumOfSquares = (input * input).sum(alongAxes: [0, 1, 2])
        // TODO: Work around https://bugs.swift.org/browse/TF-607
        let count = withoutDerivative(at: Scalar(input.scalarCount)) { tmp in tmp } / channels
        let mom = momentum / sqrt(Scalar(batch) - 1)
        let runningSum = mom * self.runningSum.value + (1 - mom) * sum
        let runningSumOfSquares = mom * self.runningSumOfSquares.value + (1 - mom) * sumOfSquares
        let runningCount = mom * self.runningCount.value + (1 - mom) * count

        self.runningSum.value = runningSum
        self.runningSumOfSquares.value = runningSumOfSquares
        self.runningCount.value = runningCount
        self.samplesSeen.value += batch

        let mean = runningSum / runningCount
        let variance = runningSumOfSquares / runningCount - mean * mean
        let normalizer = rsqrt(variance + epsilon) * scale
        return (input - mean) * normalizer + offset
    }

    @differentiable
    func forwardInference(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        let mean = runningSum.value / runningCount.value
        let variance = runningSumOfSquares.value / runningCount.value - mean * mean
        let normalizer = rsqrt(variance + epsilon) * scale
        return (input - mean) * normalizer + offset
    }
}

import NotebookExport
let exporter = NotebookExport(Path.cwd/"07_batchnorm.ipynb")
print(exporter.export(usingPrefix: "FastaiNotebook_"))