@@ -2,11 +2,12 @@ package io.github.mandar2812.dynaml.kernels
22
33import breeze .linalg .DenseMatrix
44import io .github .mandar2812 .dynaml .DynaMLPipe
5- import io .github .mandar2812 .dynaml .algebra .{ PartitionedPSDMatrix , PartitionedVector }
6- import io .github .mandar2812 .dynaml .pipes .{ DataPipe , Encoder }
5+ import io .github .mandar2812 .dynaml .algebra .PartitionedPSDMatrix
6+ import io .github .mandar2812 .dynaml .pipes ._
77
88import scala .reflect .ClassTag
99
10+
1011/**
1112 * Scalar Kernel defines algebraic behavior for kernels of the form
1213 * K: Index x Index -> Double, i.e. kernel functions whose output
@@ -39,8 +40,10 @@ CovarianceFunction[Index, Double, DenseMatrix[Double]]
3940 * return The kernel k defined above.
4041 *
4142 * */
42- def + [T <: LocalScalarKernel [Index ]](otherKernel : T ): CompositeCovariance [Index ] =
43- kernelOps.addLocalScKernels(this , otherKernel)
43+ def + [T <: LocalScalarKernel [Index ]](otherKernel : T )(implicit ev : ClassTag [Index ]): CompositeCovariance [Index ] =
44+ new DecomposableCovariance (this , otherKernel)(DynaMLPipe .genericReplicationEncoder[Index ](2 ))
45+ // kernelOps.addLocalScKernels(this, otherKernel)
46+
4447
4548 /**
4649 * Create composite kernel k = k<sub>1</sub> * k<sub>2</sub>
@@ -49,8 +52,10 @@ CovarianceFunction[Index, Double, DenseMatrix[Double]]
4952 * @return The kernel k defined above.
5053 *
5154 * */
52- def * [T <: LocalScalarKernel [Index ]](otherKernel : T ): CompositeCovariance [Index ] =
53- kernelOps.multLocalScKernels(this , otherKernel)
55+ def * [T <: LocalScalarKernel [Index ]](otherKernel : T )(implicit ev : ClassTag [Index ]): CompositeCovariance [Index ] =
56+ new DecomposableCovariance (this , otherKernel)(
57+ DynaMLPipe .genericReplicationEncoder[Index ](2 ),
58+ DecomposableCovariance .:*: )
5459
5560 def :* [T1 ](otherKernel : LocalScalarKernel [T1 ]): CompositeCovariance [(Index , T1 )] =
5661 new KernelOps .PairOps [Index , T1 ].tensorMultLocalScKernels(this , otherKernel)
@@ -85,7 +90,7 @@ abstract class CompositeCovariance[T]
8590 */
class DecomposableCovariance[S](kernels: LocalScalarKernel[S]*)(
  implicit encoding: Encoder[S, Array[S]],
  reducer: Reducer = DecomposableCovariance.:+:) extends CompositeCovariance[S] {

  // Index the component kernels by their simple (unqualified) class name;
  // hyper-parameters are scoped as "<kernelName>/<paramName>" elsewhere,
  // so this map resolves a scope prefix back to its kernel.
  // NOTE(review): two kernels of the same class would collide on the key —
  // confirm callers never pass duplicate kernel types.
  val kernelMap = kernels.map(k => (k.toString.split("\\.").last, k)).toMap
@@ -111,9 +116,9 @@ class DecomposableCovariance[S](kernels: LocalScalarKernel[S]*)(
111116 assert(effective_hyper_parameters.forall(h.contains),
112117 " All hyper parameters must be contained in the arguments" )
113118 // group the hyper params by kernel id
114- h.toSeq.map(kv => {
119+ h.toSeq.filterNot(_._1.split( " / " ).length == 1 ). map(kv => {
115120 val idS = kv._1.split(" /" )
116- (idS.head, (idS.last .mkString(" /" ), kv._2))
121+ (idS.head, (idS.tail .mkString(" /" ), kv._2))
117122 }).groupBy(_._1).map(hypC => {
118123 val kid = hypC._1
119124 val hyper_params = hypC._2.map(_._2).toMap
@@ -130,19 +135,32 @@ class DecomposableCovariance[S](kernels: LocalScalarKernel[S]*)(
130135 }))
131136 }
132137
133- override def gradient (x : S , y : S ): Map [String , Double ] = {
134- val (xs, ys) = (encoding* encoding)((x,y))
135- xs.zip(ys).zip(kernels).map(coupleAndKern => {
136- val (u,v) = coupleAndKern._1
137- coupleAndKern._2.gradient(u,v)
138- }).reduceLeft(_++ _)
138+ override def gradient (x : S , y : S ): Map [String , Double ] = reducer match {
139+ case SumReducer =>
140+ val (xs, ys) = (encoding* encoding)((x,y))
141+ xs.zip(ys).zip(kernels).map(coupleAndKern => {
142+ val (u,v) = coupleAndKern._1
143+ coupleAndKern._2.gradient(u,v)
144+ }).reduceLeft(_++ _)
145+ case ProductReducer =>
146+ val (xs, ys) = (encoding* encoding)((x,y))
147+ xs.zip(ys).zip(kernels).map(coupleAndKern => {
148+ val (u,v) = coupleAndKern._1
149+ coupleAndKern._2.gradient(u,v).mapValues(_ * this .evaluate(x,y)/ coupleAndKern._2.evaluate(x,y))
150+ }).reduceLeft(_++ _)
151+ case _ : Reducer =>
152+ val (xs, ys) = (encoding* encoding)((x,y))
153+ xs.zip(ys).zip(kernels).map(coupleAndKern => {
154+ val (u,v) = coupleAndKern._1
155+ coupleAndKern._2.gradient(u,v)
156+ }).reduceLeft(_++ _)
139157 }
140158}
141159
/**
  * Companion object of [[DecomposableCovariance]], exposing the standard
  * reducers used when composing kernels additively or multiplicatively.
  * */
object DecomposableCovariance {

  // Additive composition: the composite kernel is the sum of its components.
  val :+: = SumReducer

  // Multiplicative composition: the composite kernel is the product of its components.
  val :*: = ProductReducer

}
0 commit comments