Commit 4dccc22

Make MiMa happy with the -Xsource:3 -Xsource-features:v2.13.15,-case-companion-function options
1 parent 7e3d2dc commit 4dccc22
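
The new pair of options replaces -Xsource:3-cross: -Xsource:3 enables the Scala 3 migration mode, -Xsource-features:v2.13.15 opts into the Scala 3 semantics that Scala 2.13.15 can emulate, and the leading minus on -case-companion-function carves that one feature (which appears to control whether synthetic case companions keep their FunctionN parent) back out of the set. Most of the source edits below follow a single pattern: wherever a case class companion was passed directly where a function value is expected, the call site becomes an explicit lambda over the companion's apply method (PodPending(_) instead of PodPending, and so on), a form that compiles the same way whether or not the companion still has a FunctionN parent. A minimal, self-contained Scala sketch of that pattern, using a hypothetical Wrapper class rather than any Spark type:

  // Hypothetical example; Wrapper stands in for case classes such as
  // TextSocketInputPartition or the k8s pod-state classes changed below.
  case class Wrapper(n: Int)

  object CompanionAsFunctionDemo {
    val xs = Seq(1, 2, 3)

    // Relies on the synthetic companion object conforming to Int => Wrapper;
    // this is the form the commit rewrites at Spark call sites.
    val viaCompanion: Seq[Wrapper] = xs.map(Wrapper)

    // Explicit lambda over Wrapper.apply; same result, but it does not depend
    // on the companion having a Function1 parent.
    val viaLambda: Seq[Wrapper] = xs.map(Wrapper(_))
  }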

File tree: 10 files changed (+60, -27 lines)

mllib/src/main/scala/org/apache/spark/mllib/classification/LogisticRegression.scala (2 additions, 1 deletion)

@@ -221,7 +221,8 @@ class LogisticRegressionWithSGD private[mllib] (
     .setNumIterations(numIterations)
     .setRegParam(regParam)
     .setMiniBatchFraction(miniBatchFraction)
-  override protected val validators = List(DataValidators.binaryLabelValidator)
+  override protected val validators: List[RDD[LabeledPoint] => Boolean] =
+    List(DataValidators.binaryLabelValidator)
 
   override protected[mllib] def createModel(
       weights: Vector,
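
The only change here is an explicit result type on an overridden val. This looks like an accommodation for the infer-override source feature in the v2.13.15 set, under which the type of an un-annotated override is taken from the overridden member rather than inferred from the right-hand side; pinning the type keeps the getter's bytecode signature unchanged, which is what MiMa compares. That reading is inferred from the diff rather than stated by the commit. A minimal sketch with hypothetical Validators and Binary types:

  trait Validators {
    // The parent declares the member with a wider type.
    protected val validators: Seq[String => Boolean] = Nil
  }

  class Binary extends Validators {
    // Without an annotation, the inferred type of this override can differ
    // between the classic Scala 2 rule (List[...], from the right-hand side)
    // and the overridden-member rule (Seq[...], from the parent), which would
    // change the getter's return type in bytecode. Annotating it removes the
    // ambiguity.
    override protected val validators: List[String => Boolean] =
      List(_.nonEmpty)
  }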

pom.xml (2 additions, 1 deletion)

@@ -2736,7 +2736,8 @@
               <arg>-explaintypes</arg>
               <arg>-release</arg>
               <arg>17</arg>
-              <arg>-Xsource:3-cross</arg>
+              <arg>-Xsource:3</arg>
+              <arg>-Xsource-features:v2.13.15,-case-companion-function</arg>
               <arg>-Wconf:any:e</arg>
               <arg>-Wconf:cat=deprecation:wv</arg>
               <arg>-Wunused:imports</arg>

project/MimaExcludes.scala (32 additions, 1 deletion)

@@ -40,7 +40,7 @@ object MimaExcludes {
   )
 
   // Exclude rules for 4.0.x from 3.5.0
-  lazy val v40excludes = defaultExcludes ++ Seq(
+  lazy val v40excludes = defaultExcludes ++ scala3Excludes ++ Seq(
     // [SPARK-44863][UI] Add a button to download thread dump as a txt in Spark UI
     ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.ThreadStackTrace.*"),
     ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.status.api.v1.ThreadStackTrace$"),

@@ -238,6 +238,37 @@ object MimaExcludes {
     loggingExcludes("org.apache.spark.sql.streaming.DataStreamReader") ++
     loggingExcludes("org.apache.spark.sql.SparkSession#Builder")
 
+  // Enable -Xsource:3 compiler flag
+  lazy val scala3Excludes = Seq(
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.sql.Metric.apply"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.sql.Metric.tupled"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.sql.Metric.curried"),
+    ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.status.api.v1.sql.Metric$"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.sql.Node.apply$default$3"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.sql.Node.apply"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.sql.Node.tupled"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.sql.Node.curried"),
+    ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.status.api.v1.sql.Node$"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.paths.SparkPath.apply"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.paths.SparkPath.copy"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.paths.SparkPath.copy$default$1"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.paths.SparkPath.apply"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.scheduler.AccumulableInfo.apply$default$7"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.scheduler.AccumulableInfo.apply"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.scheduler.AccumulableInfo.tupled"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.scheduler.AccumulableInfo.curried"),
+    ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.scheduler.AccumulableInfo$"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.ApplicationAttemptInfo.apply$default$7"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.ApplicationAttemptInfo.apply"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.ApplicationAttemptInfo.tupled"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.ApplicationAttemptInfo.curried"),
+    ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.status.api.v1.ApplicationAttemptInfo$"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.ApplicationInfo.apply"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.ApplicationInfo.tupled"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.ApplicationInfo.curried"),
+    ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.status.api.v1.ApplicationInfo$"),
+  )
+
   // Default exclude rules
   lazy val defaultExcludes = Seq(
     // Spark Internals
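
The excluded problems read like the binary-level footprint of the new source features on case classes whose constructors are not public: apply and copy pick up the constructor's restricted access, and the companion object loses its synthetic FunctionN parent, which is where tupled and curried came from. That is an inference from the shape of the exclude list, not something the commit states. A rough sketch with a hypothetical Metric-like class (the real Spark classes are assumed to have private[spark]-style constructors):

  // Hypothetical stand-in, not the Spark class of the same name.
  case class Metric private (name: String, value: String)

  // Classic Scala 2 encoding, roughly:
  //   object Metric extends AbstractFunction2[String, String, Metric] {
  //     def apply(name: String, value: String): Metric = ...   // public
  //     // tupled and curried are inherited from Function2
  //   }
  // Under the new flags, apply and copy take the constructor's access and the
  // Function2 parent is dropped, so MiMa reports DirectMissingMethodProblem
  // for apply/tupled/curried and MissingTypesProblem for Metric$, matching
  // the entries excluded above.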

project/SparkBuild.scala (2 additions, 1 deletion)

@@ -229,7 +229,8 @@ object SparkBuild extends PomBuild {
   lazy val compilerWarningSettings: Seq[sbt.Def.Setting[_]] = Seq(
     (Compile / scalacOptions) ++= {
       Seq(
-        "-Xsource:3-cross",
+        "-Xsource:3",
+        "-Xsource-features:v2.13.15,-case-companion-function",
         // replace -Xfatal-warnings with fine-grained configuration, since 2.13.2
         // verbose warning on deprecation, error on all others
         // see `scalac -Wconf:help` for details

resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsSnapshotSuite.scala (15 additions, 15 deletions)

@@ -38,14 +38,14 @@ class ExecutorPodsSnapshotSuite extends SparkFunSuite {
   test("States are interpreted correctly from pod metadata.") {
     ExecutorPodsSnapshot.setShouldCheckAllContainers(false)
     val testCases = Seq(
-      testCase(pendingExecutor(0), PodPending),
-      testCase(runningExecutor(1), PodRunning),
-      testCase(succeededExecutor(2), PodSucceeded),
-      testCase(failedExecutorWithoutDeletion(3), PodFailed),
-      testCase(deletedExecutor(4), PodDeleted),
-      testCase(unknownExecutor(5), PodUnknown),
-      testCase(finishedExecutorWithRunningSidecar(6, 0), PodSucceeded),
-      testCase(finishedExecutorWithRunningSidecar(7, 1), PodFailed)
+      testCase(pendingExecutor(0), PodPending(_)),
+      testCase(runningExecutor(1), PodRunning(_)),
+      testCase(succeededExecutor(2), PodSucceeded(_)),
+      testCase(failedExecutorWithoutDeletion(3), PodFailed(_)),
+      testCase(deletedExecutor(4), PodDeleted(_)),
+      testCase(unknownExecutor(5), PodUnknown(_)),
+      testCase(finishedExecutorWithRunningSidecar(6, 0), PodSucceeded(_)),
+      testCase(finishedExecutorWithRunningSidecar(7, 1), PodFailed(_))
     )
     doTest(testCases)
   }

@@ -54,13 +54,13 @@ class ExecutorPodsSnapshotSuite extends SparkFunSuite {
     + " when configured to check all containers.") {
     ExecutorPodsSnapshot.setShouldCheckAllContainers(true)
     val testCases = Seq(
-      testCase(pendingExecutor(0), PodPending),
-      testCase(runningExecutor(1), PodRunning),
-      testCase(runningExecutorWithFailedContainer(2), PodFailed),
-      testCase(succeededExecutor(3), PodSucceeded),
-      testCase(failedExecutorWithoutDeletion(4), PodFailed),
-      testCase(deletedExecutor(5), PodDeleted),
-      testCase(unknownExecutor(6), PodUnknown)
+      testCase(pendingExecutor(0), PodPending(_)),
+      testCase(runningExecutor(1), PodRunning(_)),
+      testCase(runningExecutorWithFailedContainer(2), PodFailed(_)),
+      testCase(succeededExecutor(3), PodSucceeded(_)),
+      testCase(failedExecutorWithoutDeletion(4), PodFailed(_)),
+      testCase(deletedExecutor(5), PodDeleted(_)),
+      testCase(unknownExecutor(6), PodUnknown(_))
     )
     doTest(testCases)
   }

sql/api/src/main/scala/org/apache/spark/sql/MergeIntoWriter.scala (1 addition, 3 deletions)

@@ -250,9 +250,7 @@ case class WhenMatched[T] private[sql] (
  * @tparam T
  *   The type of data in the MergeIntoWriter.
  */
-case class WhenNotMatched[T](
-    mergeIntoWriter: MergeIntoWriter[T],
-    condition: Option[Column]) {
+case class WhenNotMatched[T](mergeIntoWriter: MergeIntoWriter[T], condition: Option[Column]) {
 
   /**
    * Specifies an action to insert all non-matched rows into the DataFrame.

sql/connect/client/jvm/src/test/scala/org/apache/spark/sql/connect/client/arrow/ArrowEncoderSuite.scala (1 addition, 1 deletion)

@@ -816,7 +816,7 @@ class ArrowEncoderSuite extends ConnectFunSuite with BeforeAndAfterAll {
   test("REPL generated classes") {
     val encoder = ScalaReflection.encoderFor[MyTestClass]
     roundTripAndCheckIdentical(encoder) { () =>
-      Iterator.tabulate(10)(MyTestClass)
+      Iterator.tabulate(10)(MyTestClass(_))
     }
   }

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala (3 additions, 2 deletions)

@@ -134,7 +134,7 @@ object FileSourceStrategy extends Strategy with PredicateHelper with Logging {
       val numBuckets = bucketSpec.numBuckets
 
       val normalizedFiltersAndExpr = normalizedFilters
-        .reduce(expressions.And)
+        .reduce(expressions.And(_, _))
       val matchedBuckets = getExpressionBuckets(normalizedFiltersAndExpr, bucketColumnName,
         numBuckets)
 

@@ -353,7 +353,8 @@ object FileSourceStrategy extends Strategy with PredicateHelper with Logging {
     }.getOrElse(scan)
 
     // bottom-most filters are put in the left of the list.
-    val finalFilters = afterScanFilters.toSeq.reduceOption(expressions.And).toSeq ++ stayUpFilters
+    val finalFilters =
+      afterScanFilters.toSeq.reduceOption(expressions.And(_, _)).toSeq ++ stayUpFilters
     val withFilter = finalFilters.foldLeft(withMetadataProjections)((plan, cond) => {
       execution.FilterExec(cond, plan)
     })
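
The same rewrite in its two-parameter form: reduce(expressions.And) relied on the And companion being usable as an (Expression, Expression) => Expression value, while And(_, _) is an explicit two-argument lambda over And.apply and does not care whether the companion extends Function2. The folded result is identical, so the change here and in PartitioningAwareFileIndex below is behavior-preserving. A small sketch with hypothetical Expr, Lit and And types (not the Catalyst classes):

  sealed trait Expr
  case class Lit(value: Boolean) extends Expr
  case class And(left: Expr, right: Expr) extends Expr

  object ReduceDemo {
    val filters: Seq[Expr] = Seq(Lit(true), Lit(false), Lit(true))

    // Relies on the And companion conforming to (Expr, Expr) => Expr.
    val viaCompanion: Expr = filters.reduce(And)

    // Explicit two-argument lambda over And.apply; same folded result, no
    // reliance on the companion's Function2 parent.
    val viaLambda: Expr = filters.reduce(And(_, _))
  }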

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningAwareFileIndex.scala (1 addition, 1 deletion)

@@ -184,7 +184,7 @@ abstract class PartitioningAwareFileIndex(
     }
 
     if (partitionPruningPredicates.nonEmpty) {
-      val predicate = partitionPruningPredicates.reduce(expressions.And)
+      val predicate = partitionPruningPredicates.reduce(expressions.And(_, _))
 
       val boundPredicate = Predicate.createInterpreted(predicate.transform {
         case a: AttributeReference =>

sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/TextSocketMicroBatchStream.scala (1 addition, 1 deletion)

@@ -128,7 +128,7 @@ class TextSocketMicroBatchStream(host: String, port: Int, numPartitions: Int)
       slices(idx % numPartitions).append(r)
     }
 
-    slices.map(TextSocketInputPartition)
+    slices.map(TextSocketInputPartition(_))
   }
 
   override def createReaderFactory(): PartitionReaderFactory =
