From 536f36a1fce1bf7cbd0ad5e782176b4e28e61ea3 Mon Sep 17 00:00:00 2001
From: kunpeng
Date: Thu, 13 Apr 2023 14:22:47 +0800
Subject: [PATCH] Clean files

---
 .../spark/examples/ml/KMeansExample.scala     | 17 +++++------------
 .../java/com/intel/oap/mllib/LibLoader.java   |  7 +------
 .../spark/ml/clustering/spark321/KMeans.scala |  2 --
 3 files changed, 6 insertions(+), 20 deletions(-)

diff --git a/examples/kmeans/src/main/scala/org/apache/spark/examples/ml/KMeansExample.scala b/examples/kmeans/src/main/scala/org/apache/spark/examples/ml/KMeansExample.scala
index 0bccd4da9..37e26ebe2 100644
--- a/examples/kmeans/src/main/scala/org/apache/spark/examples/ml/KMeansExample.scala
+++ b/examples/kmeans/src/main/scala/org/apache/spark/examples/ml/KMeansExample.scala
@@ -21,7 +21,6 @@ package org.apache.spark.examples.ml
 
 // $example on$
 import org.apache.spark.ml.clustering.KMeans
-import org.apache.spark.ml.regression.LinearRegression
 import org.apache.spark.ml.evaluation.ClusteringEvaluator
 // $example off$
 import org.apache.spark.sql.SparkSession
@@ -49,27 +48,21 @@ object KMeansExample {
     dataset.cache()
 
     // Trains a k-means model.
-    println("seem good0")
     val kmeans = new KMeans().setK(2).setSeed(1L).setInitMode("random").setMaxIter(5)
-    val lr = new LinearRegression().setMaxIter(10).setRegParam(0.3).setElasticNetParam(0.8)
-    println("seem good1")
-    val lrModel = lr.fit(dataset)
-    println("seem good2")
     val model = kmeans.fit(dataset)
-    println("seem good3")
 
     // Make predictions
-    //val predictions = model.transform(dataset)
+    val predictions = model.transform(dataset)
 
     // Evaluate clustering by computing Silhouette score
-    //val evaluator = new ClusteringEvaluator()
+    val evaluator = new ClusteringEvaluator()
 
-    //val silhouette = evaluator.evaluate(predictions)
-    println(s"Silhouette with squared euclidean distance = ")
+    val silhouette = evaluator.evaluate(predictions)
+    println(s"Silhouette with squared euclidean distance = $silhouette")
 
     // Shows the result.
     println("Cluster Centers: ")
-    //model.clusterCenters.foreach(println)
+    model.clusterCenters.foreach(println)
     // $example off$
 
     spark.stop()
diff --git a/mllib-dal/src/main/java/com/intel/oap/mllib/LibLoader.java b/mllib-dal/src/main/java/com/intel/oap/mllib/LibLoader.java
index 165fc3873..1a14e4a99 100644
--- a/mllib-dal/src/main/java/com/intel/oap/mllib/LibLoader.java
+++ b/mllib-dal/src/main/java/com/intel/oap/mllib/LibLoader.java
@@ -49,12 +49,7 @@ public static synchronized void loadLibraries() throws IOException {
     if (isLoaded) {
       return;
     }
-/*
-    if (!loadLibSYCL()) {
-      log.debug("SYCL libraries are not available, will load CPU libraries only.");
-    }
-    loadLibCCL();
- */
+
     loadLibMLlibDAL();
 
     isLoaded = true;
diff --git a/mllib-dal/src/main/scala/org/apache/spark/ml/clustering/spark321/KMeans.scala b/mllib-dal/src/main/scala/org/apache/spark/ml/clustering/spark321/KMeans.scala
index ece970fc2..25b5d3ab1 100644
--- a/mllib-dal/src/main/scala/org/apache/spark/ml/clustering/spark321/KMeans.scala
+++ b/mllib-dal/src/main/scala/org/apache/spark/ml/clustering/spark321/KMeans.scala
@@ -58,8 +58,6 @@ class KMeans @Since("1.5.0") (
 
     instr.logPipelineStage(this)
     instr.logDataset(dataset)
-    System.exit(0)
-
     instr.logParams(this, featuresCol, predictionCol, k, initMode, initSteps, distanceMeasure,
       maxIter, seed, tol, weightCol)
 
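
Note: for anyone who wants to sanity-check the restored evaluation path on its own, below is a minimal standalone sketch of what KMeansExample exercises after this patch. The object name, app name, and data path are illustrative assumptions and are not part of the patch; only the KMeans settings, the ClusteringEvaluator usage, and the printed output mirror the restored lines.

    import org.apache.spark.ml.clustering.KMeans
    import org.apache.spark.ml.evaluation.ClusteringEvaluator
    import org.apache.spark.sql.SparkSession

    // Hypothetical smoke-test object; not part of this patch.
    object KMeansSmokeTest {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder.appName("KMeansSmokeTest").getOrCreate()

        // Assumed libsvm-format input path; substitute a real dataset.
        val dataset = spark.read.format("libsvm").load("data/sample_kmeans_data.txt")
        dataset.cache()

        // Same settings as the cleaned example: k=2, random init, 5 iterations.
        val kmeans = new KMeans().setK(2).setSeed(1L).setInitMode("random").setMaxIter(5)
        val model = kmeans.fit(dataset)

        // Restored evaluation path: predictions + Silhouette score.
        val predictions = model.transform(dataset)
        val evaluator = new ClusteringEvaluator()
        val silhouette = evaluator.evaluate(predictions)
        println(s"Silhouette with squared euclidean distance = $silhouette")

        // Restored result output.
        println("Cluster Centers: ")
        model.clusterCenters.foreach(println)

        spark.stop()
      }
    }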