diff --git a/conf/log4j.properties.template b/conf/log4j.properties.template
index ec1aa187dfb3..ba97bf1b7d53 100644
--- a/conf/log4j.properties.template
+++ b/conf/log4j.properties.template
@@ -38,3 +38,14 @@ log4j.logger.parquet=ERROR
 # SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support
 log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL
 log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR
+
+# SPARK-14754: Metrics as logs are not coming through slf4j.
+#log4j.logger.org.apache.spark.metrics=INFO, metricFileAppender
+#log4j.additivity.org.apache.spark.metrics=true
+
+#log4j.appender.metricFileAppender=org.apache.log4j.RollingFileAppender
+#log4j.appender.metricFileAppender.File=${logFilePath}
+#log4j.appender.metricFileAppender.MaxFileSize=10MB
+#log4j.appender.metricFileAppender.MaxBackupIndex=10
+#log4j.appender.metricFileAppender.layout=org.apache.log4j.PatternLayout
+#log4j.appender.metricFileAppender.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
diff --git a/core/src/main/scala/org/apache/spark/metrics/sink/Slf4jSink.scala b/core/src/main/scala/org/apache/spark/metrics/sink/Slf4jSink.scala
index 773e074336cb..40338acf89bf 100644
--- a/core/src/main/scala/org/apache/spark/metrics/sink/Slf4jSink.scala
+++ b/core/src/main/scala/org/apache/spark/metrics/sink/Slf4jSink.scala
@@ -22,6 +22,9 @@ import java.util.concurrent.TimeUnit
 
 import com.codahale.metrics.{MetricRegistry, Slf4jReporter}
 
+import org.slf4j.Logger
+import org.slf4j.LoggerFactory
+
 import org.apache.spark.SecurityManager
 import org.apache.spark.metrics.MetricsSystem
 
@@ -49,6 +52,7 @@ private[spark] class Slf4jSink(
   MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)
 
   val reporter: Slf4jReporter = Slf4jReporter.forRegistry(registry)
+    .outputTo(LoggerFactory.getLogger("org.apache.spark.metrics.sink.Slf4jSink"))
    .convertDurationsTo(TimeUnit.MILLISECONDS)
    .convertRatesTo(TimeUnit.SECONDS)
    .build()
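
Note on the Slf4jSink change: Codahale's Slf4jReporter defaults to the logger named "metrics", which falls outside the org.apache.spark.metrics hierarchy that the commented-out log4j entries above target; the added outputTo(...) call points the reporter at a logger under that hierarchy so the metricFileAppender can capture its output. A minimal standalone sketch of the same builder chain follows; the registry contents, the object name, and the one-shot report() call are illustrative, not part of this patch:

    import java.util.concurrent.TimeUnit

    import com.codahale.metrics.{MetricRegistry, Slf4jReporter}
    import org.slf4j.LoggerFactory

    object Slf4jReporterSketch {
      def main(args: Array[String]): Unit = {
        val registry = new MetricRegistry
        registry.counter("requests").inc()  // illustrative metric

        // Same wiring as the patch: without outputTo(...), the reporter would
        // log to the logger named "metrics", which the log4j.properties.template
        // entries for org.apache.spark.metrics would never match.
        val reporter = Slf4jReporter.forRegistry(registry)
          .outputTo(LoggerFactory.getLogger("org.apache.spark.metrics.sink.Slf4jSink"))
          .convertDurationsTo(TimeUnit.MILLISECONDS)
          .convertRatesTo(TimeUnit.SECONDS)
          .build()

        reporter.report()  // one-shot dump; start(period, unit) schedules periodic output
      }
    }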
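
The log4j settings above only receive metrics if the Slf4jSink itself is registered with Spark's metrics system, which is configured in conf/metrics.properties rather than log4j. A minimal sketch of such a configuration attaching the sink to all instances; the sink name "slf4j" and the 10-second period are illustrative values:

    # Attach Slf4jSink to all instances (master, worker, driver, executor)
    *.sink.slf4j.class=org.apache.spark.metrics.sink.Slf4jSink
    # Poll period and unit; MetricsSystem.checkMinimalPollingPeriod enforces a floor
    *.sink.slf4j.period=10
    *.sink.slf4j.unit=seconds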