Commit: Check in the scalariform formatted source

massie committed Apr 7, 2014
1 parent 67482f9 commit b320f02
Showing 51 changed files with 195 additions and 195 deletions.

@@ -65,7 +65,7 @@ object ADAMMain extends Logging {
       printCommands()
     } else {
       commands.find(_.commandName == args(0)) match {
-        case None      => printCommands()
+        case None => printCommands()

@heuermh (Member) commented Apr 8, 2014:

Probably too late to comment, but these kinds of whitespace changes seem silly to me.

@fnothaft (Member) commented Apr 8, 2014:

For what it's worth, it's nice to have things consistent, and now that scalariform is in the Maven pom as a plugin, it'll be easy to keep things consistent across changes.
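
(For context, a plugin stanza along the following lines is how scalariform typically hooks into a Maven build. This is a minimal sketch; the version, phase, and preference values are assumptions for illustration, not ADAM's actual pom.xml entries.)

<!-- Sketch of a scalariform-maven-plugin stanza. The version, phase, and
     preference values below are illustrative assumptions only. -->
<plugin>
  <groupId>org.scalariform</groupId>
  <artifactId>scalariform-maven-plugin</artifactId>
  <version>0.1.4</version>
  <executions>
    <execution>
      <phase>process-sources</phase>
      <goals>
        <goal>format</goal>
      </goals>
      <configuration>
        <alignParameters>false</alignParameters>
        <alignSingleLineCaseStatements>false</alignSingleLineCaseStatements>
        <doubleIndentClassDeclaration>true</doubleIndentClassDeclaration>
      </configuration>
    </execution>
  </executions>
</plugin>

Binding the format goal to an early build phase means every mvn build reformats the tree, so style drift gets caught before review rather than after.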

@massie (Author, Member) commented Apr 8, 2014:

I'm to blame for this. I merged #206 without realizing that its Scalariform configuration was different from the one we had used in previous commits (run from the command line).

I agree that these whitespace changes are annoying. As Frank said, they won't recur going forward, because scalariform will keep the syntax consistent.
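
(To make the churn concrete: most of this diff consists of rewrites like the following. This is a minimal, self-contained Scala sketch with hypothetical names, not lines from this commit.)

// Hypothetical example; not code from this commit.
object ArrowAlignmentExample {
  // Before scalariform: arrows padded so single-line cases line up vertically.
  def describeAligned(n: Option[Int]): String = n match {
    case Some(d) => s"value $d"
    case None    => "empty"
  }

  // After scalariform: single-space arrows; only whitespace differs,
  // behavior is identical.
  def describeFormatted(n: Option[Int]): String = n match {
    case Some(d) => s"value $d"
    case None => "empty"
  }
}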

@massie (Author, Member) commented Apr 8, 2014:

I also forgot to mention -- it's never too late to comment. Keep the feedback (and code!) coming.

@nealsid commented via email Apr 8, 2014 (comment text not shown).

@fnothaft (Member) commented Apr 8, 2014:

IMO, it's not a problem that the parameters changed; I think Matt is saying that your PR changed the parameters in the pom.xml but didn't include all of the code-base changes that resulted when the reformatting was run.

If we are settled on the Scalariform parameters for ADAM, can you also push those same parameters to avocado, @nealsid?

         case Some(cmd) => cmd.apply(args drop 1).run()
       }
     }

@@ -68,11 +68,11 @@ object CompareADAM extends ADAMCommandCompanion with Serializable {
    * @see CompareADAMArgs.recurse1, CompareADAMArgs.recurse2
    */
   def setupTraversalEngine(sc: SparkContext,
-                           input1Path: String,
-                           recurse1: String,
-                           input2Path: String,
-                           recurse2: String,
-                           generator: BucketComparisons[Any]): ComparisonTraversalEngine = {
+    input1Path: String,
+    recurse1: String,
+    input2Path: String,
+    recurse2: String,
+    generator: BucketComparisons[Any]): ComparisonTraversalEngine = {
 
     val schemas = Seq[FieldValue](
       recordGroupId,
@@ -88,7 +88,7 @@ object CompareADAM extends ADAMCommandCompanion with Serializable {
   def parseGenerators(nameList: String): Seq[BucketComparisons[Any]] = {
     nameList match {
       case null => DefaultComparisons.comparisons
-      case s    => parseGenerators(s.split(","))
+      case s => parseGenerators(s.split(","))
     }
   }
 
@@ -146,9 +146,9 @@ class CompareADAM(protected val args: CompareADAMArgs) extends ADAMSparkCommand[
    * @param writer The PrintWriter to print the summary with.
    */
   def printSummary(engine: ComparisonTraversalEngine,
-                   generators: Seq[BucketComparisons[Any]],
-                   aggregateds: Seq[Histogram[Any]],
-                   writer: PrintWriter) {
+    generators: Seq[BucketComparisons[Any]],
+    aggregateds: Seq[Histogram[Any]],
+    writer: PrintWriter) {
 
     writer.println("%15s: %s".format("INPUT1", args.input1Path))
     writer.println("\t%15s: %d".format("total-reads", engine.named1.count()))

@@ -57,7 +57,7 @@ class MpileupCommand(protected val args: MpileupArgs) extends ADAMSparkCommand[M
     // The reference base
     pileup.referenceBase match {
       case Some(base) => print(base)
-      case None       => print("?")
+      case None => print("?")
     }
 
     // The number of reads

@@ -43,7 +43,7 @@ class PileupAggregatorArgs extends Args4jBase with SparkArgs with ParquetArgs {
 }
 
 class PileupAggregator(protected val args: PileupAggregatorArgs)
-  extends ADAMSparkCommand[PileupAggregatorArgs] {
+    extends ADAMSparkCommand[PileupAggregatorArgs] {
 
   val companion = PileupAggregator

@@ -95,19 +95,19 @@ class PluginExecutor(protected val args: PluginExecutorArgs) extends ADAMSparkCo
     // see. This is related to Issue #62: Predicate to filter conversion.
     val filter = accessControl.predicate match {
       case None => plugin.predicate match {
-        case None                  => None
+        case None => None
         case Some(predicateFilter) => Some(predicateFilter)
       }
       case Some(accessControlPredicate) => plugin.predicate match {
-        case None                  => Some(accessControlPredicate)
+        case None => Some(accessControlPredicate)
         case Some(predicateFilter) => Some((value: ADAMRecord) => accessControlPredicate(value) && predicateFilter(value))
       }
     }
 
     val firstRdd: RDD[ADAMRecord] = load[ADAMRecord](sc, args.input, plugin.projection)
 
     val input = filter match {
-      case None             => firstRdd
+      case None => firstRdd
       case Some(filterFunc) => firstRdd.filter(filterFunc)
     }

@@ -56,8 +56,8 @@ class SummarizeGenotypes(val args: SummarizeGenotypesArgs) extends ADAMSparkComm
     val stats = GenotypesSummary(adamGTs)
     val result = args.format match {
       case "human" => GenotypesSummaryFormatting.format_human_readable(stats)
-      case "csv"   => GenotypesSummaryFormatting.format_csv(stats)
-      case _       => throw new IllegalArgumentException("Invalid -format: %s".format(args.format))
+      case "csv" => GenotypesSummaryFormatting.format_csv(stats)
+      case _ => throw new IllegalArgumentException("Invalid -format: %s".format(args.format))
     }
     if (args.out.isEmpty) {
       println(result)

@@ -33,10 +33,10 @@ object SmithWatermanConstantGapScoring {
 }
 
 abstract class SmithWatermanConstantGapScoring(xSequence: String,
-                                               ySequence: String,
-                                               wMatch: Double,
-                                               wMismatch: Double,
-                                               wInsert: Double,
-                                               wDelete: Double)
-  extends SmithWatermanGapScoringFromFn(xSequence, ySequence, SmithWatermanConstantGapScoring.constantGapFn(wMatch, wMismatch, wInsert, wDelete)) {
+  ySequence: String,
+  wMatch: Double,
+  wMismatch: Double,
+  wInsert: Double,
+  wDelete: Double)
+    extends SmithWatermanGapScoringFromFn(xSequence, ySequence, SmithWatermanConstantGapScoring.constantGapFn(wMatch, wMismatch, wInsert, wDelete)) {
 }

@@ -17,9 +17,9 @@
 package org.bdgenomics.adam.algorithms.smithwaterman
 
 abstract class SmithWatermanGapScoringFromFn(xSequence: String,
-                                             ySequence: String,
-                                             scoreFn: (Int, Int, Char, Char) => Double)
-  extends SmithWaterman(xSequence, ySequence) {
+  ySequence: String,
+  scoreFn: (Int, Int, Char, Char) => Double)
+    extends SmithWaterman(xSequence, ySequence) {
 
   def buildScoringMatrix(): Array[Array[Double]] = {

@@ -43,7 +43,7 @@ private[adam] object FastaConverter {
    * @return An RDD of ADAM FASTA data.
    */
   def apply(rdd: RDD[(Int, String)],
-            maxFragmentLength: Long = 10000L): RDD[ADAMNucleotideContigFragment] = {
+    maxFragmentLength: Long = 10000L): RDD[ADAMNucleotideContigFragment] = {
     val filtered = rdd.map(kv => (kv._1, kv._2.trim()))
       .filter((kv: (Int, String)) => !kv._2.startsWith(";"))
 
@@ -164,9 +164,9 @@ private[converters] class FastaConverter(fragmentLength: Long) extends Serializa
    * @return The converted ADAM FASTA contig.
    */
   def convert(name: Option[String],
-              id: Int,
-              sequence: Seq[String],
-              description: Option[String]): Seq[ADAMNucleotideContigFragment] = {
+    id: Int,
+    sequence: Seq[String],
+    description: Option[String]): Seq[ADAMNucleotideContigFragment] = {
 
     // get sequence length
     val sequenceLength = sequence.map(_.length).reduce(_ + _)

@@ -19,7 +19,7 @@ import org.bdgenomics.adam.util._
 import scala.math.{ pow, sqrt }
 
 private[adam] class GenotypesToVariantsConverter(validateSamples: Boolean = false,
-                                                 failOnValidationError: Boolean = false) extends Serializable {
+  failOnValidationError: Boolean = false) extends Serializable {
 
   /**
    * Computes root mean squared (RMS) values for a series of doubles.

@@ -124,7 +124,7 @@ class SAMRecordConverter extends Serializable {
     if (recordGroup != null) {
       Option(recordGroup.getRunDate) match {
         case Some(date) => builder.setRecordGroupRunDateEpoch(date.getTime)
-        case None       =>
+        case None =>
       }
       recordGroup.getId
       builder.setRecordGroupId(readGroups(recordGroup.getReadGroupId))

@@ -26,18 +26,18 @@ import org.bdgenomics.adam.util.ImplicitJavaConversions._
 object VariantAnnotationConverter extends Serializable {
 
   private def attrAsInt(attr: Object): Object = attr match {
-    case a: String            => java.lang.Integer.valueOf(a)
+    case a: String => java.lang.Integer.valueOf(a)
     case a: java.lang.Integer => a
-    case a: java.lang.Number  => java.lang.Integer.valueOf(a.intValue)
+    case a: java.lang.Number => java.lang.Integer.valueOf(a.intValue)
   }
   private def attrAsLong(attr: Object): Object = attr match {
-    case a: String           => java.lang.Long.valueOf(a)
-    case a: java.lang.Long   => a
+    case a: String => java.lang.Long.valueOf(a)
+    case a: java.lang.Long => a
     case a: java.lang.Number => java.lang.Long.valueOf(a.longValue)
   }
   private def attrAsFloat(attr: Object): Object = attr match {
-    case a: String           => java.lang.Float.valueOf(a)
-    case a: java.lang.Float  => a
+    case a: String => java.lang.Float.valueOf(a)
+    case a: java.lang.Float => a
     case a: java.lang.Number => java.lang.Float.valueOf(a.floatValue)
   }
   private def attrAsString(attr: Object): Object = attr match {
@@ -46,7 +46,7 @@ object VariantAnnotationConverter extends Serializable {
 
   private def attrAsBoolean(attr: Object): Object = attr match {
     case a: java.lang.Boolean => a
-    case a: String            => java.lang.Boolean.valueOf(a)
+    case a: String => java.lang.Boolean.valueOf(a)
   }
 
   private case class AttrKey(adamKey: String, attrConverter: (Object => Object), hdrLine: VCFCompoundHeaderLine) {

@@ -48,8 +48,8 @@ object VariantContextConverter {
   private def convertAlleles(g: ADAMGenotype): java.util.List[Allele] = {
     g.getAlleles.map(a => a match {
       case ADAMGenotypeAllele.NoCall => Allele.NO_CALL
-      case ADAMGenotypeAllele.Ref    => Allele.create(g.getVariant.getReferenceAllele.toString, true)
-      case ADAMGenotypeAllele.Alt    => Allele.create(g.getVariant.getVariantAllele.toString)
+      case ADAMGenotypeAllele.Ref => Allele.create(g.getVariant.getReferenceAllele.toString, true)
+      case ADAMGenotypeAllele.Alt => Allele.create(g.getVariant.getVariantAllele.toString)
     })
   }
 }

@@ -55,8 +55,8 @@ trait Aggregated[+T] extends Writable with Serializable {
 }
 
 class AggregatedCollection[T, U <: Aggregated[T]](val values: Seq[U])
-  extends Aggregated[metrics.Collection[Seq[T]]]
-  with Serializable {
+    extends Aggregated[metrics.Collection[Seq[T]]]
+    with Serializable {
 
   def count(): Long = values.map(_.count()).reduce(_ + _)
   def countIdentical(): Long =
@@ -72,7 +72,7 @@ object AggregatedCollection {
 }
 
 class CombinedAggregator[Single, Agg <: Aggregated[Single]](aggs: Seq[Aggregator[Single, Agg]])
-  extends Aggregator[metrics.Collection[Seq[Single]], AggregatedCollection[Single, Agg]] {
+    extends Aggregator[metrics.Collection[Seq[Single]], AggregatedCollection[Single, Agg]] {
 
   def initialValue: AggregatedCollection[Single, Agg] = AggregatedCollection(aggs.map(_.initialValue))

@@ -78,10 +78,10 @@ object ADAMVariantContext {
 }
 
 class ADAMVariantContext(
-  val position: ReferencePosition,
-  val variant: RichADAMVariant,
-  val genotypes: Seq[ADAMGenotype],
-  val databases: Option[ADAMDatabaseVariantAnnotation] = None) {
+    val position: ReferencePosition,
+    val variant: RichADAMVariant,
+    val genotypes: Seq[ADAMGenotype],
+    val databases: Option[ADAMDatabaseVariantAnnotation] = None) {
   def this(variant: RichADAMVariant, genotypes: Seq[ADAMGenotype], database: Option[ADAMDatabaseVariantAnnotation] = None) = {
     this(ReferencePosition(variant), variant, genotypes, database)
   }

@@ -29,12 +29,12 @@ import com.esotericsoftware.kryo.io.{ Input, Output }
  * This is useful as this will usually map a single read in any of the sequences.
  */
 case class ReadBucket(unpairedPrimaryMappedReads: Seq[ADAMRecord] = Seq.empty,
-                      pairedFirstPrimaryMappedReads: Seq[ADAMRecord] = Seq.empty,
-                      pairedSecondPrimaryMappedReads: Seq[ADAMRecord] = Seq.empty,
-                      unpairedSecondaryMappedReads: Seq[ADAMRecord] = Seq.empty,
-                      pairedFirstSecondaryMappedReads: Seq[ADAMRecord] = Seq.empty,
-                      pairedSecondSecondaryMappedReads: Seq[ADAMRecord] = Seq.empty,
-                      unmappedReads: Seq[ADAMRecord] = Seq.empty) {
+  pairedFirstPrimaryMappedReads: Seq[ADAMRecord] = Seq.empty,
+  pairedSecondPrimaryMappedReads: Seq[ADAMRecord] = Seq.empty,
+  unpairedSecondaryMappedReads: Seq[ADAMRecord] = Seq.empty,
+  pairedFirstSecondaryMappedReads: Seq[ADAMRecord] = Seq.empty,
+  pairedSecondSecondaryMappedReads: Seq[ADAMRecord] = Seq.empty,
+  unmappedReads: Seq[ADAMRecord] = Seq.empty) {
   def allReads(): Seq[ADAMRecord] =
     unpairedPrimaryMappedReads ++
       pairedFirstPrimaryMappedReads ++

@@ -43,7 +43,7 @@ object ReferencePositionWithOrientation {
 }
 
 case class ReferencePositionWithOrientation(refPos: Option[ReferencePosition], negativeStrand: Boolean)
-  extends Ordered[ReferencePositionWithOrientation] {
+    extends Ordered[ReferencePositionWithOrientation] {
   override def compare(that: ReferencePositionWithOrientation): Int = {
     val posCompare = refPos.compare(that.refPos)
     if (posCompare != 0) {

@@ -48,7 +48,7 @@ object ReferencePositionPair extends Logging {
 }
 
 case class ReferencePositionPair(read1refPos: Option[ReferencePositionWithOrientation],
-                                 read2refPos: Option[ReferencePositionWithOrientation])
+  read2refPos: Option[ReferencePositionWithOrientation])
 
 class ReferencePositionPairSerializer extends Serializer[ReferencePositionPair] {
   val rps = new ReferencePositionWithOrientationSerializer()

@@ -122,7 +122,7 @@ case class ReferenceRegion(refId: Int, start: Long, end: Long) extends Ordered[R
    */
   def isAdjacent(region: ReferenceRegion): Boolean = distance(region) match {
     case Some(d) => d == 1
-    case None    => false
+    case None => false
   }
 
   /**

@@ -208,7 +208,7 @@ class SequenceDictionary(val recordsIn: Array[SequenceRecord]) extends Serializa
   def records: Set[SequenceRecord] = recordIndices.values.toSet
 
   private[models] def cleanAndMerge(a1: Array[SequenceRecord],
-                                    a2: Array[SequenceRecord]): Array[SequenceRecord] = {
+    a2: Array[SequenceRecord]): Array[SequenceRecord] = {
     val a2filt = a2.filter(k => !a1.contains(k))
 
     a1 ++ a2filt

@@ -36,8 +36,8 @@ object SingleReadBucket extends Logging {
 }
 
 case class SingleReadBucket(primaryMapped: Seq[ADAMRecord] = Seq.empty,
-                            secondaryMapped: Seq[ADAMRecord] = Seq.empty,
-                            unmapped: Seq[ADAMRecord] = Seq.empty) {
+  secondaryMapped: Seq[ADAMRecord] = Seq.empty,
+  unmapped: Seq[ADAMRecord] = Seq.empty) {
   // Note: not a val in order to save serialization/memory cost
   def allReads = {
     primaryMapped ++ secondaryMapped ++ unmapped

@@ -113,12 +113,12 @@ object ADAMContext {
    * @return Returns a properly configured Spark Context.
    */
   def createSparkContext(name: String,
-                         master: String,
-                         sparkHome: String,
-                         sparkJars: Seq[String],
-                         sparkEnvVars: Seq[String],
-                         sparkAddStatsListener: Boolean = false,
-                         sparkKryoBufferSize: Int = 4): SparkContext = {
+    master: String,
+    sparkHome: String,
+    sparkJars: Seq[String],
+    sparkEnvVars: Seq[String],
+    sparkAddStatsListener: Boolean = false,
+    sparkKryoBufferSize: Int = 4): SparkContext = {
     ADAMKryoProperties.setupContextProperties(sparkKryoBufferSize)
     val appName = "adam: " + name
     val environment: Map[String, String] = if (sparkEnvVars.isEmpty) {

@@ -52,8 +52,8 @@ import scala.math.{ min, max }
 class ADAMRDDFunctions[T <% SpecificRecord: Manifest](rdd: RDD[T]) extends Serializable {
 
   def adamSave(filePath: String, blockSize: Int = 128 * 1024 * 1024,
-               pageSize: Int = 1 * 1024 * 1024, compressCodec: CompressionCodecName = CompressionCodecName.GZIP,
-               disableDictionaryEncoding: Boolean = false): RDD[T] = {
+    pageSize: Int = 1 * 1024 * 1024, compressCodec: CompressionCodecName = CompressionCodecName.GZIP,
+    disableDictionaryEncoding: Boolean = false): RDD[T] = {
     val job = new Job(rdd.context.hadoopConfiguration)
     ParquetLogger.hadoopLoggerLevel(Level.SEVERE)
     ParquetOutputFormat.setWriteSupportClass(job, classOf[AvroWriteSupport])
@@ -131,7 +131,7 @@ abstract class ADAMSequenceDictionaryRDDAggregator[T](rdd: RDD[T]) extends Seria
    * @param rdd RDD over which aggregation is supported.
    */
 class ADAMSpecificRecordSequenceDictionaryRDDAggregator[T <% SpecificRecord: Manifest](rdd: RDD[T])
-  extends ADAMSequenceDictionaryRDDAggregator[T](rdd) {
+    extends ADAMSequenceDictionaryRDDAggregator[T](rdd) {
 
   def getSequenceRecordsFromElement(elem: T): Set[SequenceRecord] = {
     Set(SequenceRecord.fromSpecificRecord(elem))
@@ -223,7 +223,7 @@ class ADAMRecordRDDFunctions(rdd: RDD[ADAMRecord]) extends ADAMSequenceDictionar
    * @return RDD of ADAMRods.
    */
   def adamRecords2Rods(bucketSize: Int = 1000,
-                       secondaryAlignments: Boolean = false): RDD[ADAMRod] = {
+    secondaryAlignments: Boolean = false): RDD[ADAMRod] = {
 
     /**
      * Maps a read to one or two buckets. A read maps to a single bucket if both
@@ -458,7 +458,7 @@ class ADAMNucleotideContigFragmentRDDFunctions(rdd: RDD[ADAMNucleotideContigFrag
   }
 
   def reducePairs(kv1: (ReferenceRegion, String),
-                  kv2: (ReferenceRegion, String)): (ReferenceRegion, String) = {
+    kv2: (ReferenceRegion, String)): (ReferenceRegion, String) = {
     assert(kv1._1.isAdjacent(kv2._1), "Regions being joined must be adjacent. For: " +
       kv1 + ", " + kv2)

@@ -56,10 +56,10 @@ case class DuplicateMetrics(total: Long, bothMapped: Long, onlyReadMapped: Long,
 }
 
 case class FlagStatMetrics(total: Long, duplicatesPrimary: DuplicateMetrics, duplicatesSecondary: DuplicateMetrics,
-                           mapped: Long, pairedInSequencing: Long,
-                           read1: Long, read2: Long, properlyPaired: Long, withSelfAndMateMapped: Long,
-                           singleton: Long, withMateMappedToDiffChromosome: Long,
-                           withMateMappedToDiffChromosomeMapQ5: Long, failedQuality: Boolean) {
+  mapped: Long, pairedInSequencing: Long,
+  read1: Long, read2: Long, properlyPaired: Long, withSelfAndMateMapped: Long,
+  singleton: Long, withMateMappedToDiffChromosome: Long,
+  withMateMappedToDiffChromosomeMapQ5: Long, failedQuality: Boolean) {
   def +(that: FlagStatMetrics): FlagStatMetrics = {
     assert(failedQuality == that.failedQuality, "Can't reduce passedVendorQuality with different failedQuality values")
     new FlagStatMetrics(total + that.total,

0 comments on commit b320f02
