Use scalameta scalafmt plugin #681

Merged · 2 commits · Sep 24, 2019
1 change: 1 addition & 0 deletions .scalafmt.conf
@@ -1,3 +1,4 @@
version=2.0.1
maxColumn = 110
docstrings = JavaDoc
newlines.penalizeSingleSelectMultiArgList = false
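The added `version=2.0.1` line pins the formatter release: the scalameta sbt-scalafmt plugin resolves that exact scalafmt version at build time, and recent plugin releases refuse to run when the field is missing. The corresponding `project/plugins.sbt` change is not part of this excerpt; a minimal sketch of what it typically looks like, with the plugin version below being an assumption rather than something taken from the PR:

```scala
// project/plugins.sbt (illustrative sketch; the real file is not shown in this diff,
// and 2.0.4 is an assumed plugin version)
addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.0.4")
```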
2 changes: 1 addition & 1 deletion .travis.yml
@@ -16,7 +16,7 @@ matrix:

- scala: 2.12.9
jdk: openjdk8
script: sbt "++$TRAVIS_SCALA_VERSION clean" "++$TRAVIS_SCALA_VERSION test" "scalafmt::test" "test:scalafmt::test" "++$TRAVIS_SCALA_VERSION mimaReportBinaryIssues" "++$TRAVIS_SCALA_VERSION docs/makeMicrosite"
script: sbt "++$TRAVIS_SCALA_VERSION clean" "++$TRAVIS_SCALA_VERSION test" "scalafmtCheckAll" "scalafmtSbtCheck" "++$TRAVIS_SCALA_VERSION mimaReportBinaryIssues" "++$TRAVIS_SCALA_VERSION docs/makeMicrosite"
#script: ./sbt "+++$TRAVIS_SCALA_VERSION clean" "+++$TRAVIS_SCALA_VERSION test" "++$TRAVIS_SCALA_VERSION docs/makeMicrosite"

before_install:
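The CI script swaps the old `scalafmt::test` and `test:scalafmt::test` tasks for the aggregate check tasks provided by the scalameta plugin. Assuming the plugin sketched above is on the build, the equivalent local commands would be roughly:

```
# check formatting the same way CI does
sbt scalafmtCheckAll scalafmtSbtCheck

# rewrite Scala sources and *.sbt files in place
sbt scalafmtAll scalafmtSbt
```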
@@ -100,7 +100,8 @@ object AsyncSummerBenchmark {
Counter("insertOp"),
Counter("tuplesOut"),
Counter("size"),
workPool)
workPool
)
syncSummingQueue = new SyncSummingQueue[Long, HLL](
bufferSize,
flushFrequency,
@@ -41,18 +41,22 @@ object BloomFilterDistanceBenchmark {
val sparseBF1: BF[String] =
toSparse(
BloomFilter[String](nbrOfElements, falsePositiveRate)
.create(randomElements: _*))
.create(randomElements: _*)
)
val sparesBF2: BF[String] =
toSparse(
BloomFilter[String](nbrOfElements, falsePositiveRate)
.create(randomElements: _*))
.create(randomElements: _*)
)

val denseBF1: BF[String] = toDense(
BloomFilter[String](nbrOfElements, falsePositiveRate)
.create(randomElements: _*))
.create(randomElements: _*)
)
val denseBF2: BF[String] = toDense(
BloomFilter[String](nbrOfElements, falsePositiveRate)
.create(randomElements: _*))
.create(randomElements: _*)
)

}
}
@@ -41,7 +41,9 @@ object CMSHashingBenchmark {
"11" /* eps = 0.271 */,
"544" /* eps = 0.005 */,
"2719" /* eps = 1E-3 */,
"271829" /* eps = 1E-5 */ ))
"271829" /* eps = 1E-5 */
)
)
var width: Int = 0

/**
@@ -56,7 +56,8 @@ class BijectedRing[T, U](implicit val ring: Ring[T], bij: ImplicitBijection[T, U

trait AlgebirdBijections {
implicit def semigroupBijection[T, U](
implicit bij: ImplicitBijection[T, U]): Bijection[Semigroup[T], Semigroup[U]] =
implicit bij: ImplicitBijection[T, U]
): Bijection[Semigroup[T], Semigroup[U]] =
new AbstractBijection[Semigroup[T], Semigroup[U]] {
override def apply(sg: Semigroup[T]) =
new BijectedSemigroup[T, U]()(sg, bij)
@@ -106,7 +106,8 @@ object AdaptiveVector {
case _ if valueIsNonZero(left.sparseValue) =>
fromVector(
Vector(Semigroup.plus(toVector(left): IndexedSeq[V], toVector(right): IndexedSeq[V]): _*),
left.sparseValue)
left.sparseValue
)
case _ => // sparse is zero:
fromMap(Semigroup.plus(toMap(left), toMap(right)), left.sparseValue, maxSize)
}
70 changes: 44 additions & 26 deletions algebird-core/src/main/scala/com/twitter/algebird/Aggregator.scala
@@ -69,7 +69,8 @@ object Aggregator extends java.io.Serializable {
* Equivalent to {{{ appendSemigroup(prep, appnd, identity[T]_)(sg) }}}
*/
def appendSemigroup[F, T](prep: F => T, appnd: (T, F) => T)(
implicit sg: Semigroup[T]): Aggregator[F, T, T] =
implicit sg: Semigroup[T]
): Aggregator[F, T, T] =
appendSemigroup(prep, appnd, identity[T] _)(sg)

/**
@@ -85,7 +86,8 @@
* @note The functions 'appnd' and 'prep' are expected to obey the law: {{{ appnd(t, f) == sg.plus(t, prep(f)) }}}
*/
def appendSemigroup[F, T, P](prep: F => T, appnd: (T, F) => T, pres: T => P)(
implicit sg: Semigroup[T]): Aggregator[F, T, P] =
implicit sg: Semigroup[T]
): Aggregator[F, T, P] =
new Aggregator[F, T, P] {
def semigroup: Semigroup[T] = sg
def prepare(input: F): T = prep(input)
@@ -130,7 +132,8 @@ object Aggregator extends java.io.Serializable {
* @note The function 'appnd' is expected to obey the law: {{{ appnd(t, f) == m.plus(t, appnd(m.zero, f)) }}}
*/
def appendMonoid[F, T, P](appnd: (T, F) => T, pres: T => P)(
implicit m: Monoid[T]): MonoidAggregator[F, T, P] =
implicit m: Monoid[T]
): MonoidAggregator[F, T, P] =
new MonoidAggregator[F, T, P] {
def monoid: Monoid[T] = m
def prepare(input: F): T = appnd(m.zero, input)
@@ -237,8 +240,9 @@
*
* This function is like writing list.sortBy(fn).reverse.take(count).
*/
def sortByReverseTake[T, U: Ordering](count: Int)(
fn: T => U): MonoidAggregator[T, PriorityQueue[T], Seq[T]] =
def sortByReverseTake[T, U: Ordering](
count: Int
)(fn: T => U): MonoidAggregator[T, PriorityQueue[T], Seq[T]] =
Aggregator.sortedReverseTake(count)(Ordering.by(fn))

/**
@@ -258,8 +262,10 @@
* selected. This assumes that all sampled records can fit in memory, so use this only when the
* expected number of sampled values is small.
*/
def randomSample[T](prob: Double,
seed: Int = DefaultSeed): MonoidAggregator[T, Option[Batched[T]], List[T]] = {
def randomSample[T](
prob: Double,
seed: Int = DefaultSeed
): MonoidAggregator[T, Option[Batched[T]], List[T]] = {
assert(prob >= 0 && prob <= 1, "randomSample.prob must lie in [0, 1]")
val rng = new java.util.Random(seed)
Preparer[T]
@@ -272,8 +278,10 @@
* then 'count' total records). This assumes that all 'count' of the records can fit in memory,
* so use this only for small values of 'count'.
*/
def reservoirSample[T](count: Int,
seed: Int = DefaultSeed): MonoidAggregator[T, PriorityQueue[(Double, T)], Seq[T]] = {
def reservoirSample[T](
count: Int,
seed: Int = DefaultSeed
): MonoidAggregator[T, PriorityQueue[(Double, T)], Seq[T]] = {
val rng = new java.util.Random(seed)
Preparer[T]
.map(rng.nextDouble() -> _)
@@ -324,15 +332,17 @@
* The items that are iterated over cannot be negative.
*/
def approximatePercentile[T](percentile: Double, k: Int = QTreeAggregator.DefaultK)(
implicit num: Numeric[T]): QTreeAggregatorLowerBound[T] =
implicit num: Numeric[T]
): QTreeAggregatorLowerBound[T] =
QTreeAggregatorLowerBound[T](percentile, k)

/**
* Returns the intersection of a bounded percentile where the percentile is between (0,1]
* The items that are iterated over cannot be negative.
*/
def approximatePercentileBounds[T](percentile: Double, k: Int = QTreeAggregator.DefaultK)(
implicit num: Numeric[T]): QTreeAggregator[T] =
implicit num: Numeric[T]
): QTreeAggregator[T] =
QTreeAggregator[T](percentile, k)

/**
@@ -429,8 +439,9 @@ trait Aggregator[-A, B, +C] extends java.io.Serializable { self =>
* This returns the cumulative sum of its inputs, in the same order.
* If the inputs are empty, the result will be empty too.
*/
def applyCumulatively[In <: TraversableOnce[A], Out](inputs: In)(
implicit bf: CanBuildFrom[In, C, Out]): Out = {
def applyCumulatively[In <: TraversableOnce[A], Out](
inputs: In
)(implicit bf: CanBuildFrom[In, C, Out]): Out = {
val builder = bf()
builder ++= cumulativeIterator(inputs.toIterator)
builder.result
@@ -509,22 +520,28 @@ class AggregatorApplicative[I] extends Applicative[({ type L[O] = Aggregator[I,
Aggregator.const(v)
override def join[T, U](mt: Aggregator[I, _, T], mu: Aggregator[I, _, U]): Aggregator[I, _, (T, U)] =
mt.join(mu)
override def join[T1, T2, T3](m1: Aggregator[I, _, T1],
m2: Aggregator[I, _, T2],
m3: Aggregator[I, _, T3]): Aggregator[I, _, (T1, T2, T3)] =
override def join[T1, T2, T3](
m1: Aggregator[I, _, T1],
m2: Aggregator[I, _, T2],
m3: Aggregator[I, _, T3]
): Aggregator[I, _, (T1, T2, T3)] =
GeneratedTupleAggregator.from3((m1, m2, m3))

override def join[T1, T2, T3, T4](m1: Aggregator[I, _, T1],
m2: Aggregator[I, _, T2],
m3: Aggregator[I, _, T3],
m4: Aggregator[I, _, T4]): Aggregator[I, _, (T1, T2, T3, T4)] =
override def join[T1, T2, T3, T4](
m1: Aggregator[I, _, T1],
m2: Aggregator[I, _, T2],
m3: Aggregator[I, _, T3],
m4: Aggregator[I, _, T4]
): Aggregator[I, _, (T1, T2, T3, T4)] =
GeneratedTupleAggregator.from4((m1, m2, m3, m4))

override def join[T1, T2, T3, T4, T5](m1: Aggregator[I, _, T1],
m2: Aggregator[I, _, T2],
m3: Aggregator[I, _, T3],
m4: Aggregator[I, _, T4],
m5: Aggregator[I, _, T5]): Aggregator[I, _, (T1, T2, T3, T4, T5)] =
override def join[T1, T2, T3, T4, T5](
m1: Aggregator[I, _, T1],
m2: Aggregator[I, _, T2],
m3: Aggregator[I, _, T3],
m4: Aggregator[I, _, T4],
m5: Aggregator[I, _, T5]
): Aggregator[I, _, (T1, T2, T3, T4, T5)] =
GeneratedTupleAggregator.from5((m1, m2, m3, m4, m5))
}

@@ -558,7 +575,8 @@ trait MonoidAggregator[-A, B, +C] extends Aggregator[A, B, C] { self =>
* and outputs the pair from both
*/
def either[A2, B2, C2](
that: MonoidAggregator[A2, B2, C2]): MonoidAggregator[Either[A, A2], (B, B2), (C, C2)] =
that: MonoidAggregator[A2, B2, C2]
): MonoidAggregator[Either[A, A2], (B, B2), (C, C2)] =
new MonoidAggregator[Either[A, A2], (B, B2), (C, C2)] {
def prepare(e: Either[A, A2]) = e match {
case Left(a) => (self.prepare(a), that.monoid.zero)
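The `Aggregator.scala` hunks above only re-wrap parameter lists, but the signatures they touch are part of algebird's public API. A small usage sketch of two of them, `sortByReverseTake` and `appendMonoid`, both visible in the diff; the inputs and expected results below are illustrative, not taken from the PR:

```scala
import com.twitter.algebird.Aggregator

// Top three elements in descending order; per the scaladoc above, this is
// equivalent to list.sortBy(fn).reverse.take(count).
val top3 = Aggregator.sortByReverseTake[Int, Int](3)(identity)
val best = top3(List(5, 1, 9, 2, 7)) // Seq(9, 7, 5)

// appendMonoid requires appnd(t, f) == m.plus(t, appnd(m.zero, f)),
// which summing string lengths satisfies.
val totalLength = Aggregator.appendMonoid[String, Int, Int]((acc, s) => acc + s.length, identity)
val chars = totalLength(List("foo", "quux")) // 7
```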
@@ -63,11 +63,13 @@ trait Applicative[M[_]] extends Functor[M] {
case (((t1, t2), t3), t4) => (t1, t2, t3, t4)
}

def join[T1, T2, T3, T4, T5](m1: M[T1],
m2: M[T2],
m3: M[T3],
m4: M[T4],
m5: M[T5]): M[(T1, T2, T3, T4, T5)] =
def join[T1, T2, T3, T4, T5](
m1: M[T1],
m2: M[T2],
m3: M[T3],
m4: M[T4],
m5: M[T5]
): M[(T1, T2, T3, T4, T5)] =
joinWith(join(join(join(m1, m2), m3), m4), m5) {
case ((((t1, t2), t3), t4), t5) => (t1, t2, t3, t4, t5)
}
@@ -90,10 +92,12 @@ object Applicative {
def join[M[_], T1, T2, T3](m1: M[T1], m2: M[T2], m3: M[T3])(implicit app: Applicative[M]): M[(T1, T2, T3)] =
app.join(m1, m2, m3)
def join[M[_], T1, T2, T3, T4](m1: M[T1], m2: M[T2], m3: M[T3], m4: M[T4])(
implicit app: Applicative[M]): M[(T1, T2, T3, T4)] =
implicit app: Applicative[M]
): M[(T1, T2, T3, T4)] =
app.join(m1, m2, m3, m4)
def join[M[_], T1, T2, T3, T4, T5](m1: M[T1], m2: M[T2], m3: M[T3], m4: M[T4], m5: M[T5])(
implicit app: Applicative[M]): M[(T1, T2, T3, T4, T5)] =
implicit app: Applicative[M]
): M[(T1, T2, T3, T4, T5)] =
app.join(m1, m2, m3, m4, m5)
def sequence[M[_], T](ms: Seq[M[T]])(implicit app: Applicative[M]): M[Seq[T]] =
app.sequence(ms)
@@ -102,7 +106,8 @@ object Applicative {
* A Generic sequence that uses CanBuildFrom
*/
def sequenceGen[M[_], T, S[X] <: TraversableOnce[X], R[_]](
ms: S[M[T]])(implicit app: Applicative[M], cbf: CanBuildFrom[Nothing, T, R[T]]): M[R[T]] = {
ms: S[M[T]]
)(implicit app: Applicative[M], cbf: CanBuildFrom[Nothing, T, R[T]]): M[R[T]] = {
val bldr = cbf()
val mbldr = ms.toIterator.foldLeft(app.apply(bldr)) { (mb, mt) =>
app.joinWith(mb, mt)(_ += _)
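The `join` and `sequence` helpers in the `Applicative` section above are generic in `M[_]`, so any type with an instance can use them. The sketch below hand-rolls an `Applicative[Option]` purely for illustration (it does not assume algebird ships such an instance) and passes it explicitly:

```scala
import com.twitter.algebird.Applicative

// A minimal Applicative[Option] written locally for this example.
val optionApp: Applicative[Option] = new Applicative[Option] {
  override def map[T, U](m: Option[T])(fn: T => U): Option[U] = m.map(fn)
  override def apply[T](v: T): Option[T] = Some(v)
  override def join[T, U](mt: Option[T], mu: Option[U]): Option[(T, U)] =
    for { t <- mt; u <- mu } yield (t, u)
  override def joinWith[T, U, V](mt: Option[T], mu: Option[U])(fn: (T, U) => V): Option[V] =
    join(mt, mu).map { case (t, u) => fn(t, u) }
}

// The n-ary joins nest pairs and then flatten them, as the code above shows.
Applicative.join(Option(1), Option("a"), Option(true))(optionApp) // Some((1, "a", true))
Applicative.sequence(Seq(Option(1), Option(2)))(optionApp)        // Some(Seq(1, 2))
```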
@@ -72,8 +72,8 @@ object ApproximateBoolean {

// Note the probWithinBounds is a LOWER BOUND (at least this probability)
case class Approximate[N](min: N, estimate: N, max: N, probWithinBounds: Double)(
implicit val numeric: Numeric[N])
extends ApproximateSet[N] {
implicit val numeric: Numeric[N]
) extends ApproximateSet[N] {
require(numeric.lteq(min, estimate) && numeric.lteq(estimate, max))

/**
@@ -101,7 +101,8 @@ case class Approximate[N](min: N, estimate: N, max: N, probWithinBounds: Double)
n.plus(min, right.min),
n.plus(estimate, right.estimate),
n.plus(max, right.max),
probWithinBounds * right.probWithinBounds)
probWithinBounds * right.probWithinBounds
)
}
def -(right: Approximate[N]): Approximate[N] =
this.+(right.negate)
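Since the body of `+` is visible just above (min, estimate and max add component-wise and the lower-bound probabilities multiply), a tiny worked example with made-up numbers:

```scala
import com.twitter.algebird.Approximate

val a = Approximate(1L, 2L, 3L, 0.9) // estimate 2, known to lie in [1, 3] with prob >= 0.9
val b = Approximate(4L, 4L, 4L, 1.0) // exactly 4
val c = a + b                        // Approximate(5, 6, 7, 0.9): bounds add, probabilities multiply
```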
@@ -192,8 +192,10 @@ object Batched {
* (e.g. when there is temporary mutable state used to make
* summation fast).
*/
def monoidAggregator[A, B, C](batchSize: Int,
agg: MonoidAggregator[A, B, C]): MonoidAggregator[A, Batched[B], C] =
def monoidAggregator[A, B, C](
batchSize: Int,
agg: MonoidAggregator[A, B, C]
): MonoidAggregator[A, Batched[B], C] =
new MonoidAggregator[A, Batched[B], C] {
def prepare(a: A): Batched[B] = Item(agg.prepare(a))
def monoid: Monoid[Batched[B]] = new BatchedMonoid(batchSize)(agg.monoid)
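`Batched.monoidAggregator` (reformatted above) wraps an existing `MonoidAggregator` so that intermediate values are only compacted every `batchSize` inputs. A sketch that feeds it a plain integer-sum aggregator built with the `appendMonoid` signature from the `Aggregator.scala` hunks; the numbers are illustrative:

```scala
import com.twitter.algebird.{Aggregator, Batched}

// Sum of Ints, with partial sums compacted every 64 inputs.
val sum = Aggregator.appendMonoid[Int, Int, Int](_ + _, identity)
val batchedSum = Batched.monoidAggregator(64, sum)

val total = batchedSum(1 to 1000) // 500500
```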
25 changes: 15 additions & 10 deletions algebird-core/src/main/scala/com/twitter/algebird/BloomFilter.scala
@@ -101,7 +101,8 @@ object BloomFilter {
BloomFilter.optimalWidth(numEntries, fpProb) match {
case None =>
throw new java.lang.IllegalArgumentException(
s"BloomFilter cannot guarantee the specified false positive probability for the number of entries! (numEntries: $numEntries, fpProb: $fpProb)")
s"BloomFilter cannot guarantee the specified false positive probability for the number of entries! (numEntries: $numEntries, fpProb: $fpProb)"
)
case Some(width) =>
val numHashes = BloomFilter.optimalNumHashes(numEntries, width)
BloomFilterMonoid[A](numHashes, width)(hash)
@@ -137,10 +138,12 @@ object BloomFilter {
* (min, estimate, max) =
* ((1 - approxWidth) * estimate, estimate, (1 + approxWidth) * estimate)
*/
def sizeEstimate(numBits: Int,
numHashes: Int,
width: Int,
approximationWidth: Double = 0.05): Approximate[Long] = {
def sizeEstimate(
numBits: Int,
numHashes: Int,
width: Int,
approximationWidth: Double = 0.05
): Approximate[Long] = {
assert(0 <= approximationWidth && approximationWidth < 1, "approximationWidth must lie in [0, 1)")

/**
@@ -636,11 +639,13 @@ case class BFHash[A](numHashes: Int, width: Int)(implicit hash: Hash128[A]) {
}

@annotation.tailrec
private def nextHash(valueToHash: A,
hashIndex: Int,
buffer: Array[Int],
bidx: Int,
target: Array[Int]): Array[Int] =
private def nextHash(
valueToHash: A,
hashIndex: Int,
buffer: Array[Int],
bidx: Int,
target: Array[Int]
): Array[Int] =
if (hashIndex == numHashes) target
else {
val thisBidx = if (bidx > 3) {
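The `BloomFilter.scala` hunks above touch the monoid constructor and the hashing internals, while the public entry point stays the one already used by the benchmark code near the top of this diff (`BloomFilter[String](numEntries, fpProb)`). A short usage sketch with illustrative sizes:

```scala
import com.twitter.algebird.BloomFilter

// Monoid sized for roughly 1000 entries at a ~1% false-positive rate; an
// unsatisfiable (numEntries, fpProb) pair throws, per the match shown above.
val bfMonoid = BloomFilter[String](1000, 0.01)

val bf = bfMonoid.create("alice", "bob")
bf.contains("alice")   // ApproximateBoolean: true
bf.contains("charlie") // false, up to the configured false-positive probability
```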