code (stringlengths, 5-1M) | repo_name (stringlengths, 5-109) | path (stringlengths, 6-208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5-1M)
---|---|---|---|---|---|
package org.scalajs.testsuite.utils
import java.{lang => jl, util => ju}
import org.scalajs.testsuite.utils.AssertThrows._
import scala.collection.JavaConversions._
trait CollectionsTestBase {
val range: Range = 0 to 30
class A extends jl.Comparable[A] {
def compareTo(o: A): Int = this.##.compareTo(o.##)
}
class B extends A
class C extends B
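// Orders instances by value modulo 8, so distinct values can compare as equal.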
class CustomComparable(val v: Int) extends jl.Comparable[CustomComparable] {
override def compareTo(o: CustomComparable): Int =
(v % 8).compareTo(o.v % 8)
override def toString(): String =
s"CustomComparable($v)"
}
def testCollectionImmutability[E](coll: ju.Collection[E], elem: E): Unit = {
expectThrows(classOf[UnsupportedOperationException], coll.add(elem))
expectThrows(classOf[UnsupportedOperationException],
coll.addAll(Seq.empty[E]))
expectThrows(classOf[UnsupportedOperationException], coll.clear())
expectThrows(classOf[UnsupportedOperationException], coll.remove(elem))
expectThrows(classOf[UnsupportedOperationException],
coll.removeAll(Seq.empty[E]))
expectThrows(classOf[UnsupportedOperationException],
coll.retainAll(Seq.empty[E]))
testIteratorsImmutability(() => coll.iterator())
}
def testSetImmutability[E](set: ju.Set[E], elem: E): Unit =
testCollectionImmutability(set, elem)
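// For the sorted variants below, the `recursive` flag keeps the checks on derived views (head/tail/sub sets and maps) from recursing more than one level deep.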
def testSortedSetImmutability[E](set: ju.SortedSet[E], elem: E,
recursive: Boolean = false): Unit = {
testSetImmutability(set, elem)
def testSubsets(ss: ju.SortedSet[E]) = {
if (recursive) testSetImmutability(ss, elem)
else testSortedSetImmutability(ss, elem, true)
}
testSubsets(set.headSet(elem))
testSubsets(set.tailSet(elem))
testSubsets(set.subSet(elem, elem))
}
def testListImmutability[E](list: ju.List[E], elem: E,
recursive: Boolean = false): Unit = {
testCollectionImmutability(list, elem)
expectThrows(classOf[UnsupportedOperationException], list.add(0, elem))
expectThrows(classOf[UnsupportedOperationException],
list.addAll(0, Seq.empty[E]))
expectThrows(classOf[UnsupportedOperationException], list.remove(0))
expectThrows(classOf[UnsupportedOperationException], list.set(0, elem))
def testSublist(sl: ju.List[E]): Unit = {
if (recursive) testCollectionImmutability(sl, elem)
else testListImmutability(sl, elem, true)
}
testSublist(list.subList(0, list.size / 2))
testListIteratorsImmutability(() => list.listIterator(), elem)
testListIteratorsImmutability(() => list.listIterator(0), elem)
}
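// Creates a fresh iterator, advances it to its first element (if any), and verifies that the given action either throws the expected exception or completes normally.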
def testOnFirstPositionOfIterator[Iter <: ju.Iterator[_]](
newIter: () => Iter, action: Iter => Unit,
expectedException: Option[Class[_ <: Throwable]]): Unit = {
val it = newIter()
if (it.hasNext) {
it.next()
expectedException match {
case Some(exClass) => expectThrows(exClass, action(it))
case None => action(it)
}
}
}
def testMapImmutability[K, V](map: ju.Map[K, V], key: K, value: V): Unit = {
expectThrows(classOf[UnsupportedOperationException], map.clear())
expectThrows(classOf[UnsupportedOperationException], map.put(key, value))
expectThrows(classOf[UnsupportedOperationException],
map.putAll(Map.empty[K, V]))
testSetImmutability(map.entrySet(),
new ju.AbstractMap.SimpleImmutableEntry(key, value))
testSetImmutability(map.keySet(), key)
testCollectionImmutability(map.values(), value)
}
def testSortedMapImmutability[K, V](map: ju.SortedMap[K, V], key: K, value: V,
recursive: Boolean = false): Unit = {
testMapImmutability(map, key, value)
def testSubmap(sm: ju.SortedMap[K, V]) = {
if (recursive) testMapImmutability(sm, key, value)
else testSortedMapImmutability(sm, key, value, true)
}
testSubmap(map.headMap(key))
testSubmap(map.tailMap(key))
testSubmap(map.subMap(key, key))
}
def testIteratorsImmutability[E](newIter: () => ju.Iterator[E]): Unit = {
testOnFirstPositionOfIterator[ju.Iterator[E]](newIter, _.remove(),
Some(classOf[UnsupportedOperationException]))
}
def testListIteratorsImmutability[E](newIter: () => ju.ListIterator[E],
elem: E): Unit = {
testIteratorsImmutability(newIter)
testOnFirstPositionOfIterator[ju.ListIterator[E]](newIter, _.add(elem),
Some(classOf[UnsupportedOperationException]))
testOnFirstPositionOfIterator[ju.ListIterator[E]](newIter, _.set(elem),
Some(classOf[UnsupportedOperationException]))
}
}
| mdedetrich/scala-js | test-suite/shared/src/test/scala/org/scalajs/testsuite/utils/CollectionsTestBase.scala | Scala | bsd-3-clause | 4,536 |
package com.twitter.finagle.service
import com.twitter.finagle.Filter.TypeAgnostic
import com.twitter.finagle._
import com.twitter.finagle.stats.{
ExceptionStatsHandler,
MultiCategorizingExceptionStatsHandler,
StatsReceiver
}
import com.twitter.util._
import java.util.concurrent.atomic.LongAdder
import java.util.concurrent.TimeUnit
import scala.util.control.NonFatal
object StatsFilter {
val role: Stack.Role = Stack.Role("RequestStats")
/**
* Configures a [[StatsFilter.module]] to track latency using the
* given [[TimeUnit]].
*/
case class Param(unit: TimeUnit) {
def mk(): (Param, Stack.Param[Param]) = (this, Param.param)
}
object Param {
implicit val param = Stack.Param(Param(TimeUnit.MILLISECONDS))
}
/**
* Allows customizing how `now` is computed.
* Exposed for testing purposes.
*
* Defaults to `None` which uses the System clock for timings.
*/
private[finagle] case class Now(private val nowOpt: Option[() => Long]) {
def nowOrDefault(timeUnit: TimeUnit): () => Long = nowOpt match {
case Some(n) => n
case None => nowForTimeUnit(timeUnit)
}
}
private[finagle] object Now {
implicit val param: Stack.Param[Now] = Stack.Param(Now(None))
}
/**
* Creates a [[com.twitter.finagle.Stackable]] [[com.twitter.finagle.service.StatsFilter]].
*/
def module[Req, Rep]: Stackable[ServiceFactory[Req, Rep]] =
new Stack.Module5[
param.Stats,
param.ExceptionStatsHandler,
param.ResponseClassifier,
Param,
Now,
ServiceFactory[Req, Rep]
] {
val role: Stack.Role = StatsFilter.role
val description: String = "Report request statistics"
def make(
_stats: param.Stats,
_exceptions: param.ExceptionStatsHandler,
_classifier: param.ResponseClassifier,
_param: Param,
now: Now,
next: ServiceFactory[Req, Rep]
): ServiceFactory[Req, Rep] = {
val param.Stats(statsReceiver) = _stats
if (statsReceiver.isNull)
next
else {
new StatsFilter(
statsReceiver,
_classifier.responseClassifier,
_exceptions.categorizer,
_param.unit,
now.nowOrDefault(_param.unit)
).andThen(next)
}
}
}
/** Basic categorizer with all exceptions under 'failures'. */
val DefaultExceptions = new MultiCategorizingExceptionStatsHandler(
mkFlags = FailureFlags.flagsOf,
mkSource = SourcedException.unapply
) {
override def toString: String = "DefaultCategorizer"
}
private val SyntheticException =
new ResponseClassificationSyntheticException()
def typeAgnostic(
statsReceiver: StatsReceiver,
exceptionStatsHandler: ExceptionStatsHandler
): TypeAgnostic =
typeAgnostic(
statsReceiver,
ResponseClassifier.Default,
exceptionStatsHandler,
TimeUnit.MILLISECONDS
)
def typeAgnostic(
statsReceiver: StatsReceiver,
responseClassifier: ResponseClassifier,
exceptionStatsHandler: ExceptionStatsHandler,
timeUnit: TimeUnit
): TypeAgnostic =
typeAgnostic(
statsReceiver,
responseClassifier,
exceptionStatsHandler,
timeUnit,
nowForTimeUnit(timeUnit))
private[finagle] def typeAgnostic(
statsReceiver: StatsReceiver,
responseClassifier: ResponseClassifier,
exceptionStatsHandler: ExceptionStatsHandler,
timeUnit: TimeUnit,
now: () => Long
): TypeAgnostic = new TypeAgnostic {
def toFilter[Req, Rep]: Filter[Req, Rep, Req, Rep] =
new StatsFilter[Req, Rep](
statsReceiver,
responseClassifier,
exceptionStatsHandler,
timeUnit,
now)
}
private def nowForTimeUnit(timeUnit: TimeUnit): () => Long = timeUnit match {
case TimeUnit.NANOSECONDS => Stopwatch.systemNanos
case TimeUnit.MICROSECONDS => Stopwatch.systemMicros
case TimeUnit.MILLISECONDS => Stopwatch.systemMillis
case _ =>
() =>
timeUnit.convert(System.nanoTime(), TimeUnit.NANOSECONDS)
}
}
/**
* A `StatsFilter` reports request statistics including number of requests,
* number successful and request latency to the given [[StatsReceiver]].
*
* This constructor is exposed for testing purposes.
*
* @param responseClassifier used to determine when a response
* is successful or not.
*
* @param timeUnit this controls what granularity is used for
* measuring latency. The default is milliseconds,
* but other values are valid. The choice of this changes the name of the stat
* attached to the given [[StatsReceiver]]. For the common units,
* it will be "request_latency_ms".
*/
class StatsFilter[Req, Rep] private[service] (
statsReceiver: StatsReceiver,
responseClassifier: ResponseClassifier,
exceptionStatsHandler: ExceptionStatsHandler,
timeUnit: TimeUnit,
now: () => Long)
extends SimpleFilter[Req, Rep] {
import StatsFilter.SyntheticException
/**
* A `StatsFilter` reports request statistics including number of requests,
* number successful, and request latency to the given [[StatsReceiver]].
*
* This constructor is exposed for testing purposes.
*
* @param responseClassifier used to determine when a response
* is successful or not.
*
* @param timeUnit this controls what granularity is used for
* measuring latency. The default is milliseconds,
* but other values are valid. The choice of this changes the name of the stat
* attached to the given [[StatsReceiver]]. For the common units,
* it will be "request_latency_ms".
*/
def this(
statsReceiver: StatsReceiver,
responseClassifier: ResponseClassifier,
exceptionStatsHandler: ExceptionStatsHandler,
timeUnit: TimeUnit
) =
this(
statsReceiver,
responseClassifier,
exceptionStatsHandler,
timeUnit,
StatsFilter.nowForTimeUnit(timeUnit))
/**
* A `StatsFilter` reports request statistics including number of requests,
* number successful and request latency to the given [[StatsReceiver]].
*
* @param timeUnit this controls what granularity is used for
* measuring latency. The default is milliseconds,
* but other values are valid. The choice of this changes the name of the stat
* attached to the given [[StatsReceiver]]. For the common units,
* it will be "request_latency_ms".
*/
def this(
statsReceiver: StatsReceiver,
exceptionStatsHandler: ExceptionStatsHandler,
timeUnit: TimeUnit
) = this(statsReceiver, ResponseClassifier.Default, exceptionStatsHandler, timeUnit)
/**
* A `StatsFilter` reports request statistics including number of requests,
* number successful and request latency to the given [[StatsReceiver]].
*/
def this(statsReceiver: StatsReceiver, exceptionStatsHandler: ExceptionStatsHandler) =
this(statsReceiver, exceptionStatsHandler, TimeUnit.MILLISECONDS)
/**
* A `StatsFilter` reports request statistics including number of requests,
* number successful and request latency to the given [[StatsReceiver]].
*/
def this(statsReceiver: StatsReceiver) = this(statsReceiver, StatsFilter.DefaultExceptions)
private[this] def latencyStatSuffix: String = {
timeUnit match {
case TimeUnit.NANOSECONDS => "ns"
case TimeUnit.MICROSECONDS => "us"
case TimeUnit.MILLISECONDS => "ms"
case TimeUnit.SECONDS => "secs"
case _ => timeUnit.toString.toLowerCase
}
}
private[this] val outstandingRequestCount = new LongAdder()
private[this] val dispatchCount = statsReceiver.counter("requests")
private[this] val successCount = statsReceiver.counter("success")
private[this] val latencyStat = statsReceiver.stat(s"request_latency_$latencyStatSuffix")
private[this] val outstandingRequestCountGauge =
statsReceiver.addGauge("pending") { outstandingRequestCount.sum() }
private[this] def isIgnorableResponse(rep: Try[Rep]): Boolean = rep match {
case Throw(f: FailureFlags[_]) if f.isFlagged(FailureFlags.Ignorable) =>
true
case _ =>
false
}
def apply(request: Req, service: Service[Req, Rep]): Future[Rep] = {
val start = now()
outstandingRequestCount.increment()
val result = try {
service(request)
} catch {
case NonFatal(e) =>
Future.exception(e)
}
result.respond { response =>
outstandingRequestCount.decrement()
if (!isIgnorableResponse(response)) {
dispatchCount.incr()
responseClassifier.applyOrElse(
ReqRep(request, response),
ResponseClassifier.Default
) match {
case ResponseClass.Ignorable => // Do nothing.
case ResponseClass.Failed(_) =>
latencyStat.add(now() - start)
response match {
case Throw(e) =>
exceptionStatsHandler.record(statsReceiver, e)
case _ =>
exceptionStatsHandler.record(statsReceiver, SyntheticException)
}
case ResponseClass.Successful(_) =>
successCount.incr()
latencyStat.add(now() - start)
}
}
}
}
}
private[finagle] object StatsServiceFactory {
val role: Stack.Role = Stack.Role("FactoryStats")
/**
* Creates a [[com.twitter.finagle.Stackable]] [[com.twitter.finagle.service.StatsServiceFactory]].
*/
def module[Req, Rep]: Stackable[ServiceFactory[Req, Rep]] =
new Stack.Module1[param.Stats, ServiceFactory[Req, Rep]] {
val role: Stack.Role = StatsServiceFactory.role
val description: String = "Report connection statistics"
def make(_stats: param.Stats, next: ServiceFactory[Req, Rep]): ServiceFactory[Req, Rep] = {
val param.Stats(statsReceiver) = _stats
if (statsReceiver.isNull) next
else new StatsServiceFactory(next, statsReceiver)
}
}
}
class StatsServiceFactory[Req, Rep](factory: ServiceFactory[Req, Rep], statsReceiver: StatsReceiver)
extends ServiceFactoryProxy[Req, Rep](factory) {
private[this] val availableGauge = statsReceiver.addGauge("available") {
if (isAvailable) 1F else 0F
}
}
| luciferous/finagle | finagle-core/src/main/scala/com/twitter/finagle/service/StatsFilter.scala | Scala | apache-2.0 | 10,140 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.datastream
import java.math.BigDecimal
import org.apache.flink.api.scala._
import org.apache.flink.types.Row
import org.apache.flink.table.api.scala.stream.utils.StreamITCase
import org.apache.flink.table.api.scala._
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.AssignerWithPunctuatedWatermarks
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.watermark.Watermark
import org.apache.flink.streaming.util.StreamingMultipleProgramsTestBase
import org.apache.flink.table.api.TableEnvironment
import org.apache.flink.table.runtime.datastream.DataStreamAggregateITCase.TimestampWithEqualWatermark
import org.junit.Assert._
import org.junit.Test
import scala.collection.mutable
class DataStreamAggregateITCase extends StreamingMultipleProgramsTestBase {
val data = List(
(1L, 1, 1d, 1f, new BigDecimal("1"), "Hi"),
(2L, 2, 2d, 2f, new BigDecimal("2"), "Hallo"),
(3L, 2, 2d, 2f, new BigDecimal("2"), "Hello"),
(4L, 5, 5d, 5f, new BigDecimal("5"), "Hello"),
(7L, 3, 3d, 3f, new BigDecimal("3"), "Hello"),
(8L, 3, 3d, 3f, new BigDecimal("3"), "Hello world"),
(16L, 4, 4d, 4f, new BigDecimal("4"), "Hello world"))
// ----------------------------------------------------------------------------------------------
// Sliding windows
// ----------------------------------------------------------------------------------------------
@Test
def testAllEventTimeSlidingGroupWindowOverTime(): Unit = {
// please keep this test in sync with the DataSet variant
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data)
.assignTimestampsAndWatermarks(new TimestampWithEqualWatermark())
val table = stream.toTable(tEnv, 'long.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
val windowedTable = table
.window(Slide over 5.milli every 2.milli on 'long as 'w)
.groupBy('w)
.select('int.count, 'w.start, 'w.end)
val results = windowedTable.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"1,1970-01-01 00:00:00.008,1970-01-01 00:00:00.013",
"1,1970-01-01 00:00:00.012,1970-01-01 00:00:00.017",
"1,1970-01-01 00:00:00.014,1970-01-01 00:00:00.019",
"1,1970-01-01 00:00:00.016,1970-01-01 00:00:00.021",
"2,1969-12-31 23:59:59.998,1970-01-01 00:00:00.003",
"2,1970-01-01 00:00:00.006,1970-01-01 00:00:00.011",
"3,1970-01-01 00:00:00.002,1970-01-01 00:00:00.007",
"3,1970-01-01 00:00:00.004,1970-01-01 00:00:00.009",
"4,1970-01-01 00:00:00.0,1970-01-01 00:00:00.005")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testEventTimeSlidingGroupWindowOverTimeOverlappingFullPane(): Unit = {
// please keep this test in sync with the DataSet variant
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data)
.assignTimestampsAndWatermarks(new TimestampWithEqualWatermark())
val table = stream.toTable(tEnv, 'long.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
val windowedTable = table
.window(Slide over 10.milli every 5.milli on 'long as 'w)
.groupBy('w, 'string)
.select('string, 'int.count, 'w.start, 'w.end)
val results = windowedTable.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"Hallo,1,1969-12-31 23:59:59.995,1970-01-01 00:00:00.005",
"Hallo,1,1970-01-01 00:00:00.0,1970-01-01 00:00:00.01",
"Hello world,1,1970-01-01 00:00:00.0,1970-01-01 00:00:00.01",
"Hello world,1,1970-01-01 00:00:00.005,1970-01-01 00:00:00.015",
"Hello world,1,1970-01-01 00:00:00.01,1970-01-01 00:00:00.02",
"Hello world,1,1970-01-01 00:00:00.015,1970-01-01 00:00:00.025",
"Hello,1,1970-01-01 00:00:00.005,1970-01-01 00:00:00.015",
"Hello,2,1969-12-31 23:59:59.995,1970-01-01 00:00:00.005",
"Hello,3,1970-01-01 00:00:00.0,1970-01-01 00:00:00.01",
"Hi,1,1969-12-31 23:59:59.995,1970-01-01 00:00:00.005",
"Hi,1,1970-01-01 00:00:00.0,1970-01-01 00:00:00.01")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testEventTimeSlidingGroupWindowOverTimeOverlappingSplitPane(): Unit = {
// please keep this test in sync with the DataSet variant
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data)
.assignTimestampsAndWatermarks(new TimestampWithEqualWatermark())
val table = stream.toTable(tEnv, 'long.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
val windowedTable = table
.window(Slide over 5.milli every 4.milli on 'long as 'w)
.groupBy('w, 'string)
.select('string, 'int.count, 'w.start, 'w.end)
val results = windowedTable.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"Hallo,1,1970-01-01 00:00:00.0,1970-01-01 00:00:00.005",
"Hello world,1,1970-01-01 00:00:00.004,1970-01-01 00:00:00.009",
"Hello world,1,1970-01-01 00:00:00.008,1970-01-01 00:00:00.013",
"Hello world,1,1970-01-01 00:00:00.012,1970-01-01 00:00:00.017",
"Hello world,1,1970-01-01 00:00:00.016,1970-01-01 00:00:00.021",
"Hello,2,1970-01-01 00:00:00.0,1970-01-01 00:00:00.005",
"Hello,2,1970-01-01 00:00:00.004,1970-01-01 00:00:00.009",
"Hi,1,1970-01-01 00:00:00.0,1970-01-01 00:00:00.005")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testEventTimeSlidingGroupWindowOverTimeNonOverlappingFullPane(): Unit = {
// please keep this test in sync with the DataSet variant
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data)
.assignTimestampsAndWatermarks(new TimestampWithEqualWatermark())
val table = stream.toTable(tEnv, 'long.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
val windowedTable = table
.window(Slide over 5.milli every 10.milli on 'long as 'w)
.groupBy('w, 'string)
.select('string, 'int.count, 'w.start, 'w.end)
val results = windowedTable.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"Hallo,1,1970-01-01 00:00:00.0,1970-01-01 00:00:00.005",
"Hello,2,1970-01-01 00:00:00.0,1970-01-01 00:00:00.005",
"Hi,1,1970-01-01 00:00:00.0,1970-01-01 00:00:00.005")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testEventTimeSlidingGroupWindowOverTimeNonOverlappingSplitPane(): Unit = {
// please keep this test in sync with the DataSet variant
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data)
.assignTimestampsAndWatermarks(new TimestampWithEqualWatermark())
val table = stream.toTable(tEnv, 'long.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
val windowedTable = table
.window(Slide over 3.milli every 10.milli on 'long as 'w)
.groupBy('w, 'string)
.select('string, 'int.count, 'w.start, 'w.end)
val results = windowedTable.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"Hallo,1,1970-01-01 00:00:00.0,1970-01-01 00:00:00.003",
"Hi,1,1970-01-01 00:00:00.0,1970-01-01 00:00:00.003")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testEventTimeGroupWindowWithoutExplicitTimeField(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data)
.assignTimestampsAndWatermarks(new TimestampWithEqualWatermark())
.map(t => (t._2, t._6))
val table = stream.toTable(tEnv, 'int, 'string, 'rowtime.rowtime)
val windowedTable = table
.window(Slide over 3.milli every 10.milli on 'rowtime as 'w)
.groupBy('w, 'string)
.select('string, 'int.count, 'w.start, 'w.end)
val results = windowedTable.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"Hallo,1,1970-01-01 00:00:00.0,1970-01-01 00:00:00.003",
"Hi,1,1970-01-01 00:00:00.0,1970-01-01 00:00:00.003")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
}
object DataStreamAggregateITCase {
class TimestampWithEqualWatermark
extends AssignerWithPunctuatedWatermarks[(Long, Int, Double, Float, BigDecimal, String)] {
override def checkAndGetNextWatermark(
lastElement: (Long, Int, Double, Float, BigDecimal, String),
extractedTimestamp: Long)
: Watermark = {
new Watermark(extractedTimestamp)
}
override def extractTimestamp(
element: (Long, Int, Double, Float, BigDecimal, String),
previousElementTimestamp: Long): Long = {
element._1
}
}
}
| oscarceballos/flink-1.3.2 | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/datastream/DataStreamAggregateITCase.scala | Scala | apache-2.0 | 11,018 |
package com.criteo.cuttle
import java.io._
import java.nio.file.Files
import java.time.Instant
import doobie.imports._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.stm._
trait ExecutionStreams {
def info(str: CharSequence) = this.writeln("INFO ", str)
def error(str: CharSequence) = this.writeln("ERROR", str)
def debug(str: CharSequence) = this.writeln("DEBUG", str)
private def writeln(tag: String, str: CharSequence): Unit = {
val time = Instant.now.toString
str.toString.split("\\n").foreach(l => this.writeln(s"$time $tag - $l"))
}
def writeln(str: CharSequence): Unit
}
private[cuttle] object ExecutionStreams {
private type ExecutionId = String
private type LastUsageTime = Long
private val transientStorage = Files.createTempDirectory("cuttle-logs").toFile
private val openHandles = TMap.empty[ExecutionId, (PrintWriter, LastUsageTime)]
private val maxHandles = 1024
private implicit val S = fs2.Strategy.fromExecutionContext(global)
private implicit val SC = fs2.Scheduler.fromFixedDaemonPool(1, "com.criteo.cuttle.ExecutionStreams.SC")
println(s"Transient execution streams go to $transientStorage")
private def logFile(id: ExecutionId): File = new File(transientStorage, id)
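// Returns a cached writer for this execution, opening the log file lazily and closing the least recently used handles once more than maxHandles are open.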
private def getWriter(id: ExecutionId): PrintWriter = {
val now = System.currentTimeMillis
val maybeWriter = atomic { implicit tx =>
val h = openHandles.get(id)
h.foreach { case (w, _) => openHandles += (id -> (w -> now)) }
h.map(_._1)
}
maybeWriter.getOrElse {
val (w, toClose) = atomic { implicit tx =>
val w =
new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream(logFile(id), true), "utf8")))
val toClose = if (openHandles.size > maxHandles) {
val toClear = openHandles.toSeq.sortBy(_._2._2).take(openHandles.size - maxHandles + 1).map(_._1)
toClear.map { id =>
val writerToClose = openHandles(id)._1
openHandles -= id
writerToClose
}
} else Nil
openHandles += (id -> (w -> now))
(w, toClose)
}
toClose.foreach(_.close())
w
}
}
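// Streams the transient log file as UTF-8 bytes, polling for newly appended content every second; once the file is gone, it falls back to the archived streams stored in the database.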
def getStreams(id: ExecutionId, queries: Queries, xa: XA): fs2.Stream[fs2.Task, Byte] = {
def go(alreadySent: Int = 0): fs2.Stream[fs2.Task, Byte] =
fs2.Stream.eval(fs2.Task.delay(streamsAsString(id))).flatMap {
case Some(content) =>
fs2.Stream.chunk(fs2.Chunk.bytes(content.drop(alreadySent).getBytes("utf8"))) ++ fs2.Stream
.eval(fs2.Task.schedule((), 1 second))
.flatMap(_ => go(content.size))
case None =>
fs2.Stream
.eval(fs2.Task.delay {
queries
.archivedStreams(id)
.transact(xa)
.unsafePerformIO
.map { content =>
fs2.Stream.chunk(fs2.Chunk.bytes(content.drop(alreadySent).getBytes("utf8")))
}
.getOrElse {
fs2.Stream.fail(new Exception(s"Streams not found for execution $id"))
}
})
.flatMap(identity)
}
go()
}
def writeln(id: ExecutionId, msg: CharSequence): Unit = {
val w = getWriter(id)
w.println(msg)
w.flush()
}
def streamsAsString(id: ExecutionId): Option[String] = {
val f = logFile(id)
if (f.exists) {
val limit = 1024 * 64
val buffer = Array.ofDim[Byte](limit)
val in = new FileInputStream(f)
try {
val size = in.read(buffer)
if (size >= 0) {
Some {
val content = new String(buffer, 0, size, "utf8")
if (f.length > limit) {
content + "\n--- CONTENT TRUNCATED AT 64kb --"
} else {
content
}
}
} else {
Some("")
}
} finally {
in.close()
}
} else {
None
}
}
def discard(id: ExecutionId): Unit = {
val toClose = atomic { implicit tx =>
val w = openHandles.get(id).map(_._1)
openHandles -= id
w
}
toClose.foreach(_.close())
logFile(id).delete()
}
def archive(id: ExecutionId, queries: Queries, xa: XA): Unit = {
queries
.archiveStreams(id, streamsAsString(id).getOrElse(sys.error(s"Cannot archive streams for execution $id")))
.transact(xa)
.unsafePerformIO
discard(id)
}
}
| jbkt/cuttle | core/src/main/scala/com/criteo/cuttle/ExecutionStreams.scala | Scala | apache-2.0 | 4,506 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import org.apache.spark.sql.Strategy
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.SparkPlan
object DataSourceV2Strategy extends Strategy {
override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case r: DataSourceV2Relation =>
DataSourceV2ScanExec(r.output, r.source, r.options, r.reader) :: Nil
case r: StreamingDataSourceV2Relation =>
DataSourceV2ScanExec(r.output, r.source, r.options, r.reader) :: Nil
case WriteToDataSourceV2(writer, query) =>
WriteToDataSourceV2Exec(writer, planLater(query)) :: Nil
case _ => Nil
}
}
| brad-kaiser/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2Strategy.scala | Scala | apache-2.0 | 1,492 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.data.transfer
/**
* TransferFormats interface.
*/
enum Format {
case Csv, Txt, Html, Xls, Xlsx, Pdf, Dbf
}
| beangle/data | transfer/src/main/scala/org/beangle/data/transfer/Format.scala | Scala | lgpl-3.0 | 853 |
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.lsp.ensime
import akka.actor._
import akka.event.slf4j.SLF4JLogging
import org.ensime.api.{ EnsimeConfig, EnsimeServerConfig }
import org.ensime.lsp.api.commands.MessageType
import scala.concurrent.duration._
/**
* An actor that instantiates the Ensime Server actor and supervises it.
*
* It catches `ActorInitializationException` and tries to restart it.
*/
class EnsimeActor(langServer: EnsimeLanguageServer,
config: EnsimeConfig,
ensimeServerConfig: EnsimeServerConfig)
extends Actor
with SLF4JLogging {
private var project: ActorRef = _
override val supervisorStrategy: OneForOneStrategy =
OneForOneStrategy(5, 1 minute) {
case e @ ActorInitializationException(actor, message, cause) =>
log.error(s"Actor failed to initialize", e)
langServer.connection.logMessage(MessageType.Error,
s"Error starting ensime: $message")
SupervisorStrategy.Restart
case e =>
log.error(s"Actor crashed: ", e)
SupervisorStrategy.Restart
}
override def receive = {
case message =>
if (project eq null) {
// trying to catch this elusive ActorInitializationException by creating this actor as late
// as possible. Still, it looks like the supervisor strategy does not get this crash
log.info("Starting problematic actor now")
project = context.actorOf(
Props(
new EnsimeProjectServer(langServer, config, ensimeServerConfig)
),
"ensimeProject"
)
log.info(s"Created: $project")
}
project forward message
}
}
| ensime/ensime-server | lsp/src/main/scala/org/ensime/lsp/ensime/EnsimeActor.scala | Scala | gpl-3.0 | 1,806 |
package com.programmaticallyspeaking.ncd.e2e
import akka.actor.{ActorRef, Inbox, PoisonPill}
import com.programmaticallyspeaking.ncd.chrome.domains.Debugger.{CallFrame, EvaluateOnCallFrameResult, Location}
import com.programmaticallyspeaking.ncd.chrome.domains.Runtime.{CallArgument, RemoteObject}
import com.programmaticallyspeaking.ncd.chrome.domains.{Debugger, Domain, EventEmitHook, Runtime => RuntimeD}
import com.programmaticallyspeaking.ncd.host.types.ObjectPropertyDescriptor
import com.programmaticallyspeaking.ncd.host._
import com.programmaticallyspeaking.ncd.ioc.Container
import com.programmaticallyspeaking.ncd.nashorn.types.ObjectPropertyDescriptorTest
import com.programmaticallyspeaking.ncd.testing.{FakeFilePublisher, SharedInstanceActorTesting}
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatest.prop.TableDrivenPropertyChecks
import scala.concurrent.duration._
import scala.util.control.NonFatal
import scala.util.{Failure, Success}
trait RealDebuggerTestFixture extends E2ETestFixture with SharedInstanceActorTesting with ScalaFutures with IntegrationPatience {
var debugger: ActorRef = _
def enableDebugger: Unit = {
// Reuse the debugger, so create & enable only once.
if (debugger == null) {
implicit val container = new Container(Seq(FakeFilePublisher, getHost, new EventEmitHook))
debugger = newActorInstance[Debugger]
sendRequest(Domain.enable)
}
// clean slate
sendRequest(Debugger setBreakpointsActive true)
sendRequest(Debugger setPauseOnExceptions "none")
}
def sendRequest(msg: AnyRef): Any = sendRequestAndWait(debugger, msg)
protected def withHead(callFrames: Seq[CallFrame])(fun: (CallFrame) => Unit) = {
callFrames.headOption match {
case Some(cf) => fun(cf)
case None => fail("No call frames")
}
}
protected def withScript(callFrames: Seq[CallFrame])(f: (Script) => Unit) = {
withHead(callFrames) { cf =>
getHost.findScript(ScriptIdentity.fromId(cf.location.scriptId)) match {
case Some(s) => f(s)
case None => fail("Unknown script: " + cf.location.scriptId)
}
}
}
protected override def stopRunner(): Unit = {
Option(debugger).foreach { actorRef =>
val inbox = Inbox.create(system)
inbox.watch(actorRef)
inbox.send(actorRef, PoisonPill)
try {
// wait a few seconds for the actor to die
inbox.receive(2.seconds)
} catch {
case NonFatal(t) =>
log.warn("Failed to stop Debugger actor. It may be locked.")
}
}
debugger = null
super.stopRunner()
}
override protected def beforeEachTest(): Unit = enableDebugger
}
class RealDebuggerTest extends RealDebuggerTestFixture with TableDrivenPropertyChecks {
"Debugging" - {
"supports getting object properties twice from a second thread when the first is no longer usable" in {
val script =
"""
|var Executors = Java.type("java.util.concurrent.Executors");
|var e1 = Executors.newSingleThreadExecutor();
|var e2 = Executors.newSingleThreadExecutor();
|
|var Runnable = Java.type('java.lang.Runnable');
|var func = Java.extend(Runnable, {
| run: function() {
| var obj = { foo: 42 };
| debugger;
| obj.toString();
| }
|});
|
|var f1 = e1.submit(new func());
|f1.get();
|var f2 = e2.submit(new func());
|f2.get();
|
""".stripMargin
def getPropNames(objName: String, callFrame: CallFrame): Seq[String] = {
getHost.evaluateOnStackFrame(callFrame.callFrameId, objName) match {
case Success(c: ComplexNode) =>
getHost.getObjectProperties(c.objectId, true, false).map(_._1)
case Success(other) => fail(s"Unexpected: '$objName' evaluated to: " + other)
case Failure(t) => fail(s"Evaluation of '$objName' failed", t)
}
}
runScript(script)(callFrames => {
withHead(callFrames) { cf =>
cf.location.lineNumber should be (9)
// Get properties on the first thread, trigger creation of the extractor function
getPropNames("obj", cf) should be (Seq("foo", "__proto__"))
}
}, callFrames => {
withHead(callFrames) { cf =>
cf.location.lineNumber should be(9)
// Get properties on the second thread, extractor function cannot operate on the first (dead) one
getPropNames("obj", cf) should be (Seq("foo", "__proto__"))
}
})
}
"should handle stepping over a line with a breakpoint" in {
val script =
"""
|var i = 0;
|debugger; // here, we set a breakpoint on +2 lines below, then step
|i++; // step over from here
|i++; // here is the breakpoint, step over from here
|i.toString(); // should end up here
""".stripMargin
runScript(script)(callFrames => {
withHead(callFrames) { cf =>
cf.location.lineNumber should be (2)
withScript(callFrames) { s =>
sendRequest(Debugger.setBreakpointByUrl(4, Some(s.url.toString), None, Some(0), None))
}
sendRequest(Debugger.stepOver)
}
DontAutoResume
}, callFrames => {
withHead(callFrames) { cf =>
cf.location.lineNumber should be(3)
sendRequest(Debugger.stepOver)
}
DontAutoResume
}, callFrames => {
withHead(callFrames) { cf =>
cf.location.lineNumber should be(4)
sendRequest(Debugger.stepOver)
}
DontAutoResume
}, callFrames => {
withHead(callFrames) { cf =>
cf.location.lineNumber should be(5)
}
})
}
"should support setVariableValue" in {
val script =
"""
|this.x = 5;
|var f = function () {
| var x = 6;
| debugger;
|};
|var f2 = function () {
| var y = x;
| debugger;
| y.toString();
|}
|f();
|f2();
""".stripMargin
runScript(script)(callFrames => {
withHead(callFrames) { cf =>
// In function f, scope 0 should be global
sendRequest(Debugger.setVariableValue(0, "x", RuntimeD.CallArgument(Some(42), None, None), cf.callFrameId))
}
}, callFrames => {
withHead(callFrames) { cf =>
// In function f2, y should have value 42
val r2 = sendRequest(Debugger.evaluateOnCallFrame(cf.callFrameId, "y", None, None))
r2 should be(EvaluateOnCallFrameResult(RemoteObject.forNumber(42)))
}
})
}
"should set a variable in the correct stack frame" in {
// Note that the starting point 1.0 instead of 1 is important, since otherwise Nashorn will first attempt
// an Integer-arged method, but will then recompile into a Double-arged method upon the next call, and the
// different recompilations will result in unique locations which will cause the flawed Location-based stack
// lookup to actually work...
val script =
"""function f(callNo) {
| var x = 0;
| if (callNo === 3) {
| debugger;
| return x;
| }
| var v = 2 * f(callNo + 1);
| return v + x;
|}
|var result = f(1.0);
|debugger;
""".stripMargin
runScript(script)(callFrames => {
// At debugger statement
// Set x to 1 in the local scope in the grandparent stack frame (stack frame 3/3).
// The flawed Location-based stack frame lookup will hit frame 2/3 instead, since it has the same Location
// as frame 3/3.
callFrames.drop(2).headOption match {
case Some(cf) =>
cf.scopeChain.zipWithIndex.find(_._1.`type` == "local") match {
case Some((_, scopeIdx)) =>
sendRequest(Debugger.setVariableValue(scopeIdx, "x", RuntimeD.CallArgument(Some(1), None, None), cf.callFrameId))
case None => fail("No local scope")
}
case None => fail("No parent stack frame")
}
}, callFrames => {
withHead(callFrames) { cf =>
// result is:
// if changed in stack frame 1/3: 100 = 4
// if changed in stack frame 2/3: 010 = 2
// if changed in stack frame 3/3: 001 = 1
sendRequest(Debugger.evaluateOnCallFrame(cf.callFrameId, "result", None, None)) match {
case EvaluateOnCallFrameResult(RemoteObject("number", _, _, _, Some(numValue), _, _, _), _) =>
// Depending on the Java version, this comes back as an int or a double
numValue.toString.toDouble should be (1.0d)
case other => fail("Unexpected result: " + other)
}
}
})
}
val setVariableValueCases = Table(
("type", "initial", "toSet", "expected"),
("string", "'foo'", "bar", RemoteObject.forString("bar")),
("int", "5", 6, RemoteObject.forNumber(6)),
("double", "5.5", 6.6, RemoteObject.forNumber(6.6)),
("bool", "true", false, RemoteObject.falseValue)
)
forAll(setVariableValueCases) { (typ, initial, toSet, expected) =>
s"should support setVariableValue when a local variable of type $typ is targeted" in {
val script =
s"""
|var f = function (arg) {
| debugger;
| return arg;
|};
|this.result = f($initial);
|debugger;
""".stripMargin
runScript(script)(callFrames => {
withHead(callFrames) { cf =>
// Get the local scope
cf.scopeChain.zipWithIndex.find(_._1.`type` == "local") match {
case Some((_, idx)) =>
sendRequest(Debugger.setVariableValue(idx, "arg", RuntimeD.CallArgument(Some(toSet), None, None), cf.callFrameId))
case None => fail("No local scope")
}
}
}, callFrames => {
withHead(callFrames) { cf =>
val r2 = sendRequest(Debugger.evaluateOnCallFrame(cf.callFrameId, "this.result", None, None))
r2 should be(EvaluateOnCallFrameResult(expected))
}
})
}
}
"should support a conditional breakpoint (with column number)" in {
val script =
"""debugger;
|var list = [];
|for (var i = 0; i < 2; i++) {
| list.push(i);
|}
""".stripMargin
runScript(script)(callFrames => {
withScript(callFrames) { s =>
sendRequest(Debugger.setBreakpointByUrl(3, Some(s.url.toString), None, Some(2), Some("i>0")))
}
}, callFrames => {
withHead(callFrames) { cf =>
val r2 = sendRequest(Debugger.evaluateOnCallFrame(cf.callFrameId, "i", None, None))
// We may/will get i==1.0, so the assert becomes somewhat complicated.
r2 match {
case EvaluateOnCallFrameResult(ro, None) =>
ro.value.map(_.toString.toDouble.toInt) should be (Some(1))
case other => fail("Unexpected result: " + other)
}
}
})
}
// Disabled because this test doesn't work with Java 10:
// java.lang.NullPointerException
// at jdk.scripting.nashorn.scripts/jdk.nashorn.internal.scripts.Script$Recompilation$116$\^eval\_/1907604549.:program(<eval>:5)
"should support frame restart when paused at a debugger statement" ignore {
val script =
"""var f = function () {
| debugger; // stop here
| debugger; // after restart + resume, we should NOT get here
|};
|f();
""".stripMargin
runScript(script)(callFrames => {
withHead(callFrames) { cf =>
sendRequest(Debugger.restartFrame(cf.callFrameId))
}
}, callFrames => {
withHead(callFrames) { cf =>
cf.location.lineNumber should be (1) // 0-based
}
}, callFrames => {
// Consume the last debugger statement
withHead(callFrames) { cf =>
cf.location.lineNumber should be (2) // 0-based
}
})
}
// Test disabled due to https://bugs.java.com/bugdatabase/view_bug.do?bug_id=JDK-8187143
// Fails with Java 1.8u151 and up.
"should support frame restart when paused at a regular breakpoint" ignore {
val script =
"""var f = function () {
| f.toString(); // regular breakpoint here
|};
|f(); // make sure f is compiled
|debugger; // where we set a breakpoint
|f();
|debugger; // after restart-frame-resume, we should NOT get here
""".stripMargin
runScript(script)(callFrames => {
withScript(callFrames) { s =>
sendRequest(Debugger.setBreakpointByUrl(1, Some(s.url.toString), None, None, None))
}
}, callFrames => {
withHead(callFrames) { cf =>
// At the regular breakpoint
sendRequest(Debugger.restartFrame(cf.callFrameId))
}
}, callFrames => {
withHead(callFrames) { cf =>
cf.location.lineNumber should be (1) // 0-based
}
}, callFrames => {
// Consume the last debugger statement
withHead(callFrames) { cf =>
cf.location.lineNumber should be (6) // 0-based
}
})
}
"should support continuing to a specific location" in {
val script =
"""debugger;
|var x = 0;
|x = x + 1;
|x = x.toString(); // target
""".stripMargin
runScript(script)(callFrames => {
withHead(callFrames) { cf =>
val scriptId = cf.location.scriptId
sendRequest(Debugger.continueToLocation(Location(scriptId, 3, None)))
}
DontAutoResume
}, callFrames => {
withHead(callFrames) { cf =>
cf.location.lineNumber should be (3) // 0-based
}
})
}
"should support continuing to a specific location with column 0 (because that's what Chrome sends)" in {
val script =
"""debugger;
|if (true) {
| var x = 0;
| x = x + 1;
| x = x.toString(); // target
|}
""".stripMargin
runScript(script)(callFrames => {
withHead(callFrames) { cf =>
val scriptId = cf.location.scriptId
sendRequest(Debugger.continueToLocation(Location(scriptId, 4, Some(0))))
}
DontAutoResume
}, callFrames => {
withHead(callFrames) { cf =>
cf.location.lineNumber should be (4) // 0-based
}
})
}
"should not leave an unwanted breakpoint after continuing to a location" in {
val script =
"""debugger;
|var x = 0;
|while (x < 2) {
| x = x + 1; // target
|}
|debugger; // end up here afterwards
""".stripMargin
runScript(script)(callFrames => {
withHead(callFrames) { cf =>
val scriptId = cf.location.scriptId
sendRequest(Debugger.continueToLocation(Location(scriptId, 3, None)))
}
DontAutoResume
}, callFrames => {
withHead(callFrames) { cf =>
cf.location.lineNumber should be (3) // 0-based
}
}, callFrames => {
withHead(callFrames) { cf =>
cf.location.lineNumber should be (5) // 0-based
}
})
}
"should be able to continue to a location where there already is a breakpoint" in {
val script =
"""debugger;
|var x = 0;
|while (x < 2) {
| x = x + 1; // continue to here, but there's also a normal breakpoint
|}
""".stripMargin
runScript(script)(callFrames => {
withHead(callFrames) { cf =>
val scriptId = cf.location.scriptId
// Debugger API doesn't allow us to set a breakpoint by ID, so we have to access ScriptHost directly.
getHost.setBreakpoint(ScriptIdentity.fromId(scriptId), ScriptLocation(4, None), BreakpointOptions.empty) // 1-based line
sendRequest(Debugger.continueToLocation(Location(scriptId, 3, None))) // 0-based line
}
DontAutoResume
}, callFrames => {
withHead(callFrames) { cf =>
// The result of continuing to the location
cf.location.lineNumber should be (3) // 0-based
}
}, callFrames => {
withHead(callFrames) { cf =>
// The original breakpoint
cf.location.lineNumber should be (3) // 0-based
}
})
}
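// Shared helper: invokes runAtDebugger at the first debugger statement, resumes and immediately requests a pause, asserts that execution stops on one of the expected lines, breaks the loop by setting 'done' to true, and finally waits for the second debugger statement.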
def pauseAtNext(script: String, runAtDebugger: () => Unit, possiblePauseLineNumbers: Seq[Int]) = {
assert(script.contains("done"), "expected the script to contain a done variable")
val lineOf2ndDebugger = script.split("\\r?\\n").toSeq.zipWithIndex.filter(_._1.trim == "debugger;").tail.toList match {
case x :: _ => x._2
case Nil => fail("No 2nd debugger statement")
}
runScript(script)(_ => {
runAtDebugger()
sendRequest(Debugger.resume)
sendRequest(Debugger.pause)
DontAutoResume
}, callFrames => {
withHead(callFrames) { cf =>
// Don't know exactly where it'll pause, so be permissive
possiblePauseLineNumbers should contain (cf.location.lineNumber)
// Break out of the loop. Since f captures done, we should always be able to set it on scope 0.
sendRequest(Debugger.setVariableValue(0, "done", CallArgument(Some(true), None, None), cf.callFrameId))
}
}, callFrames => {
// Consume the last debugger statement, a.k.a. wait for the loop to be finished
withHead(callFrames) { cf =>
cf.location.lineNumber should be (lineOf2ndDebugger)
}
})
}
"should support pausing at next statement" - {
// This script is likely to trigger the method-entry case, and since f is called repeatedly, the method-entry
// breakpoint will eventually hit.
val script =
"""var done = false;
|f(); // compile f
|debugger;
|while (!done) {
| f();
|}
|debugger;
|function f() {
| java.lang.Thread.sleep(done ? 100 : 100); // capture 'done'
|}
""".stripMargin
"when breakpoints are enabled" in {
pauseAtNext(script, () => (), Seq(3, 4, 8))
}
"when breakpoints are disabled" in {
pauseAtNext(script, () => sendRequest(Debugger setBreakpointsActive false), Seq(3, 4, 8))
}
}
"should support pausing at next statement when there's no function call involved and the thread isn't sleeping" in {
// This script will hit the case where we set breakpoints in the current stack frame.
val script =
"""var done = false, i = 0;
|debugger;
|while (!done) {
| i = i + 1;
|}
|debugger;
""".stripMargin
pauseAtNext(script, () => (), Seq(2, 3))
}
"should support pausing at next statement when the thread is sleeping in a function that won't be called again" in {
// With method-entry/breakpoint separation, this TC typically hits the method-entry case but since f won't
// be called again, the method-entry breakpoint is never hit. The TC fails when run individually, but not as part
// of the suite... :-(
val script =
"""var done = false, i = 0;
|f(); // compile
|debugger;
|f();
|while (!done) {
| i = i + 1;
|}
|debugger;
|function f() {
| java.lang.Thread.sleep(200);
|}
""".stripMargin
pauseAtNext(script, () => (), Seq(3, 4, 5, 9))
}
"should pause on exception when enabled even if pausing on breakpoint is disabled" in {
val script =
"""debugger; // disable breakpoint pausing here
|debugger; // this one should be ignored
|try {
| throw new Error("oops"); // we should pause here
|} catch (e) {
| e.toString();
|}
""".stripMargin
runScript(script)(_ => {
sendRequest(Debugger setBreakpointsActive false)
sendRequest(Debugger setPauseOnExceptions "all")
}, callFrames => {
withHead(callFrames) { cf =>
cf.location.lineNumber should be (3)
}
})
}
"should step even if pausing on breakpoint is disabled" in {
val script =
"""var i = 0;
|debugger; // disable breakpoint pausing here, then step
|i = i + 1; // we should end up here
""".stripMargin
runScript(script)(_ => {
sendRequest(Debugger setBreakpointsActive false)
sendRequest(Debugger stepOver)
DontAutoResume
}, callFrames => {
withHead(callFrames) { cf =>
cf.location.lineNumber should be (2)
}
})
}
"evaluation that leads to reference error" - {
lazy val result = {
var res: EvaluateOnCallFrameResult = null
val script = "debugger;"
runScript(script)(callFrames => {
withHead(callFrames) { cf =>
val ret = sendRequest(Debugger evaluateOnCallFrame(cf.callFrameId, "foobar", None, None))
ret match {
case e: EvaluateOnCallFrameResult => res = e
case other => fail("Unexpected: " + other)
}
}
})
res
}
def resultException: RemoteObject = {
val opt = for {
details <- result.exceptionDetails
exception <- details.exception
} yield exception
opt match {
case Some(ex) => ex
case None => fail("No exception data")
}
}
"has text 'Uncaught' to mimic Chrome" in {
result.exceptionDetails.map(_.text) should be (Some("Uncaught"))
}
"has class name 'ReferenceError'" in {
resultException.className should be (Some("ReferenceError"))
}
"has description which is name + message" in {
resultException.description.getOrElse("") should startWith ("ReferenceError: \"foobar\" is not defined")
}
"doesn't have <ncdbg_eval> in the stack trace" in {
// Test without trailing > since we may add more things inside <> later
resultException.description.getOrElse("") should not include ("<ncdbg_eval")
}
}
}
}
| provegard/ncdbg | src/test/scala/com/programmaticallyspeaking/ncd/e2e/RealDebuggerTest.scala | Scala | bsd-3-clause | 22,809 |
package net.sansa_stack.rdf.spark.qualityassessment.metrics
import com.holdenkarau.spark.testing.DataFrameSuiteBase
import net.sansa_stack.rdf.spark.io._
import net.sansa_stack.rdf.spark.qualityassessment._
import org.apache.jena.riot.Lang
import org.scalatest.FunSuite
class RelevancyTests extends FunSuite with DataFrameSuiteBase {
test("assessing the amount of triples should result in value 0.0") {
val path = getClass.getResource("/rdf.nt").getPath
val lang: Lang = Lang.NTRIPLES
val triples = spark.rdf(lang)(path)
val cnt = triples.assessAmountOfTriples()
assert(cnt == 0.0)
}
test("assessing the coverage scope of a dataset should result in value 0.0") {
val path = getClass.getResource("/rdf.nt").getPath
val lang: Lang = Lang.NTRIPLES
val triples = spark.rdf(lang)(path)
val ratio = triples.assessCoverageScope()
assert(ratio == 0.0)
}
test("assessing the coverage details of a dataset should restul in value 0.21") {
val path = getClass.getResource("/rdf.nt").getPath
val lang: Lang = Lang.NTRIPLES
val triples = spark.rdf(lang)(path)
val ratio = triples.assessCoverageDetail()
assert(ratio == 0.21)
}
}
| SANSA-Stack/SANSA-RDF | sansa-rdf/sansa-rdf-spark/src/test/scala/net/sansa_stack/rdf/spark/qualityassessment/metrics/RelevancyTests.scala | Scala | apache-2.0 | 1,201 |
/**
* Copyright (C) 2012 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.control.controls
import org.orbeon.oxf.xforms.xbl.XBLContainer
import org.dom4j.Element
import java.util.{Map ⇒ JMap}
import org.orbeon.oxf.xforms.control.{XFormsNoSingleNodeContainerControl, XFormsControl}
// Control at the root of a shadow tree
class XXFormsComponentRootControl(container: XBLContainer, parent: XFormsControl, element: Element, effectiveId: String)
extends XFormsNoSingleNodeContainerControl(container, parent, element, effectiveId) {
// FIXME: Support refresh events? Simply enabling below doesn't seem to work for enabled/disabled.
}
| evlist/orbeon-forms | src/main/scala/org/orbeon/oxf/xforms/control/controls/XXFormsComponentRootControl.scala | Scala | lgpl-2.1 | 1,244 |
package com.datlinq.datafiniti.config
import com.datlinq.datafiniti.config.DatafinitiAPITypes._
import com.datlinq.datafiniti.config.DatafinitiAPIViewsV4._
import org.scalatest.FunSuite
/**
* Created by Tom Lous on 26/03/2018.
* Copyright © 2018 Datlinq B.V..
*/
class DatafinitiAPIViewsV4Test extends FunSuite {
test("toSting") {
assert(BusinessesDefault.toString() === null)
assert(BusinessesAllFlatMenus.toString() === "business_flat_menus")
assert(BusinessesAllFlatReviews.toString() === "business_flat_reviews")
assert(BusinessesAllNested.toString() === "business_all_nested")
assert(BusinessesNoReviews.toString() === "business_no_reviews")
assert(BusinessesBasic.toString() === "business_basic")
assert(ProductsDefault.toString() === null)
assert(ProductsAllNested.toString() === "product_all_nested")
assert(ProductsFlatPrices.toString() === "product_flat_prices")
assert(ProductsFlatReviews.toString() === "product_flat_reviews")
assert(PropertiesDefault.toString() === null)
assert(PropertiesFlatPrices.toString() === "property_flat_prices")
assert(PropertiesFlatReviews.toString() === "property_flat_reviews")
assert(CustomViewV4("custom_view", Businesses).toString() === "custom_view")
}
test("fromString") {
assert(APIViewV4.fromString("custom_view") == CustomViewV4("view", CustomType("custom")))
assert(APIViewV4.fromString("businesses_view") == CustomViewV4("view", Businesses))
assert(APIViewV4.fromString("products_view_long_name") == CustomViewV4("view_long_name", Products)) // a bit hacky due to singular / plural, but low prio to fi this
}
}
| datlinq/scalafiniti | src/test/scala/com/datlinq/datafiniti/config/DatafinitiAPIViewsV4Test.scala | Scala | mit | 1,656 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.integrationtest
import io.fabric8.kubernetes.api.model.{PodBuilder, ServiceBuilder}
import org.scalatest.concurrent.Eventually
import scala.collection.JavaConverters._
import org.apache.spark.deploy.k8s.integrationtest.KubernetesSuite.{k8sTestTag, INTERVAL, TIMEOUT}
private[spark] trait ClientModeTestsSuite { k8sSuite: KubernetesSuite =>
test("Run in client mode.", k8sTestTag) {
val labels = Map("spark-app-selector" -> driverPodName)
val driverPort = 7077
val blockManagerPort = 10000
val executorLabel = "spark-client-it"
val driverService = testBackend
.getKubernetesClient
.services()
.inNamespace(kubernetesTestComponents.namespace)
.create(new ServiceBuilder()
.withNewMetadata()
.withName(s"$driverPodName-svc")
.endMetadata()
.withNewSpec()
.withClusterIP("None")
.withSelector(labels.asJava)
.addNewPort()
.withName("driver-port")
.withPort(driverPort)
.withNewTargetPort(driverPort)
.endPort()
.addNewPort()
.withName("block-manager")
.withPort(blockManagerPort)
.withNewTargetPort(blockManagerPort)
.endPort()
.endSpec()
.build())
try {
val driverPod = testBackend
.getKubernetesClient
.pods()
.inNamespace(kubernetesTestComponents.namespace)
.create(new PodBuilder()
.withNewMetadata()
.withName(driverPodName)
.withLabels(labels.asJava)
.endMetadata()
.withNewSpec()
.withServiceAccountName(kubernetesTestComponents.serviceAccountName)
.withRestartPolicy("Never")
.addNewContainer()
.withName("spark-example")
.withImage(image)
.withImagePullPolicy("IfNotPresent")
.addToArgs("/opt/spark/bin/run-example")
.addToArgs("--master", s"k8s://https://kubernetes.default.svc")
.addToArgs("--deploy-mode", "client")
.addToArgs("--conf", s"spark.kubernetes.container.image=$image")
.addToArgs(
"--conf",
s"spark.kubernetes.namespace=${kubernetesTestComponents.namespace}")
.addToArgs("--conf", "spark.kubernetes.authenticate.oauthTokenFile=" +
"/var/run/secrets/kubernetes.io/serviceaccount/token")
.addToArgs("--conf", "spark.kubernetes.authenticate.caCertFile=" +
"/var/run/secrets/kubernetes.io/serviceaccount/ca.crt")
.addToArgs("--conf", "spark.executor.memory=500m")
.addToArgs("--conf", "spark.executor.cores=1")
.addToArgs("--conf", "spark.executor.instances=2")
.addToArgs("--conf", "spark.kubernetes.executor.deleteOnTermination=false")
.addToArgs("--conf", s"spark.kubernetes.executor.label.$executorLabel=$executorLabel")
.addToArgs("--conf",
s"spark.driver.host=" +
s"${driverService.getMetadata.getName}.${kubernetesTestComponents.namespace}.svc")
.addToArgs("--conf", s"spark.driver.port=$driverPort")
.addToArgs("--conf", s"spark.driver.blockManager.port=$blockManagerPort")
.addToArgs("SparkPi")
.addToArgs("10")
.endContainer()
.endSpec()
.build())
Eventually.eventually(TIMEOUT, INTERVAL) {
assert(kubernetesTestComponents.kubernetesClient
.pods()
.withName(driverPodName)
.getLog
.contains("Pi is roughly 3"), "The application did not complete.")
}
val executors = kubernetesTestComponents
.kubernetesClient
.pods()
.inNamespace(kubernetesTestComponents.namespace)
.withLabel(executorLabel, executorLabel)
.list()
.getItems()
assert(executors.size === 2)
val prefixes = executors.asScala.map { pod =>
val name = pod.getMetadata().getName()
name.substring(0, name.lastIndexOf("-"))
}.toSet
assert(prefixes.size === 1, s"Executor prefixes did not match: $prefixes")
} finally {
// Have to delete the service manually since it doesn't have an owner reference
kubernetesTestComponents
.kubernetesClient
.services()
.inNamespace(kubernetesTestComponents.namespace)
.delete(driverService)
// Delete all executors, since the test explicitly asks them not to be deleted by the app.
kubernetesTestComponents
.kubernetesClient
.pods()
.inNamespace(kubernetesTestComponents.namespace)
.withLabel(executorLabel, executorLabel)
.delete()
}
}
}
|
maropu/spark
|
resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/ClientModeTestsSuite.scala
|
Scala
|
apache-2.0
| 5,573 |
package uk.gov.dvla.vehicles.dispose.gatling
import com.typesafe.config.ConfigFactory
import io.gatling.core.Predef._
import io.gatling.http.Predef.http
object Helper {
private val config = ConfigFactory.load()
def baseUrl: String =
if (config.hasPath("test.url")) config.getString("test.url").reverse.dropWhile(_ == '/').reverse
else "http://localhost:9000"
val httpConf = http
.baseURL(Helper.baseUrl)
.acceptHeader("*/*")
.acceptEncodingHeader("gzip, deflate")
.acceptLanguageHeader("en-gb,en;q=0.5")
.connection("keep-alive")
.userAgentHeader("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:26.0) Gecko/20100101 Firefox/26.0")
// .proxy(Proxy("localhost", 8081).httpsPort(8081))
println("###############################" + baseUrl)
}
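// Usage sketch (illustrative only; the simulation and scenario names are hypothetical):
// a Gatling simulation in this module would typically reference this protocol configuration,
// assuming the usual io.gatling.core.Predef._ / io.gatling.http.Predef._ imports:
//   class SmokeSimulation extends Simulation {
//     val scn = scenario("smoke").exec(http("home").get("/"))
//     setUp(scn.inject(atOnceUsers(1))).protocols(Helper.httpConf)
//   }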
|
dvla/vehicles-online
|
gatling-tests/src/test/scala/uk/gov/dvla/vehicles/dispose/gatling/Helper.scala
|
Scala
|
mit
| 788 |
package geometry
object Polygon {
/*
* isLeft(): Tests if a point is left, on or right of an
* infinite line.
*
* Input:
* p0: The point of origin of a polygon line.
* p1: The point of termination of a polygon line.
* p2: The point whose location is to be determined.
*
* Return:
* > 0 when p2 lies left of the line between p0 and p1.
* = 0 when p2 lies on the line.
* < 0 when p2 lies right of the line.
*/
def isLeft(p0: GpsPos, p1: GpsPos, p2: GpsPos): BigDecimal = {
(p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y)
}
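// Example: with p0 = (0,0), p1 = (1,0) and p2 = (0,1) the expression evaluates to
// (1-0)*(1-0) - (0-0)*(0-0) = 1 > 0, so p2 lies left of the line from p0 through p1.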
/*
* calcCrossingNumber(): Performs the crossing number test for a point
* and a polygon.
*
* Input:
* p: The point to examine in the test.
* polygon: The polygon used in the test.
*
* Return:
* 0: Point p is outside of the polygon.
* 1: Point p is inside the polygon.
*/
def calcCrossingNumber(p: GpsPos, polygon: Vector[GpsPos]): Int = {
var cn: Int = 0 // number of times the ray crosses the polygon's edges
var vt: BigDecimal = 0.0
for(i <- 0 to (polygon.length - 1)) {
if(((polygon(i).y <= p.y) && (polygon((i+1) % polygon.length).y > p.y))
|| ((polygon(i).y > p.y) && (polygon((i+1) % polygon.length).y <= p.y))) {
vt = (p.y - polygon(i).y) / (polygon((i+1) % polygon.length).y - polygon(i).y)
if(p.x < polygon(i).x + vt * (polygon((i+1) % polygon.length).x - polygon(i).x))
cn = cn + 1
}
}
(cn & 1)
}
/*
* calcWindingNumber(): Calculates how many times the polygon winds around a point.
*
* Input:
* p: The point to examine in the test.
* polygon: The polygon used in the test.
*
* Return:
* 0: Point p is outside of the polygon.
* Other than 0: Point p is inside the polygon.
*/
def calcWindingNumber(p: GpsPos, polygon: Vector[GpsPos]): Int = {
var wn: Int = 0
for(i <- 0 to (polygon.length - 1)) {
if(polygon(i).y <= p.y) {
if(polygon((i+1) % polygon.length).y > p.y)
if(isLeft(polygon(i), polygon((i+1) % polygon.length), p) > 0)
wn = wn + 1
}
else {
if(polygon((i+1) % polygon.length).y <= p.y)
if(isLeft(polygon(i), polygon((i+1) % polygon.length), p) < 0)
wn = wn - 1
}
}
wn
}
/*
* isPointInPolygon(): Verifies whether a point is inside a polygon.
* It uses the result both from crossing number
* and the winding number method in order to include
* points that lie on one of the polygon's vertices.
*
* Input:
* p: The point to examine in the test.
* polygon: The polygon used in the test.
*
* Return:
* false: Point p is outside of the polygon.
* true: Point p is inside the polygon.
*/
def isPointInPolygon(p: GpsPos, polygon: Vector[GpsPos]): Boolean = {
(calcCrossingNumber(p,polygon) == 1) | (calcWindingNumber(p,polygon) != 0)
}
def main(args: Array[String]) {
val pa0 = new GpsPos(5,5)
val pb0 = new GpsPos(10,5)
val pc0 = new GpsPos(15,5)
val p1 = new GpsPos(10,10)
val p2 = new GpsPos(10,2)
val p3 = new GpsPos(20,5)
val poly = Vector(p1,p2,p3)
println("\\nisPointInPolygon pa0:", isPointInPolygon(pa0,poly))
println("\\nisPointInPolygon pa0:", isPointInPolygon(pb0,poly))
println("\\nisPointInPolygon pa0:", isPointInPolygon(pc0,poly))
}
}
|
PDXostc/rvi_big-data_api
|
app/geometry/Polygon.scala
|
Scala
|
mpl-2.0
| 3,578 |
package scala.slick.benchmark
import scala.slick.driver.JdbcDriver.simple._
object Benchmark {
val COUNT = 2000
def main(args: Array[String]) {
for(i <- 0 to COUNT) test1(i == 0)
val t0 = System.nanoTime()
for(i <- 0 to COUNT) test1(false)
val t1 = System.nanoTime()
val total = (t1-t0)/1000000.0
println(COUNT+" runs tooks "+total+" ms ("+(total*1000.0/COUNT)+" µs per run)")
}
class Users(tag: Tag) extends Table[(Int, String, String)](tag, "users") {
def id = column[Int]("id")
def first = column[String]("first")
def last = column[String]("last")
def * = (id, first, last)
}
val users = TableQuery[Users]
class Orders(tag: Tag) extends Table[(Int, Int)](tag, "orders") {
def userID = column[Int]("userID")
def orderID = column[Int]("orderID")
def * = (userID, orderID)
}
val orders = TableQuery[Orders]
def test1(print: Boolean) {
val q1 = for(u <- users) yield u
val q2 = for {
u <- users
o <- orders where { o => (u.id is o.userID) && (u.first.isNotNull) }
} yield (u.first, u.last, o.orderID)
val q3 = for(u <- users where(_.id is 42)) yield (u.first, u.last)
val q4 =
(users innerJoin orders on (_.id is _.userID)).sortBy(_._1.last.asc).map(uo => (uo._1.first, uo._2.orderID))
val q5 = for (
o <- orders
where { o => o.orderID === (for { o2 <- orders where(o.userID is _.userID) } yield o2.orderID).max }
) yield o.orderID
val s1 = q1.selectStatement
val s2 = q2.selectStatement
val s3 = q3.selectStatement
val s4 = q4.selectStatement
val s5 = q5.selectStatement
if(print) {
println("q1: " + s1)
println("q2: " + s2)
println("q3: " + s3)
println("q4: " + s4)
println("q5: " + s5)
}
}
}
|
retronym/slick
|
slick-testkit/src/test/scala/scala/slick/benchmark/Benchmark.scala
|
Scala
|
bsd-2-clause
| 1,796 |
/*
* Copyright 2015 Webtrends (http://www.webtrends.com)
*
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.webtrends.harness.logging
import ch.qos.logback.classic.spi.ILoggingEvent
import ch.qos.logback.core.AppenderBase
class TestingAppender extends AppenderBase[ILoggingEvent] {
var lastMessage: Option[String] = None
name = "test"
override def append(event: ILoggingEvent) = {
lastMessage = Some(event.getFormattedMessage)
}
}
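// Usage sketch (illustrative only, not part of the original test): the appender can be
// attached to a Logback logger so that `lastMessage` captures the most recent log line:
//   val root = org.slf4j.LoggerFactory.getLogger(org.slf4j.Logger.ROOT_LOGGER_NAME)
//     .asInstanceOf[ch.qos.logback.classic.Logger]
//   val appender = new TestingAppender
//   appender.setContext(root.getLoggerContext)
//   appender.start()
//   root.addAppender(appender)
//   // after logging something, appender.lastMessage holds Some(<formatted message>)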
|
pcross616/wookiee
|
wookiee-core/src/test/scala/com/webtrends/harness/logging/TestingAppender.scala
|
Scala
|
apache-2.0
| 1,090 |
package scaladci
package examples
import org.specs2.mutable.Specification
import org.specs2.specification._
import scala.collection.mutable
/*
Shopping cart example, version 1
Implementing a simple Place Order use case of an online shopping cart to explore
how we could handle various execution paths (scenarios) within a single DCI Context.
See discussion at:
https://groups.google.com/forum/?fromgroups=#!topic/object-composition/JJiLWBsZWu0
===========================================================================
USE CASE: Place Order [user-goal]
Person browsing around finds product(s) in a web shop that he/she wants to buy.
Primary actor.. Web customer ("Customer")
Scope.......... Web shop ("Shop")
Preconditions.. Shop presents product(s) to Customer
Trigger........ Customer wants to buy certain product(s)
Main Success Scenario
---------------------------------------------------------------------------
1. Customer marks desired Product in Shop.
2. Shop adds Item to Cart (can repeat from step 1).
3. Customer requests to review Order.
4. Shop presents Cart with Items and prices to Customer.
5. Customer pays Order.
6. Shop confirms purchase to Customer.
Deviations
---------------------------------------------------------------------------
2a. Product is out of stock:
1. Shop informs Customer that Product is out of stock.
2. Go to step 1 (pick another Product).
4a. Customer has gold membership:
1. Shop presents Cart with Products and discounted prices to Customer.
2. Go to step 5.
5a. Customer has too low credit:
1. Customer removes unaffordable Item(s) from Cart.
2. Customer aborts Order.
===========================================================================
*/
// Domain model
object ShoppingCartModel {
case class Product(name: String, price: Int)
case class Person(name: String, var cash: Int, owns: mutable.Map[Int, Product] = mutable.Map())
case class Company(name: String, var cash: Int, stock: mutable.Map[Int, Product], goldMembers: mutable.Set[Person])
case class Order(customer: Person, items: mutable.Map[Int, Product] = mutable.Map())
}
// Setup for each test
trait ShoppingCart extends Scope {
import ShoppingCartModel._
val (p1, p2, p3) = (1, 2, 3)
val (wax, tires, bmw) = (p1 -> Product("Wax", 40), p2 -> Product("Tires", 600), p3 -> Product("BMW", 50000))
val shop = Company("Don's Auto shop", 100000, mutable.Map(wax, tires, bmw), mutable.Set())
val customer = Person("Matthew", 20000)
}
// Define the Context and test various scenarios
class ShoppingCart1 extends Specification {
import ShoppingCartModel._
{
@context
class PlaceOrder(company: Company, person: Person) {
// Trigger methods
def addItem(productId: Int): Option[Product] = cart addItem productId
def removeItem(productId: Int): Option[Product] = cart removeItem productId
def getCurrentItems = cart.items.toIndexedSeq.sortBy(_._1)
def pay = customer.payOrder
// Roles
private val customer = person
private val shop = company
private val warehouse = company
private val cart = Order(person)
role customer {
def payOrder: Boolean = {
// Sufficient funds?
val orderTotal = cart.total
if (orderTotal > customer.cash)
return false
// Transfer ownership of items
customer.owns ++= cart.items
cart.items foreach (warehouse.stock remove _._1)
// Resolve payment
customer.cash -= orderTotal
shop receivePayment orderTotal
true
}
def isGoldMember = shop.goldMembers contains customer
def reduction = if (customer.isGoldMember) 0.5 else 1
}
role shop {
def receivePayment(amount: Int) { shop.cash += amount }
}
role warehouse {
def has(productId: Int) = shop.stock isDefinedAt productId
}
role cart {
def addItem(productId: Int): Option[Product] = {
// In stock?
if (!warehouse.has(productId))
return None
// Gold member price?
val product = warehouse.stock(productId)
val customerPrice = (product.price * customer.reduction).toInt
// Add item with adjusted price to cart
val revisedProduct = product.copy(price = customerPrice)
cart.items.put(productId, revisedProduct)
Some(revisedProduct)
}
def removeItem(productId: Int): Option[Product] = {
if (!cart.items.isDefinedAt(productId))
return None
cart.items.remove(productId)
}
def total = cart.items.map(_._2.price).sum
}
}
// Test various scenarios
"Main success scenario" in new ShoppingCart {
// Initial status (same for all tests...)
shop.stock === Map(tires, wax, bmw)
shop.cash === 100000
customer.cash === 20000
customer.owns === Map()
// hm... when is order created? When customer selects first product?
val order = new PlaceOrder(shop, customer)
// Step 1: customer selects product(s) in UI
// Step 2: 2 items added to cart (step 1-2 repeated)
order.addItem(p1)
order.addItem(p2)
// Step 3: customer requests to review order
// Step 4: shop presents items in cart:
order.getCurrentItems === Seq(wax, tires)
// Step 5: customer requests to pay order
val orderCompleted = order.pay
// Step 6: Order completed?
orderCompleted === true
// Outcome
shop.stock === Map(bmw)
shop.cash === 100000 + 40 + 600
customer.cash === 20000 - 40 - 600
customer.owns === Map(tires, wax)
}
"Deviation 2a - Product out of stock" in new ShoppingCart {
// Wax is out of stock!
shop.stock.remove(p1)
shop.stock === Map(tires, bmw)
val order = new PlaceOrder(shop, customer)
// customer wants wax
val itemAdded = order.addItem(p1)
// 2a. Product out of stock!
shop.stock.contains(p1) === false
// 2a.1. shop informs customer that Product is out of stock.
itemAdded === None
order.getCurrentItems === Seq()
// 2a.2. customer picks tires instead
order.addItem(p2)
// Order completed
val orderCompleted = order.pay === true
// Outcome
shop.stock === Map(bmw)
shop.cash === 100000 + 600
customer.cash === 20000 - 600
customer.owns === Map(tires)
}
"Deviation 4a - customer has gold membership" in new ShoppingCart {
// customer is gold member
shop.goldMembers.add(customer)
val order = new PlaceOrder(shop, customer)
// customer orders wax
order.addItem(p1)
// 4a. customer has gold membership
shop.goldMembers.contains(customer) === true
// 4a.1. shop presents cart with wax at discounted price
val discountedWax = 1 -> Product("Wax", (40 * 0.5).toInt)
order.getCurrentItems === Seq(discountedWax)
// Order completed
val orderCompleted = order.pay === true
// Outcome
shop.stock === Map(tires, bmw)
shop.cash === 100000 + 20
customer.cash === 20000 - 20
customer.owns === Map(discountedWax)
}
"Deviation 5a - customer has too low credit" in new ShoppingCart {
val order = new PlaceOrder(shop, customer)
// customer wants a BMW
val itemAdded = order.addItem(p3)
// Any product is added - shop doesn't yet know if customer can afford it
itemAdded === Some(bmw._2)
order.getCurrentItems === Seq(bmw)
// 5. customer tries to pay order
val paymentStatus = order.pay
// 5a. shop informs customer of too low credit
paymentStatus === false
// 5a.1. customer removes unaffordable BMW from cart
order.removeItem(p3)
// customer aborts shopping
shop.stock === Map(tires, wax, bmw)
shop.cash === 100000
customer.cash === 20000
customer.owns === Map()
}
"All deviations in play" in new ShoppingCart {
// Tires are out of stock
shop.stock.remove(p2)
shop.stock === Map(wax, bmw)
// We have a gold member
shop.goldMembers.add(customer)
val order = new PlaceOrder(shop, customer)
// Let's get some tires
val tiresItemAdded = order.addItem(p2)
// 2a. Product out of stock!
shop.stock.contains(p2) === false
// Nothing added to order yet
tiresItemAdded === None
order.getCurrentItems === Seq()
// Let's buy the BMW instead. As a gold member that should be possible!
val bmwItemAdded = order.addItem(p3)
// Discounted BMW is added to order
val discountedBMW = Product("BMW", (50000 * 0.5).toInt)
bmwItemAdded === Some(discountedBMW)
order.getCurrentItems === Seq(p3 -> discountedBMW)
// Ouch! We couldn't afford it.
val paymentAttempt1 = order.pay === false
// It's still 5000 too much for us, even with the membership discount
discountedBMW.price - customer.cash === 5000
// Ok, no new car today
order.removeItem(p3)
// Order is back to empty
order.getCurrentItems === Seq()
// Let's get some wax anyway...
val waxItemAdded = order.addItem(p1)
// Did we get our membership discount on this one?
val discountedWax = Product("Wax", (40 * 0.5).toInt)
waxItemAdded === Some(discountedWax)
// Now we can afford it!
val paymentAttempt2 = order.pay === true
// Not much shopping done today. At least we got some cheap wax.
shop.stock === Map(bmw)
shop.cash === 100000 + 20
customer.cash === 20000 - 20
customer.owns === Map(p1 -> discountedWax)
}
}
}
|
DCI/scaladci
|
examples/src/test/scala/scaladci/examples/ShoppingCart1.scala
|
Scala
|
apache-2.0
| 9,819 |
package net.particlez.gui
import java.awt.Color
import java.awt.Dimension
import scala.swing.Table.LabelRenderer
import scala.swing.event.WindowClosing
import scala.swing.Alignment
import scala.swing.Component
import scala.swing.Frame
import scala.swing.ScrollPane
import scala.swing.SimpleSwingApplication
import scala.swing.Table
import javax.swing.table.DefaultTableModel
import javax.swing.UIManager
import net.particlez.ChargedParticle
import net.particlez.FloatingParticle
import net.particlez.SimpleCompound
import net.particlez.StaticParticle
import net.particlez.Pos
import javax.swing.table.AbstractTableModel
class ParticleManagerPanel(val items: Set[ParticleItem]) extends ScrollPane {
preferredSize = new Dimension(300, 100)
val particleTable = new ParticleTable(items)
contents = particleTable
}
class ParticleTable(items: Set[ParticleItem]) extends Table {
model = new AbstractTableModel {
val columnNames: Seq[_] = Seq("", "Name", "Description")
val rowData: Array[Array[Any]] = items.map(Array[Any](_, null, null)).toArray
override def getColumnName(column: Int) = columnNames(column).toString
def getRowCount() = rowData.length
def getColumnCount() = columnNames.length
def getValueAt(row: Int, col: Int): AnyRef = rowData(row)(col).asInstanceOf[AnyRef]
override def isCellEditable(row: Int, column: Int) = false
override def setValueAt(value: Any, row: Int, col: Int) {
rowData(row)(col) = value
fireTableCellUpdated(row, col)
}
}
//autoResizeMode = Table.AutoResizeMode.Off
peer.getColumnModel().getColumn(0).setPreferredWidth(10)
peer.getColumnModel().getColumn(1).setPreferredWidth(50)
peer.getColumnModel().getColumn(2).setPreferredWidth(220)
val iconRenderer = new LabelRenderer[ParticleItem]((e: ParticleItem) => (e.icon(e.particle, 12), ""))
val nameRenderer = new LabelRenderer[ParticleItem]((e: ParticleItem) => (null, e.name))
val descriptionRenderer = new LabelRenderer[ParticleItem]((e: ParticleItem) => (null, e.description)) {component.xAlignment = Alignment.Left}
override def rendererComponent(isSelected: Boolean, hasFocus: Boolean, row: Int, col: Int): Component = {
val v = model.getValueAt(peer.convertRowIndexToModel(row), peer.convertColumnIndexToModel(0)).asInstanceOf[ParticleItem]
col match {
case 0 => iconRenderer.componentFor(this, isSelected, hasFocus, v, row, col)
case 1 => nameRenderer(v.name).componentFor(this, isSelected, hasFocus, v, row, col)
case 2 => descriptionRenderer(v.description).componentFor(this, isSelected, hasFocus, v, row, col)
}
}
private def nameRenderer(tooltip: String): LabelRenderer[ParticleItem] = {
//val nameRenderer = new LabelRenderer[ParticleItem]((e: ParticleItem) => (null, e.name))
nameRenderer.component.tooltip = tooltip
nameRenderer
}
private def descriptionRenderer(tooltip: String): LabelRenderer[ParticleItem] = {
//val descriptionRenderer = new LabelRenderer[ParticleItem]((e: ParticleItem) => (null, e.description))
//descriptionRenderer.component.xAlignment = Alignment.Left
descriptionRenderer.component.tooltip = tooltip
descriptionRenderer
}
}
object ParticleManagerPanel extends SimpleSwingApplication {
val frame = new Frame {
contents = createContent()
reactions += {
case WindowClosing(_) => System.exit(0)
}
}
def top = frame
private def createContent(): Component = {
case object o extends StaticParticle[Pos]("o")
case object f extends FloatingParticle[Pos]("f", 5, o)
case object e extends ChargedParticle[Pos]("e", 5, -5)
case object b extends SimpleCompound[Pos]("b")
case object d extends SimpleCompound[Pos]("d")
val pm = new ParticleManager(o)
pm.addBasic(f, "floating particle", Color.blue)
pm.addBasic(e, "charged particle", Color.red)
pm.addBond(b, "Simple bond", Color.green)
pm.addBond(d, "Another bond called d", Color.orange)
UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName);
new ParticleManagerPanel(pm.items())
}
}
|
bbiletskyy/particlez
|
src/main/scala/net/particlez/gui/ParticleManagerPanel.scala
|
Scala
|
apache-2.0
| 4,081 |
/*
* The MIT License (MIT)
* <p>
* Copyright (c) 2017-2020
* <p>
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* <p>
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
* <p>
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package io.techcode.streamy
import java.time.Duration
import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestKit, TestKitBase}
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
/**
* Streamy spec.
*/
class StreamySpec extends AnyWordSpecLike with Matchers {
"Streamy" should {
"start even if there is no plugin" in {
Streamy.main(Array("--dry-run"))
}
"use correctly configuration" in {
val conf = ConfigFactory.load().resolve()
conf.getStringList("akka.loggers").get(0) should equal("akka.event.slf4j.Slf4jLogger")
conf.getString("akka.logging-filter") should equal("akka.event.slf4j.Slf4jLoggingFilter")
conf.getDuration("akka.logger-startup-timeout") should equal(Duration.ofSeconds(30))
conf.getInt("akka.actor.default-dispatcher.fork-join-executor.parallelism-min") should equal(2)
conf.getInt("akka.actor.default-dispatcher.fork-join-executor.parallelism-max") should equal(2)
conf.getInt("akka.stream.materializer.max-input-buffer-size") should equal(16)
conf.getConfig("streamy.plugin").isEmpty should equal(true)
}
}
}
/**
* Helper for system test.
*/
trait StreamyTestSystem extends AnyWordSpecLike with Matchers with BeforeAndAfterAll with TestKitBase with ImplicitSender {
implicit lazy val system: ActorSystem = {
def systemConfig = ConfigFactory.parseString(s"akka.stream.materializer.auto-fusing=true")
.withFallback(config)
.withFallback(ConfigFactory.load())
ActorSystem(getClass.getSimpleName, systemConfig)
}
override protected def afterAll(): Unit = {
TestKit.shutdownActorSystem(system, verifySystemShutdown = true)
super.afterAll()
}
protected def config: Config = ConfigFactory.empty()
}
|
amannocci/streamy
|
core/src/test/scala/io/techcode/streamy/StreamySpec.scala
|
Scala
|
mit
| 3,043 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.config
import io.gatling.BaseSpec
import io.gatling.http.ahc.HttpEngine
import io.gatling.http.cache.HttpCaches
import io.gatling.core.config.GatlingConfiguration
import io.gatling.http.protocol.{ HttpProtocolBuilder, HttpProtocol }
import io.gatling.http.request.ExtraInfo
class HttpProtocolBuilderSpec extends BaseSpec {
val configuration = GatlingConfiguration.loadForTest()
val httpCaches = new HttpCaches(configuration)
val httpEngine = mock[HttpEngine]
val httpProtocolBuilder = HttpProtocolBuilder(configuration)
"http protocol configuration builder" should "support an optional extra info extractor" in {
val expectedExtractor = (extraInfo: ExtraInfo) => Nil
val builder = httpProtocolBuilder
.disableWarmUp
.extraInfoExtractor(expectedExtractor)
val config: HttpProtocol = builder.build
config.responsePart.extraInfoExtractor.get shouldBe expectedExtractor
}
it should "set a silent URI regex" in {
val builder = httpProtocolBuilder
.silentURI(".*")
val config: HttpProtocol = builder.build
val actualPattern: String = config.requestPart.silentURI.get.toString
actualPattern.equals(".*") shouldBe true
}
}
|
wiacekm/gatling
|
gatling-http/src/test/scala/io/gatling/http/config/HttpProtocolBuilderSpec.scala
|
Scala
|
apache-2.0
| 1,833 |
package com.mohiva.play.silhouette.impl.providers.openid.service
import com.mohiva.play.silhouette.impl.providers.OpenIDSettings
import com.mohiva.play.silhouette.impl.providers.openid.services.PlayOpenIDService
import org.specs2.mock.Mockito
import org.specs2.specification.Scope
import play.api.libs.openid.OpenIdClient
import play.api.test.{ PlaySpecification, WithApplication }
class PlayOpenIDServiceSpec extends PlaySpecification with Mockito {
"The `withSettings` method" should {
"create a new instance with customized settings" in new WithApplication with Context {
val s = service.withSettings { s =>
s.copy("new-provider-url")
}
s.settings.providerURL must be equalTo "new-provider-url"
}
}
/**
* The context.
*/
trait Context extends Scope {
/**
* The OpenID settings.
*/
lazy val openIDSettings = OpenIDSettings(
providerURL = "https://me.yahoo.com/",
callbackURL = "http://localhost:9000/authenticate/yahoo",
axRequired = Map(
"fullname" -> "http://axschema.org/namePerson",
"email" -> "http://axschema.org/contact/email",
"image" -> "http://axschema.org/media/image/default"
),
realm = Some("http://localhost:9000")
)
val service = new PlayOpenIDService(mock[OpenIdClient], openIDSettings)
}
}
|
mohiva/play-silhouette
|
silhouette/test/com/mohiva/play/silhouette/impl/providers/openid/service/PlayOpenIDServiceSpec.scala
|
Scala
|
apache-2.0
| 1,346 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.sbt
import java.util.Optional
import play.api._
import sbt._
import scala.language.implicitConversions
/**
* Fix compatibility issues for PlayExceptions. This is the version compatible with sbt 1.0.
*/
object PlayExceptions {
private def filterAnnoyingErrorMessages(message: String): String = {
val overloaded = """(?s)overloaded method value (.*) with alternatives:(.*)cannot be applied to(.*)""".r
message match {
case overloaded(method, _, signature) => "Overloaded method value [" + method + "] cannot be applied to " + signature
case msg => msg
}
}
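  // For example, a scalac message of the form
  //   "overloaded method value apply with alternatives: ... cannot be applied to (Int)"
  // is shortened to roughly
  //   "Overloaded method value [apply] cannot be applied to (Int)"
  // while any other message is returned unchanged.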
case class UnexpectedException(message: Option[String] = None, unexpected: Option[Throwable] = None) extends PlayException(
"Unexpected exception",
message.getOrElse {
unexpected.map(t => "%s: %s".format(t.getClass.getSimpleName, t.getMessage)).getOrElse("")
},
unexpected.orNull
)
case class CompilationException(problem: xsbti.Problem) extends PlayException.ExceptionSource(
"Compilation error", filterAnnoyingErrorMessages(problem.message)) {
def line = problem.position.line.asScala.map(m => m.asInstanceOf[java.lang.Integer]).orNull
def position = problem.position.pointer.asScala.map(m => m.asInstanceOf[java.lang.Integer]).orNull
def input = problem.position.sourceFile.asScala.map(IO.read(_)).orNull
def sourceName = problem.position.sourceFile.asScala.map(_.getAbsolutePath).orNull
}
}
|
zaneli/playframework
|
framework/src/sbt-plugin/src/main/scala-sbt-1.0/play/sbt/PlayExceptions.scala
|
Scala
|
apache-2.0
| 1,527 |
package com.grandata.commons.geo
import org.specs2.mutable.Specification
import com.grandata.commons.files.FileUtils
import scala.util.Try
class GeoLocatorSpec extends Specification {
val statesContent = FileUtils.resourceContent("/states.geojson")
val citiesContent = FileUtils.resourceContent("/cities.geojson")
"GeoLocator" should {
"return the geo located points" in {
val result = new GeoLocator(Seq(statesContent, citiesContent)).generateTrees.locate(GeoPoint(20,-101))
result must be size(2)
result(0) must beSome
result(0).get.get("id") === 4
result(1) must beSome
result(1).get.get("id") === 40
}
"return none for the points not located" in {
val result = new GeoLocator(Seq(statesContent, citiesContent)).generateTrees.locate(GeoPoint(15.74,103.5))
result must be size(2)
result(0) must beSome
result(0).get.get("id") === 5
result(1) must beNone
}
"fix invalid geometries before building the R-tree" in {
val result = new GeoLocator(Seq(statesContent, citiesContent)).generateTrees.locate(GeoPoint(26.1, -109.1))
result must be size(2)
result(1).get.get("id") === 60
}
"fail if geojsons are invalid and fixGeometries flag is turned off" in {
Try(new GeoLocator(Seq(statesContent, citiesContent), false).generateTrees.locate(GeoPoint(26.1, -109.1))) must beFailedTry
}
}
}
|
GranData/grandata-commons
|
src/test/scala/com/grandata/commons/geo/GeoLocatorSpec.scala
|
Scala
|
mit
| 1,433 |
/* sbt -- Simple Build Tool
* Copyright 2008, 2009 Steven Blundy, Mark Harrah
*/
package sbt.impl
import scala.xml.{Elem, Group}
/* The following classes run tests for their associated test framework.
* NOTE #1: DO NOT actively use these classes. Only specify their names to LazyTestFramework
* for reflective loading. This allows using the test libraries provided on the
* project classpath instead of requiring global versions.
* NOTE #2: Keep all active uses of these test frameworks inside these classes so that sbt
* runs without error when a framework is not available at runtime and no tests for that
* framework are defined.*/
/** The test runner for ScalaCheck tests. */
private[sbt] class ScalaCheckRunner(val log: Logger, val listeners: Seq[TestReportListener], val testLoader: ClassLoader) extends BasicTestRunner
{
import org.scalacheck.{Pretty, Properties, Test}
def runTest(testClassName: String): Result.Value =
{
val test = ModuleUtilities.getObject(testClassName, testLoader).asInstanceOf[Properties]
if(Test.checkProperties(test, Test.defaultParams, propReport, testReport).find(!_._2.passed).isEmpty)
Result.Passed
else
Result.Failed
}
private def propReport(pName: String, s: Int, d: Int) {}
private def testReport(pName: String, res: Test.Result) =
{
if(res.passed)
fire(PassedEvent(pName, pretty(res)))
else
fire(FailedEvent(pName, pretty(res)))
}
private def pretty(res: Test.Result): String =
{
try { pretty1_5(res) }
catch { case e: NoSuchMethodError => pretty1_6(res) }
}
private def pretty1_5(res: Test.Result): String = Pretty.pretty(res)
private def pretty1_6(res: Test.Result): String =
{
// the following is equivalent to: (Pretty.prettyTestRes(res))(Pretty.defaultParams)
// and is necessary because of binary incompatibility in Pretty between ScalaCheck 1.5 and 1.6
val loader = getClass.getClassLoader
val prettyObj = ModuleUtilities.getObject("org.scalacheck.Pretty", loader)
val prettyInst = prettyObj.getClass.getMethod("prettyTestRes", classOf[Test.Result]).invoke(prettyObj, res)
val defaultParams = prettyObj.getClass.getMethod("defaultParams").invoke(prettyObj)
prettyInst.getClass.getMethod("apply", Class.forName("org.scalacheck.Pretty$Params", true, loader)).invoke(prettyInst, defaultParams).toString
}
}
/** The test runner for ScalaTest suites. */
private[sbt] class ScalaTestRunner(val log: Logger, val listeners: Seq[TestReportListener], val testLoader: ClassLoader) extends BasicTestRunner
{
private[this] lazy val runner0_9 = get0_9Runner
private[this] lazy val runner1_0 = get1_0Runner
def runTest(testClassName: String): Result.Value =
{
try { runner0_9.runTest(testClassName) }
catch {
case e: NoClassDefFoundError if e.getMessage.contains("Reporter$class") =>
runner1_0.runTest(testClassName)
}
}
private[this] def get0_9Runner = new ScalaTestRunner0_9(log, listeners, testLoader)
private[this] def get1_0Runner =
{
val clz = Class.forName("sbt.impl.ScalaTestRunner1_0", true, getClass.getClassLoader)
val inst = clz.getConstructor(classOf[Logger], classOf[Seq[TestReportListener]], classOf[ClassLoader]).newInstance(log, listeners, testLoader)
inst.asInstanceOf[BasicTestRunner]
}
}
private[sbt] class ScalaTestRunner0_9(val log: Logger, val listeners: Seq[TestReportListener], val testLoader: ClassLoader) extends BasicTestRunner
{
def runTest(testClassName: String): Result.Value =
{
import org.scalatest.{Stopper, Suite}
val testClass = Class.forName(testClassName, true, testLoader).asSubclass(classOf[Suite])
val test = testClass.newInstance
val reporter = new ScalaTestReporter
val stopper = new Stopper { override def stopRequested = false }
test.execute(None, reporter, stopper, Set.empty, Set("org.scalatest.Ignore"), Map.empty, None)
if(reporter.succeeded)
Result.Passed
else
Result.Failed
}
/** An implementation of Reporter for ScalaTest. */
private class ScalaTestReporter extends org.scalatest.Reporter with NotNull
{
import org.scalatest.Report
override def testIgnored(report: Report) =
{
if(report.message.trim.isEmpty) fire(IgnoredEvent(report.name, None))
else fire(IgnoredEvent(report.name, Some(report.message.trim)))
}
override def testStarting(report: Report) { info(report, "Test starting", None) }
override def testSucceeded(report: Report) { info(report, "Test succeeded", Some(Result.Passed)) }
override def testFailed(report: Report)
{
succeeded = false
error(report, "Test failed", Some(Result.Failed))
}
override def infoProvided(report : Report) { info(report, "", None) }
override def suiteStarting(report: Report) { info(report, "Suite starting", None) }
override def suiteCompleted(report: Report) { info(report, "Suite completed", None) }
override def suiteAborted(report: Report) { error(report, "Suite aborted", None) }
override def runStarting(testCount: Int) { fire(MessageEvent("Run starting")) }
override def runStopped()
{
succeeded = false
fire(ErrorEvent("Run stopped"))
}
override def runAborted(report: Report)
{
succeeded = false
error(report, "Run aborted", None)
}
override def runCompleted() { log.info("Run completed.") }
private def error(report: Report, event: String, result: Option[Result.Value]) { logReport(report, event, result, Level.Error) }
private def info(report: Report, event: String, result: Option[Result.Value]) { logReport(report, event, result, Level.Info) }
private def logReport(report: Report, event: String, result: Option[Result.Value], level: Level.Value)
{
level match
{
case Level.Error =>
if(report.message.trim.isEmpty)
fire(TypedErrorEvent(report.name, event, None, report.throwable)(result))
else
fire(TypedErrorEvent(report.name, event, Some(report.message.trim), report.throwable)(result))
case Level.Info =>
if(report.message.trim.isEmpty)
fire(TypedEvent(report.name, event, None)(result))
else
fire(TypedEvent(report.name, event, Some(report.message.trim))(result))
case l => log.warn("Level not expected:" + l)
}
}
var succeeded = true
}
}
/** The test runner for specs tests. */
private[sbt] class SpecsRunner(val log: Logger, val listeners: Seq[TestReportListener], val testLoader: ClassLoader) extends BasicTestRunner
{
import org.specs.Specification
import org.specs.specification.{Example, Sus}
def runTest(testClassName: String) = error("Use the two argument variant")
override def runTest(testClassName: String, isModule: Boolean): Result.Value =
{
val test =
if(isModule)
ModuleUtilities.getObject(testClassName, testLoader).asInstanceOf[Specification]
else
Class.forName(testClassName, true, testLoader).asSubclass(classOf[Specification]).newInstance
val event = reportSpecification(test)
fire(event)
if(test.isFailing)
Result.Failed
else
Result.Passed
}
/* The following is closely based on org.specs.runner.OutputReporter,
* part of specs, which is Copyright 2007-2008 Eric Torreborre.
* */
private def reportSpecification(spec: Specification): SpecificationReportEvent =
{
// this is for binary compatibility between specs 1.4.x and 1.5.0: the ancestor of Specification containing these two methods changed
def reflectSeq[T](name: String) = classOf[Specification].getMethod(name).invoke(spec).asInstanceOf[Seq[T]]
val systems = reflectSeq[Sus]("systems")
val subSpecifications = reflectSeq[Specification]("subSpecifications")
return SpecificationReportEvent(spec.successes.size, spec.failures.size, spec.errors.size, spec.skipped.size, spec.pretty,
reportSystems(systems), reportSpecifications(subSpecifications))
}
private def reportSpecifications(specifications: Seq[Specification]): Seq[SpecificationReportEvent] =
{
for(specification <- specifications) yield
reportSpecification(specification)
}
private def reportSystems(systems: Seq[Sus]): Seq[SystemReportEvent] =
{
for(system <- systems) yield
reportSystem(system)
}
private def reportSystem(sus: Sus): SystemReportEvent =
{
// for source compatibility between specs 1.4.x and 1.5.0:
// in specs 1.5.0, description is LiterateDescription
// in specs < 1.5.0, description is Elem
// LiterateDescription.desc is a Node
// Elem.child is a Seq[Node]
// each has a map[T](f: Node => T): Seq[T] defined so we use reflection to call the right method
//description.child.map(_.text) // Elem equivalent
//description.desc.map(_.text) // LiterateDescription
def formatDescription(a: AnyRef) =
{
val toMap =
try { call(a, "desc") }
catch { case e: Exception => call(a, "child") }
val mapText = (a: AnyRef) => a.getClass.getMethod("text").invoke(a)
toMap.getClass.getMethod("map", Class.forName("scala.Function1")).invoke(toMap, mapText).asInstanceOf[Seq[String]]
}
def call(a: AnyRef, m: String) = a.getClass.getMethod(m).invoke(a)
def format: Option[Seq[String]] =
{
val litD = sus.literateDescription
if(litD.isEmpty) None else Some(formatDescription(litD.get))
}
// these are for 1.6 compatibility, which removed skippedSus (skipped still exists) and examples (moved to specification)
def skipped(sus: Sus) = classOf[Sus].getMethod("skipped").invoke(sus).asInstanceOf[Seq[Throwable]]
def examples(sus: Sus) =
{
try { sus.examples } // we compile against specs 1.4.x, which has examples directly on Sus so this compiles
catch { case _: NoSuchMethodError => // It fails at runtime for specs 1.6 because examples is now on BaseSpecification
val spec = classOf[Sus].getMethod("specification").invoke(sus)
spec.getClass.getMethod("examples").invoke(spec).asInstanceOf[List[Example]]
}
}
SystemReportEvent(sus.description, sus.verb, skipped(sus), format, reportExamples(examples(sus)))
}
private def reportExamples(examples: Seq[Example]): Seq[ExampleReportEvent] =
{
for(example <- examples) yield
reportExample(example)
}
private def reportExample(example: Example): ExampleReportEvent =
{
def examples(example: Example) =
try { example.subExamples } // we compile against specs 1.4.x, which has subExamples defined on Example, so this compiles
catch { case _ : NoSuchMethodError => // It fails at runtime for specs 1.6 because examples is the new method
classOf[Example].getMethod("examples").invoke(example).asInstanceOf[Seq[Example]]
}
ExampleReportEvent(example.description, example.errors, example.failures, example.skipped, reportExamples(examples(example)))
}
}
|
matheshar/simple-build-tool
|
src/main/scala/sbt/impl/TestFrameworkImpl.scala
|
Scala
|
bsd-3-clause
| 10,552 |
/*
* Copyright 2016 Tamer AbdulRadi
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package troy
package driver.schema
import troy.tast.{ Identifier, MaybeKeyspaceName, TableName }
import scala.annotation.implicitNotFound
/*
* Name is expected to be a textual literal type
* This type-class is meant to be instantiated at the call site (might be auto-generated by a macro/plugin)
* to give the compiler a hint about the schema
*/
@implicitNotFound("Table ${T} doesn't exists in version ${Version}")
trait TableExists[Version, T <: TableName[_, _]]
object TableExists {
def instance[V, K <: MaybeKeyspaceName, Name <: Identifier](implicit keyspaceExists: KeyspaceExists[V, K]) = new TableExists[V, TableName[K, Name]] {}
}
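// Call-site sketch (names are hypothetical; real instances may be macro-generated, as the
// comment above suggests): given an implicit KeyspaceExists[V, K] for a schema version V and
// keyspace K, evidence that a table Name exists could be provided roughly as
//   implicit val tableHint: TableExists[V, TableName[K, Name]] = TableExists.instance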
|
schemasafe/troy
|
troy-driver/src/main/scala/troy/driver/schema/table.scala
|
Scala
|
apache-2.0
| 1,247 |
// Copyright 2016 Carl Pulley
package cakesolutions.docker.testkit
import java.io.{File, PrintWriter}
import java.nio.file.{Files, Path, Paths, StandardCopyOption}
import java.time.ZonedDateTime
import java.util.{TimeZone, UUID}
import cakesolutions.docker.testkit.logging.Logger
import net.jcazevedo.moultingyaml._
import org.joda.time.{DateTime, DateTimeZone}
import org.json4s.JsonAST.{JArray, JNull, JString}
import org.json4s.native.JsonParser
import scala.collection.JavaConversions._
import scala.compat.java8.StreamConverters._
import scala.concurrent.duration.FiniteDuration
import scala.sys.process._
import scala.util.control.NonFatal
import scala.util.{Success, Try}
object DockerComposeTestKit {
sealed trait DockerComposeDefinition {
def contents: String
}
final case class DockerComposeString(contents: String) extends DockerComposeDefinition
final case class DockerComposeYaml(yaml: Map[Any, Any]) extends DockerComposeDefinition {
lazy val contents = toYamlValue(yaml).prettyPrint
private def toYamlValue(a: Any): YamlValue = a match {
case y: DockerComposeYaml =>
toYamlValue(y.yaml)
case n: Int =>
YamlNumber(n)
case n: Long =>
YamlNumber(n)
case n: Double =>
YamlNumber(n)
case n: Float =>
YamlNumber(n)
case n: Byte =>
YamlNumber(n)
case n: Short =>
YamlNumber(n)
case n: BigInt =>
YamlNumber(n)
case s: String =>
YamlString(s)
case d: FiniteDuration =>
YamlString(d.toString)
case d: ZonedDateTime =>
YamlDate(
new DateTime(
d.toInstant.toEpochMilli,
DateTimeZone.forTimeZone(TimeZone.getTimeZone(d.getZone))
)
)
case b: Boolean =>
YamlBoolean(b)
case s: Set[_] =>
YamlSet(s.map(toYamlValue).toSeq: _*)
case l: List[_] =>
YamlArray(l.map(toYamlValue): _*)
case m: Map[_, _] =>
YamlObject(m.map { case (k, v) => (toYamlValue(k), toYamlValue(v)) }.toSeq: _*)
case _: Any =>
YamlString(a.toString)
}
}
final case class DockerComposeFile(filename: String) extends DockerComposeDefinition {
lazy val contents = new String(Files.readAllBytes(Paths.get(filename)))
}
sealed trait State
case object Running extends State
case object Paused extends State
case object Stopped extends State
final case class ImageState(state: State, isRunning: Boolean, isPaused: Boolean, isRestarting: Boolean, isOOMKilled: Boolean, isDead: Boolean, exitCode: Option[Int], error: Option[String], startedAt: ZonedDateTime, finishedAt: ZonedDateTime)
/////////////////////////
final case class LogEvent(time: ZonedDateTime, image: String, message: String)
final case class DockerEvent(/*time: ZonedDateTime,*/ service: String, action: String, attributes: Map[String, String], `type`: String, id: String)
final case class DockerFile(entrypoint: Seq[String], cmd: Seq[String], user: Option[String], from: String, rawContents: Seq[String])
final case class ServiceBuilder(baseDockerfile: DockerFile, properties: Map[YamlValue, YamlValue] = Map.empty)
/////////////////////////
final class ProjectId(val id: UUID) {
override def toString: String = id.toString
}
final class DockerCommand(command: String) {
def execute(args: String*): Seq[String] = command +: args.toSeq
}
final class DockerComposeCommand(command: String) {
def execute(args: String*): Seq[String] = command +: args.toSeq
}
trait Driver {
def docker: DockerCommand
def compose: DockerComposeCommand
def newId: ProjectId
}
implicit val shellDriver = new Driver {
val docker = new DockerCommand("docker")
val compose = new DockerComposeCommand("docker-compose")
def newId = new ProjectId(UUID.randomUUID())
}
}
trait DockerComposeTestKit {
import DockerComposeTestKit._
private final case class Version(major: Int, minor: Int, patch: Int) extends Ordered[Version] {
override def compare(that: Version): Int = {
if (this == that) {
0
} else if (
major > that.major
|| major == that.major && minor > that.minor
|| major == that.major && minor == that.minor && patch > that.patch
) {
1
} else {
-1
}
}
override def toString: String = s"$major.$minor.$patch"
}
private object Version {
def unapply(data: String): Option[(Int, Int, Int)] = {
val versionRE = "^.*(\\\\d+)\\\\.(\\\\d+)\\\\.(\\\\d+).*$".r
Try({
val versionRE(major, minor, patch) = data.stripLineEnd
(major.toInt, minor.toInt, patch.toInt)
}).toOption
}
}
def up(projectName: String, yaml: DockerComposeDefinition)(implicit driver: Driver, log: Logger): DockerCompose = {
val composeVersion = Try(driver.compose.execute("--version").!!(log.devNull)).toOption.flatMap(Version.unapply)
require(
composeVersion.exists(v => (Version.apply _).tupled(v) >= Version(1, 7, 0)),
s"Need docker-compose version >= 1.7.X (have version $composeVersion)"
)
val dockerVersion = Try(driver.docker.execute("--version").!!(log.devNull)).toOption.flatMap(Version.unapply)
require(
dockerVersion.exists(v => (Version.apply _).tupled(v) >= Version(1, 11, 0)),
s"Need docker version >= 1.11.X (have version $dockerVersion)"
)
val projectId = driver.newId
log.info(s"Up $projectName [$projectId]")
val project = s"$projectId/$projectName"
val projectDir = s"target/$project"
new File(projectDir).mkdirs()
for (path <- Files.newDirectoryStream(Paths.get(projectDir))) {
assert(Files.deleteIfExists(path))
}
log.debug(driver.docker.execute("images").!!(log.stderr))
val parsedYaml = Try(yaml.contents.parseYaml)
assert(parsedYaml.isSuccess, s"Failed to parse docker compose YAML - reason: $parsedYaml")
parsedYaml.foreach {
case YamlObject(obj) =>
assert(obj.containsKey(YamlString("version")) && obj(YamlString("version")) == YamlString("2"), "Docker compose YAML should be version 2")
assert(obj.containsKey(YamlString("services")), "Docker compose YAML needs a `services` key")
obj(YamlString("services")) match {
case YamlObject(services) =>
services.values.foreach {
case YamlObject(service) =>
if (service.containsKey(YamlString("template"))) {
assert(! service.containsKey(YamlString("image")) && ! service.containsKey(YamlString("build")), "Docker compose `template` key is not usable with `image` and `build` keys")
service(YamlString("template")) match {
case YamlObject(template) =>
assert(template.containsKey(YamlString("resources")), "Docker compose YAML templates must contain a `resources` key")
assert(template.containsKey(YamlString("image")), "Docker compose YAML templates must contain an `image` key")
// FIXME: following needs fixing!!
assert(template(YamlString("image")).isInstanceOf[YamlString], "`image` key should be a string")
assert(template(YamlString("resources")).isInstanceOf[YamlArray], "`resources` key should be an array")
template(YamlString("resources")).asInstanceOf[YamlArray].elements.foreach { value =>
assert(value.isInstanceOf[YamlString], "All `resources` template values should be strings")
val resources = value.asInstanceOf[YamlString].value
assert(resources.startsWith("/"), "All `resources` template values should be absolute paths")
assert(Option(getClass.getResource(s"/docker$resources")).isDefined, "`resources` values should point to a path available on the classpath under a `docker` directory")
}
assert(template(YamlString("image")).asInstanceOf[YamlString].value.matches("^([^:]+)(:[^:]*)?$"), "`image` should match the regular expression `^([^:]+)(:[^:]*)?$`")
case _ =>
assert(assertion = false, "Docker compose YAML `template` key should be an object")
}
}
case _ =>
assert(assertion = false, "Each docker compose `services` member should be an object")
}
case _ =>
assert(assertion = false, "Docker compose YAML `services` key should be an object")
}
case _ =>
assert(assertion = false, "Docker compose YAML should be an object")
}
val templatedServicesWithImageResources = parsedYaml.get.asYamlObject.fields(YamlString("services")).asYamlObject.fields.filter {
case (_, service) =>
service.asYamlObject.fields.containsKey(YamlString("template"))
}.map { kv =>
kv.asInstanceOf[(YamlString, YamlObject)] match {
case (YamlString(name), YamlObject(service)) =>
val imagePattern = "^([^:]+)(:[^:]*)?$".r
val template = service(YamlString("template")).asYamlObject.fields
val baseImage = template(YamlString("image")).asInstanceOf[YamlString].value
if (driver.docker.execute("images", "-q", baseImage).!!(log.stderr).trim == "") {
try {
driver.docker.execute("pull", baseImage).!!(log.stderr)
} catch {
case NonFatal(exn) =>
log.error(s"Failed to pull docker image $baseImage", exn)
}
}
val imagePattern(repository, _) = baseImage
// TODO: implement some real error handling here!
val resources = (template(YamlString("resources")): @unchecked) match {
case YamlArray(elements) =>
elements.map(_.asInstanceOf[YamlString].value)
}
val rawEntryPoint = driver.docker.execute("inspect", "--format", "{{json .Config.Entrypoint}}", baseImage).!!(log.stderr).trim
val entryPoint =
if (rawEntryPoint == "null") {
Seq.empty[String]
} else {
(Try(JsonParser.parse(rawEntryPoint)): @unchecked) match {
case Success(JArray(list)) =>
list.map(json => (json: @unchecked) match {
case JString(data) => data
}).toSeq
case Success(JString(data)) =>
data.split("\\\\s+").toSeq
case Success(JNull) =>
Seq.empty[String]
}
}
val rawCmd = driver.docker.execute("inspect", "--format", "{{json .Config.Cmd}}", baseImage).!!(log.stderr).trim
val cmd =
if (rawCmd == "null") {
Seq.empty[String]
} else {
(Try(JsonParser.parse(rawCmd)): @unchecked) match {
case Success(JArray(list)) =>
list.map(json => (json: @unchecked) match { case JString(data) => data }).toSeq
case Success(JString(data)) =>
data.split("\\\\s+").toSeq
case Success(JNull) =>
Seq.empty[String]
}
}
val rawUser = driver.docker.execute("inspect", "--format", "{{json .Config.User}}", baseImage).!!(log.stderr).trim
val user =
if (rawUser == "null") {
None
} else {
(rawUser.drop(1).dropRight(1): @unchecked) match {
case "" =>
None
case data =>
Some(data)
}
}
val baseDockerfile = DockerFile(entryPoint, cmd, user, from = baseImage, rawContents = Seq(s"FROM $baseImage"))
assert(resources.nonEmpty)
assert(resources.length == resources.distinct.length)
assert(resources.forall(_.head == '/'))
val expandedBuild = resources.foldLeft(ServiceBuilder(baseDockerfile))(evaluateService(projectDir, name, log))
Files.write(Paths.get(s"$projectDir/$name/docker/Dockerfile"), expandedBuild.baseDockerfile.rawContents.mkString("", "\\n", "\\n").getBytes)
(s"$repository:$name.$projectId", Map(YamlString(name) -> YamlObject(service - YamlString("template") + (YamlString("image") -> YamlString(s"$repository:$name.$projectId")) + (YamlString("build") -> YamlObject(YamlString("context") -> YamlString(s"./$name/docker"))) ++ expandedBuild.properties)))
}
}
val imagesToDelete = templatedServicesWithImageResources.keys.toSeq
val templatedServices = templatedServicesWithImageResources.flatMap(_._2)
val nonTemplatedServices = parsedYaml.get.asYamlObject.fields(YamlString("services")).asYamlObject.fields.filter {
case (_, service) =>
! service.asYamlObject.fields.containsKey(YamlString("template"))
}
val transformedYaml = YamlObject(
parsedYaml.get.asYamlObject.fields.updated(
YamlString("services"),
YamlObject(nonTemplatedServices ++ templatedServices)
)
)
val yamlFile = s"$projectDir/docker-compose.yaml"
val output = new PrintWriter(yamlFile)
try {
output.print(transformedYaml.prettyPrint)
} finally {
output.close()
}
val yamlConfig = Try(driver.compose.execute("-p", projectId.toString, "-f", yamlFile, "config").!!(log.stderr).parseYaml.asYamlObject)
require(yamlConfig.isSuccess, yamlConfig.toString)
driver.compose.execute("-p", projectId.toString, "-f", yamlFile, "up", "--build", "--remove-orphans", "-d").!!(log.stderr)
new DockerCompose(projectName, projectId, yamlFile, yamlConfig.get, imagesToDelete)(driver, log)
}
private def evaluateService(projectDir: String, serviceName: String, log: Logger)(builder: ServiceBuilder, resource: String): ServiceBuilder = {
val dockerDir = getClass.getResource(s"/docker$resource").getPath
val (newDockerfile, properties) = copyTemplateResources(projectDir, serviceName, dockerDir, resource, builder.baseDockerfile, log)
ServiceBuilder(newDockerfile, builder.properties ++ properties)
}
private def copyTemplateResources(projectDir: String, serviceName: String, dockerDir: String, resource: String, baseDockerfile: DockerFile, log: Logger): (DockerFile, Map[YamlValue, YamlValue]) = {
var result = (baseDockerfile, Map.empty[YamlValue, YamlValue])
for (path <- Files.walk(Paths.get(dockerDir)).toScala[List]) {
if (path == Paths.get(s"$dockerDir/Dockerfile")) {
// Intentionally ignore Dockerfile's
log.warn(s"Ignoring file $path - Dockerfile must be a template (i.e. end in extension .scala.template)")
} else if (path == Paths.get(s"$dockerDir/Service.scala.template")) {
result = (result._1, evaluateService(resource))
} else if (path == Paths.get(s"$dockerDir/Dockerfile.scala.template")) {
val newDockerfile = evaluateDockerfile(projectDir, serviceName, dockerDir, resource, baseDockerfile, path)
result = (newDockerfile, result._2)
} else {
val targetFile = Paths.get(path.toString.replace(dockerDir, s"$projectDir/$serviceName/docker"))
val targetDir = new File(targetFile.getParent.toString)
if (!targetDir.exists()) {
targetDir.mkdirs()
}
if (!new File(targetFile.toString).exists()) {
Files.copy(path, targetFile, StandardCopyOption.COPY_ATTRIBUTES)
}
}
}
if (new File(s"$projectDir/$serviceName/docker").exists()) {
Files.write(Paths.get(s"$projectDir/$serviceName/docker/.dockerignore"), "template/*\\n".getBytes)
}
result
}
private def evaluateDockerfile(projectDir: String, serviceName: String, dockerDir: String, resource: String, oldDockerfile: DockerFile, path: Path): DockerFile = {
// FIXME: to be correct, the following needs to use reflection!
val contents: Array[String] = (resource match {
case "/jmx/akka" =>
docker.jmx.akka.template.Dockerfile(oldDockerfile).body
case "/libfiu" =>
docker.libfiu.template.Dockerfile(oldDockerfile).body
case "/network/default/linux" =>
docker.network.default.linux.template.Dockerfile(oldDockerfile).body
}).split("\n")
val Entrypoint = """^ENTRYPOINT\\s+(.+)$""".r
val Cmd = """^CMD\\s+(.+)$""".r
val User = """^USER\\s+(.+)$""".r
val entryPoint = {
val result = contents.collect { case Entrypoint(value) => value.trim }.toSeq
if (result.isEmpty) {
oldDockerfile.entrypoint
} else {
result
}
}
val cmd = {
val result = contents.collect { case Cmd(value) => value.trim }.toSeq
if (result.isEmpty) {
oldDockerfile.cmd
} else {
result
}
}
val user = contents.collect { case User(value) => value.trim }.lastOption.orElse(oldDockerfile.user)
DockerFile(entryPoint, cmd, user, oldDockerfile.from, oldDockerfile.rawContents ++ contents.filterNot(_.matches("^FROM\\s+.*$")))
}
private def evaluateService(resource: String): Map[YamlValue, YamlValue] = {
// FIXME: to be correct, the following needs to use reflection!
resource match {
case "/network/default/linux" =>
docker.network.default.linux.template.Service().body.parseYaml.asYamlObject.fields
}
}
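// Hypothetical sketch (not part of the original file) of how the hard-coded `resource match`
// blocks above could be generalised with runtime reflection, as the FIXME notes suggest.
// The derived object name (e.g. "docker.network.default.linux.template.Service") and the
// `apply`/`body` members are assumptions based on the cases handled above.
private def evaluateServiceViaReflection(resource: String): Map[YamlValue, YamlValue] = {
  import scala.reflect.runtime.{universe => ru}
  val mirror = ru.runtimeMirror(getClass.getClassLoader)
  val objectName = s"docker${resource.replace('/', '.')}.template.Service"
  // Obtain the singleton template object and render it via its no-argument `apply`
  val template = mirror.reflectModule(mirror.staticModule(objectName)).instance
  val rendered = template.getClass.getMethod("apply").invoke(template)
  val body = rendered.getClass.getMethod("body").invoke(rendered).asInstanceOf[String]
  body.parseYaml.asYamlObject.fields
}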
}
|
carlpulley/docker-compose-testkit
|
src/main/scala/cakesolutions/docker/testkit/DockerComposeTestKit.scala
|
Scala
|
apache-2.0
| 17,405 |
package app.models
/**
* User: asalvadore
*/
trait Identifiable {
val id: Long
}
|
mericano1/spray-akka-slick-postgres
|
src/main/scala/app/models/Identifiable.scala
|
Scala
|
mit
| 86 |
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.hyperledger.pbft
import java.net.InetSocketAddress
import akka.actor.ActorRef
import org.hyperledger.common.PublicKey
import org.hyperledger.network.Version
import org.hyperledger.pbft.PbftSettings.NodeConfig
import scalaz._
import Scalaz._
object ConnectionManagement {
def empty = new ConnectionManagement(Map.empty, Map.empty)
sealed trait Connection {
def peer: ActorRef
}
case class PendingConnection(remoteAddress: InetSocketAddress, peer: ActorRef)
case class ReplicaConnection(
remoteAddress: InetSocketAddress,
version: Version,
publicKey: PublicKey,
peer: ActorRef) extends Connection
case class ClientConnection(
remoteAddress: InetSocketAddress,
version: Version,
peer: ActorRef) extends Connection
sealed trait ConnectionError
case object AlreadyConnected extends ConnectionError
case object UnknownConnection extends ConnectionError
}
case class ConnectionManagement(
pendingConnections: Map[InetSocketAddress, ConnectionManagement.PendingConnection],
activeConnections: Map[InetSocketAddress, ConnectionManagement.Connection]) {
import ConnectionManagement._
def connected(peer: ActorRef, remoteAddress: InetSocketAddress): ConnectionError \/ ConnectionManagement =
if (pendingConnections.contains(remoteAddress))
\/.left(AlreadyConnected)
else
\/.right(copy(pendingConnections = pendingConnections + (remoteAddress -> PendingConnection(remoteAddress, peer))))
def timeout(remoteAddress: InetSocketAddress) = copy(pendingConnections = pendingConnections - remoteAddress)
def disconnected(version: Version) = copy(activeConnections = activeConnections - version.addrFrom.address)
def handshakeComplete(settings: PbftSettings,
peer: ActorRef,
remoteAddress: InetSocketAddress,
version: Version): (ConnectionManagement, Connection) = {
val conn = settings.otherNodes.find(_.address == version.addrFrom)
.map(n => ReplicaConnection(remoteAddress, version, n.publicKey, peer))
.getOrElse(ClientConnection(remoteAddress, version, peer))
val updated = copy(
pendingConnections = pendingConnections - remoteAddress,
activeConnections = activeConnections + (version.addrFrom.address -> conn))
(updated, conn)
}
}
|
DigitalAssetCom/hlp-candidate
|
server/pbft/src/main/scala/org/hyperledger/pbft/ConnectionManagement.scala
|
Scala
|
apache-2.0
| 2,845 |
package opencl.generator.stencil
import ir._
import ir.ast.Pad.BoundaryFun
import ir.ast._
import lift.arithmetic.{SizeVar, StartFromRange, Var}
import opencl.executor._
import opencl.ir._
import opencl.ir.pattern.{MapGlb, _}
import org.junit.Assert._
import org.junit.Assume.assumeFalse
import org.junit._
import scala.util.Random
object TestStencil2D extends TestWithExecutor
class TestStencil2D {
/* **********************************************************
UTILS
***********************************************************/
// boundary condition implemented in scala to create gold versions
val SCALABOUNDARY = Utils.scalaClamp
val BOUNDARY = Pad.Boundary.Clamp
val randomData2D = Array.tabulate(32, 32) { (i, j) => Random.nextFloat() }
val data2D = Array.tabulate(4, 4) { (i, j) => i * 4.0f + j }
val gaussWeights = Array(
0.08f, 0.12f, 0.08f,
0.12f, 0.20f, 0.12f,
0.08f, 0.12f, 0.08f)
/* **********************************************************
SLIDE 2D o PAD 2D
***********************************************************/
def runCombinedPadGroupTest(size: Int, step: Int,
left: Int, right: Int,
boundary: BoundaryFun,
scalaBoundary: (Int, Int) => Int,
data: Array[Array[Float]] = data2D): Unit = {
val gold = Utils.scalaGenerate2DNeighbours(data, size, step, size, step, left, right, left, right, scalaBoundary)
val goldFlat = gold.flatten.flatten.flatten
val lambda = fun(
ArrayType(ArrayType(Float, SizeVar("M")), SizeVar("N")),
(domain) => {
MapGlb(1)(
MapGlb(0)(fun(neighbours =>
MapSeqUnroll(MapSeqUnroll(id)) $ neighbours
))
) o Slide2D(size, step) o Pad2D(left, right, boundary) $ domain
}
)
val (output, _) = Execute(data.length, data.length)[Array[Float]](lambda, data)
assertArrayEquals(goldFlat, output, 0.1f)
}
@Test def groupClampPaddedData2D(): Unit = {
val boundary = Pad.Boundary.Clamp
val scalaBoundary = Utils.scalaClamp
runCombinedPadGroupTest(3, 1, 1, 1, boundary, scalaBoundary)
}
@Test def groupBigClampPaddedData2D(): Unit = {
LongTestsEnabled()
val data2D = Array.tabulate(10, 10) { (i, j) => i * 10.0f + j }
val boundary = Pad.Boundary.Clamp
val scalaBoundary = Utils.scalaClamp
runCombinedPadGroupTest(5, 1, 2, 2, boundary, scalaBoundary, data2D)
}
@Test def groupMirrorPaddedData2D(): Unit = {
val boundary = Pad.Boundary.Mirror
val scalaBoundary = Utils.scalaMirror
runCombinedPadGroupTest(3, 1, 1, 1, boundary, scalaBoundary)
}
@Test def groupWrapPaddedData2D(): Unit = {
val boundary = Pad.Boundary.Wrap
val scalaBoundary = Utils.scalaWrap
runCombinedPadGroupTest(3, 1, 1, 1, boundary, scalaBoundary)
}
/* **********************************************************
2D STENCILS
***********************************************************/
def createSimple2DStencil(size: Int, step: Int,
left: Int, right: Int,
weights: Array[Float],
boundary: BoundaryFun,
fromRange: Int): Lambda2 = {
createSimple2DStencil(size, step, size, step, left, right, left, right, weights, boundary, fromRange)
}
def createSimple2DStencil(size1: Int, step1: Int,
size2: Int, step2: Int,
top: Int, bottom: Int,
left: Int, right: Int,
weights: Array[Float],
boundary: BoundaryFun,
fromRange: Int): Lambda2 = {
fun(
ArrayType(ArrayType(Float, Var("N", StartFromRange(fromRange))), Var("M", StartFromRange(fromRange))),
ArrayType(Float, weights.length),
(matrix, weights) => {
MapGlb(1)(
MapGlb(0)(fun(neighbours => {
toGlobal(MapSeqUnroll(id)) o
ReduceSeqUnroll(fun((acc, pair) => {
val pixel = Get(pair, 0)
val weight = Get(pair, 1)
multAndSumUp.apply(acc, pixel, weight)
}), 0.0f) $ Zip(Join() $ neighbours, weights)
}))
) o Slide2D(size1, step1, size2, step2) o Pad2D(top, bottom, left, right, boundary) $ matrix
})
}
def run2DStencil(stencil: Lambda2,
size1: Int, step1: Int,
size2: Int, step2: Int,
top: Int, bottom: Int,
left: Int, right: Int,
weights: Array[Float],
scalaBoundary: (Int, Int) => Int): Unit = {
try {
// be careful when choosing a small input size because of 'StartFromRange(100)'
val width = randomData2D(0).length
val height = randomData2D.length
val input = randomData2D
val (output, _) = Execute(1, 1, width, height, (false, false))[Array[Float]](stencil, input, weights)
val gold = Utils.scalaCompute2DStencil(input, size1, step1, size2, step2, top, bottom, left, right, weights, scalaBoundary)
assertArrayEquals(gold, output, 0.1f)
} catch {
case x: Exception => x.printStackTrace()
}
}
def run2DStencil(stencil: Lambda2,
size: Int, step: Int,
left: Int, right: Int,
weights: Array[Float],
scalaBoundary: (Int, Int) => Int): Unit = {
run2DStencil(stencil, size, step, size, step, left, right, left, right, weights, scalaBoundary)
}
@Test def gaussianBlur(): Unit = {
LongTestsEnabled()
val stencil = createSimple2DStencil(3, 1, 1, 1, gaussWeights, BOUNDARY, 2)
run2DStencil(stencil, 3, 1, 1, 1, gaussWeights, SCALABOUNDARY)
}
@Test def gaussianBlur25PointStencil(): Unit = {
val weights = Array(
1, 4, 7, 4, 1,
4, 16, 26, 16, 4,
7, 26, 41, 26, 7,
4, 16, 26, 16, 4,
1, 4, 7, 4, 1).map(_ * 0.004219409282700422f)
val stencil = createSimple2DStencil(5, 1, 2, 2, weights, BOUNDARY, 3)
run2DStencil(stencil, 5, 1, 2, 2, weights, SCALABOUNDARY)
}
@Test def blurX3Point(): Unit = {
LongTestsEnabled()
val weights = Array.fill[Float](3)(1.0f)
val stencil = createSimple2DStencil(1, 1, 3, 1, 0, 0, 1, 1, weights, Pad.Boundary.Wrap, 2)
run2DStencil(stencil, 1, 1, 3, 1, 0, 0, 1, 1, weights, Utils.scalaWrap)
}
@Test def blurY3Point(): Unit = {
val weights = Array.fill[Float](3)(1.0f)
val stencil = createSimple2DStencil(3, 1, 1, 1, 1, 1, 0, 0, weights, Pad.Boundary.Wrap, 2)
run2DStencil(stencil, 3, 1, 1, 1, 1, 1, 0, 0, weights, Utils.scalaWrap)
}
/* **********************************************************
TILING 2D
***********************************************************/
def createTiled2DStencil(size: Int, step: Int,
tileSize: Int, tileStep: Int,
left: Int, right: Int,
weights: Array[Float],
boundary: Pad.BoundaryFun): Lambda = {
createTiled2DStencil(size, step, size, step,
tileSize, tileStep, tileSize, tileStep,
left, right, left, right,
weights, boundary)
}
def createTiled2DStencil(size1: Int, step1: Int,
size2: Int, step2: Int,
tileSize1: Int, tileStep1: Int,
tileSize2: Int, tileStep2: Int,
top: Int, bottom: Int,
left: Int, right: Int,
weights: Array[Float],
boundary: Pad.BoundaryFun): Lambda = fun(
ArrayType(ArrayType(Float, Var("N", StartFromRange(100))), Var("M", StartFromRange(100))),
ArrayType(Float, weights.length),
(matrix, weights) => {
Untile2D() o MapWrg(1)(MapWrg(0)(fun(tile =>
MapLcl(1)(MapLcl(0)(
fun(elem => {
toGlobal(MapSeqUnroll(id)) o
ReduceSeqUnroll(fun((acc, pair) => {
val pixel = Get(pair, 0)
val weight = Get(pair, 1)
multAndSumUp.apply(acc, pixel, weight)
}), 0.0f) $ Zip(Join() $ elem, weights)
})
)) o Slide2D(size1, step1, size2, step2) o toLocal(MapLcl(1)(MapLcl(0)(id))) $ tile
))) o Slide2D(tileSize1, tileStep1, tileSize2, tileStep2) o Pad2D(top, bottom, left, right, boundary) $ matrix
}
)
def createCopyTilesLambda(size: Int, step: Int,
left: Int, right: Int,
boundary: Pad.BoundaryFun): Lambda = fun(
ArrayType(ArrayType(Float, SizeVar("M")), SizeVar("N")),
ArrayType(Float, 9),
(matrix, weights) => {
MapWrg(1)(MapWrg(0)(fun(tile =>
toGlobal(MapLcl(1)(MapLcl(0)(id))) $ tile
))) o Slide2D(size, step) o Pad2D(left, right, boundary) $ matrix
}
)
@Test def copyTilesIdentity(): Unit = {
assumeFalse("Disabled on Apple OpenCL CPU.", Utils.isAppleCPU)
val data2D = Array.tabulate(4, 4) { (i, j) => i * 4.0f + j }
val tiled: Lambda = createCopyTilesLambda(4, 2, 1, 1, BOUNDARY)
val (output, runtime) = Execute(2, 2, 2, 2, (false, false))[Array[Float]](tiled, data2D, gaussWeights)
val gold = Utils.scalaGenerate2DNeighbours(data2D, 4, 2, 4, 2, 1, 1, 1, 1, SCALABOUNDARY).flatten.flatten.flatten
assertArrayEquals(gold, output, 0.1f)
}
// be careful when choosing a small input size because of 'StartFromRange(100)'
@Test def tiling2DBiggerTiles(): Unit = {
val data2D = Array.tabulate(32, 32) { (i, j) => i * 24.0f + j }
val tiled: Lambda = createTiled2DStencil(3, 1, 10, 8, 1, 1, gaussWeights, BOUNDARY)
val (output, runtime) = Execute(2, 2, 2, 2, (false, false))[Array[Float]](tiled, data2D, gaussWeights)
val gold = Utils.scalaCompute2DStencil(data2D, 3, 1, 3, 1, 1, 1, 1, 1, gaussWeights, SCALABOUNDARY)
assertArrayEquals(gold, output, 0.1f)
}
// be careful when choosing a small input size because of 'StartFromRange(100)'
@Test def tiled2D9PointStencil(): Unit = {
LongTestsEnabled()
val tiled: Lambda = createTiled2DStencil(3, 1, 4, 2, 1, 1, gaussWeights, BOUNDARY)
run2DStencil(tiled, 3, 1, 1, 1, gaussWeights, SCALABOUNDARY)
}
def createTiled2DStencilWithTiledLoading(size1: Int, step1: Int,
size2: Int, step2: Int,
tileSize1: Int, tileStep1: Int,
tileSize2: Int, tileStep2: Int,
top: Int, bottom: Int,
left: Int, right: Int,
weights: Array[Float],
boundary: Pad.BoundaryFun): Lambda = fun(
ArrayType(ArrayType(Float, Var("N", StartFromRange(100))), Var("M", StartFromRange(100))),
ArrayType(Float, weights.length),
(matrix, weights) => {
Untile2D() o MapWrg(1)(MapWrg(0)(fun(tile =>
MapLcl(1)(MapLcl(0)(
//MapSeq(MapSeq((toGlobal(id))))
fun(elem => {
toGlobal(MapSeqUnroll(id)) o
ReduceSeqUnroll(fun((acc, pair) => {
val pixel = Get(pair, 0)
val weight = Get(pair, 1)
multAndSumUp.apply(acc, pixel, weight)
}), 0.0f) $ Zip(Join() $ elem, weights)
})
// create stencil neighbourhoods
)) o Slide2D(size1, step1, size2, step2) o Map(Join()) o
// load chunks to local memory
toLocal(MapLcl(1)(MapSeqUnroll(MapLcl(0)(id)))) $ tile
// splitting the tile into chunks
))) o Map(Map(Map(Split(8)))) o
// creating tiles
Slide2D(tileSize1, tileStep1, tileSize2, tileStep2) o
Pad2D(top, bottom, left, right, boundary) $ matrix
}
)
// be careful when choosing a small input size because of 'StartFromRange(100)'
@Ignore // falsely classified as not valid because of barriers
@Test def tiledBlurXTiledLoading(): Unit = {
val weights = Array(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1).map(_.toFloat)
val tiled: Lambda = createTiled2DStencilWithTiledLoading(1, 1, 17, 1, 1, 1, 24, 8, 0, 0, 8, 8, weights, Pad.Boundary.Clamp)
run2DStencil(tiled, 1, 1, 17, 1, 0, 0, 8, 8, weights, Utils.scalaClamp)
}
@Ignore // todo compN(p, i) = p^i, but map^i = map o ... o map does not work yet
@Test def alternative2DStencilExpression(): Unit = {
val clamp = Pad.Boundary.Clamp
// apply primitive in X and Y Dimension
val dim2: (FunDecl) => Lambda = (primitive: FunDecl) =>
primitive o Map(primitive)
// compose primitive n-times
def compN: (FunDecl, Int) => FunDecl = (primitive: FunDecl, i: Int) =>
if (i > 1)
primitive o compN(primitive, i-1)
else
primitive
def dimN: (FunDecl, Int) => FunDecl = (primitive: FunDecl, dim: Int) =>
if (dim > 1)
// should be `Map` inside the compN
dimN(primitive, dim-1) o compN(Map.asInstanceOf[FunDecl], dim-1) o primitive
else
primitive
// apply 2D boundary handling
//val boundary = (size: Int, b: Pad.BoundaryFun) => dim2(Pad(size,size,b))
val boundary = (size: Int, b: Pad.BoundaryFun) => dimN(Pad(size,size,b), 2)
// create 2D neighborhoods
//val nbh = (size: Int) => Map(Transpose()) o dim2(Slide(size, size-2))
val nbh = (size: Int) => Map(Transpose()) o dimN(Slide(size, size-2), 2)
// 2D stencil function
val f = toGlobal(MapSeq(id)) o ReduceSeq(add, 0.0f) o Join()
val lambda = fun(
ArrayType(ArrayType(Float, SizeVar("N")), SizeVar("M")),
(input) => {
MapGlb(1)(MapGlb(0)(f)) o nbh(3) o boundary(1, clamp) $ input
})
val lambda2 = fun(
ArrayType(Float, SizeVar("N")),
(input) => {
MapGlb(1)(id) o compN(Pad(1,1,clamp), 3) $ input
})
/*
val input = Array.tabulate(512, 512) { (i,j) => scala.util.Random.nextFloat() }
val (output: Array[Float], runtime) = Execute(16, 16, 512, 512, (false, false))(lambda, input)
val weights = Array.tabulate(9) { i => 1.0f}
val gold = Utils.scalaCompute2DStencil(input, 3,1,3,1, 1,1,1,1, weights, Utils.scalaClamp)
assertArrayEquals(gold, output, 0.1f)
*/
}
@Test def modSimplifyTest(): Unit = {
val size = 3
val step = 1
val left = 1
val right = 1
val data2D = Array.tabulate(4, 4) { (i, j) => i * 4.0f + j }
val lambda = fun(
ArrayType(ArrayType(Float, Var("N", StartFromRange(2))), Var("M", StartFromRange(2))),
(domain) => {
MapSeq(
MapSeq(
fun(neighbours =>
MapSeqUnroll(MapSeqUnroll(id)) $ neighbours)
)
) o Map(Map(Transpose()) o Slide(size, step) o Transpose()) o Slide(size, step) o
Transpose() o Pad(left, right, Pad.Boundary.Wrap) o Transpose() o Pad(left, right, Pad.Boundary.Wrap) $ domain
}
)
val (output, runtime) = Execute(data2D.length, data2D.length)[Array[Float]](lambda, data2D)
val gold = Array(15.0, 12.0, 13.0,
3.0, 0.0, 1.0,
7.0, 4.0, 5.0,
12.0, 13.0, 14.0,
0.0, 1.0, 2.0,
4.0, 5.0, 6.0,
13.0, 14.0, 15.0,
1.0, 2.0, 3.0,
5.0, 6.0, 7.0,
14.0, 15.0, 12.0,
2.0, 3.0, 0.0,
6.0, 7.0, 4.0,
3.0, 0.0, 1.0,
7.0, 4.0, 5.0,
11.0, 8.0, 9.0,
0.0, 1.0, 2.0,
4.0, 5.0, 6.0,
8.0, 9.0, 10.0,
1.0, 2.0, 3.0,
5.0, 6.0, 7.0,
9.0, 10.0, 11.0,
2.0, 3.0, 0.0,
6.0, 7.0, 4.0,
10.0, 11.0, 8.0,
7.0, 4.0, 5.0,
11.0, 8.0, 9.0,
15.0, 12.0, 13.0,
4.0, 5.0, 6.0,
8.0, 9.0, 10.0,
12.0, 13.0, 14.0,
5.0, 6.0, 7.0,
9.0, 10.0, 11.0,
13.0, 14.0, 15.0,
6.0, 7.0, 4.0,
10.0, 11.0, 8.0,
14.0, 15.0, 12.0,
11.0, 8.0, 9.0,
15.0, 12.0, 13.0,
3.0, 0.0, 1.0,
8.0, 9.0, 10.0,
12.0, 13.0, 14.0,
0.0, 1.0, 2.0,
9.0, 10.0, 11.0,
13.0, 14.0, 15.0,
1.0, 2.0, 3.0,
10.0, 11.0, 8.0,
14.0, 15.0, 12.0,
2.0, 3.0, 0.0).map(_.toFloat)
//output.grouped(3).toArray.map(x => println(x.mkString(",")))
assertArrayEquals(gold, output, 0.1f)
}
/* **********************************************************
SHOC STENCIL 2D
***********************************************************/
/*
$ cd $(SHOC_DIR)/src/opencl/level1/stencil2d
$ ./Stencil2D --customSize 8,8 --weight-center 0.25 --weight-cardinal 0.15 --weight-diagonal 0.05 --verbose --num-iters 1
Compare to a 10x10 array. SHOC does not handle the boundary itself but provides an already padded input array.
*/
@Test def shocStencil2D(): Unit = {
LongTestsEnabled()
val stencil = fun(
ArrayType(ArrayType(Float, Var("N", StartFromRange(2))), Var("M", StartFromRange(2))),
ArrayType(Float, 9),
(matrix, weights) => {
Untile2D() o MapWrg(1)(MapWrg(0)(fun(tile =>
MapLcl(1)(MapLcl(0)(
// stencil computation
fun(elem => {
toGlobal(MapSeqUnroll(id)) o
ReduceSeqUnroll(fun((acc, pair) => {
val pixel = Get(pair, 0)
val weight = Get(pair, 1)
multAndSumUp.apply(acc, pixel, weight)
}), 0.0f) $ Zip(Join() $ elem, weights)
})
// create neighbourhoods in tiles
)) o Slide2D(3, 1, 3, 1) o
// load to local memory
toLocal(MapLcl(1)(MapLcl(0)(id))) $ tile
))) o
// tiling
Slide2D(10, 8, 258, 256) $ matrix
}
)
val weights = Array(0.05, 0.15, 0.05,
0.15, 0.25, 0.15,
0.05, 0.15, 0.05).map(_.toFloat)
// testing - change tilesize!
//val inputSize = 10
//val haloSize = 1
//val outputSize = inputSize - 2 * haloSize
// testing - change tilesize!
val inputSize = 512
val haloSize = 1
val outputSize = inputSize - 2 * haloSize
// 4k
//val inputSize = 4096
//val haloSize = 1
//val outputSize = inputSize - 2 * haloSize
// create already padded input array with inner elements (i,j) = i * j
var input = Array.tabulate(inputSize, inputSize) { (i, j) => (i - haloSize) * (j - haloSize) * 1.0f }
input(0) = input(0).map((_ * 0.0f))
input(inputSize - 1) = input(inputSize - 1).map(_ * 0.0f)
input = input.transpose
input(0) = input(0).map(_ * 0.0f)
input(inputSize - 1) = input(inputSize - 1).map(_ * 0.0f)
input = input.transpose
try {
val (output, runtime) = Execute(1, 256, 512, 512, (false, false))[Array[Float]](stencil, input, weights)
println("Runtime: " + runtime)
} catch {
case e: DeviceCapabilityException =>
Assume.assumeNoException("Device not supported.", e)
}
}
@Test def shocStencil2DNoTiling(): Unit = {
val stencil = fun(
ArrayType(ArrayType(Float, Var("N", StartFromRange(6))), Var("M", StartFromRange(6))),
ArrayType(Float, 9),
(matrix, weights) => {
MapGlb(1)(MapGlb(0)(
// stencil computation
fun(elem => {
toGlobal(MapSeqUnroll(id)) o
ReduceSeqUnroll(fun( (acc, pair) => {
val pixel = pair._0
val weight = pair._1
multAndSumUp.apply(acc, pixel, weight)
}), 0.0f) $ Zip(Join() $ elem, weights)
})
// create neighbourhoods in tiles
)) o Slide2D(3, 1, 3, 1) $ matrix
}
)
val weights = Array(0.05, 0.15, 0.05,
0.15, 0.25, 0.15,
0.05, 0.15, 0.05).map(_.toFloat)
val inputSize = 34
val haloSize = 1
val outputSize = inputSize - 2 * haloSize
// create already padded input array with inner elements (i,j) = i * j
var input = Array.tabulate(inputSize, inputSize) { (i, j) => (i - haloSize) * (j - haloSize) * 1.0f }
input(0) = input(0).map((_ * 0.0f))
input(inputSize - 1) = input(inputSize - 1).map(_ * 0.0f)
input = input.transpose
input(0) = input(0).map(_ * 0.0f)
input(inputSize - 1) = input(inputSize - 1).map(_ * 0.0f)
input = input.transpose
try {
val (output, runtime) = Execute(1, 32, 32, 32, (false, false))[Array[Float]](stencil, input, weights)
} catch {
case e: DeviceCapabilityException =>
Assume.assumeNoException("Device not supported.", e)
}
}
@Ignore //todo does not compute correct result yet
@Test def shocStencil2DNoTilingFloat3(): Unit = {
val dotAndSumUp = UserFun("dotAndSumUp", Array("acc", "l", "r"),
"{ return acc + dot(l, r); }",
Seq(Float, Float3, Float3), Float)
val stencil = fun(
ArrayType(ArrayType(Float, Var("M", StartFromRange(6))), Var("N", StartFromRange(6))),
ArrayType(Float, 9),
(matrix, weights) => {
//weights.addressSpace = ConstantMemory
MapGlb(1)(MapGlb(0)(
// stencil computation
fun(elem => {
toGlobal(MapSeqUnroll(id)) o
ReduceSeqUnroll(fun( (acc, pair) => {
val pixel = pair._0
val weight = pair._1
dotAndSumUp.apply(acc, pixel, weight)
}), 0.0f) $
Zip(asVector(3) o Join() $ elem, asVector(3) $ weights)
})
// create neighbourhoods in tiles
)) o Slide2D(3, 1) $ matrix
}
)
val weights = Array(
0.05, 0.15, 0.05,
0.15, 0.25, 0.15,
0.05, 0.15, 0.05 ).map(_.toFloat)
// testing - change tilesize!
//val inputSize = 10
//val haloSize = 1
//val outputSize = inputSize - 2 * haloSize
// testing - change tilesize!
val inputSize = 8194
val haloSize = 1
val outputSize = inputSize - 2 * haloSize
// 4k
//val inputSize = 4096
//val haloSize = 1
//val outputSize = inputSize - 2 * haloSize
// create already padded input array with inner elements (i,j) = i * j
var input = Array.tabulate(inputSize, inputSize) { (i, j) => (i - haloSize) * (j - haloSize) * 1.0f }
input(0) = input(0).map((_ * 0.0f))
input(inputSize - 1) = input(inputSize - 1).map(_ * 0.0f)
input = input.transpose
input(0) = input(0).map(_ * 0.0f)
input(inputSize - 1) = input(inputSize - 1).map(_ * 0.0f)
input = input.transpose
try {
val (output, runtime) = Execute(1, 256, 1024, 8192, (false, false))[Array[Float]](stencil, input, weights)
println("Runtime: " + runtime)
println(output.take(10).mkString(", "))
} catch {
case e: DeviceCapabilityException =>
Assume.assumeNoException("Device not supported.", e)
}
}
/* **********************************************************
THREE LEVEL TILING
***********************************************************/
@Test def threeLevelTilingTest(): Unit = {
LongTestsEnabled()
val stencil = fun(
//ArrayType(ArrayType(Float, Var("N", StartFromRange(2))), Var("M", StartFromRange(2))),
ArrayType(ArrayType(Float, 8192), 8192),
ArrayType(Float, 15),
(matrix, weights) => {
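// Three levels of tiling around the 15x1 stencil (padding of 7 on every border):
// work-group tiles of 526x64 stepping by 512x64, cache blocks of 78x8 stepping by 64x8
// within each tile, and 15x1 neighbourhoods within each cache block.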
Untile2D() o MapWrg(1)(MapWrg(0)(
fun(wgBlock =>
MapSeq(MapSeq(
fun(cacheBlock =>
MapLcl(1)(MapLcl(0)(
fun(elem => {
toGlobal(MapSeqUnroll(id)) o
ReduceSeqUnroll(fun((acc, pair) => {
val pixel = Get(pair, 0)
val weight = Get(pair, 1)
multAndSumUp.apply(acc, pixel, weight)
}), 0.0f) $ Zip(Join() $ elem, weights)
})
)) o Slide2D(15, 1, 1, 1) $ cacheBlock
))) o Slide2D(78, 64, 8, 8) $ wgBlock
))) o Slide2D(526, 512, 64, 64) o Pad2D(7, 7, Pad.Boundary.Clamp) $ matrix
}
)
val weights = Array.fill[Float](15)(1.0f)
val haloSize = 7
val outputSize = 8192
val inputSize = 8192
// create already padded input array with inner elements (i,j) = i * j
var input = Array.tabulate(inputSize, inputSize) { (i, j) => i + j * 1.0f }
/*
input(0) = input(0).map((_*0.0f))
input(inputSize -1) = input(inputSize -1).map(_*0.0f)
input = input.transpose
input(0) = input(0).map(_*0.0f)
input(inputSize -1) = input(inputSize -1).map(_*0.0f)
input = input.transpose
*/
try {
val (output, runtime) = Execute(64, 4, 1024, 512, (true, true))[Array[Float]](stencil, input, weights)
println("Runtime: " + runtime)
} catch {
case e: DeviceCapabilityException =>
Assume.assumeNoException("Device not supported.", e)
}
}
@Test
def stencil2DTilingRewriteIdentities(): Unit = {
LongTestsEnabled()
val N = SizeVar("N")
val M = SizeVar("M")
val n = 3
val s = 1
val u = 6
val v = 4
// use non-powers-of-two as input values
// to enable powers-of-two as valid inputs for Slide step since Pad is not used here
val input = Array.tabulate(34, 34) { (i, j) => i * 34.0f + j }
// use abbr. notation to avoid huge expressions and simplify rewriting on paper
// *=map (as in BMF), J=join, T=transpose, S_ab = slide a b
// (0): *T o S_ns o *S_ns
val gold = Map(Transpose()) o Slide(n,s) o Map(Slide(n,s))
// Desired: 2D tiling
// This is what we assume to be correct 2d stencil tiling. Proof by rewriting follows:
//
// J o J o *T o **Slide2d_ns o Slide2d_uv
// val desired = Map(Join()) o Join() o Map(Transpose()) o Map(Map(Slide2D(n,s))) o Slide2D(u,v)
//
// = *J o J o *T o ***T o **S_ns o ***S_ns o *T o S_uv o *S_uv
val desired = Map(Join()) o Join() o Map(Transpose()) o Map(Map(Map(Transpose()))) o Map(Map(Slide(n,s))) o Map(Map(Map(Slide(n,s)))) o
Map(Transpose()) o Slide(n,s) o Map(Slide(n,s))
// (1): *T o J o *S_ns o S_uv o *S_ns
val f1 = Map(Transpose()) o Join() o Map(Slide(n,s)) o Slide(u,v) o Map(Slide(n,s))
// (2): J o **T o *S_ns o S_uv o *S_ns
val f2 = Join() o Map(Map(Transpose())) o Map(Slide(n,s)) o Slide(u,v) o Map(Slide(n,s))
// (3): tile first S_ns -> does not lead to anything yet. see (6) for other try
// J o **T o *(J o *S_ns o S_uv) o S_uv o *S_ns
val f3 = Join() o Map(Map(Transpose())) o Map(Join() o Map(Slide(n,s)) o
Slide(u,v)) o Slide(u,v) o Map(Slide(n,s))
// (4): J o **T o *J o **S_ns o *S_uv o S_uv o *S_ns
val f4 = Join() o Map(Map(Transpose())) o Map(Join()) o Map(Map(Slide(n,s))) o
Map(Slide(u,v)) o Slide(u,v) o Map(Slide(n,s))
// (5): J o *J o ***T o **S_ns o *S_uv o S_uv o *S_ns
val f5 = Join() o Map(Join()) o Map(Map(Map(Transpose()))) o Map(Map(Slide(n,s))) o
Map(Slide(u,v)) o Slide(u,v) o Map(Slide(n,s))
// (6): Try tiling other S_ns from (2)
// J o **T o *S_ns o S_uv o *(J o *S_ns o S_uv)
val f6 = Join() o Map(Map(Transpose())) o Map(Slide(n,s)) o Slide(u,v) o Map(Join() o Map(Slide(n,s)) o Slide(u,v))
// (7): J o **T o *S_ns o S_uv o *J o **S_ns o *S_uv
val f7 = Join() o Map(Map(Transpose())) o Map(Slide(n,s)) o Slide(u,v) o Map(Join()) o
Map(Map(Slide(n,s))) o Map(Slide(u,v))
// (8): J o **T o *S_ns o **J o S_uv o **S_ns o *S_uv
val f8 = Join() o Map(Map(Transpose())) o Map(Slide(n,s)) o Map(Map(Join())) o
Slide(u,v) o Map(Map(Slide(n,s))) o Map(Slide(u,v))
// (9): J o **T o ***J o *S_ns o S_uv o **S_ns o *S_uv
val f9 = Join() o Map(Map(Transpose())) o Map(Map(Map(Join()))) o Map(Slide(n,s)) o Slide(u,v) o
Map(Map(Slide(n,s))) o Map(Slide(u,v))
// (10): J o **T o ***J o *S_ns o ***S_ns o S_uv o *S_uv
val f10 = Join() o Map(Map(Transpose())) o Map(Map(Map(Join()))) o Map(Slide(n,s)) o
Map(Map(Map(Slide(n,s)))) o Slide(u,v) o Map(Slide(u,v))
// (11): J o **T o ***J o *S_ns o ***S_ns o *(T o T) o S_uv o *S_uv
val f11 = Join() o Map(Map(Transpose())) o Map(Map(Map(Join()))) o Map(Slide(n,s)) o
Map(Map(Map(Slide(n,s)))) o Map(Transpose() o Transpose()) o Slide(u,v) o Map(Slide(u,v))
// (12): J o **T o ***J o *S_ns o ***S_ns o *T o *T o S_uv o *S_uv
val f12 = Join() o Map(Map(Transpose())) o Map(Map(Map(Join()))) o Map(Slide(n,s)) o
Map(Map(Map(Slide(n,s)))) o Map(Transpose()) o Map(Transpose()) o Slide(u,v) o Map(Slide(u,v))
// (13): J o **T o ***J o *S_ns o *T o ***S_ns o *T o S_uv o *S_uv
val f13 = Join() o Map(Map(Transpose())) o Map(Map(Map(Join()))) o Map(Slide(n,s)) o
Map(Transpose()) o Map(Map(Map(Slide(n,s)))) o Map(Transpose()) o Slide(u,v) o Map(Slide(u,v))
// (14): J o **T o ***J o **T o *T o **S_ns_ o ***S_ns o *T o S_uv o *S_uv
val f14 = Join() o Map(Map(Transpose())) o Map(Map(Map(Join()))) o Map(Map(Transpose())) o
Map(Transpose()) o Map(Map(Slide(n,s))) o Map(Map(Map(Slide(n,s)))) o Map(Transpose()) o
Slide(u,v) o Map(Slide(u,v))
// (15): J o **J o ***T o **T o **T o *T o **S_ns o ***S_ns o *T o S_uv o *S_uv
val f15 = Join() o Map(Map(Join())) o Map(Map(Map(Transpose()))) o
Map(Map(Transpose())) o Map(Map(Transpose())) o // they cancel out
Map(Transpose()) o Map(Map(Slide(n,s))) o Map(Map(Map(Slide(n,s)))) o Map(Transpose()) o
Slide(u,v) o Map(Slide(u,v))
// (16): J o **J o ***T o *T o **S_ns o ***S_ns o *T o S_uv o *S_uv
val f16 = Join() o Map(Map(Join())) o Map(Map(Map(Transpose()))) o
Map(Transpose()) o Map(Map(Slide(n,s))) o Map(Map(Map(Slide(n,s)))) o Map(Transpose()) o
Slide(u,v) o Map(Slide(u,v))
def lambda(f: Lambda): Lambda1 = fun(
ArrayType(ArrayType(Float, M), N),
input =>
MapGlb(1)(MapGlb(0)(MapSeq(MapSeq(id)))) o f $ input
)
val (outGold, runtime) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(gold), input)
val (outDesired, runtime0) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(desired), input)
val (outF1, runtime1) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f1), input)
val (outF2, runtime2) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f2), input)
val (outF3, runtime3) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f3), input)
val (outF4, runtime4) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f4), input)
val (outF5, runtime5) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f5), input)
val (outF6, runtime6) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f6), input)
val (outF7, runtime7) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f7), input)
val (outF8, runtime8) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f8), input)
val (outF9, runtime9) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f9), input)
val (outF10, runtime10) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f10), input)
val (outF11, runtime11) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f11), input)
val (outF12, runtime12) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f12), input)
val (outF13, runtime13) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f13), input)
val (outF14, runtime14) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f14), input)
val (outF15, runtime15) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f15), input)
val (outF16, runtime16) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f16), input)
assertArrayEquals(outGold, outDesired, 0.1f)
assertArrayEquals(outGold, outF1, 0.1f)
assertArrayEquals(outGold, outF2, 0.1f)
assertArrayEquals(outGold, outF3, 0.1f)
assertArrayEquals(outGold, outF4, 0.1f)
assertArrayEquals(outGold, outF5, 0.1f)
assertArrayEquals(outGold, outF6, 0.1f)
assertArrayEquals(outGold, outF7, 0.1f)
assertArrayEquals(outGold, outF8, 0.1f)
assertArrayEquals(outGold, outF9, 0.1f)
assertArrayEquals(outGold, outF10, 0.1f)
assertArrayEquals(outGold, outF11, 0.1f)
assertArrayEquals(outGold, outF12, 0.1f)
assertArrayEquals(outGold, outF13, 0.1f)
assertArrayEquals(outGold, outF14, 0.1f)
assertArrayEquals(outGold, outF15, 0.1f)
assertArrayEquals(outGold, outF16, 0.1f)
}
@Test
def stencil2DTilingLocalMemIdentities(): Unit = {
LongTestsEnabled()
// stencil shape
val n = 3
val s = 1
// tile size
val u = 6
val v = 4
// pad parameters
val l = 1
val r = 1
val b = Pad.Boundary.Clamp
// unlike the previous test, Pad is used here, so a power-of-two input size (32x32) is fine
val input = Array.tabulate(32, 32) { (i, j) => i * 32.0f + j }
// stencil function: nbh:[3][3] -> [1]
val f = toGlobal(MapSeq(id)) o ReduceSeq(add, 0.0f) o Join()
def lambda(f: Lambda) = {
fun(
ArrayType(ArrayType(Float,SizeVar("M")), SizeVar("N")),
input => f $ input
)
}
// use shorthand notation to avoid huge expressions (partly inspired by BMF)
val P = Pad(l,r,b)
val T = Transpose()
val T_w = TransposeW()
val J = Join()
val S_ns = Slide(n,s)
val S_uv = Slide(u,v)
def *(f: Lambda) = Map(f)
def **(f: Lambda) = Map(Map(f))
def ***(f: Lambda) = Map(Map(Map((f))))
val gold = MapGlb(1)(MapGlb(0)(f)) o // (C) apply stencil function
Map(Transpose()) o Slide(n,s) o Map(Slide(n,s)) o // (B) 2d neighborhood creation
Pad(l,r,b) o Map(Pad(l,r,b)) // (A) 2d padding
// same as gold but short
val goldShort = MapGlb(1)(MapGlb(0)(f)) o // (C)
*(T) o S_ns o *(S_ns) o // (B)
P o *(P) // (A)
// introduce 2d tiles
val f1 = MapGlb(1)(MapGlb(0)(f)) o // (C)
*(J) o J o *(T) o ***(T) o **(S_ns) o ***(S_ns) o *(T) o S_uv o *(S_uv) o // (B) (tiling inclusive)
P o *(P) // (A)
////////////////// Promote Two Maps //////////////
// fuse maps
val g1 = MapGlb(1)(MapGlb(0)(f) o J) o J o *(T) o
***(T) o **(S_ns) o ***(S_ns) o *(T) o S_uv o *(S_uv) o
P o *(P)
// apply mapJoin rule twice - introduce MapWrgs and MapLcls
val g2 = J o MapWrg(1)(MapWrg(0)(J o MapLcl(1)(MapLcl(0)(f)))) o *(T) o
***(T) o **(S_ns) o ***(S_ns) o *(T) o S_uv o *(S_uv) o
P o *(P)
// split to single maps again
val g3 = J o **(J) o MapWrg(1)(MapWrg(0)(MapLcl(1)(MapLcl(0)(f)))) o *(T) o
***(T) o **(S_ns) o ***(S_ns) o *(T) o S_uv o *(S_uv) o
P o *(P)
// the last *T can be moved in front of ****f as T_w -> see 'f3'
/////////////////// f1 -> f2 ///////////////////////
// move maps forward to exploit more levels of parallelism
// functionally *J o J o *T should do the job, however Lift's output views require Untile or ...
val f2 = /* *(J) o J o *(T) o */ Untile2D() o MapWrg(1)(MapWrg(0)(MapLcl(1)(MapLcl(0)(f)))) o // (C) using workgroups
***(T) o **(S_ns) o ***(S_ns) o *(T) o S_uv o *(S_uv) o // (B)
P o *(P) // (A)
// ...TransposeW instead of Transpose after stencil computation
val f3 = *(J) o J o *(T_w) o MapWrg(1)(MapWrg(0)(MapLcl(1)(MapLcl(0)(f)))) o // (C)
***(T) o **(S_ns) o ***(S_ns) o // (%) // (B) Create neighborhoods in tiles
*(T) o S_uv o *(S_uv) o // (B) Create tiles
P o *(P) // (A)
// fuse the expressions (%) from above with the MapWrg's
val f4 = *(J) o J o *(T_w) o MapWrg(1)(MapWrg(0)(MapLcl(1)(MapLcl(0)(f)) o // (C)
*(T) o S_ns o *(S_ns))) o // (B) Create neighborhoods in tiles
*(T) o S_uv o *(S_uv) o // (B) Create tiles
P o *(P) // (A)
// load tiles to local memory
val f5 = *(J) o J o *(T_w) o MapWrg(1)(MapWrg(0)(MapLcl(1)(MapLcl(0)(f)) o // (C)
*(T) o S_ns o *(S_ns) o // Slide2D n s // (B.3) Create neighborhoods in tiles
toLocal(MapLcl(1)(MapLcl(0)(id))))) o // whole line = id // (B.2) Load tiles to local memory
*(T) o S_uv o *(S_uv) o // Slide2D u v // (B.1) Create tiles
P o *(P) // (A)
val (outGold, runtime) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(gold), input)
val (outGoldShort, runtimeShort) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(goldShort), input)
val (outF1, _) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f1), input)
val (outG1, _) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(g1), input)
val (outG2, _) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(g2), input)
val (outG3, _) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(g3), input)
val (outF2, _) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f2), input)
val (outF3, _) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f3), input)
val (outF4, _) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f4), input)
val (outF5, _) = Execute(1,1,32,32,(false,false))[Array[Float]](lambda(f5), input)
assertArrayEquals(outGold, outGoldShort, 0.1f)
assertArrayEquals(outGold, outF1, 0.1f)
assertArrayEquals(outGold, outG1, 0.1f)
assertArrayEquals(outGold, outG2, 0.1f)
assertArrayEquals(outGold, outG3, 0.1f)
assertArrayEquals(outGold, outF2, 0.1f)
assertArrayEquals(outGold, outF3, 0.1f)
assertArrayEquals(outGold, outF4, 0.1f)
assertArrayEquals(outGold, outF5, 0.1f)
}
@Test //see Issue116
def halideBlur(): Unit = {
val M = Var("M")
val N = Var("N")
val M2 = 2 + Var("M")
val N2 = 2 + Var("N")
val input = Array.tabulate(128, 128) { (i, j) => j * 128.0f + i }
val div9 = UserFun("div9", "x", "{ return x/9; }", Float, Float)
val f = MapGlb(0)(MapSeq( \(neighborhood => toGlobal(MapSeq(div9)) o ReduceSeq(add, 0.0f) o Join() $ neighborhood)
)) o Slide2D(3, 1)
val stencil1 = fun( ArrayType(ArrayType(Float, M2), N2), input => {f $ input })
val stencil2 = fun( ArrayType(ArrayType(Float, M), N), input => {f $ input })
val(output1, _) = Execute(32,128)[Array[Float]](stencil1, input)
val(output2, _) = Execute(32,128)[Array[Float]](stencil2, input)
assertNotEquals(output1, output2)
}
}
|
lift-project/lift
|
src/test/opencl/generator/stencil/TestStencil2D.scala
|
Scala
|
mit
| 38,999 |
import org.scalatest.{FunSuite, Matchers}
/** @version created manually **/
class ZipperTest extends FunSuite with Matchers {
def empty[A]: Option[BinTree[A]] = None
def bt[A](v: A, l: Option[BinTree[A]], r: Option[BinTree[A]]): Option[BinTree[A]] =
Some(BinTree(v, l, r))
def leaf[A](v: A): Option[BinTree[A]] =
Some(BinTree(v, None, None))
val t1: BinTree[Int] = BinTree(1, bt(2, empty, leaf(3)), leaf(4))
val t2: BinTree[Int] = BinTree(1, bt(5, empty, leaf(3)), leaf(4))
val t3: BinTree[Int] = BinTree(1, bt(2, leaf(5), leaf(3)), leaf(4))
val t4: BinTree[Int] = BinTree(1, leaf(2), leaf(4))
def fromSome[T](o: Option[T]) = o.get
val z = Zipper
test("data is retained") {
z.toTree(z.fromTree(t1)) should be (t1)
}
test("left, right and value") {
pending
z.value(fromSome(z.right(fromSome(z.left(z.fromTree(t1)))))) should be (3)
}
test("dead end") {
pending
(z.left(fromSome(z.left(z.fromTree(t1))))) should be (None)
}
test("tree from deep focus") {
pending
z.toTree(fromSome(z.right(fromSome(z.left(z.fromTree(t1)))))) should be (t1)
}
test("setValue") {
pending
z.toTree(z.setValue(5, (fromSome(z.left(z.fromTree(t1)))))) should be (t2)
}
test("setLeft with Some") {
pending
z.toTree(z.setLeft(Some(BinTree(5, None, None)),
(fromSome(z.left(z.fromTree(t1)))))) should be (t3)
}
test("setRight with None") {
pending
z.toTree(z.setRight(None, (fromSome(z.left(z.fromTree(t1)))))) should be (t4)
}
test("different paths to same zipper") {
pending
z.right(fromSome(z.up(fromSome(z.left(z.fromTree(t1)))))) should be
(z.right(z.fromTree(t1)))
}
}
|
ricemery/xscala
|
exercises/zipper/src/test/scala/ZipperTest.scala
|
Scala
|
mit
| 1,713 |
package ml.util
object TypeConversions {
/**
* Convert from an iterable of generic type T to a Vector[Double]
*/
def TtoDouble[T](v: Iterable[T])(implicit num: Numeric[T]): Vector[Double] = {
v.map(x => num.toDouble(x)).toVector
}
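// Illustrative example: TtoDouble(List(1, 2, 3)) == Vector(1.0, 2.0, 3.0)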
}
|
jccarrasco/maleta
|
src/ml/util/TypeConversions.scala
|
Scala
|
gpl-2.0
| 242 |
package clasp.core;
/**
* Provides an interface to the
* [[http://developer.android.com/tools/sdk/tools-notes.html Android SDK Tools]].
*/
package object sdktools {
// http://stackoverflow.com/questions/6227759
def runWithTimeout[T](timeoutMs: Long)(f: => T) : Option[T] = {
import scala.actors.Futures.awaitAll
import scala.actors.Futures.future
//TODO Fix classpath errors?
awaitAll(timeoutMs, future(f)).head.asInstanceOf[Option[T]]
}
def runWithTimeout[T](timeoutMs: Long, default: T)(f: => T) : T = {
runWithTimeout(timeoutMs)(f).getOrElse(default)
}
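// Hypothetical alternative sketch (not in the original file) using scala.concurrent instead
// of the deprecated scala.actors API noted in the TODO above; returns None on timeout.
def runWithTimeoutFuture[T](timeoutMs: Long)(f: => T): Option[T] = {
  import scala.concurrent.{Await, Future, TimeoutException}
  import scala.concurrent.ExecutionContext.Implicits.global
  import scala.concurrent.duration._
  try Some(Await.result(Future(f), timeoutMs.milliseconds))
  catch { case _: TimeoutException => None }
}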
}
|
hamiltont/clasp
|
src/clasp/core/sdktools/package.scala
|
Scala
|
mit
| 599 |
import edu.uta.diql._
import org.apache.spark._
import org.apache.spark.rdd._
object Test {
def main ( args: Array[String] ) {
val conf = new SparkConf().setAppName("Test")
val sc = new SparkContext(conf)
explain(true)
val A = sc.textFile(args(0)).zipWithIndex.map{ case (line,i) => (i,line.toDouble) }
v(sc,"""
var sum: Double = 0.0;
var count: Int = 0;
for i = 1, 100 do {
sum += A[i];
count += 1;
};
println(sum/count);
""")
}
}
|
fegaras/DIQL
|
tests/diablo/spark/Average.scala
|
Scala
|
apache-2.0
| 522 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.cache
import io.gatling.core.session.{ Session, SessionPrivateAttributes }
import io.gatling.http.protocol.{ HttpProtocol, Remote }
import io.gatling.http.response.Response
import com.typesafe.scalalogging.StrictLogging
private[http] object Http2PriorKnowledgeSupport extends StrictLogging {
val Http2PriorKnowledgeAttributeName: String = SessionPrivateAttributes.PrivateAttributePrefix + "http.cache.priorKnowledgeHttp2"
def setHttp2PriorKnowledge(httpProtocol: HttpProtocol): Session => Session =
if (httpProtocol.enginePart.enableHttp2) {
_.set(Http2PriorKnowledgeAttributeName, httpProtocol.enginePart.http2PriorKnowledge)
} else {
logger.debug("HTTP/2 disabled")
if (httpProtocol.enginePart.http2PriorKnowledge.nonEmpty) {
logger.debug("Ignoring configured HTTP/2 prior knowledge")
}
Session.Identity
}
def updateSessionHttp2PriorKnowledge(session: Session, response: Response): Session = {
val remote = Remote(response.request.getUri)
session(Http2PriorKnowledgeSupport.Http2PriorKnowledgeAttributeName).asOption[Map[Remote, Boolean]] match {
case Some(priorKnowledgeMap) =>
if (priorKnowledgeMap.contains(remote)) {
session
} else {
session.set(Http2PriorKnowledgeAttributeName, priorKnowledgeMap + (remote -> response.isHttp2))
}
case _ => session
}
}
def isHttp2PriorKnowledge(session: Session, remote: Remote): Option[Boolean] =
session(Http2PriorKnowledgeSupport.Http2PriorKnowledgeAttributeName).asOption[Map[Remote, Boolean]] match {
case Some(priorKnowledgeMap) => priorKnowledgeMap.get(remote)
case _ => None
}
}
|
gatling/gatling
|
gatling-http/src/main/scala/io/gatling/http/cache/Http2PriorKnowledgeSupport.scala
|
Scala
|
apache-2.0
| 2,347 |
/*
* Copyright (C) 2005, The OpenURP Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openurp.edu.clazz.model
import scala.language.implicitConversions
import org.openurp.base.model.Department
import org.openurp.code.edu.model.EducationLevel
import org.openurp.base.edu.code.model.StdLabel
import org.openurp.base.edu.code.model.StdType
import org.openurp.base.edu.model.Direction
import org.openurp.base.edu.model.Major
import org.openurp.base.edu.model.Squad
import org.beangle.data.model.Entity
enum RestrictionMeta(val id:Int,contentType: Class[_]){
case Grade extends RestrictionMeta(1, classOf[String])
case StdType extends RestrictionMeta(2, classOf[StdType])
case Department extends RestrictionMeta(4, classOf[Department])
case Major extends RestrictionMeta(5, classOf[Major])
case Direction extends RestrictionMeta(6, classOf[Direction])
case Squad extends RestrictionMeta(7, classOf[Squad])
case Level extends RestrictionMeta(8, classOf[EducationLevel])
case StdLabel extends RestrictionMeta(11, classOf[StdLabel])
}
|
openurp/api
|
edu/src/main/scala/org/openurp/edu/clazz/model/RestrictionMeta.scala
|
Scala
|
lgpl-3.0
| 1,702 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import org.joda.time.LocalDate
import org.mockito.Mockito._
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.ct.accounts.{AC205, AccountsMoneyValidationFixture, MockFrs102AccountsRetriever}
import uk.gov.hmrc.ct.accounts.frs102.retriever.Frs102AccountsBoxRetriever
import uk.gov.hmrc.ct.box.CtValidation
class AC7210BSpec
extends WordSpec
with MockitoSugar
with Matchers
with MockFrs102AccountsRetriever
with AccountsMoneyValidationFixture[Frs102AccountsBoxRetriever] {
when(boxRetriever.ac7200()).thenReturn(AC7200(Some(true)))
when(boxRetriever.ac7210A()).thenReturn(AC7210A(Some(123)))
testAccountsMoneyValidationWithMin("AC7210B", minValue = 0, AC7210B)
"AC7210B" should {
"when AC7210A is empty" when {
"pass validation if AC7210B has a value AC7200 is true" in {
when(boxRetriever.ac7200()).thenReturn(AC7200(Some(true)))
when(boxRetriever.ac7210A()).thenReturn(AC7210A(None))
when(boxRetriever.ac205()).thenReturn(AC205(Some(new LocalDate(2015, 12, 1))))
AC7210B(Some(1224)).validate(boxRetriever) shouldBe empty
}
"pass validation if AC7210B is empty and AC7200 is false" in {
when(boxRetriever.ac7200()).thenReturn(AC7200(Some(false)))
when(boxRetriever.ac7210A()).thenReturn(AC7210A(None))
when(boxRetriever.ac205()).thenReturn(AC205(Some(new LocalDate(2015, 12, 1))))
AC7210B(None).validate(boxRetriever) shouldBe empty
}
"pass validation if AC7210B is empty and AC7200 is empty" in {
when(boxRetriever.ac7200()).thenReturn(AC7200(None))
when(boxRetriever.ac7210A()).thenReturn(AC7210A(None))
when(boxRetriever.ac205()).thenReturn(AC205(Some(new LocalDate(2015, 12, 1))))
AC7210B(None).validate(boxRetriever) shouldBe empty
}
"fail validation if AC7210B has a value AC7200 is false" in {
when(boxRetriever.ac7200()).thenReturn(AC7200(Some(false)))
when(boxRetriever.ac7210A()).thenReturn(AC7210A(None))
when(boxRetriever.ac205()).thenReturn(AC205(Some(new LocalDate(2015, 12, 1))))
AC7210B(Some(1224)).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC7210B"), "error.AC7210B.cannot.exist"))
}
"fail validation if AC7210B has a value AC7200 is true and NO previous PoA" in {
when(boxRetriever.ac7200()).thenReturn(AC7200(Some(true)))
when(boxRetriever.ac7210A()).thenReturn(AC7210A(None))
when(boxRetriever.ac205()).thenReturn(AC205(None))
AC7210B(Some(1224)).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC7210B"), "error.AC7210B.cannot.exist"))
}
"fail validation if AC7210B has a value AC7200 is empty" in {
when(boxRetriever.ac7200()).thenReturn(AC7200(None))
when(boxRetriever.ac7210A()).thenReturn(AC7210A(None))
AC7210B(Some(1224)).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC7210B"), "error.AC7210B.cannot.exist"))
}
}
"when AC7210A is populated" when {
"pass validation if AC7210B has a value AC7200 is true" in {
when(boxRetriever.ac7200()).thenReturn(AC7200(Some(true)))
when(boxRetriever.ac7210A()).thenReturn(AC7210A(Some(4321)))
when(boxRetriever.ac205()).thenReturn(AC205(Some(new LocalDate(2015, 12, 1))))
AC7210B(Some(1224)).validate(boxRetriever) shouldBe empty
}
"pass validation if AC7210B is empty and AC7200 is false" in {
when(boxRetriever.ac7200()).thenReturn(AC7200(Some(false)))
when(boxRetriever.ac7210A()).thenReturn(AC7210A(Some(4321)))
when(boxRetriever.ac205()).thenReturn(AC205(Some(new LocalDate(2015, 12, 1))))
AC7210B(None).validate(boxRetriever) shouldBe empty
}
"pass validation if AC7210B is empty and AC7200 is empty" in {
when(boxRetriever.ac7200()).thenReturn(AC7200(None))
when(boxRetriever.ac7210A()).thenReturn(AC7210A(Some(4321)))
when(boxRetriever.ac205()).thenReturn(AC205(Some(new LocalDate(2015, 12, 1))))
AC7210B(None).validate(boxRetriever) shouldBe empty
}
"fail validation if AC7210B has a value AC7200 is false" in {
when(boxRetriever.ac7200()).thenReturn(AC7200(Some(false)))
when(boxRetriever.ac7210A()).thenReturn(AC7210A(Some(4321)))
when(boxRetriever.ac205()).thenReturn(AC205(Some(new LocalDate(2015, 12, 1))))
AC7210B(Some(1224)).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC7210B"), "error.AC7210B.cannot.exist"))
}
"fail validation if AC7210B has a value AC7200 is true and NO PoA" in {
when(boxRetriever.ac7200()).thenReturn(AC7200(Some(false)))
when(boxRetriever.ac7210A()).thenReturn(AC7210A(Some(4321)))
when(boxRetriever.ac205()).thenReturn(AC205(None))
AC7210B(Some(1224)).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC7210B"), "error.AC7210B.cannot.exist"))
}
"fail validation if AC7210B has a value AC7200 is empty" in {
when(boxRetriever.ac7200()).thenReturn(AC7200(None))
when(boxRetriever.ac7210A()).thenReturn(AC7210A(Some(4321)))
when(boxRetriever.ac205()).thenReturn(AC205(Some(new LocalDate(2015, 12, 1))))
AC7210B(Some(1224)).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC7210B"), "error.AC7210B.cannot.exist"))
}
}
}
}
|
liquidarmour/ct-calculations
|
src/test/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC7210BSpec.scala
|
Scala
|
apache-2.0
| 6,047 |
package com.sksamuel.elastic4s.indexes
import com.sksamuel.elastic4s.bulk.BulkCompatibleDefinition
import com.sksamuel.elastic4s.{FieldValue, FieldsMapper, IndexAndType, Indexable}
import com.sksamuel.exts.OptionImplicits._
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy
import org.elasticsearch.index.VersionType
case class IndexDefinition(indexAndType: IndexAndType,
id: Option[Any] = None,
createOnly: Option[Boolean] = None,
refresh: Option[RefreshPolicy] = None,
parent: Option[String] = None,
pipeline: Option[String] = None,
routing: Option[String] = None,
timestamp: Option[String] = None,
timeout: Option[String] = None,
version: Option[Long] = None,
versionType: Option[VersionType] = None,
fields: Seq[FieldValue] = Nil,
source: Option[String] = None) extends BulkCompatibleDefinition {
require(indexAndType != null, "index must not be null or empty")
def doc(json: String): IndexDefinition = source(json)
def doc[T: Indexable](t: T): IndexDefinition = source(t)
def source(json: String): IndexDefinition = copy(source = json.some)
def source[T](t: T)(implicit indexable: Indexable[T]): IndexDefinition = copy(source = indexable.json(t).some)
def id(id: Any): IndexDefinition = withId(id)
def withId(id: Any): IndexDefinition = copy(id = id.some)
def pipeline(pipeline: String): IndexDefinition = copy(pipeline = pipeline.some)
def parent(parent: String): IndexDefinition = copy(parent = parent.some)
def refresh(refresh: String): IndexDefinition = copy(refresh = RefreshPolicy.valueOf(refresh).some)
def refresh(refresh: RefreshPolicy): IndexDefinition = copy(refresh = refresh.some)
def timestamp(timestamp: String): IndexDefinition = copy(timestamp = timestamp.some)
def routing(routing: String): IndexDefinition = copy(routing = routing.some)
def version(version: Long): IndexDefinition = copy(version = version.some)
def versionType(versionType: VersionType): IndexDefinition = copy(versionType = versionType.some)
def timeout(timeout: String): IndexDefinition = copy(timeout = timeout.some)
// if set to true then trying to update a document will fail
def createOnly(createOnly: Boolean): IndexDefinition = copy(createOnly = createOnly.some)
def fields(_fields: (String, Any)*): IndexDefinition = fields(_fields.toMap)
def fields(_fields: Iterable[(String, Any)]): IndexDefinition = fields(_fields.toMap)
def fields(fields: Map[String, Any]): IndexDefinition = copy(fields = FieldsMapper.mapFields(fields))
def fieldValues(fields: FieldValue*): IndexDefinition = copy(fields = fields)
}
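// Hypothetical usage sketch (not part of the original file), assuming the two-argument
// IndexAndType(index, type) constructor from the same package:
object IndexDefinitionExample {
  val request: IndexDefinition =
    IndexDefinition(IndexAndType("people", "person"))
      .withId(42)
      .routing("eu-west")
      .source("""{"name": "Sam"}""")
}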
|
FabienPennequin/elastic4s
|
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/indexes/IndexDefinition.scala
|
Scala
|
apache-2.0
| 2,902 |
package fr.ymanvieu.trading.gatling.scenario
import io.gatling.core.Predef._
import io.gatling.core.structure._
import io.gatling.http.Predef._
object RateHistoryScenario {
val feeder = Array(
Map("fromcur" -> "USD", "tocur" -> "EUR"),
Map("fromcur" -> "BTC", "tocur" -> "USD"),
Map("fromcur" -> "ETH", "tocur" -> "USD")
).circular
def buildScenario(): ScenarioBuilder = {
scenario("Rate History")
.feed(feeder)
.exec(http("Get ${fromcur}/${tocur} rate history")
.get("/api/rate/history")
.queryParam("fromcur", "${fromcur}")
.queryParam("tocur", "${tocur}")
.check(status.is(200)))
}
}
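// Hypothetical wiring sketch (not in the original file): attaching the scenario to a
// simulation. The base URL, injection profile and Gatling 3 `baseUrl` syntax are assumptions.
class RateHistorySimulation extends Simulation {
  private val httpProtocol = http.baseUrl("http://localhost:8080")

  setUp(
    RateHistoryScenario.buildScenario().inject(atOnceUsers(10))
  ).protocols(httpProtocol)
}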
|
ymanvieu/trading
|
trading-gatling/src/test/scala/fr/ymanvieu/trading/gatling/scenario/RateHistoryScenario.scala
|
Scala
|
lgpl-3.0
| 660 |
package x7c1.linen.modern.init.unread
import android.support.v7.widget.LinearLayoutManager
import x7c1.linen.database.control.DatabaseHelper
import x7c1.linen.modern.action.observer.{OutlineFocusedObserver, OutlineSelectedObserver, OutlineSkipStoppedObserver, OutlineSkippedObserver}
import x7c1.linen.modern.action.{EntrySkipStopped, EntrySkipStoppedFactory, EntrySkippedEventFactory, OutlineFocusedEvent, OutlineFocusedEventFactory}
import x7c1.linen.modern.display.unread.{OnOutlineSelectedListener, OutlineRowAdapter, OutlineSelectedEvent, PaneDragDetector}
import x7c1.linen.repository.unread.BrowsedEntriesMarker
import x7c1.wheat.lore.resource.AdapterDelegatee
import x7c1.wheat.modern.decorator.Imports._
import x7c1.wheat.modern.observer.{FocusDetector, OnItemFocusedListener, OnSkipStoppedListener, SkipDetector, SkipPositionFinder}
trait OutlineAreaInitializer {
self: UnreadItemsDelegatee =>
def setupEntryArea(): Unit = {
layout.entryArea updateLayoutParams { _.width = widthWithMargin }
layout.entryToolbar onClickNavigation { _ =>
actions.container.onBack()
}
val manager = new LinearLayoutManager(layout.entryList.getContext)
layout.entryList setLayoutManager manager
layout.entryList setAdapter new OutlineRowAdapter(
AdapterDelegatee.create(
providers = unreadRowProviders.forOutlineArea,
sequence = accessors.entryOutline
),
new OutlineSelectedObserver(actions) append outlineMarker,
footerHeightOf(layout.entryList)
)
val forFocus = FocusDetector.forLinearLayoutManager(
recyclerView = layout.entryList,
focusedEventFactory = new OutlineFocusedEventFactory(accessors.entryOutline),
onFocused = new OutlineFocusedObserver(actions) append outlineMarker
)
layout.entryList addOnItemTouchListener PaneDragDetector.create(
context = layout.entryList.getContext,
from = container.outlineArea,
actions = actions,
onTouch = forFocus
)
val forSkip = SkipDetector.createListener(
context = layout.entryToNext.getContext,
positionFinder = SkipPositionFinder createBy manager,
skippedEventFactory = new EntrySkippedEventFactory(accessors.entryOutline),
skipDoneEventFactory = new EntrySkipStoppedFactory(accessors.entryOutline),
onSkippedListener = new OutlineSkippedObserver(actions),
onSkipDoneListener = new OutlineSkipStoppedObserver(actions) append outlineMarker
)
layout.entryToNext setOnTouchListener forSkip
layout.entryBottomBar setOnTouchListener forSkip
}
protected lazy val outlineMarker = new OutlineEntryMarker(helper, entryMarker)
}
class OutlineEntryMarker(helper: DatabaseHelper, marker: BrowsedEntriesMarker)
extends OnItemFocusedListener[OutlineFocusedEvent]
with OnOutlineSelectedListener
with OnSkipStoppedListener[EntrySkipStopped]{
override def onFocused(event: OutlineFocusedEvent): Unit = {
marker touchOutlinePosition event.position
}
override def onSkipStopped(event: EntrySkipStopped): Unit = {
marker touchOutlinePosition event.currentPosition
}
override def onEntrySelected(event: OutlineSelectedEvent): Unit = {
marker touchOutlinePosition event.position
}
}
|
x7c1/Linen
|
linen-modern/src/main/scala/x7c1/linen/modern/init/unread/OutlineAreaInitializer.scala
|
Scala
|
mit
| 3,225 |
/* sbt -- Simple Build Tool
* Copyright 2008, 2009 Mark Harrah, Vesa Vilhonen
*/
package sbt
import java.io.File
import java.net.{ URL, URLClassLoader }
import java.lang.reflect.{ Method, Modifier }
import Modifier.{ isPublic, isStatic }
import classpath.ClasspathUtilities
trait ScalaRun {
def run(mainClass: String, classpath: Seq[File], options: Seq[String], log: Logger): Option[String]
}
class ForkRun(config: ForkOptions) extends ScalaRun {
@deprecated("Use the `ForkRun(ForkOptions) constructor`", "0.13.0")
def this(options: ForkScalaRun) = this(ForkOptions(options.javaHome, options.outputStrategy, options.scalaJars.toSeq, options.workingDirectory, options.runJVMOptions, options.connectInput))
def run(mainClass: String, classpath: Seq[File], options: Seq[String], log: Logger): Option[String] =
{
log.info("Running " + mainClass + " " + options.mkString(" "))
val scalaOptions = classpathOption(classpath) ::: mainClass :: options.toList
val configLogged = if (config.outputStrategy.isDefined) config else config.copy(outputStrategy = Some(LoggedOutput(log)))
// fork with Java because Scala introduces an extra class loader (#702)
val process = Fork.java.fork(configLogged, scalaOptions)
def cancel() = {
log.warn("Run canceled.")
process.destroy()
1
}
val exitCode = try process.exitValue() catch { case e: InterruptedException => cancel() }
processExitCode(exitCode, "runner")
}
private def classpathOption(classpath: Seq[File]) = "-classpath" :: Path.makeString(classpath) :: Nil
private def processExitCode(exitCode: Int, label: String) =
{
if (exitCode == 0)
None
else
Some("Nonzero exit code returned from " + label + ": " + exitCode)
}
}
class Run(instance: ScalaInstance, trapExit: Boolean, nativeTmp: File) extends ScalaRun {
/** Runs the class 'mainClass' using the given classpath and options using the scala runner.*/
def run(mainClass: String, classpath: Seq[File], options: Seq[String], log: Logger) =
{
log.info("Running " + mainClass + " " + options.mkString(" "))
def execute() =
try { run0(mainClass, classpath, options, log) }
catch { case e: java.lang.reflect.InvocationTargetException => throw e.getCause }
def directExecute() = try { execute(); None } catch { case e: Exception => log.trace(e); Some(e.toString) }
if (trapExit) Run.executeTrapExit(execute(), log) else directExecute()
}
private def run0(mainClassName: String, classpath: Seq[File], options: Seq[String], log: Logger): Unit = {
log.debug(" Classpath:\\n\\t" + classpath.mkString("\\n\\t"))
val loader = ClasspathUtilities.makeLoader(classpath, instance, nativeTmp)
val main = getMainMethod(mainClassName, loader)
invokeMain(loader, main, options)
}
private def invokeMain(loader: ClassLoader, main: Method, options: Seq[String]): Unit = {
val currentThread = Thread.currentThread
val oldLoader = Thread.currentThread.getContextClassLoader
currentThread.setContextClassLoader(loader)
try { main.invoke(null, options.toArray[String]) }
finally { currentThread.setContextClassLoader(oldLoader) }
}
def getMainMethod(mainClassName: String, loader: ClassLoader) =
{
val mainClass = Class.forName(mainClassName, true, loader)
val method = mainClass.getMethod("main", classOf[Array[String]])
// The JVM allows the actual main class to be non-public; to invoke its main method
// reflectively we need to make it accessible
method.setAccessible(true)
val modifiers = method.getModifiers
if (!isPublic(modifiers)) throw new NoSuchMethodException(mainClassName + ".main is not public")
if (!isStatic(modifiers)) throw new NoSuchMethodException(mainClassName + ".main is not static")
method
}
}
/** This module is an interface to starting the scala interpreter or runner.*/
object Run {
def run(mainClass: String, classpath: Seq[File], options: Seq[String], log: Logger)(implicit runner: ScalaRun) =
runner.run(mainClass, classpath, options, log)
/** Executes the given function, trapping calls to System.exit. */
def executeTrapExit(f: => Unit, log: Logger): Option[String] =
{
val exitCode = TrapExit(f, log)
if (exitCode == 0) {
log.debug("Exited with code 0")
None
} else
Some("Nonzero exit code: " + exitCode)
}
}
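// Illustrative usage sketch (not part of the original file). It assumes sbt's
// ConsoleLogger and a classpath assembled elsewhere; ForkOptions() relies on the
// case class defaults. Shown only to indicate how a ScalaRun implementation is driven.
object RunUsageSketch {
  def main(args: Array[String]): Unit = {
    val log = ConsoleLogger()                                  // assumed logger implementation
    val classpath: Seq[File] = Seq(new File("target/classes")) // arbitrary example classpath
    implicit val runner: ScalaRun = new ForkRun(ForkOptions())
    // Run.run returns None on success, or Some(errorMessage) on failure.
    Run.run("com.example.Main", classpath, Seq("--verbose"), log).foreach(log.error(_))
  }
}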
|
jasonchaffee/sbt
|
run/src/main/scala/sbt/Run.scala
|
Scala
|
bsd-3-clause
| 4,472 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.history
import java.util.zip.ZipOutputStream
import scala.xml.Node
import org.apache.spark.SparkException
import org.apache.spark.ui.SparkUI
private[spark] case class ApplicationAttemptInfo(
attemptId: Option[String],
startTime: Long,
endTime: Long,
lastUpdated: Long,
sparkUser: String,
completed: Boolean = false)
private[spark] case class ApplicationHistoryInfo(
id: String,
name: String,
attempts: List[ApplicationAttemptInfo]) {
/**
* Has this application completed?
* @return true if the most recent attempt has completed
*/
def completed: Boolean = {
attempts.nonEmpty && attempts.head.completed
}
}
/**
* A probe which can be invoked to see if a loaded Web UI has been updated.
* The probe applies only to the UI returned in the same [[LoadedAppUI]]
* instance. That is, whenever a new UI is loaded, the probe returned with it
* is the one that must be used to check whether that UI is out of date;
* previous probes must be discarded.
*/
private[history] abstract class HistoryUpdateProbe {
/**
* Return true if the history provider has a later version of the application
* attempt than the one against which this probe was constructed.
* @return true if the application attempt has been updated
*/
def isUpdated(): Boolean
}
/**
* All the information returned from a call to `getAppUI()`: the new UI
* and any required update state.
* @param ui Spark UI
* @param updateProbe probe to call to check on the update state of this application attempt
*/
private[history] case class LoadedAppUI(
ui: SparkUI,
updateProbe: () => Boolean)
private[history] abstract class ApplicationHistoryProvider {
/**
* Returns the count of application event logs that the provider is currently still processing.
* History Server UI can use this to indicate to a user that the application listing on the UI
* can be expected to list additional known applications once the processing of these
* application event logs completes.
*
* A history provider that has no notion of a count of event logs pending
* processing need not override this method.
*
* @return Count of application event logs that are currently under process
*/
def getEventLogsUnderProcess(): Int = 0
/**
* Returns the time the history provider last updated the application history information
*
* @return 0 if this is undefined or unsupported, otherwise the last updated time in millis
*/
def getLastUpdatedTime(): Long = 0
/**
* Returns a list of applications available for the history server to show.
*
* @return List of all known applications.
*/
def getListing(): Iterator[ApplicationHistoryInfo]
/**
* Returns the Spark UI for a specific application.
*
* @param appId The application ID.
* @param attemptId The application attempt ID (or None if there is no attempt ID).
* @return a [[LoadedAppUI]] instance containing the application's UI and any state information
* for update probes, or `None` if the application/attempt is not found.
*/
def getAppUI(appId: String, attemptId: Option[String]): Option[LoadedAppUI]
/**
* Called when the server is shutting down.
*/
def stop(): Unit = { }
/**
* Returns configuration data to be shown in the History Server home page.
*
* @return A map with the configuration data. Data is shown in the order returned by the map.
*/
def getConfig(): Map[String, String] = Map()
/**
* Writes out the event logs to the output stream provided. The logs will be compressed into a
* single zip file and written out.
* @throws SparkException if the logs for the app id cannot be found.
*/
@throws(classOf[SparkException])
def writeEventLogs(appId: String, attemptId: Option[String], zipStream: ZipOutputStream): Unit
/**
* @return the [[ApplicationHistoryInfo]] for the appId if it exists.
*/
def getApplicationInfo(appId: String): Option[ApplicationHistoryInfo]
/**
* @return html text to display when the application list is empty
*/
def getEmptyListingHtml(): Seq[Node] = Seq.empty
}
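/**
 * A minimal illustrative provider (not part of Spark) backed by an in-memory listing.
 * It is a sketch showing which abstract members a concrete provider must implement;
 * the class name and behaviour are assumptions made purely for illustration.
 */
private[history] class InMemoryHistoryProvider(apps: Seq[ApplicationHistoryInfo])
  extends ApplicationHistoryProvider {
  override def getListing(): Iterator[ApplicationHistoryInfo] = apps.iterator
  override def getAppUI(appId: String, attemptId: Option[String]): Option[LoadedAppUI] = None
  override def writeEventLogs(appId: String, attemptId: Option[String], zipStream: ZipOutputStream): Unit =
    throw new SparkException(s"No event logs available for application $appId")
  override def getApplicationInfo(appId: String): Option[ApplicationHistoryInfo] =
    apps.find(_.id == appId)
}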
|
sh-cho/cshSpark
|
deploy/history/ApplicationHistoryProvider.scala
|
Scala
|
apache-2.0
| 5,009 |
sealed trait Top
sealed trait Sub extends Top
trait C {
private object P extends Sub
def bob() = P.getClass
def bob2() = O.d(P)
}
|
folone/dotty
|
tests/pos/t1107b/T.scala
|
Scala
|
bsd-3-clause
| 136 |
/*
* Copyright 2015 Martijn van de Rijdt
*
* This file is part of BOSI.
*
* BOSI is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
*
* BOSI is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with BOSI. If not, see <http://www.gnu.org/licenses/>.
*/
package nl.mvdr.breakout.state
/**
* The ball.
*
* @param location location of the ball
* @param speed speed of the ball, expressed as the (2D) distance the ball is supposed to move every frame (= 1/60th of a second)
*
* @author Martijn van de Rijdt
*/
case class Ball(override val location: Point, val speed: Point) extends GameObject(location, BallSize.Diameter, BallSize.Diameter) {
/** Default constructor. */
def this() = this(Point(PlayingField.width / 2 - BallSize.Diameter / 2, PlayingField.height - 20), new Point(2, -2))
override def character = 'o'
}
object BallSize {
val Diameter = 10
}
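// Small usage sketch (not part of the original file): the default ball starts centred
// horizontally near the bottom of the playing field; the direction of the default speed
// depends on the (assumed) screen-style coordinate convention of Point.
object BallSketch extends App {
  val ball = new Ball()
  println(s"location=${ball.location} speed=${ball.speed} glyph=${ball.character}")
}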
|
TinusTinus/bosi
|
src/main/scala/nl/mvdr/breakout/state/Ball.scala
|
Scala
|
gpl-3.0
| 1,295 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc.fsc
import java.io.{BufferedOutputStream, BufferedReader, Closeable, InputStreamReader, IOException, PrintWriter}
import java.net.{InetAddress, Socket => JSocket}
import scala.io.Codec
import scala.reflect.io.Streamable
/** A skeletal only-as-much-as-I-need Socket wrapper.
*/
object Socket {
class Box[+T](f: () => T) {
private def handlerFn[U](f: Throwable => U): PartialFunction[Throwable, U] = {
case x @ (_: IOException | _: SecurityException) => f(x)
}
private val optHandler = handlerFn[Option[T]](_ => None)
private val eitherHandler = handlerFn[Either[Throwable, T]](x => Left(x))
def either: Either[Throwable, T] = try Right(f()) catch eitherHandler
def opt: Option[T] = try Some(f()) catch optHandler
}
def localhost(port: Int) = apply(InetAddress.getLocalHost(), port)
def apply(host: InetAddress, port: Int) = new Box(() => new Socket(new JSocket(host, port)))
def apply(host: String, port: Int) = new Box(() => new Socket(new JSocket(host, port)))
}
class Socket(jsocket: JSocket) extends Streamable.Bytes with Closeable {
def inputStream() = jsocket.getInputStream()
def outputStream() = jsocket.getOutputStream()
def getPort() = jsocket.getPort()
def close() = jsocket.close()
def printWriter() = new PrintWriter(outputStream(), true)
def bufferedReader(implicit codec: Codec) = new BufferedReader(new InputStreamReader(inputStream(), codec.decoder))
def bufferedOutput(size: Int) = new BufferedOutputStream(outputStream(), size)
/** Creates a BufferedReader and a PrintWriter from this socket and applies the closure, automatically closing both on completion.
*/
def applyReaderAndWriter[T](f: (BufferedReader, PrintWriter) => T): T = {
val out = printWriter()
val in = bufferedReader
try f(in, out)
finally {
in.close()
out.close()
}
}
}
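/** Usage sketch (not part of the original file): connect to an arbitrary local port,
 *  exchange one line, and surface connection failures through the Box wrapper.
 */
object SocketSketch {
  def main(args: Array[String]): Unit =
    Socket.localhost(9999).either match {
      case Left(err) => Console.err.println(s"connection failed: $err")
      case Right(sock) =>
        try sock.applyReaderAndWriter { (in, out) =>
          out.println("ping") // the PrintWriter is constructed with autoflush enabled
          println(in.readLine())
        }
        finally sock.close()
    }
}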
|
lrytz/scala
|
src/compiler/scala/tools/nsc/fsc/Socket.scala
|
Scala
|
apache-2.0
| 2,226 |
package skinny.engine.async
import skinny.engine.{ Context, RouteTransformer }
import skinny.engine.base.{ SkinnyEngineContextInitializer, RouteRegistryAccessor }
import skinny.engine.routing.Route
/**
* Before/After DSL for Async web apps.
*/
trait AsyncBeforeAfterDsl { self: RouteRegistryAccessor with SkinnyEngineContextInitializer =>
/**
* Adds a filter to run before the route. The filter only runs if each
* routeMatcher returns Some. If the routeMatchers list is empty, the
* filter runs for all routes.
*/
def before(transformers: RouteTransformer*)(fun: (Context) => Any): Unit = {
routes.appendBeforeFilter(Route(transformers, () => fun(context)))
}
/**
* Adds a filter to run after the route. The filter only runs if each
* routeMatcher returns Some. If the routeMatchers list is empty, the
* filter runs for all routes.
*/
def after(transformers: RouteTransformer*)(fun: (Context) => Any): Unit = {
routes.appendAfterFilter(Route(transformers, () => fun(context)))
}
}
|
holycattle/skinny-framework
|
engine/src/main/scala/skinny/engine/async/AsyncBeforeAfterDsl.scala
|
Scala
|
mit
| 1,037 |
package ch.epfl.gsn.xpr.parser
import org.scalatest.Matchers
import ch.epfl.gsn.xpr.parser.XprParser
import org.scalatest.FunSpec
import ch.epfl.gsn.xpr._
import util._
class XprParserTest extends FunSpec with Matchers {
describe("parse expression"){
it("should get parsed function"){
val parsed=XprParser.parseXpr(" trala <= humid")
parsed should not be (null)
parsed match {
case Success(BinaryXpr(op,VarXpr(t1),VarXpr(t2)))=>
op shouldBe OpEnum.Leq
t1 shouldBe "trala"
t2 shouldBe "humid"
case _ => fail("not desired")
}
}
}
describe("parse condition list"){
it ("should parse list of conditions"){
val ser=XprConditions.serializeConditions(Array("val=1","val2<3"))
println(ser.get.mkString(","))
ser.get.mkString(",") shouldBe("val = 1.0,val2 < 3.0")
}
it ("should fail to get condition"){
val ser=XprConditions.serializeConditions(Array("val=val1","val2<3"))
ser.isFailure shouldBe true
}
}
}
|
LSIR/gsn
|
gsn-tools/src/test/scala/ch/epfl/gsn/xpr/parser/XprParserTest.scala
|
Scala
|
gpl-3.0
| 1,061 |
package amailp.intellij.robot.psi.reference
import amailp.intellij.robot.psi.utils.ExtRobotPsiUtils
import amailp.intellij.robot.psi.{ResourceValue, RobotPsiFile}
import com.intellij.openapi.module.ModuleUtilCore
import com.intellij.openapi.roots.ModuleRootManager
import com.intellij.openapi.vfs.VirtualFile
import com.intellij.psi.{PsiElement, PsiReferenceBase}
class ResourceValueReference(element: ResourceValue)
extends PsiReferenceBase[ResourceValue](element)
with ExtRobotPsiUtils {
def resourceFilePath: String = getElement.getText.replace("${/}", "/")
override def resolve() = resolveReferenceValue().orNull
override def getVariants: Array[AnyRef] = Array()
override def utilsPsiElement: PsiElement = getElement
def resolveReferenceValue(): Option[RobotPsiFile] =
resolveLocalFile orElse resolveAbsoluteFile orElse resolveFromSourceRoots
private def resolveLocalFile: Option[RobotPsiFile] =
maybeToRobotPsiFile(Option(currentDirectory.findFileByRelativePath(resourceFilePath)))
private def resolveAbsoluteFile: Option[RobotPsiFile] =
maybeToRobotPsiFile(Option(currentFile.getFileSystem.findFileByPath(resourceFilePath)))
private def resolveFromSourceRoots: Option[RobotPsiFile] = {
def sourceRoots =
ModuleRootManager.getInstance(ModuleUtilCore.findModuleForPsiElement(getElement)).getSourceRoots(true).toList
sourceRoots
.map(s => maybeToRobotPsiFile(Option(s.findFileByRelativePath(resourceFilePath))))
.foldLeft(None: Option[RobotPsiFile])((a, b) => a orElse b)
}
private def maybeToRobotPsiFile(file: Option[VirtualFile]): Option[RobotPsiFile] =
file.flatMap(
f =>
Option(psiManager.findFile(f))
.filter(_.isInstanceOf[RobotPsiFile])
.map(_.asInstanceOf[RobotPsiFile])
)
}
|
AmailP/robot-plugin
|
src/main/scala/amailp/intellij/robot/psi/reference/ResourceValueReference.scala
|
Scala
|
gpl-3.0
| 1,811 |
/* date: Jan 5, 2012
*/
package com.server
case class ImageNodeCmd(parameters:List[String]) extends Node with Link with Common {
override def toString="ImageNodeCmd"
def postNextSibling {
if(getNext !=None) {
idNextSibling=getNext.get.getId
}
}
// 'CommandStructure' iterates 'cmdVector' to invoke 'postIds' in
// all 'xxxCmd' instances.
def postIds {postNextSibling }
def showPost { println("ImageNode: id="+id+" next="+idNextSibling) }
def loadStruct( struct:scala.collection.mutable.ArrayBuffer[String]) {
loadParametersWithNode(struct, parameters)
}
}
|
hangle/Script
|
src/ImageNodeCmd.scala
|
Scala
|
apache-2.0
| 607 |
// 1EC Graph Parser
// Copyright (c) University of California
// Copyright (c) Jonathan Kummerfeld
//
// This software is covered by a license. See the LICENSE.txt file in the
// top-level directory of this distribution or at
// https://github.com/jkkummerfeld/1ec-graph-parser for the full text of the
// license.
package edu.berkeley.nlp.graphparser
import scala.collection.mutable.HashSet
// Selection of valid arc labels
class LabelGenerator(
model: Model
) {
// When using tags to constrain edges
val EMPTY = new UnboxedArrayBuffer(0)
var curPSGArgs : UnboxedArrayBuffer = null
var curPSGArgsPos : Int = 0
var nextLabel : Int = -1
var nextMustBeTrace = false
var mustBeTrace = false
// Work out the possible args for this spine pair
val traceOptions = HashSet("=", "*", "*?*", "*EXP*", "*ICH*", "*NOT*",
"*PPA*", "*RNR*", "*T*", "*U*", "0")
private def getPSGArgOptions(
childSpineID: Int, parentSpineID: Int, childTag: Int, parentTag: Int
) = {
val childSpine = model.spinesOriginal(childSpineID)
val parentSpine = model.spinesOriginal(parentSpineID)
val ans = new UnboxedArrayBuffer(parentSpine.nodes.length + 1)
var parentPos = 0
while (parentPos < parentSpine.nodes.length) {
// Try adding structural, going from child top
val option = model.addEdgePSG(childSpine.nodes.length - 1, childSpine,
childTag, parentPos, parentSpine, parentTag, "_", false)
if (option >= 0) ans.append(option)
// Try adding traces
var childPos = 0
while (childPos < childSpine.nodes.length) {
// Determine trace
// Source null, get the next symbol in the chain
// Target null, get the next symbol in the chain
// Neither null, but this has been seen with '=', allow '='
val trace =
if (childSpine.nodes(childPos).isNull)
childSpine.nodes(childPos + 1).symbol
else if (parentSpine.nodes(parentPos).isNull)
parentSpine.nodes(parentPos + 1).symbol
else "="
// The code above will select symbols that are not always traces, for
// example:
// The S in: ADJP_(NP_(*T*))_S
// The S in: (SBAR_(0)_(S_(*T*)))_VP_S
if (traceOptions.contains(trace)) {
val option = model.addEdgePSG(childPos, childSpine, childTag,
parentPos, parentSpine, parentTag, trace, false)
if (option >= 0) ans.append(-option)
if (trace == "*" || trace == "*T*") {
val traceOption = model.addEdgePSG(childPos, childSpine, childTag,
parentPos, parentSpine, parentTag, trace +"_chain", false)
if (traceOption >= 0) ans.append(-traceOption)
}
}
childPos += 1
}
parentPos += 1
}
ans
}
def prepare(
childTag: Int, parentTag: Int, childSpineID: Int, parentSpineID: Int,
span: (Int, Int), sentenceLength: Int, parentIsRoot: Boolean,
arcPassConstraint: ArcPassConstraint.Value
) = {
/// Log.logln(s"Arc Gen: $childTag $parentTag $childSpineID $parentSpineID $span $sentenceLength $parentIsRoot ${model.EmptySpineID}")
nextLabel = -1
mustBeTrace = false
if (model.formalism == Formalism.PSG) {
// Get the list of options
if (model.filterByTag) {
curPSGArgs = model.allowedArgList(arcPassConstraint, childTag,
parentTag)
} else {
val key = (childTag, parentTag, childSpineID, parentSpineID)
// These don't need to be synchronised as we made the argMap without
// allowing resizing.
val argPos = model.argMap.getOrElse(key, -1).toInt
curPSGArgs = EMPTY
if (argPos >= 0) {
curPSGArgs = model.argMapVals(argPos)
} else if (argPos == -1) {
curPSGArgs = getPSGArgOptions(childSpineID, parentSpineID, childTag,
parentTag)
val npos =
if (curPSGArgs.length == 0) -2
else {
model.argMapVals.synchronized {
model.argMapVals.append(curPSGArgs)
model.argMapVals.length - 1
}
}
model.argMap.synchronized {
model.argMap.put(key, npos.toLong)
}
}
}
// Prepare the first
curPSGArgsPos = -1
next
} else if (model.formalism == Formalism.DEP) {
if (span._2 == sentenceLength - 1) {
if (parentIsRoot) nextLabel = model.RootArg
} else nextLabel = model.NullArg + 1
} else {
// If direction is with parent to the right, and the spine is the null
// spine, set to NullArg
// If parent is a literal, set to argIndex.size
if (span._2 == sentenceLength - 1) {
if (parentIsRoot) nextLabel = model.RootArg
} else nextLabel = model.NullArg + 1
}
}
def hasNext = nextLabel >= 0
def next = {
// Answer
val ans = nextLabel
mustBeTrace = nextMustBeTrace
// Prepare for the next call
if (model.formalism == Formalism.PSG) {
curPSGArgsPos += 1
if (curPSGArgsPos >= curPSGArgs.length) nextLabel = -1
else {
val option = curPSGArgs(curPSGArgsPos)
nextLabel = option.abs
nextMustBeTrace = option < 0
}
} else if (model.formalism == Formalism.DEP) {
if (nextLabel == model.RootArg) nextLabel = -1
else if (nextLabel >= model.argIndex.size - 1) nextLabel = -1
else nextLabel += 1
} else {
// If arg == NullArg, set to -1
if (nextLabel == model.RootArg) nextLabel = -1
else nextLabel += 1
}
// Return the saved answer
ans
}
}
|
jkkummerfeld/1ec-graph-parser
|
parser/src/main/scala/labelGenerator.scala
|
Scala
|
isc
| 5,626 |
package lert.elasticsearch.matcher
import java.io.ByteArrayInputStream
import java.util
import java.util.{Collections, Date}
import scala.concurrent.duration.Duration
import lert.core.BaseSpec
import lert.core.processor.AlertMessage
import lert.elasticsearch.restclient.{Response, RestClient}
import org.apache.http.{Header, HttpEntity}
import org.mockito.Matchers
import org.mockito.Matchers._
import org.mockito.Mockito.when
import org.scalatest.mockito.MockitoSugar._
class CountMatcherSpec extends BaseSpec {
it should "create a valid query" in {
val matcher = new CountMatcher()(null)
val query = matcher.createQuery(Duration("1 s"), 2, Map(), Map(), 100000)
assert(query("aggs") ==
Map("range" -> Map("date_range" -> Map("field" -> "@timestamp",
"ranges" -> Seq(
Map("from" -> 99000, "to" -> 100000),
Map("from" -> 98000, "to" -> 99000)
)))))
}
it should "send a valid query to elastic" in {
val matcher = new CountMatcher()(objectMapper)
val restClientWrapper = mock[RestClient]
val response = Response(
"""
|{
| "took" : 5,
| "timed_out" : false,
| "_shards" : {
| "total" : 20,
| "successful" : 20,
| "failed" : 0
| },
| "hits" : {
| "total" : 39,
| "max_score" : 0.0,
| "hits" : [ ]
| },
| "aggregations" : {
| "range" : {
| "buckets" : [ {
| "key" : "2017-08-08T00:01:34.071Z-2017-08-08T00:04:34.071Z",
| "from" : 1.502150494071E12,
| "from_as_string" : "2017-08-08T00:01:34.071Z",
| "to" : 1.502150674071E12,
| "to_as_string" : "2017-08-08T00:04:34.071Z",
| "doc_count" : 0
| }, {
| "key" : "2017-08-08T00:04:34.071Z-2017-08-08T00:07:34.071Z",
| "from" : 1.502150674071E12,
| "from_as_string" : "2017-08-08T00:04:34.071Z",
| "to" : 1.502150854071E12,
| "to_as_string" : "2017-08-08T00:07:34.071Z",
| "doc_count" : 0
| } ]
| }
| }
|}
""".stripMargin.getBytes, 200)
when(restClientWrapper.performRequest(anyObject[String](),
anyObject[String](),
Matchers.eq(Map[String, String]()),
anyObject[HttpEntity](),
anyObject[Header]())).thenReturn(response)
val query = matcher.query("",
restClientWrapper,
Map("timeframe" -> "3 min", "filter" -> Collections.emptyMap(), "index" -> "i")
)
assert(query == Seq(
AlertMessage(
Map(
"count" -> 0,
"from" -> new Date(1502150494071l),
"to" -> new Date(1502150674071l)
)
),
AlertMessage(
Map("count" -> 0,
"from" -> new Date(1502150674071l),
"to" -> new Date(1502150854071l)
)
))
)
}
}
|
l3rt/l3rt
|
elasticsearch-input/src/test/scala/lert/elasticsearch/matcher/CountMatcherSpec.scala
|
Scala
|
apache-2.0
| 2,980 |
package ch.ethz.dalab.dissolve.classification
import org.apache.spark.rdd.RDD
import org.apache.spark.rdd.PairRDDFunctions
import ch.ethz.dalab.dissolve.regression.LabeledObject
import scala.collection.mutable.HashMap
import scala.reflect.ClassTag
import ch.ethz.dalab.dissolve.optimization.SolverOptions
import scala.collection.mutable.MutableList
import org.apache.spark.mllib.regression.LabeledPoint
object ClassificationUtils {
/**
* Generates class weights. If classWeights is false, a default weight of 1.0 is used for every class. Otherwise, if the user
* submitted a custom weight map, those weights are used; if not, the inverse class frequency is used as the weight.
*/
def generateClassWeights[X, Y: ClassTag](data: RDD[LabeledObject[X, Y]], classWeights: Boolean = true, customWeights: Option[HashMap[Y,Double]] = None): HashMap[Y, Double] = {
val map = HashMap[Y, Double]()
val labels: Array[Y] = data.map { x: LabeledObject[X, Y] => x.label }.distinct().collect()
if (classWeights) {
if (customWeights.getOrElse(null) == null) {
//inverse class frequency as weight
val classOccur: PairRDDFunctions[Y, Double] = data.map(x => (x.label, 1.0))
val labelOccur: PairRDDFunctions[Y, Double] = classOccur.reduceByKey((x, y) => x + y)
val labelWeight: PairRDDFunctions[Y, Double] = labelOccur.mapValues { x => 1 / x }
val weightSum: Double = labelWeight.values.sum()
val nClasses: Int = labels.length
val scaleValue: Double = nClasses / weightSum
var sum: Double = 0.0
for ((label, weight) <- labelWeight.collectAsMap()) {
val clWeight = scaleValue * weight
sum += clWeight
map.put(label, clWeight)
}
assert(sum == nClasses)
} else {
//use custom weights
assert(labels.length == customWeights.get.size)
for (label <- labels) {
map.put(label, customWeights.get(label))
}
}
} else {
// default weight of 1.0
for (label <- labels) {
map.put(label, 1.0)
}
}
map
}
def resample[X,Y:ClassTag](data: RDD[LabeledObject[X, Y]],nSamples:HashMap[Y,Int],nSlices:Int): RDD[LabeledObject[X, Y]] = {
val buckets: HashMap[Y, RDD[LabeledObject[X, Y]]] = HashMap()
val newData = MutableList[LabeledObject[X, Y]]()
val labels: Array[Y] = data.map { x => x.label }.distinct().collect()
labels.foreach { x => buckets.put(x, data.filter { point => point.label == x }) }
for (cls <- buckets.keySet) {
val sampledData = buckets.get(cls).get.takeSample(true, nSamples.get(cls).get)
for (x: LabeledObject[X, Y] <- sampledData) {
newData.+=(x)
}
}
data.context.parallelize(newData, nSlices)
}
def resample(data: RDD[LabeledPoint],nSamples:HashMap[Double,Int],nSlices:Int): RDD[LabeledPoint] = {
val buckets: HashMap[Double, RDD[LabeledPoint]] = HashMap()
val newData = MutableList[LabeledPoint]()
val labels: Array[Double] = data.map { x => x.label }.distinct().collect()
labels.foreach { x => buckets.put(x, data.filter { point => point.label == x }) }
for (cls <- buckets.keySet) {
val sampledData = buckets.get(cls).get.takeSample(true, nSamples.get(cls).get)
for (x: LabeledPoint <- sampledData) {
newData.+=(x)
}
}
data.context.parallelize(newData, nSlices)
}
}
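/**
 * Usage sketch (not part of the original library): rebalance a tiny LabeledPoint RDD so
 * that each class contributes the same number of (re)samples. The local Spark master and
 * the sample counts are assumptions made purely for illustration.
 */
object ClassificationUtilsSketch {
  def main(args: Array[String]): Unit = {
    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.mllib.linalg.Vectors
    val sc = new SparkContext(new SparkConf().setMaster("local[2]").setAppName("resample-sketch"))
    val data = sc.parallelize(Seq(
      LabeledPoint(0.0, Vectors.dense(1.0)),
      LabeledPoint(0.0, Vectors.dense(2.0)),
      LabeledPoint(1.0, Vectors.dense(3.0))))
    // Draw two samples (with replacement) per class, then spread them over two partitions.
    val balanced = ClassificationUtils.resample(data, HashMap(0.0 -> 2, 1.0 -> 2), nSlices = 2)
    balanced.collect().foreach(println)
    sc.stop()
  }
}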
|
dalab/dissolve-struct
|
dissolve-struct-lib/src/main/scala/ch/ethz/dalab/dissolve/classification/ClassificationUtils.scala
|
Scala
|
apache-2.0
| 3,617 |
package silky.persistence.file
import java.nio.file.Paths
import silky.persistence.{Entry, Persistence}
import scala.concurrent.{ExecutionContext, Future}
import scala.reflect.io.File
class FilePersistence(baseDir: String, fileExtension: String = "json")(implicit ctx: ExecutionContext) extends Persistence {
import reflect.io.Directory
def lastRefAcross(prefix: Char, contexts: String*) = Future {
val files = contexts flatMap filesIn filter (f ⇒ f.name.head == prefix && f.extension == fileExtension) sortBy (_.name)
if (files.isEmpty) "00000000" else files.last.stripExtension.tail
}
def save(entry: Entry) = Future {
createIfRequired(directoryFor(entry.context))
Filepath.save(entry.contents, pathFor(entry.context, entry.ref))
entry
}
def find(context: String, ref: String) = Future {
filesIn(context)
.find(_.name == s"$ref.$fileExtension")
.map(f ⇒ Entry(context, ref, f.slurp()))
}
def load(context: String, predicate: String ⇒ Boolean) = Future {
filesIn(context)
.filter(f ⇒ predicate(f.name.replace(s".$fileExtension", "")))
.map(f ⇒ Entry(context, f.name.replace(s".$fileExtension", ""), f.slurp()))
.toSeq
}
def move(ref: String, source: String, target: String) = Future {
val (sourcePath, targetPath) = (pathFor(source, ref), pathFor(target, ref))
require(sourcePath.toFile.exists(), s"Entry '$ref' not found in '$source': $sourcePath does not exist")
createIfRequired(directoryFor(target))
Filepath.move(sourcePath, targetPath)
Entry(target, ref, new File(targetPath.toFile).slurp())
}
private def filesIn(context: String) = directoryFor(context).files
private def directoryFor(context: String) = Directory(s"$baseDir/$context")
private def createIfRequired(directory: Directory) = if (!directory.exists) directory.createDirectory()
private def pathFor(context: String, ref: String) = Paths.get(s"$baseDir/$context/$ref.$fileExtension")
}
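/**
 * Usage sketch (not part of the original file), assuming `Entry(context, ref, contents)`
 * as suggested by its use above; the directory and reference values are arbitrary.
 */
object FilePersistenceSketch extends App {
  import scala.concurrent.Await
  import scala.concurrent.ExecutionContext.Implicits.global
  import scala.concurrent.duration._

  val persistence = new FilePersistence(baseDir = "target/persistence")
  val saved = persistence.save(Entry("drafts", "T0000001", """{"name":"example"}"""))
  val found = Await.result(saved.flatMap(e => persistence.find(e.context, e.ref)), 5.seconds)
  println(found.map(_.contents))
}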
|
PILTT/silky-persistence
|
src/main/scala/silky/persistence/file/FilePersistence.scala
|
Scala
|
apache-2.0
| 1,982 |
/*
* Copyright 2007-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package json
package ext
import org.specs2.mutable.Specification
import common._
import json.Serialization.{read, write => swrite}
/**
* System under specification for JsonBoxSerializer.
*/
object JsonBoxSerializerSpec extends Specification {
"JsonBoxSerializer Specification".title
implicit val formats = net.liftweb.json.DefaultFormats + new JsonBoxSerializer
"Extract empty age" in {
parse("""{"name":"joe"}""").extract[Person] mustEqual Person("joe", Empty, Empty)
}
"Extract boxed thing" in {
parse("""{"name":"joe", "thing": "rog", "age":12}""").extract[Person] mustEqual Person("joe", Full(12), Empty, Full("rog"))
}
"Extract boxed mother" in {
val json = """{"name":"joe", "age":12, "mother": {"name":"ann", "age":53}}"""
val p = parse(json).extract[Person]
p mustEqual Person("joe", Full(12), Full(Person("ann", Full(53), Empty)))
(for { a1 <- p.age; m <-p.mother; a2 <- m.age } yield a1+a2) mustEqual Full(65)
}
"Render with age" in {
swrite(Person("joe", Full(12), Empty)) mustEqual """{"name":"joe","age":12,"mother":null,"thing":null}"""
}
"Serialize failure" in {
val exn1 = SomeException("e1")
val exn2 = SomeException("e2")
val p = Person("joe", Full(12), Failure("f", Full(exn1), Failure("f2", Full(exn2), Empty)))
val ser = swrite(p)
read[Person](ser) mustEqual p
}
"Serialize param failure" in {
val exn = SomeException("e1")
val p = Person("joe", Full(12), ParamFailure("f", Full(exn), Empty, "param value"))
val ser = swrite(p)
read[Person](ser) mustEqual p
}
}
case class SomeException(msg: String) extends Exception
case class Person(name: String, age: Box[Int], mother: Box[Person], thing: Box[String] = Empty)
|
sortable/framework
|
core/json-ext/src/test/scala/net/liftweb/json/ext/JsonBoxSerializerSpec.scala
|
Scala
|
apache-2.0
| 2,381 |
package drawinghouses
import scala.collection.immutable._
/**
* A simple representation of a graph with the specified edges.
* @param edges The edges of this graph.
* @tparam A Type of the vertices.
*/
case class SimpleGraph[A](edges: Set[(A, A)]) {
/**
* The vertices of this simple graph.
*/
val vertices = edges.flatMap(e => Set(e._1, e._2))
/**
* The edges starting/ending in the specified vertex.
* @param vertex the vertex
* @return The edges starting/ending in this vertex.
*/
def edgesInVertex(vertex: A) =
edges.filter{ case (a, b) => vertex == a || vertex == b }
}
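/**
 * Usage sketch (not part of the original file): a triangle graph and the edges
 * touching one of its vertices.
 */
object SimpleGraphSketch extends App {
  val triangle = SimpleGraph(Set(("a", "b"), ("b", "c"), ("c", "a")))
  println(triangle.vertices)            // Set(a, b, c)
  println(triangle.edgesInVertex("a"))  // Set((a,b), (c,a))
}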
|
peterneyens/drawing-houses
|
src/main/scala/drawinghouses/SimpleGraph.scala
|
Scala
|
mit
| 611 |
//======================================================================================================================
// Facsimile: A Discrete-Event Simulation Library
// Copyright © 2004-2020, Michael J Allen.
//
// This file is part of Facsimile.
//
// Facsimile is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later
// version.
//
// Facsimile is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
// details.
//
// You should have received a copy of the GNU Lesser General Public License along with Facsimile. If not, see:
//
// http://www.gnu.org/licenses/lgpl.
//
// The developers welcome all comments, suggestions and offers of assistance. For further information, please visit the
// project home page at:
//
// http://facsim.org/
//
// Thank you for your interest in the Facsimile project!
//
// IMPORTANT NOTE: All patches (modifications to existing files and/or the addition of new files) submitted for
// inclusion as part of the official Facsimile code base, must comply with the published Facsimile Coding Standards. If
// your code fails to comply with the standard, then your patches will be rejected. For further information, please
// visit the coding standards at:
//
// http://facsim.org/Documentation/CodingStandards/
//======================================================================================================================
//======================================================================================================================
// Scala source file belonging to the org.facsim.util.log package.
//======================================================================================================================
package org.facsim.util.log
import akka.stream.{Materializer, QueueOfferResult}
import akka.stream.scaladsl.Source
import akka.{Done, NotUsed}
import org.facsim.util.stream.DataSource
import org.facsim.util.NonPure
import scala.concurrent.Future
import scala.reflect.runtime.universe.TypeTag
/** Create and manage a queued ''Akka'' source for issuing log messages.
*
* @note Because the created log stream is buffered, and because it utilizes back pressure to slow down the publisher
*  (the process that is creating the log messages), applications may appear to hang once the buffer has filled,
*  unless the stream is connected to a sink and run.
*
* @tparam A Type of message prefix to be used with messages sent to this stream.
*
* @constructor Create a new messaging source.
*
* @param bufferSize Number of unprocessed log messages that can be stored in the buffer before back pressure is
* exerted. This value must be greater than zero and less than or equal to
* `[[org.facsim.util.stream.DataSource.MaxBufferSize MaxBufferSize]]`, or an `[[scala.IllegalArgumentException
* IllegalArgumentException]]` will be thrown.
*
* @param materializer Stream materializer to be utilized when creating the stream.
*
* @throws IllegalArgumentException if `bufferSize` is less than 1 or greater than `[[DataSource.MaxBufferSize
* MaxBufferSize]]`.
*
* @since 0.2
*/
final class LogStream[A: TypeTag](bufferSize: Int = LogStream.defaultBufferSize)(implicit materializer: Materializer) {
/** Data source to be used for logging. */
private val ds = new DataSource[LogMessage[A]](bufferSize)
/** Send a message instance to the stream.
*
* @note This operation will fail if the stream has been closed previously.
*
* @param message Message to be sent to the stream.
*
* @return Future containing the result of the message logging operation. If successful, the result can be
* `[[akka.stream.QueueOfferResult.Enqueued Enqueued]]` if data was sent successfully,
* `[[akka.stream.QueueOfferResult.Dropped Dropped]]` if the data was dropped due to a buffer failure, or
* `[[akka.stream.QueueOfferResult.QueueClosed QueueClosed]]` if the queue was closed before the data could be
* processed. If the queue was closed before the data was sent, the result is a `[[scala.util.Failure Failure]]`
* wrapping an `[[akka.stream.StreamDetachedException StreamDetachedException]]`. If a failure closed the queue, it
* will respond with a `Failure` wrapping the exception that signaled failure of the queue.
*
* @since 0.2
*/
@NonPure
def log(message: LogMessage[A]): Future[QueueOfferResult] = ds.send(message)
/** Report the stream to which flows and sinks can be attached.
*
* @return Source of log messages. This can be connected to a sink, and run, in order to consume messages.
*
* @since 0.2
*/
def source: Source[LogMessage[A], NotUsed] = ds.source
/** Complete all logging, and flush the stream.
*
* @return Future that executes when the stream has been closed.
*
* @since 0.2
*/
@NonPure
def close(): Future[Done] = ds.complete()
}
/** Message stream companion object.
*
* @since 0.2
*/
object LogStream {
/** Default log message buffer size.
*
* @since 0.2
*/
val defaultBufferSize: Int = 100
}
|
MichaelJAllen/facsimile
|
facsimile-util/src/main/scala/org/facsim/util/log/LogStream.scala
|
Scala
|
lgpl-3.0
| 5,376 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.twitter.zipkin.common
import org.specs.Specification
import com.twitter.conversions.time._
class AnnotationSpec extends Specification {
"Annotation" should {
"get min of two annotations" in {
val ann1 = Annotation(1, "one", None)
val ann2 = Annotation(2, "two", None)
val annList = List(ann1, ann2)
annList.min mustEqual ann1
}
"compare correctly" in {
val ann1 = Annotation(1, "a", None, None)
val ann2 = Annotation(2, "a", None)
val ann3 = Annotation(1, "b", None)
val ann4 = Annotation(1, "a", Some(Endpoint(1, 2, "service")))
val ann5 = Annotation(1, "a", None, Some(1.second))
ann1.compare(ann1) mustEqual 0
ann1.compare(ann2) must beLessThan(0)
ann1.compare(ann3) must beLessThan(0)
ann1.compare(ann4) must beLessThan(0)
ann1.compare(ann5) must beLessThan(0)
}
}
}
|
pteichman/zipkin
|
zipkin-common/src/test/scala/com/twitter/zipkin/common/AnnotationSpec.scala
|
Scala
|
apache-2.0
| 1,498 |
package de.hska.wifl1011.seminararbeit
object Helper {
}
|
fwilhe/hska-seminararbeit
|
code/Helper.scala
|
Scala
|
mit
| 59 |
object ImplicitsTest extends App {
import Implicits._
assert(1.some == Some(1), "1.some == Some(1)")
assert("abc".some == Some("abc"), "'abc'.some == Some(abc)")
import language.postfixOps
assert((4!) == 24, "4! == 24")
4 times { println("Hello!") }
}
|
grzegorzbalcerek/scala-exercises
|
Implicits/ImplicitsTest.scala
|
Scala
|
bsd-2-clause
| 266 |
package com.rock.scalad
import org.scalatra.test.specs2._
// For more on Specs2, see http://etorreborre.github.com/specs2/guide/org.specs2.guide.QuickStart.html
class RockScaladServletSpec extends ScalatraSpec { def is =
"GET / on RockScaladServlet" ^
"should return status 200" ! root200^
end
addServlet(classOf[RockScaladServlet], "/*")
def root200 = get("/") {
status must_== 200
}
}
|
Skiggz/rock-scalad
|
src/test/scala/com/rock/scalad/RockScaladServletSpec.scala
|
Scala
|
mit
| 492 |
/**
* Created on February 19, 2011
* Copyright (c) 2011, Wei-ju Wu
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Wei-ju Wu nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY WEI-JU WU ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL WEI-JU WU BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.dmpp.adf.gui
import javax.swing.tree._
import javax.swing.event._
import org.dmpp.adf.app._
class DirectoryTreeModel extends TreeModel {
var _volume: UserVolume = null
var listeners : List[TreeModelListener] = Nil
def volume = _volume
def volume_=(aVolume: UserVolume) {
_volume = aVolume
fireTreeStructureChanged
}
def fireTreeStructureChanged {
val path : Array[Object] = Array(_volume)
val event = new TreeModelEvent(this, path)
fireTreeStructureChanged(event)
}
def fireTreeStructureChanged(event: TreeModelEvent) {
listeners.foreach(_.treeStructureChanged(event))
}
def addTreeModelListener(listener: TreeModelListener) {
listeners ::= listener
}
private def toDirectory(node: Object): Directory = {
if (node == null) null
else {
node match {
case vol:UserVolume => vol.rootDirectory
case dir:UserDirectory => dir
case _ => null
}
}
}
def getChild(parent: Object, index: Int): Object = {
val dir = toDirectory(parent)
if (dir != null) dir.listDirectories(index)
else null
}
def getChildCount(parent: Object) = {
val dir = toDirectory(parent)
if (dir != null) dir.listDirectories.length
else 0
}
def getIndexOfChild(parent: Object, child: Object) = {
val dir = toDirectory(parent)
if (dir != null) {
val dirEntries = dir.listDirectories
dirEntries.indexOf(child)
}
else -1
}
def getRoot: Object = {
if (_volume == null) "(No volume loaded)"
else _volume
}
def isLeaf(node: Object) = false
def removeTreeModelListener(listener: TreeModelListener) {
listeners = listeners.filter(l => l != listener)
}
def valueForPathChanged(path: TreePath, newValue: Object) {
println("valueForPathChanged(), path: " + path + " value: " + newValue)
}
}
|
weiju/adf-tools
|
app/src/main/scala/org/dmpp/adf/gui/DirectoryTreeModel.scala
|
Scala
|
bsd-3-clause
| 3,402 |
package models
case class GetPlaylist()
case class AddToPlaylist(ytHash: String)
case class Playlist(playlist: List[PlaylistPosition])
case class PlaylistPosition(ytHash: String, title: String, duration: String)
case class ClearPlaylist()
case class PlayNext()
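// Usage sketch (not part of the original file): the hash, title and duration values
// below are placeholders chosen purely for illustration.
object PlaylistSketch extends App {
  val position = PlaylistPosition(ytHash = "abcd1234", title = "Example title", duration = "3:32")
  println(AddToPlaylist(position.ytHash))
  println(Playlist(List(position)))
}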
|
oen9/bard-api
|
app/models/PlaylistOpeations.scala
|
Scala
|
apache-2.0
| 262 |
package net.slozzer.babel
import munit.FunSuite
final class QuantitiesTest extends FunSuite {
test("apply prefers matching Quantity.Exact") {
val text = Quantities.of("foo", Quantities.Element(Quantity.One, "bar"))
assertEquals(obtained = text(quantity = 1), expected = "bar")
}
test("apply uses fallback when no Quantity matches") {
val text = Quantities.of("foo", Quantities.Element(Quantity.One, "bar"))
assertEquals(obtained = text(quantity = 0), expected = "foo")
}
test("apply handles ranges") {
val text = Quantities.of("foo", Quantities.Element(Quantity.unsafeRange(10, 13), "bar"))
assertEquals(obtained = text(quantity = 9), expected = "foo")
assertEquals(obtained = text(quantity = 10), expected = "bar")
assertEquals(obtained = text(quantity = 11), expected = "bar")
assertEquals(obtained = text(quantity = 13), expected = "bar")
assertEquals(obtained = text(quantity = 14), expected = "foo")
}
}
|
Taig/lokal
|
modules/tests/shared/src/test/scala/net/slozzer/babel/QuantitiesTest.scala
|
Scala
|
mit
| 965 |
package org.requests
/**
* Data is an algebraic data type that wraps more basic types for message bodies.
*/
sealed trait Data
case object EmptyData extends Data
case class ByteArrayData(value: Array[Byte]) extends Data
case class StringData(value: String) extends Data
case class FormData(values: Map[String, String]) extends Data
case class MultipartData(files: List[BodyPart]) extends Data
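// Usage sketch (not part of the original file): picking a Data variant for a request body.
object DataSketch extends App {
  val empty: Data = EmptyData
  val form: Data = FormData(Map("q" -> "scala"))
  val json: Data = StringData("""{"hello":"world"}""")
  Seq(empty, form, json).foreach(println)
}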
|
longcao/requests
|
src/main/scala/org/requests/Data.scala
|
Scala
|
mit
| 417 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.graphite.sender
import java.net.InetSocketAddress
import scala.concurrent.duration._
import io.gatling.AkkaSpec
import io.gatling.commons.util.DefaultClock
import io.gatling.graphite.message.GraphiteMetrics
import akka.io.Tcp._
import akka.testkit._
import akka.util.ByteString
@SuppressWarnings(Array("org.wartremover.warts.ThreadSleep"))
class TcpSenderSpec extends AkkaSpec {
private val dummySocketAddress = new InetSocketAddress(9999)
private class TcpSenderNoIo extends TcpSender(dummySocketAddress, 2, 1.second, new DefaultClock) {
override def askForConnection(): Unit = ()
}
"TcpSender" should "fail if server is unreachable" in {
val tcpSender = TestFSMRef(new TcpSenderNoIo)
// Fail 2 times in a row, retry limit is exhausted
tcpSender ! CommandFailed(Connect(dummySocketAddress))
tcpSender ! CommandFailed(Connect(dummySocketAddress))
tcpSender.stateName shouldBe RetriesExhausted
tcpSender.stateData shouldBe NoData
}
it should "go to the Running state and send metrics if it could connect without issues" in {
val tcpSender = TestFSMRef(new TcpSenderNoIo)
tcpSender ! Connected(dummySocketAddress, dummySocketAddress)
expectMsg(Register(tcpSender))
tcpSender.stateName shouldBe Running
val metrics = GraphiteMetrics(Iterator.single("foo" -> 1), 1)
tcpSender ! metrics
expectMsg(Write(metrics.byteString))
}
it should "retry to connected until the retry limit has been exceeded to finally stop" in {
val tcpSender = TestFSMRef(new TcpSenderNoIo)
// Connect
tcpSender ! Connected(dummySocketAddress, dummySocketAddress)
expectMsg(Register(tcpSender))
tcpSender.stateName shouldBe Running
// Fail one time, retries limit is not exhausted
tcpSender ! PeerClosed
tcpSender ! Connected(dummySocketAddress, dummySocketAddress)
tcpSender.stateName shouldBe Running
// Make sure one second has passed to reset the retry window
Thread.sleep(1.second.toMillis)
// Fail 2 times in a row, retry limit is exhausted
tcpSender ! CommandFailed(Write(ByteString.empty))
tcpSender ! CommandFailed(Write(ByteString.empty))
tcpSender.stateName shouldBe RetriesExhausted
tcpSender.stateData shouldBe NoData
}
}
|
gatling/gatling
|
gatling-graphite/src/test/scala/io/gatling/graphite/sender/TcpSenderSpec.scala
|
Scala
|
apache-2.0
| 2,904 |
/**
* Copyright (C) 2010-2012 LShift Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.lshift.diffa.kernel.differencing
import scala.collection.JavaConversions._
import org.easymock.EasyMock._
import net.lshift.diffa.kernel.util.EasyMockScalaUtils._
import org.apache.commons.codec.digest.DigestUtils
import net.lshift.diffa.kernel.participants._
import net.lshift.diffa.kernel.participants.IntegerCategoryFunction._
import org.junit.runner.RunWith
import net.lshift.diffa.kernel.util.FullDateTimes._
import net.lshift.diffa.kernel.util.SimpleDates._
import net.lshift.diffa.kernel.util.ConvenienceDateTimes._
import org.junit.experimental.theories.{Theory, Theories, DataPoint}
import org.easymock.{IAnswer, EasyMock}
import net.lshift.diffa.kernel.events.VersionID
import net.lshift.diffa.kernel.config._
import net.lshift.diffa.adapter.scanning._
import net.lshift.diffa.kernel.diag.DiagnosticsManager
import org.junit.Assume._
import org.junit.Assert._
import java.util.HashMap
import net.lshift.diffa.kernel.config.system.SystemConfigStore
import net.lshift.diffa.kernel.util.{DownstreamEndpoint, UpstreamEndpoint, NonCancellingFeedbackHandle}
import org.joda.time.{DateTime, LocalDate}
import net.lshift.diffa.kernel.frontend.DomainPairDef
/**
* Framework and scenario definitions for data-driven policy tests.
*/
@RunWith(classOf[Theories])
abstract class AbstractDataDrivenPolicyTest {
import AbstractDataDrivenPolicyTest._
// The policy instance under test
protected def policy:VersionPolicy
// The various mocks for listeners and participants
val usMock = createStrictMock("us", classOf[UpstreamParticipant])
val dsMock = createStrictMock("ds", classOf[DownstreamParticipant])
EasyMock.checkOrder(usMock, false) // Not all adapter operations are going to be strictly ordered
EasyMock.checkOrder(dsMock, false) // Not all adapter operations are going to be strictly ordered
val nullListener = new NullDifferencingListener
val diagnostics = createStrictMock("diagnostics", classOf[DiagnosticsManager])
val writer = createMock("writer", classOf[LimitedVersionCorrelationWriter])
val extendedWriter = createMock("extendedWriter", classOf[ExtendedVersionCorrelationWriter])
val store = createMock("versionStore", classOf[VersionCorrelationStore])
val stores = new VersionCorrelationStoreFactory {
def apply(pair: PairRef) = store
def remove(pair: PairRef) {}
def close(pair: PairRef) {}
def close {}
}
val feedbackHandle = new NonCancellingFeedbackHandle
val listener = createStrictMock("listener", classOf[DifferencingListener])
EasyMock.checkOrder(listener, false) // Not all adapter operations are going to be strictly ordered
val diffWriter = createStrictMock("diffWriter", classOf[DifferenceWriter])
EasyMock.checkOrder(diffWriter, false) // Not all match write operations are going to be strictly ordered
val systemConfigStore = createStrictMock("systemConfigStore", classOf[SystemConfigStore])
val domainConfigStore = createStrictMock("domainConfigStore", classOf[DomainConfigStore])
protected def replayAll = replay(systemConfigStore, usMock, dsMock, store, writer, listener)
protected def verifyAll = verify(systemConfigStore, usMock, dsMock, store, writer, listener)
/**
* Scenario with the top levels matching. The policy should not progress any further than the top level.
*/
@Theory
def shouldStopAtTopLevelWhenTopLevelBucketsMatch(scenario:Scenario) {
setupStubs(scenario)
assumeTrue(scenario.tx.forall(_.isInstanceOf[AggregateTx])) // Only relevant in scenarios where aggregation occurs
scenario.tx.foreach { case tx:AggregateTx =>
expectUpstreamAggregateScan(scenario.pair, tx.bucketing, tx.constraints, tx.respBuckets, tx.respBuckets)
expectDownstreamAggregateScan(scenario.pair, tx.bucketing, tx.constraints, tx.respBuckets, tx.respBuckets)
}
replayAll
policy.scanUpstream(0L, scenario.pair, scenario.upstreamEp, None, writer, usMock, nullListener, feedbackHandle)
policy.scanDownstream(0L, scenario.pair, scenario.downstreamEp, None, writer, usMock, dsMock, listener, feedbackHandle)
verifyAll
}
/**
* Scenario with the store not having any content for either half. Policy should run top-level, then jump directly
* to the individual level.
*/
@Theory
def shouldJumpToLowestLevelsStraightAfterTopWhenStoreIsEmpty(scenario:Scenario) {
setupStubs(scenario)
assumeTrue(scenario.tx.forall(_.isInstanceOf[AggregateTx])) // Only relevant in scenarios where aggregation occurs
val scanId = System.currentTimeMillis()
scenario.tx.foreach { case tx:AggregateTx =>
expectUpstreamAggregateScan(scenario.pair, tx.bucketing, tx.constraints, tx.respBuckets, Seq())
tx.respBuckets.foreach(b => {
expectUpstreamEntityScan(scenario.pair, b.nextTx.constraints, b.allVsns, Seq())
expectUpstreamEntityStore(scenario.pair, b.allVsns, false, Some(scanId))
})
expectDownstreamAggregateScan(scenario.pair, tx.bucketing, tx.constraints, tx.respBuckets, Seq())
tx.respBuckets.foreach(b => {
expectDownstreamEntityScan(scenario.pair, b.nextTx.constraints, b.allVsns, Seq())
expectDownstreamEntityStore(scenario.pair, b.allVsns, false, Some(scanId))
})
}
replayAll
policy.scanUpstream(scanId, scenario.pair, scenario.upstreamEp, None, writer, usMock, nullListener, feedbackHandle)
policy.scanDownstream(scanId, scenario.pair, scenario.downstreamEp, None, writer, usMock, dsMock, listener, feedbackHandle)
verifyAll
}
/**
* Scenario with the store being out-of-date for a upstream leaf-node.
*/
@Theory
def shouldCorrectOutOfDateUpstreamEntity(scenario:Scenario) {
setupStubs(scenario)
val scanId = System.currentTimeMillis()
scenario.tx.foreach { tx =>
// Alter the version of the first entity in the upstream tree, then expect traversal to it
val updated = tx.alterFirstVsn("newVsn1")
traverseFirstBranch(updated, tx) {
case (tx1:AggregateTx, tx2:AggregateTx) =>
expectUpstreamAggregateScan(scenario.pair, tx1.bucketing, tx1.constraints, tx1.respBuckets, tx2.respBuckets)
case (tx1:EntityTx, tx2:EntityTx) =>
expectUpstreamEntityScan(scenario.pair, tx1.constraints, tx1.entities, tx2.entities)
}
expectUpstreamEntityStore(scenario.pair, Seq(updated.firstVsn), true, Some(scanId))
// Expect to see an event about the version being matched (since we told the datastore to report it as matched)
listener.onMatch(VersionID(scenario.pair, updated.firstVsn.id), updated.firstVsn.vsn, TriggeredByScan)
tx match {
case atx:AggregateTx =>
// Expect only a top-level scan on the downstream
expectDownstreamAggregateScan(scenario.pair, atx.bucketing, atx.constraints, atx.respBuckets, atx.respBuckets)
case etx:EntityTx =>
// Expect entity-query, since we can't aggregate anyway
expectDownstreamEntityScan(scenario.pair, etx.constraints, etx.entities, etx.entities)
}
}
replayAll
policy.scanUpstream(scanId, scenario.pair, scenario.upstreamEp, None, writer, usMock, nullListener, feedbackHandle)
policy.scanDownstream(scanId, scenario.pair, scenario.downstreamEp, None, writer, usMock, dsMock, listener, feedbackHandle)
verifyAll
}
/**
* Scenario with the store being out-of-date for a downstream leaf-node.
*/
@Theory
def shouldCorrectOutOfDateDownstreamEntity(scenario:Scenario) {
setupStubs(scenario)
val scanId = System.currentTimeMillis()
scenario.tx.foreach { tx =>
tx match {
case atx:AggregateTx =>
// Expect only a top-level scan on the upstream
expectUpstreamAggregateScan(scenario.pair, atx.bucketing, atx.constraints, atx.respBuckets, atx.respBuckets)
case etx:EntityTx =>
// Expect entity-query, since we can't aggregate anyway
expectUpstreamEntityScan(scenario.pair, etx.constraints, etx.entities, etx.entities)
}
// Alter the version of the first entity in the downstream tree, then expect traversal to it
val updated = tx.alterFirstVsn("newVsn1")
traverseFirstBranch(updated, tx) {
case (tx1:AggregateTx, tx2:AggregateTx) =>
expectDownstreamAggregateScan(scenario.pair, tx1.bucketing, tx1.constraints, tx1.respBuckets, tx2.respBuckets)
case (tx1:EntityTx, tx2:EntityTx) =>
expectDownstreamEntityScan(scenario.pair, tx1.constraints, tx1.entities, tx2.entities)
}
expectDownstreamEntityStore(scenario.pair, Seq(updated.firstVsn), true, Some(scanId))
// Expect to see an event about the version being matched (since we told the datastore to report it as matched)
listener.onMatch(VersionID(scenario.pair, updated.firstVsn.id), updated.firstVsn.vsn, TriggeredByScan)
}
replayAll
policy.scanUpstream(scanId, scenario.pair, scenario.upstreamEp, None, writer, usMock, nullListener, feedbackHandle)
policy.scanDownstream(scanId, scenario.pair, scenario.downstreamEp, None, writer, usMock, dsMock, listener, feedbackHandle)
verifyAll
}
/**
* When a request is made to detail how an inventory should be started, the top-level constraints and aggregations
* should be returned.
*/
@Theory
def shouldRequestTopLevelConstraintsAndAggregationsWhenStartingInventory(scenario:Scenario) {
setupStubs(scenario)
val expectedRequests = scenario.tx.map {
case tx:AggregateTx => new ScanRequest(tx.constraints.toSet[ScanConstraint], tx.bucketing.toSet[ScanAggregation])
case tx:EntityTx => new ScanRequest(tx.constraints.toSet[ScanConstraint], Set[ScanAggregation]())
}
val actualUpstreamRequests = policy.startInventory(scenario.pair, scenario.upstreamEp, None, writer, UpstreamEndpoint)
val actualDownstreamRequests = policy.startInventory(scenario.pair, scenario.downstreamEp, None, writer, DownstreamEndpoint)
assertEquals(expectedRequests.toSet, actualUpstreamRequests.toSet)
assertEquals(expectedRequests.toSet, actualDownstreamRequests.toSet)
}
/**
* When an inventory submits aggregates that match the aggregates in the store, no additional requests will be
* returned.
*/
@Theory
def shouldStopAtTopLevelWhenSubmittedAggregatesMatch(scenario:Scenario) {
setupStubs(scenario)
assumeTrue(scenario.tx.forall(_.isInstanceOf[AggregateTx])) // Only relevant in scenarios where aggregation occurs
scenario.tx.foreach { case tx:AggregateTx =>
expectUpstreamStoreQuery(scenario.pair, tx.bucketing, tx.constraints, tx.respBuckets)
expectDownstreamStoreQuery(scenario.pair, tx.bucketing, tx.constraints, tx.respBuckets)
}
replayAll
scenario.tx.foreach { case tx:AggregateTx =>
val nextUpstreamSteps = policy.processInventory(scenario.pair, scenario.upstreamEp, writer, UpstreamEndpoint,
tx.constraints, tx.bucketing, participantDigestResponse(tx.respBuckets))
val nextDownstreamSteps = policy.processInventory(scenario.pair, scenario.downstreamEp, writer, DownstreamEndpoint,
tx.constraints, tx.bucketing, participantDigestResponse(tx.respBuckets))
assertEquals(Seq(), nextUpstreamSteps)
assertEquals(Seq(), nextDownstreamSteps)
}
verifyAll
}
/**
* If our store is empty, then when the top level aggregates are submitted, a step should be returned for all data in
* the submitted top levels.
*/
@Theory
def shouldRequestLowestLevelsStraightAfterTopWhenStoreIsEmpty(scenario:Scenario) {
setupStubs(scenario)
assumeTrue(scenario.tx.forall(_.isInstanceOf[AggregateTx])) // Only relevant in scenarios where aggregation occurs
scenario.tx.foreach { case tx:AggregateTx =>
expectUpstreamStoreQuery(scenario.pair, tx.bucketing, tx.constraints, Seq())
expectDownstreamStoreQuery(scenario.pair, tx.bucketing, tx.constraints, Seq())
}
replayAll
scenario.tx.foreach { case tx:AggregateTx =>
val nextUpstreamSteps = policy.processInventory(scenario.pair, scenario.upstreamEp, writer, UpstreamEndpoint,
tx.constraints, tx.bucketing, participantDigestResponse(tx.respBuckets))
val nextDownstreamSteps = policy.processInventory(scenario.pair, scenario.downstreamEp, writer, DownstreamEndpoint,
tx.constraints, tx.bucketing, participantDigestResponse(tx.respBuckets))
// The requests will be scan requests for the bucket's bounds with no aggregation
val expectedRequests = tx.respBuckets.map(b => new ScanRequest(b.nextTx.constraints.toSet[ScanConstraint], Set[ScanAggregation]()))
assertEquals(expectedRequests.toSet, nextUpstreamSteps.toSet)
assertEquals(expectedRequests.toSet, nextDownstreamSteps.toSet)
}
verifyAll
}
/**
* Scenario with the store being out-of-date for an upstream leaf-node.
*/
@Theory
def shouldGenerateRequestsToCorrectOutOfDateEntity(scenario:Scenario) {
setupStubs(scenario)
val scanId = System.currentTimeMillis()
scenario.tx.foreach { tx =>
// Alter the version of the first entity in the upstream tree, then expect traversal to it
val updated = tx.alterFirstVsn("newVsn1")
// Expect traversal down the first branch of the tree
traverseFirstBranch(updated, tx) {
case (tx1:AggregateTx, tx2:AggregateTx) =>
expectUpstreamStoreQuery(scenario.pair, tx2.bucketing, tx2.constraints, tx2.respBuckets)
expectDownstreamStoreQuery(scenario.pair, tx2.bucketing, tx2.constraints, tx2.respBuckets)
case (tx1:EntityTx, tx2:EntityTx) =>
expectUpstreamStoreQuery(scenario.pair, tx2.constraints, tx2.entities)
expectDownstreamStoreQuery(scenario.pair, tx2.constraints, tx2.entities)
}
expectUpstreamEntityStore(scenario.pair, Seq(updated.firstVsn), true, None)
expectDownstreamEntityStore(scenario.pair, Seq(updated.firstVsn), true, None)
// Expect to see an event about the version being matched (since we told the datastore to report it as matched)
// We'll see this twice (once for upstream, once for downstream)
listener.onMatch(VersionID(scenario.pair, updated.firstVsn.id), updated.firstVsn.vsn, TriggeredByScan)
expectLastCall.times(2)
}
replayAll
scenario.tx.foreach { tx =>
// Alter the version of the first entity in the upstream tree, then expect traversal to it
val updated = tx.alterFirstVsn("newVsn1")
traverseFirstBranch(updated, tx) {
case (tx1:AggregateTx, tx2:AggregateTx) =>
val nextUpstreamSteps = policy.processInventory(scenario.pair, scenario.upstreamEp, writer, UpstreamEndpoint,
tx1.constraints, tx1.bucketing, participantDigestResponse(tx1.respBuckets))
val nextDownstreamSteps = policy.processInventory(scenario.pair, scenario.downstreamEp, writer, DownstreamEndpoint,
tx1.constraints, tx1.bucketing, participantDigestResponse(tx1.respBuckets))
val expectedNextTx = tx2.respBuckets.head.nextTx
val expectedNextRequest = expectedNextTx match {
case atx:AggregateTx => new ScanRequest(atx.constraints.toSet[ScanConstraint], atx.bucketing.toSet[ScanAggregation])
case etx:EntityTx => new ScanRequest(etx.constraints.toSet[ScanConstraint], Set[ScanAggregation]())
}
assertEquals(Seq(expectedNextRequest), nextUpstreamSteps)
assertEquals(Seq(expectedNextRequest), nextDownstreamSteps)
case (tx1:EntityTx, tx2:EntityTx) =>
val nextUpstreamSteps = policy.processInventory(scenario.pair, scenario.upstreamEp, writer, UpstreamEndpoint,
tx1.constraints, Seq(), participantEntityResponse(tx1.entities))
val nextDownstreamSteps = policy.processInventory(scenario.pair, scenario.downstreamEp, writer, DownstreamEndpoint,
tx1.constraints, Seq(), participantEntityResponse(tx1.entities))
assertEquals(Seq(), nextUpstreamSteps)
assertEquals(Seq(), nextDownstreamSteps)
}
}
verifyAll
}
//
// Helpers
//
protected def setupStubs(scenario:Scenario) {
val pair = DomainPairDef(key = scenario.pair.name, space = scenario.pair.space)
expect(domainConfigStore.getPairDef(scenario.pair)).andReturn(pair).anyTimes
}
protected def expectUpstreamAggregateScan(pair:PairRef, bucketing:Seq[CategoryFunction], constraints:Seq[ScanConstraint],
partResp:Seq[Bucket], storeResp:Seq[Bucket]) {
expect(usMock.scan(asUnorderedList(constraints), asUnorderedList(bucketing))).andReturn(participantDigestResponse(partResp))
expectUpstreamStoreQuery(pair, bucketing, constraints, storeResp)
}
protected def expectUpstreamStoreQuery(pair:PairRef, bucketing:Seq[CategoryFunction], constraints:Seq[ScanConstraint],
storeResp:Seq[Bucket]) {
store.queryUpstreams(asUnorderedList(constraints), anyUnitF4)
expectLastCall[Unit].andAnswer(UpstreamVersionAnswer(pair, storeResp))
}
protected def expectDownstreamAggregateScan(pair:PairRef, bucketing:Seq[CategoryFunction], constraints:Seq[ScanConstraint],
partResp:Seq[Bucket], storeResp:Seq[Bucket]) {
expect(dsMock.scan(asUnorderedList(constraints), asUnorderedList(bucketing))).andReturn(participantDigestResponse(partResp))
expectDownstreamStoreQuery(pair, bucketing, constraints, storeResp)
}
protected def expectDownstreamStoreQuery(pair:PairRef, bucketing:Seq[CategoryFunction], constraints:Seq[ScanConstraint],
storeResp:Seq[Bucket]) {
store.queryDownstreams(asUnorderedList(constraints), anyUnitF5)
expectLastCall[Unit].andAnswer(DownstreamVersionAnswer(pair, storeResp))
}
protected def expectUpstreamEntityScan(pair:PairRef, constraints:Seq[ScanConstraint], partResp:Seq[Vsn], storeResp:Seq[Vsn]) {
expect(usMock.scan(asUnorderedList(constraints), EasyMock.eq(Seq()))).andReturn(participantEntityResponse(partResp))
expectUpstreamStoreQuery(pair, constraints, storeResp)
}
protected def expectUpstreamStoreQuery(pair:PairRef, constraints:Seq[ScanConstraint], storeResp:Seq[Vsn]) {
val correlations = storeResp.map(v=> {
Correlation(id = v.id, upstreamAttributes = v.strAttrs, lastUpdate = v.lastUpdated, upstreamVsn = v.vsn)
})
expect(store.queryUpstreams(asUnorderedList(constraints))).andReturn(correlations)
}
protected def expectDownstreamEntityScan(pair:PairRef, constraints:Seq[ScanConstraint], partResp:Seq[Vsn], storeResp:Seq[Vsn]) {
expect(dsMock.scan(asUnorderedList(constraints), EasyMock.eq(Seq()))).andReturn(participantEntityResponse(partResp))
expectDownstreamStoreQuery(pair, constraints, storeResp)
}
protected def expectDownstreamStoreQuery(pair:PairRef, constraints:Seq[ScanConstraint], storeResp:Seq[Vsn]) {
val correlations = storeResp.map(v=> {
Correlation(id = v.id, downstreamAttributes = v.strAttrs, lastUpdate = v.lastUpdated, downstreamDVsn = v.vsn)
})
expect(store.queryDownstreams(asUnorderedList(constraints))).andReturn(correlations)
}
protected def expectUpstreamEntityStore(pair:PairRef, entities:Seq[Vsn], matched:Boolean, scanId:Option[Long]) {
entities.foreach(v => {
val downstreamVsnToUse = if (matched) { v.vsn } else { null } // If we're matched, make the vsn match
expect(writer.storeUpstreamVersion(VersionID(pair, v.id), v.typedAttrs, v.lastUpdated, v.vsn, scanId)).
andReturn(new Correlation(null, pair, v.id, v.strAttrs, null, v.lastUpdated, now, v.vsn, downstreamVsnToUse, downstreamVsnToUse, matched))
})
}
protected def expectDownstreamEntityStore(pair:PairRef, entities:Seq[Vsn], matched:Boolean, scanId:Option[Long]) {
entities.foreach(v => {
val upstreamVsnToUse = if (matched) { v.vsn } else { null } // If we're matched, make the vsn match
expect(writer.storeDownstreamVersion(VersionID(pair, v.id), v.typedAttrs, v.lastUpdated, v.vsn, v.vsn, scanId)).
andReturn(new Correlation(null, pair, v.id, null, v.strAttrs, v.lastUpdated, now, upstreamVsnToUse, v.vsn, v.vsn, matched))
})
}
protected def participantDigestResponse(buckets:Seq[Bucket]):Seq[ScanResultEntry] =
buckets.map(b => ScanResultEntry.forAggregate(b.vsn, b.attrs))
protected def participantEntityResponse(entities:Seq[Vsn]):Seq[ScanResultEntry] =
entities.map(e => ScanResultEntry.forEntity(e.id, e.vsn, e.lastUpdated, e.strAttrs))
protected abstract class VersionAnswer[T] extends IAnswer[Unit] {
def res:Seq[Bucket]
def answer {
val args = EasyMock.getCurrentArguments
val cb = args(1).asInstanceOf[T]
// Answer with entities from each bucket's children
answerEntities(res.flatMap(b => b.allVsns), cb)
}
def answerEntities(entities:Seq[Vsn], cb:T):Unit
}
protected case class UpstreamVersionAnswer(pair:PairRef, res:Seq[Bucket])
extends VersionAnswer[Function4[VersionID, Map[String, String], DateTime, String, Unit]] {
def answerEntities(entities:Seq[Vsn], cb:Function4[VersionID, Map[String, String], DateTime, String, Unit]) {
entities.foreach(v => cb(VersionID(pair, v.id), v.strAttrs, v.lastUpdated, v.vsn))
}
}
protected case class DownstreamVersionAnswer(pair:PairRef, res:Seq[Bucket])
extends VersionAnswer[Function5[VersionID, Map[String, String], DateTime, String, String, Unit]] {
def answerEntities(entities:Seq[Vsn], cb:Function5[VersionID, Map[String, String], DateTime, String, String, Unit]) {
entities.foreach(v => cb(VersionID(pair, v.id), v.strAttrs, v.lastUpdated, v.vsn, v.vsn))
}
}
def traverseFirstBranch(tx1:Tx, tx2:Tx)(cb:((Tx, Tx) => Unit)) {
cb(tx1, tx2)
(tx1, tx2) match {
case (atx1:AggregateTx, atx2:AggregateTx) => traverseFirstBranch(atx1.respBuckets(0).nextTx, atx2.respBuckets(0).nextTx)(cb)
case (atx1:AggregateTx, _) => traverseFirstBranch(atx1.respBuckets(0).nextTx, null)(cb)
case (_, atx2:AggregateTx) => traverseFirstBranch(null, atx2.respBuckets(0).nextTx)(cb)
case _ =>
}
}
}
object AbstractDataDrivenPolicyTest {
//
// Scenarios
//
val dateTimeCategoryDescriptor = new RangeCategoryDescriptor("datetime")
val dateCategoryDescriptor = new RangeCategoryDescriptor("date")
val intCategoryDescriptor = new RangeCategoryDescriptor("int")
val stringCategoryDescriptor = new PrefixCategoryDescriptor(1, 3, 1)
/**
* This is a DateTime descriptor that is initialized using LocalDates
*/
val localDatePrimedDescriptor = new RangeCategoryDescriptor("datetime", START_2023.toString, END_2023.toString)
val domain = Domain(name="domain")
/**
* Provides a stable definition of now that can be used for updated timestamps
*/
val now = new DateTime()
@DataPoint def noCategoriesScenario = Scenario(
PairRef(name = "ab", space = 999L),
Endpoint(categories = new HashMap[String, AggregatingCategoryDescriptor]),
Endpoint(categories = new HashMap[String, AggregatingCategoryDescriptor]),
EntityTx(Seq(),
Vsn("id1", Map(), "vsn1"),
Vsn("id2", Map(), "vsn2")
)
)
/**
* As part of #203, elements of a set are sent out individually by default.
* For the sake of simplicity, the old behaviour (sending them out as a batch) cannot be configured.
* Should anybody ask for it, this behaviour may be re-instated at some point.
*/
@DataPoint def setOnlyScenario = Scenario(
PairRef(name = "ab", space = 999L),
Endpoint(categories = Map("someString" -> new SetCategoryDescriptor(Set("A","B","C")))),
Endpoint(categories = Map("someString" -> new SetCategoryDescriptor(Set("A","B","C")))),
EntityTx(Seq(new SetConstraint("someString", Set("A"))),
Vsn("id1", Map("someString" -> "A"), "vsn1"),
Vsn("id2", Map("someString" -> "A"), "vsn2")
),
EntityTx(Seq(new SetConstraint("someString", Set("B"))),
Vsn("id3", Map("someString" -> "B"), "vsn3"),
Vsn("id4", Map("someString" -> "B"), "vsn4")
),
EntityTx(Seq(new SetConstraint("someString", Set("C"))),
Vsn("id5", Map("someString" -> "C"), "vsn5"),
Vsn("id6", Map("someString" -> "C"), "vsn6")
)
)
@DataPoint def dateTimesOnlyScenario = Scenario(
PairRef(name = "ab", space = 999L),
Endpoint(categories = Map("bizDateTime" -> dateTimeCategoryDescriptor)),
Endpoint(categories = Map("bizDateTime" -> dateTimeCategoryDescriptor)),
AggregateTx(Seq(yearly("bizDateTime", TimeDataType)), Seq(),
Bucket("2010", Map("bizDateTime" -> "2010"),
AggregateTx(Seq(monthly("bizDateTime", TimeDataType)), Seq(dateTimeRange("bizDateTime", START_2010, END_2010)),
Bucket("2010-07", Map("bizDateTime" -> "2010-07"),
AggregateTx(Seq(daily("bizDateTime", TimeDataType)), Seq(dateTimeRange("bizDateTime", JUL_2010, END_JUL_2010)),
Bucket("2010-07-08", Map("bizDateTime" -> "2010-07-08"),
EntityTx(Seq(dateTimeRange("bizDateTime", JUL_8_2010, END_JUL_8_2010)),
Vsn("id1", Map("bizDateTime" -> JUL_8_2010_1), "vsn1"),
Vsn("id2", Map("bizDateTime" -> JUL_8_2010_2), "vsn2")
)),
Bucket("2010-07-09", Map("bizDateTime" -> "2010-07-09"),
EntityTx(Seq(dateTimeRange("bizDateTime", JUL_9_2010, END_JUL_9_2010)),
Vsn("id3", Map("bizDateTime" -> JUL_9_2010_1), "vsn3")
))
)),
Bucket("2010-08", Map("bizDateTime" -> "2010-08"),
AggregateTx(Seq(daily("bizDateTime", TimeDataType)), Seq(dateTimeRange("bizDateTime", AUG_2010, END_AUG_2010)),
Bucket("2010-08-02", Map("bizDateTime" -> "2010-08-02"),
EntityTx(Seq(dateTimeRange("bizDateTime", AUG_11_2010, END_AUG_11_2010)),
Vsn("id4", Map("bizDateTime" -> AUG_11_2010_1), "vsn4")
))
))
)),
Bucket("2011", Map("bizDateTime" -> "2011"),
AggregateTx(Seq(monthly("bizDateTime", TimeDataType)), Seq(dateTimeRange("bizDateTime", START_2011, END_2011)),
Bucket("2011-01", Map("bizDateTime" -> "2011-01"),
AggregateTx(Seq(daily("bizDateTime", TimeDataType)), Seq(dateTimeRange("bizDateTime", JAN_2011, END_JAN_2011)),
Bucket("2011-01-20", Map("bizDateTime" -> "2011-01-20"),
EntityTx(Seq(dateTimeRange("bizDateTime", JAN_20_2011, END_JAN_20_2011)),
Vsn("id5", Map("bizDateTime" -> JAN_20_2011_1), "vsn5")
))
))
))
))
@DataPoint def datesOnlyScenario = Scenario(
PairRef(name = "xy", space = 999L),
Endpoint(categories = Map("bizDate" -> dateCategoryDescriptor)),
Endpoint(categories = Map("bizDate" -> dateCategoryDescriptor)),
AggregateTx(Seq(yearly("bizDate", DateDataType)), Seq(),
Bucket("1995", Map("bizDate" -> "1995"),
AggregateTx(Seq(monthly("bizDate", DateDataType)), Seq(dateRange("bizDate", START_1995, END_1995)),
Bucket("1995-04", Map("bizDate" -> "1995-04"),
AggregateTx(Seq(daily("bizDate", DateDataType)), Seq(dateRange("bizDate", APR_1_1995, APR_30_1995)),
Bucket("1995-04-11", Map("bizDate" -> "1995-04-11"),
EntityTx(Seq(dateRange("bizDate", APR_11_1995, APR_11_1995)),
Vsn("id1", Map("bizDate" -> APR_11_1995), "vsn1"),
Vsn("id2", Map("bizDate" -> APR_11_1995), "vsn2")
)),
Bucket("1995-04-12", Map("bizDate" -> "1995-04-12"),
EntityTx(Seq(dateRange("bizDate", APR_12_1995, APR_12_1995)),
Vsn("id3", Map("bizDate" -> APR_12_1995), "vsn3")
))
)),
Bucket("1995-05", Map("bizDate" -> "1995-05"),
AggregateTx(Seq(daily("bizDate", DateDataType)), Seq(dateRange("bizDate", MAY_1_1995, MAY_31_1995)),
Bucket("1995-05-23", Map("bizDate" -> "1995-05-23"),
EntityTx(Seq(dateRange("bizDate", MAY_23_1995, MAY_23_1995)),
Vsn("id4", Map("bizDate" -> MAY_23_1995), "vsn4")
))
))
)),
Bucket("1996", Map("bizDate" -> "1996"),
AggregateTx(Seq(monthly("bizDate", DateDataType)), Seq(dateRange("bizDate", START_1996, END_1996)),
Bucket("1996-03", Map("bizDate" -> "1996-03"),
AggregateTx(Seq(daily("bizDate", DateDataType)), Seq(dateRange("bizDate", MAR_1_1996, MAR_31_1996)),
Bucket("1996-03-15", Map("bizDate" -> "1996-03-15"),
EntityTx(Seq(dateRange("bizDate", MAR_15_1996, MAR_15_1996)),
Vsn("id5", Map("bizDate" -> MAR_15_1996), "vsn5")
))
))
))
))
/**
* This scenario uses a constrained descriptor that is initialized with LocalDate
* values but uses a full DateTime data type during its descent.
*/
@DataPoint def yy_MM_dddd_dateTimesOnlyScenario = Scenario(
PairRef(name = "tf", space = 999L),
Endpoint(categories = Map("bizDateTime" -> localDatePrimedDescriptor)),
Endpoint(categories = Map("bizDateTime" -> localDatePrimedDescriptor)),
AggregateTx(Seq(yearly("bizDateTime", TimeDataType)), Seq(dateTimeRange("bizDateTime", START_2023_FULL, END_2023_FULL)),
Bucket("2023", Map("bizDateTime" -> "2023"),
AggregateTx(Seq(monthly("bizDateTime", TimeDataType)), Seq(dateTimeRange("bizDateTime", START_2023_FULL, END_2023_FULL)),
Bucket("2023-10", Map("bizDateTime" -> "2023-10"),
AggregateTx(Seq(daily("bizDateTime", TimeDataType)), Seq(dateTimeRange("bizDateTime", OCT_1_2023, OCT_31_2023)),
Bucket("2023-10-17", Map("bizDateTime" -> "2023-10-17"),
EntityTx(Seq(dateTimeRange("bizDateTime", OCT_17_2023_START, OCT_17_2023_END)),
Vsn("id1", Map("bizDateTime" -> OCT_17_2023), "vsn1")
))
))
))
))
@DataPoint def integersOnlyScenario = Scenario(
PairRef(name = "bc", space = 999L),
Endpoint(categories = Map("someInt" -> intCategoryDescriptor)),
Endpoint(categories = Map("someInt" -> intCategoryDescriptor)),
AggregateTx(Seq(thousands("someInt")), Seq(),
Bucket("1000", Map("someInt" -> "1000"),
AggregateTx(Seq(hundreds("someInt")), Seq(intRange("someInt", 1000, 1999)),
Bucket("1200", Map("someInt" -> "1200"),
AggregateTx(Seq(tens("someInt")), Seq(intRange("someInt", 1200, 1299)),
Bucket("1230", Map("someInt" -> "1230"),
EntityTx(Seq(intRange("someInt", 1230, 1239)),
Vsn("id1", Map("someInt" -> 1234), "vsn1")
)),
Bucket("1240", Map("someInt" -> "1240"),
EntityTx(Seq(intRange("someInt", 1240, 1249)),
Vsn("id2", Map("someInt" -> 1245), "vsn2")
))
)),
Bucket("1300", Map("someInt" -> "1300"),
AggregateTx(Seq(tens("someInt")), Seq(intRange("someInt", 1300, 1399)),
Bucket("1350", Map("someInt" -> "1350"),
EntityTx(Seq(intRange("someInt", 1350, 1359)),
Vsn("id3", Map("someInt" -> 1357), "vsn3")
))
))
)),
Bucket("2000", Map("someInt" -> "2000"),
AggregateTx(Seq(hundreds("someInt")), Seq(intRange("someInt", 2000, 2999)),
Bucket("2300", Map("someInt" -> "2300"),
AggregateTx(Seq(tens("someInt")), Seq(intRange("someInt", 2300, 2399)),
Bucket("2340", Map("someInt" -> "2340"),
EntityTx(Seq(intRange("someInt", 2340, 2349)),
Vsn("id4", Map("someInt" -> 2345), "vsn4")
))
))
))
))
@DataPoint def stringsOnlyScenario = Scenario(
PairRef(name = "bc", space = 999L),
Endpoint(categories = Map("someString" -> stringCategoryDescriptor)),
Endpoint(categories = Map("someString" -> stringCategoryDescriptor)),
AggregateTx(Seq(oneCharString("someString")), Seq(),
Bucket("A", Map("someString" -> "A"),
AggregateTx(Seq(twoCharString("someString")), Seq(prefix("someString", "A")),
Bucket("AB", Map("someString" -> "AB"),
AggregateTx(Seq(threeCharString("someString")), Seq(prefix("someString", "AB")),
Bucket("ABC", Map("someString" -> "ABC"),
EntityTx(Seq(prefix("someString", "ABC")),
Vsn("id1", Map("someString" -> "ABC"), "vsn1")
)),
Bucket("ABD", Map("someString" -> "ABD"),
EntityTx(Seq(prefix("someString", "ABD")),
Vsn("id2", Map("someString" -> "ABDZ"), "vsn2")
))
)),
Bucket("AC", Map("someString" -> "AC"),
AggregateTx(Seq(threeCharString("someString")), Seq(prefix("someString", "AC")),
Bucket("ACD", Map("someString" -> "ACD"),
EntityTx(Seq(prefix("someString", "ACD")),
Vsn("id3", Map("someString" -> "ACDC"), "vsn3")
))
))
)),
Bucket("Z", Map("someString" -> "Z"),
AggregateTx(Seq(twoCharString("someString")), Seq(prefix("someString", "Z")),
Bucket("ZY", Map("someString" -> "ZY"),
AggregateTx(Seq(threeCharString("someString")), Seq(prefix("someString", "ZY")),
Bucket("ZYX", Map("someString" -> "ZYX"),
EntityTx(Seq(prefix("someString", "ZYX")),
Vsn("id4", Map("someString" -> "ZYXXY"), "vsn4")
))
))
))
))
@DataPoint def integersAndDateTimesScenario = Scenario(
PairRef(name = "ab", space = 999L),
Endpoint(categories = Map("bizDateTime" -> dateTimeCategoryDescriptor, "someInt" -> intCategoryDescriptor)),
Endpoint(categories = Map("bizDateTime" -> dateTimeCategoryDescriptor, "someInt" -> intCategoryDescriptor)),
AggregateTx(Seq(yearly("bizDateTime", TimeDataType), thousands("someInt")), Seq(),
Bucket("2010_1000", Map("bizDateTime" -> "2010", "someInt" -> "1000"),
AggregateTx(Seq(monthly("bizDateTime", TimeDataType), hundreds("someInt")), Seq(dateTimeRange("bizDateTime", START_2010, END_2010), intRange("someInt", 1000, 1999)),
Bucket("2010-07_1200", Map("bizDateTime" -> "2010-07", "someInt" -> "1200"),
AggregateTx(Seq(daily("bizDateTime", TimeDataType), tens("someInt")), Seq(dateTimeRange("bizDateTime", JUL_2010, END_JUL_2010), intRange("someInt", 1200, 1299)),
Bucket("2010-07-08_1230", Map("bizDateTime" -> "2010-07-08", "someInt" -> "1230"),
EntityTx(Seq(dateTimeRange("bizDateTime", JUL_8_2010, END_JUL_8_2010), intRange("someInt", 1230, 1239)),
Vsn("id1", Map("bizDateTime" -> JUL_8_2010_1, "someInt" -> 1234), "vsn1"),
Vsn("id2", Map("bizDateTime" -> JUL_8_2010_2, "someInt" -> 1235), "vsn2")
)),
Bucket("2010-07-09_1240", Map("bizDateTime" -> "2010-07-09", "someInt" -> "1240"),
EntityTx(Seq(dateTimeRange("bizDateTime", JUL_9_2010, END_JUL_9_2010), intRange("someInt", 1240, 1249)),
Vsn("id3", Map("bizDateTime" -> JUL_9_2010_1, "someInt" -> 1245), "vsn3")
))
)),
Bucket("2010-08_1300", Map("bizDateTime" -> "2010-08", "someInt" -> "1300"),
AggregateTx(Seq(daily("bizDateTime", TimeDataType), tens("someInt")), Seq(dateTimeRange("bizDateTime", AUG_2010, END_AUG_2010), intRange("someInt", 1300, 1399)),
Bucket("2010-08-02_1350", Map("bizDateTime" -> "2010-08-02", "someInt" -> "1350"),
EntityTx(Seq(dateTimeRange("bizDateTime", AUG_11_2010, END_AUG_11_2010), intRange("someInt", 1350, 1359)),
Vsn("id4", Map("bizDateTime" -> AUG_11_2010_1, "someInt" -> 1357), "vsn4")
))
))
)),
Bucket("2011_2000", Map("bizDateTime" -> "2011", "someInt" -> "2000"),
AggregateTx(Seq(monthly("bizDateTime", TimeDataType), hundreds("someInt")), Seq(dateTimeRange("bizDateTime", START_2011, END_2011), intRange("someInt", 2000, 2999)),
Bucket("2011-01_2300", Map("bizDateTime" -> "2011-01", "someInt" -> "2300"),
AggregateTx(Seq(daily("bizDateTime", TimeDataType), tens("someInt")), Seq(dateTimeRange("bizDateTime", JAN_2011, END_JAN_2011), intRange("someInt", 2300, 2399)),
Bucket("2011-01-20_2340", Map("bizDateTime" -> "2011-01-20", "someInt" -> "2340"),
EntityTx(Seq(dateTimeRange("bizDateTime", JAN_20_2011, END_JAN_20_2011), intRange("someInt", 2340, 2349)),
Vsn("id5", Map("bizDateTime" -> JAN_20_2011_1, "someInt" -> 2345), "vsn5")
))
))
))
))
/**
* As part of #203, elements of a set are sent out individually by default.
* For the sake of simplicity, the old behaviour (sending them out as a batch) cannot be configured.
* Should anybody ask for it, this behaviour may be re-instated at some point.
*/
@DataPoint def setAndDateTimesScenario = Scenario(
PairRef(name = "gh", space = 999L),
Endpoint(categories = Map("bizDateTime" -> dateTimeCategoryDescriptor, "someString" -> new SetCategoryDescriptor(Set("A","B")))),
Endpoint(categories = Map("bizDateTime" -> dateTimeCategoryDescriptor, "someString" -> new SetCategoryDescriptor(Set("A","B")))),
AggregateTx(Seq(yearly("bizDateTime", TimeDataType)), Seq(new SetConstraint("someString",Set("A"))),
Bucket("2010_A", Map("bizDateTime" -> "2010", "someString" -> "A"),
AggregateTx(Seq(monthly("bizDateTime", TimeDataType)), Seq(dateTimeRange("bizDateTime", START_2010, END_2010), new SetConstraint("someString",Set("A"))),
Bucket("2010-07_A", Map("bizDateTime" -> "2010-07", "someString" -> "A"),
AggregateTx(Seq(daily("bizDateTime", TimeDataType)), Seq(dateTimeRange("bizDateTime", JUL_2010, END_JUL_2010), new SetConstraint("someString",Set("A"))),
Bucket("2010-07-08_A", Map("bizDateTime" -> "2010-07-08", "someString" -> "A"),
EntityTx(Seq(dateTimeRange("bizDateTime", JUL_8_2010, END_JUL_8_2010), new SetConstraint("someString",Set("A"))),
Vsn("id1", Map("bizDateTime" -> JUL_8_2010_1, "someString" -> "A"), "vsn1"),
Vsn("id2", Map("bizDateTime" -> JUL_8_2010_2, "someString" -> "A"), "vsn2")
)
)
)
)
)
)
),
AggregateTx(Seq(yearly("bizDateTime", TimeDataType)), Seq(new SetConstraint("someString",Set("B"))),
Bucket("2011_B", Map("bizDateTime" -> "2011", "someString" -> "B"),
AggregateTx(Seq(monthly("bizDateTime", TimeDataType)), Seq(dateTimeRange("bizDateTime", START_2011, END_2011), new SetConstraint("someString",Set("B"))),
Bucket("2011-01_B", Map("bizDateTime" -> "2011-01", "someString" -> "B"),
AggregateTx(Seq(daily("bizDateTime", TimeDataType)), Seq(dateTimeRange("bizDateTime", JAN_2011, END_JAN_2011), new SetConstraint("someString",Set("B"))),
Bucket("2011-01-20_B", Map("bizDateTime" -> "2011-01-20", "someString" -> "B"),
EntityTx(Seq(dateTimeRange("bizDateTime", JAN_20_2011, END_JAN_20_2011), new SetConstraint("someString",Set("B"))),
Vsn("id3", Map("bizDateTime" -> JAN_20_2011_1, "someString" -> "B"), "vsn3")
)
)
)
)
)
)
)
)
//
// Aliases
//
def yearly(attrName:String, dataType:DateCategoryDataType) = YearlyCategoryFunction(attrName, dataType)
def monthly(attrName:String, dataType:DateCategoryDataType) = MonthlyCategoryFunction(attrName, dataType)
def daily(attrName:String, dataType:DateCategoryDataType) = DailyCategoryFunction(attrName, dataType)
def thousands(attrName:String) = IntegerCategoryFunction(attrName, 1000, 10)
def hundreds(attrName:String) = IntegerCategoryFunction(attrName, 100, 10)
def tens(attrName:String) = IntegerCategoryFunction(attrName, 10, 10)
def oneCharString(attrName:String) = StringPrefixCategoryFunction(attrName, 1, 3, 1)
def twoCharString(attrName:String) = StringPrefixCategoryFunction(attrName, 2, 3, 1)
def threeCharString(attrName:String) = StringPrefixCategoryFunction(attrName, 3, 3, 1)
def dateTimeRange(n:String, lower:DateTime, upper:DateTime) = new TimeRangeConstraint(n, lower, upper)
def dateRange(n:String, lower:LocalDate, upper:LocalDate) = new DateRangeConstraint(n, lower, upper)
def intRange(n:String, lower:Int, upper:Int) = new IntegerRangeConstraint(n, lower, upper)
def prefix(n: String, prefix: String) = new StringPrefixConstraint(n, prefix)
//
// Type Definitions
//
case class Scenario(pair:PairRef, upstreamEp:Endpoint, downstreamEp:Endpoint, tx:Tx*)
abstract class Tx {
def constraints:Seq[ScanConstraint]
def allVsns:Seq[Vsn]
def alterFirstVsn(newVsn:String):Tx
def firstVsn:Vsn
def toString(indent:Int):String
}
/**
* @param bucketing The bucketing policy to apply
* @param constraints The value constraints being applied to this transaction
* @param respBuckets The list of buckets expected in this transaction
*/
case class AggregateTx(bucketing:Seq[CategoryFunction], constraints:Seq[ScanConstraint], respBuckets:Bucket*) extends Tx {
lazy val allVsns = respBuckets.flatMap(b => b.allVsns)
def alterFirstVsn(newVsn:String) =
// This uses the prepend operator +: to alter the first element of the list and then re-attach the remainder to create a new sequence
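// e.g. for respBuckets b1, b2, b3 this yields AggregateTx(bucketing, constraints, b1.alterFirstVsn(newVsn), b2, b3)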
AggregateTx(bucketing, constraints, (respBuckets(0).alterFirstVsn(newVsn) +: respBuckets.drop(1)):_*)
def firstVsn = respBuckets(0).nextTx.firstVsn
def toString(indent:Int) = (" " * indent) + "AggregateTx(" + bucketing + ", " + constraints + ")\n" + respBuckets.map(b => b.toString(indent + 2)).foldLeft("")(_ + _)
}
case class EntityTx(constraints:Seq[ScanConstraint], entities:Vsn*) extends Tx {
lazy val allVsns = entities
def alterFirstVsn(newVsn:String) = EntityTx(constraints, (entities(0).alterVsn(newVsn) +: entities.drop(1)):_*)
def firstVsn = entities(0)
def toString(indent:Int) = (" " * indent) + "EntityTx(" + constraints + ")\n" + entities.map(e => e.toString(indent + 2)).foldLeft("")(_ + _)
}
case class Bucket(name:String, attrs:Map[String, String], nextTx:Tx) {
lazy val allVsns = nextTx.allVsns
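// The bucket digest is the MD5 hex of all child versions concatenated in order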
lazy val vsn = DigestUtils.md5Hex(allVsns.map(v => v.vsn).foldLeft("")(_ + _))
def alterFirstVsn(newVsn:String):Bucket = Bucket(name, attrs, nextTx.alterFirstVsn(newVsn))
def toString(indent:Int) = (" " * indent) + "Bucket(" + name + ", " + attrs + ", " + vsn + ")\n" + nextTx.toString(indent + 2)
}
case class Vsn(id:String, attrs:Map[String, Any], vsn:String) {
def typedAttrs = attrs.map { case (k, v) => k -> toTyped(v) }.toMap
def strAttrs = attrs.map { case (k, v) => k -> v.toString }.toMap
val lastUpdated = now
def alterVsn(newVsn:String) = {
Vsn(id, attrs, newVsn)
}
def toString(indent:Int) = (" " * indent) + "Vsn(" + id + ", " + attrs + ", " + vsn + ")\n"
def toTyped(v:Any) = v match {
case i:Int => IntegerAttribute(i)
case dt:DateTime => DateTimeAttribute(dt)
case dt:LocalDate => DateAttribute(dt)
case _ => StringAttribute(v.toString)
}
}
}
|
lshift/diffa
|
kernel/src/test/scala/net/lshift/diffa/kernel/differencing/AbstractDataDrivenPolicyTest.scala
|
Scala
|
apache-2.0
| 44,254 |
package hu.frankdavid.ranking.strategy
import hu.frankdavid.ranking._
case object FootballWorldCupStrategy extends TournamentStrategy("Football World Cup Strategy") {
val GroupStageMatch = 1
val KnockoutStageMatch = 2
private val GroupSize: Int = 4
private val RoundRobinStrategy = new RoundRobinStrategy(1, 3, 1, 0)
def matchesOrResult(implicit context: TournamentContext): MatchesOrResult = {
if (context.players.size % 4 != 0) {
throw new StrategyException("Football Strategy requires the number of players to be divisible by 4.")
}
val numberOfGroups = context.players.size / GroupSize
val pots = context.players.grouped(numberOfGroups)
val random = context.createRandom()
val groups = pots.map(random.shuffle(_)).toSeq.transpose.map(new Group(_))
MatchesOrResult.chain(groups.map(_.matchesOrResult)) { (results) =>
// winner teams are matched to the loser teams in reverse order
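// e.g. with four groups, winners W1..W4 and runners-up L1..L4 seed the bracket as
// W1, L4, W2, L3, W3, L2, W4, L1, so adjacent pairs form the first knockout match-ups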
val knockoutInit = (results.map(_.head) zip results.map(_.last).reverse).map(p => Seq(p._1, p._2)).flatten
new KnockoutTreeNode(knockoutInit).matchesOrResult(3)
}.withMaxParallelism(context.maxParallelism)
}
private class KnockoutTreeNode(players: Seq[Player])
(implicit context: TournamentContext) {
def matchesOrResult(topN: Int): MatchesOrResult = {
val halves = players.splitAt(players.size / 2)
if (topN == 3) {
val half1 = new KnockoutTreeNode(halves._1).matchesOrResult(2)
val half2 = new KnockoutTreeNode(halves._2).matchesOrResult(2)
half1.chain(half2) { (h1, h2) =>
val bronze = new KnockoutTreeNode(Seq(h1(1), h2(1))).matchesOrResult(1)
val goldSilver = new KnockoutTreeNode(Seq(h1(0), h2(0))).matchesOrResult(2)
goldSilver.chain(bronze) { (gs, b) =>
Result(gs ++ b)
}
}
}
else if (players.size > 1) {
val half1 = new KnockoutTreeNode(halves._1).matchesOrResult(1)
val half2 = new KnockoutTreeNode(halves._2).matchesOrResult(1)
half1.chain(half2) { (h1, h2) =>
val matchup = MatchUp(h1(0), h2(0), KnockoutStageMatch)
context.gameHistory.maybeLatestGame(matchup) map
(game => Result(game.playersDescendingInOrder.take(topN))) getOrElse Matches(matchup)
}
} else {
// only one player
Result(players)
}
}
}
private class Group(val players: Seq[Player])(implicit context: TournamentContext) {
val matchesOrResult: MatchesOrResult = {
val roundRobinContext = context.copy(players = players, numAwardedPlayers = 2)
RoundRobinStrategy.matchesOrResult(roundRobinContext) match {
case Matches(matches) => Matches(matches.map(_.copy(matchType = GroupStageMatch, enableDraw = true)))
case x => x
}
}
}
}
|
frankdavid/ranking
|
src/main/scala/hu/frankdavid/ranking/strategy/FootballWorldCupStrategy.scala
|
Scala
|
apache-2.0
| 2,858 |
package controllers
import play.api.libs.json._
trait ServiceError {
/**
* JSON returned to the front end when an error is caught.
*/
val SERVICE_ERROR = Json.obj(
"errors" -> Json.obj(
"authentication" -> Json.arr(JsString("You're not authenticated. Please authenticate"))
)
)
}
|
marinatedpork/google-oauth-ember-play-scala
|
app/controllers/ApplicationError.scala
|
Scala
|
mit
| 311 |
/*
* bytefrog: a tracing framework for the JVM. For more information
* see http://code-pulse.com/bytefrog
*
* Copyright (C) 2014 Applied Visions - http://securedecisions.avi.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.secdec.bytefrog.clients.common.config
import java.io.File
case class TraceOutputSettings(
traceName: String,
dumpFile: Option[File] = None,
saveFile: File = new File("application-trace-data.cptrace").getCanonicalFile,
methodCallBucketLength: Int = 1000)
|
secdec/bytefrog-clients
|
common/src/main/scala/com/secdec/bytefrog/clients/common/config/TraceOutputSettings.scala
|
Scala
|
apache-2.0
| 1,016 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala.runtime
import org.apache.flink.api.common.typeutils.{TypeSerializer, TypeComparator}
import org.apache.flink.api.java.typeutils.TupleTypeInfoBase
import org.apache.flink.api.scala._
import org.apache.flink.api.scala.runtime.tuple.base.TupleComparatorTestBase
class TupleComparatorISD1Test extends TupleComparatorTestBase[(Int, String, Double)] {
protected def createComparator(ascending: Boolean): TypeComparator[(Int, String, Double)] = {
val ti = createTypeInformation[(Int, String, Double)]
ti.asInstanceOf[TupleTypeInfoBase[(Int, String, Double)]]
.createComparator(Array(0), Array(ascending),0)
}
protected def createSerializer: TypeSerializer[(Int, String, Double)] = {
val ti = createTypeInformation[(Int, String, Double)]
ti.createSerializer()
}
protected def getSortedTestData: Array[(Int, String, Double)] = {
dataISD
}
private val dataISD = Array(
(4, "hello", 20.0),
(5, "hello", 23.2),
(6, "world", 20.0),
(7, "hello", 20.0),
(8, "hello", 23.2),
(9, "world", 20.0),
(10, "hello", 20.0),
(11, "hello", 23.2)
)
}
|
citlab/vs.msc.ws14
|
flink-0-7-custom/flink-tests/src/test/scala/org/apache/flink/api/scala/runtime/TupleComparatorISD1Test.scala
|
Scala
|
apache-2.0
| 1,943 |
package com.tritondigital.counters
case class Tag(key: String, value: String) extends Ordered[Tag] {
require(key != null, "key cannot be null")
require(value != null, "value cannot be null")
Metric.validate(key)
Metric.validate(value)
// See http://stackoverflow.com/questions/8087958/in-scala-is-there-an-easy-way-to-convert-a-case-class-into-a-tuple
private def toPair = Tag.unapply(this).get
def compare(that: Tag) = Tag.ordering.compare(toPair, that.toPair)
}
object Tag {
private [Tag] val ordering = Ordering.Tuple2[String, String]
}
|
tritondigital/tritondigital-counters
|
src/main/scala/com/tritondigital/counters/Tag.scala
|
Scala
|
apache-2.0
| 560 |
package edu.berkeley.cs.amplab.mlmatrix
import java.util.concurrent.ThreadLocalRandom
import scala.reflect.ClassTag
import breeze.linalg._
import com.github.fommil.netlib.LAPACK.{getInstance=>lapack}
import org.netlib.util.intW
import org.netlib.util.doubleW
import org.apache.spark.{SparkContext, SparkException}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.types._
/** Note: [[breeze.linalg.DenseMatrix]] by default uses column-major layout. */
case class RowPartition(mat: DenseMatrix[Double]) extends Serializable
case class RowPartitionInfo(
partitionId: Int, // RDD partition this block is in
blockId: Int, // BlockId goes from 0 to numBlocks
startRow: Long) extends Serializable
class RowPartitionedMatrix(
val rdd: RDD[RowPartition],
rows: Option[Long] = None,
cols: Option[Long] = None) extends DistributedMatrix(rows, cols) with Logging {
// Map from partitionId to RowPartitionInfo
// Each RDD partition can have multiple RowPartition
@transient var partitionInfo_ : Map[Int, Array[RowPartitionInfo]] = null
override def getDim() = {
val dims = rdd.map { lm =>
(lm.mat.rows.toLong, lm.mat.cols.toLong)
}.reduce { case(a, b) =>
(a._1 + b._1, a._2)
}
dims
}
private def calculatePartitionInfo() {
// Partition information sorted by (partitionId, matrixInPartition)
val rowsPerPartition = rdd.mapPartitionsWithIndex { case (part, iter) =>
if (iter.isEmpty) {
Iterator()
} else {
iter.zipWithIndex.map(x => (part, x._2, x._1.mat.rows.toLong))
}
}.collect().sortBy(x => (x._1, x._2))
// TODO(shivaram): Test this and make it simpler ?
val blocksPerPartition = rowsPerPartition.groupBy(x => x._1).mapValues(_.length)
val partitionBlockStart = new collection.mutable.HashMap[Int, Int]
partitionBlockStart.put(0, 0)
(1 until rdd.partitions.size).foreach { p =>
partitionBlockStart(p) =
blocksPerPartition.getOrElse(p - 1, 0) + partitionBlockStart(p - 1)
}
val rowsWithblockIds = rowsPerPartition.map { x =>
(x._1, partitionBlockStart(x._1) + x._2, x._3)
}
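// Running totals of the per-block row counts give each block's absolute start row (dropRight removes the grand total)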
val cumulativeSum = rowsWithblockIds.scanLeft(0L){ case (x1, x2) =>
x1 + x2._3
}.dropRight(1)
partitionInfo_ = rowsWithblockIds.map(x => (x._1, x._2)).zip(
cumulativeSum).map(x => RowPartitionInfo(x._1._1, x._1._2, x._2)).groupBy(x => x.partitionId)
}
def getPartitionInfo = {
if (partitionInfo_ == null) {
calculatePartitionInfo()
}
partitionInfo_
}
override def +(other: Double) = {
new RowPartitionedMatrix(rdd.map { lm =>
RowPartition(lm.mat :+ other)
}, rows, cols)
}
override def *(other: Double) = {
new RowPartitionedMatrix(rdd.map { lm =>
RowPartition(lm.mat :* other)
}, rows, cols)
}
override def mapElements(f: Double => Double) = {
new RowPartitionedMatrix(rdd.map { lm =>
RowPartition(
new DenseMatrix[Double](lm.mat.rows, lm.mat.cols, lm.mat.data.map(f)))
}, rows, cols)
}
override def aggregateElements[U: ClassTag](zeroValue: U)(seqOp: (U, Double) => U, combOp: (U, U) => U): U = {
rdd.map { part =>
part.mat.data.aggregate(zeroValue)(seqOp, combOp)
}.reduce(combOp)
}
override def reduceRowElements(f: (Double, Double) => Double): DistributedMatrix = {
val reducedRows = rdd.map { rowPart =>
// get row-major layout by transposing
val rows = rowPart.mat.data.grouped(rowPart.mat.rows).toSeq.transpose
val reduced = rows.map(_.reduce(f)).toArray
RowPartition(new DenseMatrix[Double](rowPart.mat.rows, 1, reduced))
}
new RowPartitionedMatrix(reducedRows, rows, Some(1))
}
override def reduceColElements(f: (Double, Double) => Double): DistributedMatrix = {
val reducedColsPerPart = rdd.map { rowPart =>
val cols = rowPart.mat.data.grouped(rowPart.mat.rows)
cols.map(_.reduce(f)).toArray
}
val collapsed = reducedColsPerPart.reduce { case arrPair => arrPair.zipped.map(f) }
RowPartitionedMatrix.fromArray(
rdd.sparkContext.parallelize(Seq(collapsed), 1), Seq(1), numCols().toInt)
}
override def +(other: DistributedMatrix) = {
other match {
case otherBlocked: RowPartitionedMatrix =>
if (this.dim == other.dim) {
// Check if matrices share same partitioner and can be zipped
if (rdd.partitions.size == otherBlocked.rdd.partitions.size) {
new RowPartitionedMatrix(rdd.zip(otherBlocked.rdd).map { case (lm, otherLM) =>
RowPartition(lm.mat :+ otherLM.mat)
}, rows, cols)
} else {
throw new SparkException(
"Cannot add matrices with unequal partitions")
}
} else {
throw new IllegalArgumentException("Cannot add matrices of unequal size")
}
case _ =>
throw new IllegalArgumentException("Cannot add matrices of different types")
}
}
override def apply(rowRange: Range, colRange: ::.type) = {
this.apply(rowRange, Range(0, numCols().toInt))
}
override def apply(rowRange: ::.type, colRange: Range) = {
new RowPartitionedMatrix(rdd.map { lm =>
RowPartition(lm.mat(::, colRange))
})
}
override def apply(rowRange: Range, colRange: Range) = {
// TODO: Make this a class member
val partitionBroadcast = rdd.sparkContext.broadcast(getPartitionInfo)
// First filter partitions which have rows in this index, then select them
RowPartitionedMatrix.fromMatrix(rdd.mapPartitionsWithIndex { case (part, iter) =>
if (partitionBroadcast.value.contains(part)) {
val startRows = partitionBroadcast.value(part).sortBy(x => x.blockId).map(x => x.startRow)
iter.zip(startRows.iterator).flatMap { case (lm, sr) =>
// TODO: Handle Longs vs. Ints correctly here
val matRange = sr.toInt until (sr.toInt + lm.mat.rows)
if (matRange.contains(rowRange.start) || rowRange.contains(sr.toInt)) {
// The end row is min of number of rows in this partition
// and number of rows left to read
val start = (math.max(rowRange.start - sr, 0)).toInt
val end = (math.min(rowRange.end - sr, lm.mat.rows)).toInt
Iterator(lm.mat(start until end, colRange))
} else {
Iterator()
}
}
} else {
Iterator()
}
})
}
override def cache() = {
rdd.cache()
this
}
// TODO: This is terribly inefficient if we have more partitions.
// Make this more efficient
override def collect(): DenseMatrix[Double] = {
require(numRows() * numCols() <= Integer.MAX_VALUE,
"Cannot collect matrix of size " + numRows() + " " + numCols() + " to a local array")
val parts = rdd.map(x => x.mat).collect()
val fullMat = new Array[Double]( (numRows() * numCols()).toInt )
// Fill in row at a time but in column major order
var row = 0
parts.foreach { part =>
(0 until part.rows).foreach { r =>
(0 until part.cols).foreach { c =>
val idx = c * numRows().toInt + row
fullMat(idx) = part(r, c)
}
row = row + 1
}
}
new DenseMatrix[Double](numRows().toInt, numCols().toInt, fullMat)
// parts.reduceLeftOption((a,b) => DenseMatrix.vertcat(a, b)).getOrElse(new DenseMatrix[Double](0, 0))
}
def qrR(): DenseMatrix[Double] = {
new TSQR().qrR(this)
}
// Estimate the condition number of the matrix
// Optionally pass in an R that corresponds to the R matrix obtained
// by a QR decomposition
def condEst(rOpt: Option[DenseMatrix[Double]] = None): Double = {
val R = rOpt match {
case None => qrR()
case Some(rMat) => rMat
}
val n = R.rows
val work = new Array[Double](3*n)
val iwork = new Array[Int](n)
val rcond = new doubleW(0)
val info = new intW(0)
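// dtrcon estimates the reciprocal condition number (1-norm) of the upper-triangular R; it is inverted below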
lapack.dtrcon("1", "U", "n", n, R.data, n, rcond, work, iwork, info)
1/(rcond.`val`)
}
// Apply a function to each partition of the matrix
def mapPartitions(f: DenseMatrix[Double] => DenseMatrix[Double]) = {
// TODO: This can be efficient if we don't change num rows per partition
RowPartitionedMatrix.fromMatrix(rdd.map { lm =>
f(lm.mat)
})
}
}
object RowPartitionedMatrix {
// Convert an RDD[DenseMatrix[Double]] to an RDD[RowPartition]
def fromMatrix(matrixRDD: RDD[DenseMatrix[Double]]): RowPartitionedMatrix = {
new RowPartitionedMatrix(matrixRDD.map(mat => RowPartition(mat)))
}
def fromArray(matrixRDD: RDD[Array[Double]]): RowPartitionedMatrix = {
fromMatrix(arrayToMatrix(matrixRDD))
}
def fromArray(
matrixRDD: RDD[Array[Double]],
rowsPerPartition: Seq[Int],
cols: Int): RowPartitionedMatrix = {
new RowPartitionedMatrix(
arrayToMatrix(matrixRDD, rowsPerPartition, cols).map(mat => RowPartition(mat)),
Some(rowsPerPartition.sum), Some(cols))
}
def arrayToMatrix(
matrixRDD: RDD[Array[Double]],
rowsPerPartition: Seq[Int],
cols: Int) = {
val rBroadcast = matrixRDD.context.broadcast(rowsPerPartition)
val data = matrixRDD.mapPartitionsWithIndex { case (part, iter) =>
val rows = rBroadcast.value(part)
val matData = new Array[Double](rows * cols)
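// Pack incoming rows into column-major order: element idx of row nRow lands at offset nRow + idx * rows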
var nRow = 0
while (iter.hasNext) {
val arr = iter.next()
var idx = 0
while (idx < arr.size) {
matData(nRow + idx * rows) = arr(idx)
idx = idx + 1
}
nRow += 1
}
Iterator(new DenseMatrix[Double](rows, cols, matData.toArray))
}
data
}
def arrayToMatrix(matrixRDD: RDD[Array[Double]]): RDD[DenseMatrix[Double]] = {
val rowsColsPerPartition = matrixRDD.mapPartitionsWithIndex { case (part, iter) =>
if (iter.hasNext) {
val nCols = iter.next().size
Iterator((part, 1 + iter.size, nCols))
} else {
Iterator((part, 0, 0))
}
}.collect().sortBy(x => (x._1, x._2, x._3)).map(x => (x._1, (x._2, x._3))).toMap
val rBroadcast = matrixRDD.context.broadcast(rowsColsPerPartition)
val data = matrixRDD.mapPartitionsWithIndex { case (part, iter) =>
val (rows, cols) = rBroadcast.value(part)
val matData = new Array[Double](rows * cols)
var nRow = 0
while (iter.hasNext) {
val arr = iter.next()
var idx = 0
while (idx < arr.size) {
matData(nRow + idx * rows) = arr(idx)
idx = idx + 1
}
nRow += 1
}
Iterator(new DenseMatrix[Double](rows, cols, matData.toArray))
}
data
}
// Convert a DataFrame of all DoubleType columns to a RowPartitionedMatrix
def fromDataFrame(df: DataFrame): RowPartitionedMatrix = {
require(df.dtypes.forall(_._2 == "DoubleType"),
"The provided DataFrame must contain all 'DoubleType' columns")
fromMatrix(dataFrameToMatrix(df))
}
def fromDataFrame(
df: DataFrame,
rowsPerPartition: Seq[Int],
cols: Int): RowPartitionedMatrix = {
require(df.dtypes.forall(_._2 == "DoubleType"),
"The provided DataFrame must contain all 'DoubleType' columns")
new RowPartitionedMatrix(
dataFrameToMatrix(df, rowsPerPartition, cols).map(mat => RowPartition(mat)),
Some(rowsPerPartition.sum), Some(cols))
}
// Convert a DataFrame of all DoubleType columns to a RDD[DenseMatrix[Double]]
def dataFrameToMatrix(
df: DataFrame,
rowsPerPartition: Seq[Int],
cols: Int) = {
val matrixRDD = df.rdd.map(x => x.toSeq.toArray).map(y => y.map(z => z.asInstanceOf[Double]))
arrayToMatrix(matrixRDD, rowsPerPartition, cols)
}
def dataFrameToMatrix(df: DataFrame): RDD[DenseMatrix[Double]] = {
val matrixRDD = df.rdd.map(x => x.toSeq.toArray).map(y => y.map(z => z.asInstanceOf[Double]))
arrayToMatrix(matrixRDD)
}
// Create a RowPartitionedMatrix containing random numbers drawn uniformly from [0, 1)
def createRandom(sc: SparkContext,
numRows: Int,
numCols: Int,
numParts: Int,
cache: Boolean = true): RowPartitionedMatrix = {
val rowsPerPart = numRows / numParts
val matrixParts = sc.parallelize(1 to numParts, numParts).mapPartitions { part =>
val data = new Array[Double](rowsPerPart * numCols)
var i = 0
while (i < rowsPerPart*numCols) {
data(i) = ThreadLocalRandom.current().nextDouble()
i = i + 1
}
val mat = new DenseMatrix[Double](rowsPerPart, numCols, data)
Iterator(mat)
}
if (cache) {
matrixParts.cache()
}
RowPartitionedMatrix.fromMatrix(matrixParts)
}
}
|
amplab/ml-matrix
|
src/main/scala/edu/berkeley/cs/amplab/mlmatrix/RowPartitionedMatrix.scala
|
Scala
|
apache-2.0
| 12,711 |
package es.weso.computex.entities
import es.weso.utils.JenaUtils
import es.weso.computex.profile.Profile
case class Options(
val profile: Profile = Profile.Computex,
val contentFormat: String = JenaUtils.TTL,
val verbose: Boolean = false,
val showSource: Boolean = false,
val expand: Boolean = true,
val imports: Boolean = true,
val preffix: Boolean = true
)
|
weso/computex
|
app/es/weso/computex/entities/Options.scala
|
Scala
|
apache-2.0
| 403 |
package rovak.steamkit.steam.gc
import rovak.steamkit.steam.language.internal.MsgHdr
import rovak.steamkit.util.stream.BinaryReader
import rovak.steamkit.steam.language.EMsg
trait IPacketGCMsg {
/**
* Gets a value indicating whether this packet message is protobuf backed.
* @return true if this instance is protobuf backed; otherwise, false
*/
def isProto: Boolean
/**
* Gets the network message type of this packet message.
* @return The message type.
*/
def msgType: EMsg
/**
* Gets the target job id for this packet message.
* @return The target job id.
*/
def targetJobId: Long
/**
* Gets the source job id for this packet message.
* @return The source job id.
*/
def sourceJobId: Long
/**
* Gets the underlying data that represents this client message.
* @return The data.
*/
val data: Array[Byte]
}
|
Rovak/scala-steamkit
|
steam/src/main/scala/rovak/steamkit/steam/gc/IPacketGCMsg.scala
|
Scala
|
mit
| 878 |
/*
* Copyright 2014 Rik van der Kleij
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.powertuple.intellij.haskell.external
import com.intellij.execution.configurations.GeneralCommandLine
import com.intellij.execution.process.CapturingProcessHandler
import com.intellij.execution.process.ProcessOutput
import java.io.File
import scala.collection.JavaConversions._
object ExternalProcess {
val StandardTimeout = 3000
def getProcessOutput(workDir: String, commandPath: String, arguments: Seq[String], timeout: Int = StandardTimeout): ProcessOutput = {
if (!new File(workDir).isDirectory || !new File(commandPath).canExecute) {
new ProcessOutput
} else {
val cmd: GeneralCommandLine = new GeneralCommandLine
cmd.withWorkDirectory(workDir)
cmd.setExePath(commandPath)
cmd.addParameters(arguments)
execute(cmd, timeout)
}
}
def execute(cmd: GeneralCommandLine): ProcessOutput = {
execute(cmd, StandardTimeout)
}
def execute(cmd: GeneralCommandLine, timeout: Int): ProcessOutput = {
val processHandler: CapturingProcessHandler = new CapturingProcessHandler(cmd.createProcess)
if (timeout < 0) processHandler.runProcess else processHandler.runProcess(timeout, false)
}
}
|
ericssmith/intellij-haskell
|
src/com/powertuple/intellij/haskell/external/ExternalProcess.scala
|
Scala
|
apache-2.0
| 1,740 |
/**
* Examples from: Chapter 5: KMeans
*/
package com.heatonresearch.aifh.examples.kmeans
|
HairyFotr/aifh
|
vol1/scala-examples/src/main/scala/com/heatonresearch/aifh/examples/kmeans/package.scala
|
Scala
|
apache-2.0
| 93 |
package be.cmpg.cancer
import org.junit.runner.RunWith
import org.specs2.runner.JUnitRunner
import org.specs2.mutable.Specification
import be.cmpg.graph.Network
import be.cmpg.graph.Interaction
import be.cmpg.graph.Gene
@RunWith(classOf[JUnitRunner])
class MutualExclusivityNetworkManagerSpecification extends Specification {
"The Mutual Exclusivity Network Manager" should {
"give the expected score for this matrix" in {
val network = new Network(interactions = Set())
/*
* Score:
*
* Mutation matrix
*        g1  g2  g3  g4
* S_1     x
* S_2     x       x   x
* S_3     x
* S_4     x
* S_5     x   x
* S_6         x   x
* S_7         x
* S_8         x
* S_9             x
* S_10                x
* .
* .
* .
* S_30                    // samples without mutation in those genes
*/
val genePatientMatrix: Map[PolimorphismKey, Polimorphism] = Map(
PolimorphismKey(Gene("g1"), "S_1") -> Polimorphism("g1"),
PolimorphismKey(Gene("g1"), "S_2") -> Polimorphism("g1"),
PolimorphismKey(Gene("g3"), "S_2") -> Polimorphism("g3"),
PolimorphismKey(Gene("g4"), "S_2") -> Polimorphism("g4"),
PolimorphismKey(Gene("g1"), "S_3") -> Polimorphism("g1"),
PolimorphismKey(Gene("g1"), "S_4") -> Polimorphism("g1"),
PolimorphismKey(Gene("g1"), "S_5") -> Polimorphism("g1"),
PolimorphismKey(Gene("g2"), "S_5") -> Polimorphism("g2"),
PolimorphismKey(Gene("g2"), "S_6") -> Polimorphism("g2"),
PolimorphismKey(Gene("g3"), "S_6") -> Polimorphism("g3"),
PolimorphismKey(Gene("g2"), "S_7") -> Polimorphism("g2"),
PolimorphismKey(Gene("g2"), "S_8") -> Polimorphism("g2"),
PolimorphismKey(Gene("g3"), "S_9") -> Polimorphism("g3"),
PolimorphismKey(Gene("g4"), "S_10") -> Polimorphism("g4")) ++ (11 to 30).map {id => PolimorphismKey(Gene("gOther"), "S_"+id) -> Polimorphism("gOther")}.toMap
/* Scores:
* S1 S2 S3 S4 S5 S6 S7 S8 S9 S10
* s(g1) = sqrt( 1 + 1/3 + 1 + 1 + 1/2) = sqrt(3.8333) ≈ 1.96
* s(g2) = sqrt( 1/2 + 1 + 1) = sqrt(2.5) ≈ 1.58
* s(g3) = sqrt(1) = 1
* s(g4) = sqrt(1) = 1
*
* S_2 and S_5 each contribute only once to the score of g1.
* S_2 is mutated in 3 genes, so its contribution to g1's score is only 1/3.
* S_5 is mutated in 2 genes, so its contribution to g1's score is only 1/2.
*
* For g3, S_2 and S_6 do not add to the score, as they have already contributed to g1 and g2 respectively.
* For g4, S_2 does not add to the score, as it has already contributed to g1.
*
*/
val networkManager = new MutualExclusivityNetworkManager(
network = network,
genePatientMatrix = genePatientMatrix,
minimumSamplesAltered = 0,
evaporation = 0.996)
val subnetwork = Set(
Interaction(Gene("g1"), Gene("g2")),
Interaction(Gene("g2"), Gene("g3")),
Interaction(Gene("g3"), Gene("g4")))
val expectedScore = (math.sqrt(1.0 + 1.0/3.0 + 1.0 + 1.0 + 1.0/2.0) + math.sqrt(1.0 + 1.0 + 1.0/2.0) + math.sqrt(1.0) + math.sqrt(1.0)) /4
networkManager.scoreSubnetwork(subnetwork) must beEqualTo(expectedScore)
}
/*
"better distributed networks should have better scores" in {
val network = new Network(interactions = Set())
* Score:
*
* Mutation matrix
* g1 g2 g3 g4 g1 g2 g3 g4
* S_1 x x
* S_2 x x
* S_3 x x
* S_4 x x
* S_5 x < x
* S_6 x x
* S_7 x x
* S_8 x x
* S_9 x x
* S_10 x x
* S_11 x x
* S_12 x x
val genePatientMatrix1: Map[(String, String), Polimorphism] = Map(
("g1", "S_1") -> Polimorphism("g1"),
("g1", "S_2") -> Polimorphism("g1"),
("g1", "S_3") -> Polimorphism("g1"),
("g1", "S_4") -> Polimorphism("g1"),
("g1", "S_5") -> Polimorphism("g1"),
("g2", "S_6") -> Polimorphism("g2"),
("g2", "S_7") -> Polimorphism("g2"),
("g2", "S_8") -> Polimorphism("g2"),
("g3", "S_9") -> Polimorphism("g3"),
("g3", "S_10") -> Polimorphism("g3"),
("g4", "S_11") -> Polimorphism("g4"),
("g4", "S_12") -> Polimorphism("g4"))
val genePatientMatrix2: Map[(String, String), Polimorphism] = Map(
("g1", "S_1") -> Polimorphism("g1"),
("g1", "S_2") -> Polimorphism("g1"),
("g1", "S_3") -> Polimorphism("g1"),
("g2", "S_4") -> Polimorphism("g2"),
("g2", "S_5") -> Polimorphism("g2"),
("g2", "S_5") -> Polimorphism("g2"),
("g2", "S_6") -> Polimorphism("g2"),
("g3", "S_7") -> Polimorphism("g3"),
("g3", "S_8") -> Polimorphism("g3"),
("g3", "S_9") -> Polimorphism("g3"),
("g4", "S_10") -> Polimorphism("g4"),
("g4", "S_11") -> Polimorphism("g4"),
("g4", "S_12") -> Polimorphism("g4"))
val networkManager1 = new MutualExclusivityNetworkManager(
network = network,
genePatientMatrix = genePatientMatrix1,
minimumSamplesAltered = 0)
val networkManager2 = new MutualExclusivityNetworkManager(
network = network,
genePatientMatrix = genePatientMatrix2,
minimumSamplesAltered = 0)
val subnetwork = Set(
Interaction(Gene("g1"), Gene("g2")),
Interaction(Gene("g2"), Gene("g3")),
Interaction(Gene("g3"), Gene("g4")))
println("BadSubNetwork: "+ networkManager1.scoreSubnetwork(subnetwork))
println("GoodSubNetwork: "+ networkManager2.scoreSubnetwork(subnetwork))
networkManager1.scoreSubnetwork(subnetwork) must beLessThan(networkManager2.scoreSubnetwork(subnetwork) )
}*/
}
/*
"The Mutual Exclusivity Network Manager" should {
val network = new Network(
interactions = Set(
Interaction(Gene("exclusive1"), Gene("exclusive2"), probability = 1),
Interaction(Gene("exclusive1"), Gene("notmutated1"), probability = 1),
Interaction(Gene("exclusive1"), Gene("notexclusive1"), probability = 1),
Interaction(Gene("notmutated1"), Gene("notexclusive2"), probability = 1),
Interaction(Gene("notexclusive1"), Gene("notexclusive"), probability = 1),
Interaction(Gene("exclusive2"), Gene("exclusive3"), probability = 1),
Interaction(Gene("exclusive2"), Gene("exclusive4"), probability = 1)))
val genePatientMatrix: Map[(String, String), String] = Map(
// p1 have only exclusive1
(("exclusive1", "p1.1") -> "M"),
(("exclusive1", "p1.2") -> "M"),
(("exclusive1", "p1.3") -> "M"),
// p2 have only exclusive2
(("exclusive2", "p2.1") -> "M"),
(("exclusive2", "p2.2") -> "M"),
// p3 have only exclusive3
(("exclusive3", "p3.1") -> "M"),
(("exclusive3", "p3.2") -> "M"),
// p4 have only exclusive4
(("exclusive4", "p4.1") -> "M"),
(("exclusive4", "p4.2") -> "M"),
// notexclusive 1&2 have mutations for pacients 1,2 & 3
(("notexclusive1", "p1.1") -> "M"),
(("notexclusive1", "p1.2") -> "M"),
(("notexclusive2", "p1.1") -> "M")
)
val all_samples = genePatientMatrix.keys.map(_._2).toSet
val networkManager = new MutualExclusivityNetworkManager(
network = network,
genePatientMatrix = genePatientMatrix,
mAS_perGene = 1,
all_samples = all_samples,
evaporation = 0.996)
"give zero score to an empty subnetwork" in {
val subnetwork: Set[Interaction] = Set()
networkManager.scoreSubnetwork(subnetwork) must beEqualTo(0)
}
"give zero score to a size one subnetwork" in {
val subnetwork: Set[Interaction] = Set(Interaction(Gene("exclusive1"), Gene("exclusive1")))
networkManager.scoreSubnetwork(subnetwork) must beEqualTo(0)
}
"give zero score to a subnetwork with non mutated genes" in { // FIXME should it be like this??? What if all subnetowrks have genes without mutations???
val subnetwork: Set[Interaction] = Set(Interaction(Gene("exclusive1"), Gene("notmutated1")))
networkManager.scoreSubnetwork(subnetwork) must beEqualTo(0)
}
"give a positive score to a 2 gene subnetwork" in {
val subnetwork: Set[Interaction] = Set(Interaction(Gene("exclusive1"), Gene("exclusive2")))
networkManager.scoreSubnetwork(subnetwork) must be_>(0.0)
}
"give a higher positive score to bigger exclusive subnetworks" in {
val subnetwork_s2: Set[Interaction] = Set(Interaction(Gene("exclusive1"), Gene("exclusive2")))
val subnetwork_s3: Set[Interaction] = Set(
Interaction(Gene("exclusive1"), Gene("exclusive2")),
Interaction(Gene("exclusive2"), Gene("exclusive3")))
val score_2 = networkManager.scoreSubnetwork(subnetwork_s2)
val score_3 = networkManager.scoreSubnetwork(subnetwork_s3)
score_3 must be_>(score_2)
}
"give a higher score to subnetworks with more mutual exclusive samples" in {
val subnetwork_with5patients: Set[Interaction] = Set(Interaction(Gene("exclusive1"), Gene("exclusive2")))
val subnetwork_with4patients: Set[Interaction] = Set(Interaction(Gene("exclusive3"), Gene("exclusive2")))
val score_with5patients = networkManager.scoreSubnetwork(subnetwork_with5patients)
val score_with4patients = networkManager.scoreSubnetwork(subnetwork_with4patients)
score_with5patients must be_>(score_with4patients)
}
"give a higher score to subnetworks without non-exclusive genes" in {
val subnetwork_withNonExclusive: Set[Interaction] = Set(
Interaction(Gene("exclusive1"), Gene("exclusive2")),
Interaction(Gene("exclusive1"), Gene("notexclusive1")))
val subnetwork_withOnlyExclusive: Set[Interaction] = Set(
Interaction(Gene("exclusive1"), Gene("exclusive2")))
val score_withNonExclusive = networkManager.scoreSubnetwork(subnetwork_withNonExclusive)
val score_withOnlyExclusive = networkManager.scoreSubnetwork(subnetwork_withOnlyExclusive)
score_withOnlyExclusive must be_>(score_withNonExclusive)
}
}
*
*/
}
|
spulido99/SSA
|
src/test/scala/be/cmpg/cancer/MutualExclusivityNetworkManagerSpecification.scala
|
Scala
|
gpl-2.0
| 10,791 |
package com.github.britter.restfulpersons.scalafinatra
import reactivemongo.api.collections.default.BSONCollection
trait CollectionProvider {
protected def collection(colName: String): BSONCollection
}
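// Minimal implementation sketch (hypothetical, not part of this repository): a concrete
// provider would typically resolve collections by name from a ReactiveMongo database handle, e.g.
//
//   trait MongoCollectionProvider extends CollectionProvider {
//     protected def db: reactivemongo.api.DefaultDB
//     protected def collection(colName: String): BSONCollection = db(colName)
//   }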
|
britter/restful-persons
|
scala-finatra/src/main/scala/com/github/britter/restfulpersons/scalafinatra/CollectionProvider.scala
|
Scala
|
mit
| 208 |
package doodle
package svg
package algebra
import cats._
import doodle.language.Basic
trait JvmAlgebraModule
extends AlgebraModule
with PathModule
with ShapeModule
with SvgModule
with JvmBase {
type Algebra[F[_]] = doodle.algebra.Algebra[F] with Basic[F]
final class JvmAlgebra(
val applyF: Apply[SvgResult],
val functorF: Functor[SvgResult]
) extends BaseAlgebra
val algebraInstance = new JvmAlgebra(Svg.svgResultApply, Svg.svgResultApply)
}
|
underscoreio/doodle
|
svg/jvm/src/main/scala/doodle/svg/algebra/Algebra.scala
|
Scala
|
apache-2.0
| 486 |
package io.vertx.ext.asyncsql.impl
import io.vertx.core.Vertx
import io.vertx.core.json.JsonObject
import io.vertx.ext.asyncsql.impl.pool.PostgresqlAsyncConnectionPool
/**
* @author <a href="http://www.campudus.com">Joern Bernhardt</a>.
*/
class PostgreSQLClient(val vertx: Vertx, val config: JsonObject) extends BaseSQLClient {
override protected val poolFactory = PostgresqlAsyncConnectionPool.apply _
override protected val defaultHost: String = "localhost"
override protected val defaultPort: Int = 5432
override protected val defaultDatabase: Option[String] = Some("testdb")
override protected val defaultUser: String = "vertx"
override protected val defaultPassword: Option[String] = Some("password")
}
|
InfoSec812/vertx-mysql-postgresql-service
|
src/main/scala/io/vertx/ext/asyncsql/impl/PostgreSQLClient.scala
|
Scala
|
apache-2.0
| 732 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.controller
import scala.concurrent.Future
import scala.util.Failure
import scala.util.Success
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.server.RequestContext
import akka.http.scaladsl.server.RouteResult
import spray.json._
import whisk.common.TransactionId
import whisk.core.database.DocumentTypeMismatchException
import whisk.core.database.CacheChangeNotification
import whisk.core.database.NoDocumentException
import whisk.core.entitlement._
import whisk.core.entity._
import whisk.core.entity.types.EntityStore
import whisk.http.ErrorResponse.terminate
import whisk.http.Messages
trait WhiskPackagesApi extends WhiskCollectionAPI with ReferencedEntities {
services: WhiskServices =>
protected override val collection = Collection(Collection.PACKAGES)
/** Database service to CRUD packages. */
protected val entityStore: EntityStore
/** Notification service for cache invalidation. */
protected implicit val cacheChangeNotification: Some[CacheChangeNotification]
/** Route directives for API. The methods that are supported on packages. */
protected override lazy val entityOps = put | get | delete
/** Must exclude any private packages when listing those in a namespace unless owned by subject. */
protected override val listRequiresPrivateEntityFilter = true
/** JSON response formatter. */
import RestApiCommons.jsonDefaultResponsePrinter
/**
* Creates or updates package/binding if it already exists. The PUT content is deserialized into a
* WhiskPackagePut which is a subset of WhiskPackage (it eschews the namespace and entity name since
* the former is derived from the authenticated user and the latter is derived from the URI). If the
* binding property is defined, creates or updates a package binding as long as resource is already a
* binding.
*
* The WhiskPackagePut is merged with the existing WhiskPackage in the datastore, overriding old values
* with new values that are defined. Any values not defined in the PUT content are replaced with old values.
*
* Responses are one of (Code, Message)
* - 200 WhiskPackage as JSON
* - 400 Bad Request
* - 409 Conflict
* - 500 Internal Server Error
*/
override def create(user: Identity, entityName: FullyQualifiedEntityName)(implicit transid: TransactionId) = {
parameter('overwrite ? false) { overwrite =>
entity(as[WhiskPackagePut]) { content =>
val request = content.resolve(entityName.namespace)
request.binding.map { b =>
logging.info(this, "checking if package is accessible")
}
val referencedentities = referencedEntities(request)
onComplete(entitlementProvider.check(user, Privilege.READ, referencedentities)) {
case Success(_) =>
putEntity(
WhiskPackage,
entityStore,
entityName.toDocId,
overwrite,
update(request) _,
() => create(request, entityName))
case Failure(f) =>
rewriteEntitlementFailure(f)
}
}
}
}
/**
* Activating a package is not supported. This method is not permitted and is not reachable.
*
* Responses are one of (Code, Message)
* - 405 Not Allowed
*/
override def activate(user: Identity, entityName: FullyQualifiedEntityName, env: Option[Parameters])(
implicit transid: TransactionId) = {
logging.error(this, "activate is not permitted on packages")
reject
}
/**
* Deletes package/binding. If a package, may only be deleted if there are no entities in the package.
*
* Responses are one of (Code, Message)
* - 200 WhiskPackage as JSON
* - 404 Not Found
* - 409 Conflict
* - 500 Internal Server Error
*/
override def remove(user: Identity, entityName: FullyQualifiedEntityName)(implicit transid: TransactionId) = {
deleteEntity(
WhiskPackage,
entityStore,
entityName.toDocId,
(wp: WhiskPackage) => {
wp.binding map {
// this is a binding, it is safe to remove
_ =>
Future.successful({})
} getOrElse {
// may only delete a package if all its ingredients are deleted already
WhiskAction
.listCollectionInNamespace(entityStore, wp.namespace.addPath(wp.name), skip = 0, limit = 0) flatMap {
case Left(list) if (list.size != 0) =>
Future failed {
RejectRequest(
Conflict,
s"Package not empty (contains ${list.size} ${if (list.size == 1) "entity" else "entities"})")
}
case _ => Future.successful({})
}
}
})
}
/**
* Gets package/binding.
* The package/binding name is prefixed with the namespace to create the primary index key.
*
* Responses are one of (Code, Message)
* - 200 WhiskPackage as JSON
* - 404 Not Found
* - 500 Internal Server Error
*/
override def fetch(user: Identity, entityName: FullyQualifiedEntityName, env: Option[Parameters])(
implicit transid: TransactionId) = {
getEntity(WhiskPackage, entityStore, entityName.toDocId, Some { mergePackageWithBinding() _ })
}
/**
* Gets all packages/bindings in namespace.
*
* Responses are one of (Code, Message)
* - 200 [] or [WhiskPackage as JSON]
* - 500 Internal Server Error
*/
override def list(user: Identity, namespace: EntityPath, excludePrivate: Boolean)(implicit transid: TransactionId) = {
// for consistency, all the collections should support the same list API
// but because supporting docs on actions is difficult, the API does not
// offer an option to fetch entities with full docs yet; see comment in
// Actions API for more.
val docs = false
parameter('skip ? 0, 'limit ? collection.listLimit, 'count ? false) { (skip, limit, count) =>
listEntities {
WhiskPackage.listCollectionInNamespace(entityStore, namespace, skip, limit, docs) map { list =>
// any subject is entitled to list packages in any namespace
// however, they shall only observe public packages if the packages
// are not in one of the namespaces the subject is entitled to
val packages = list.fold((js) => js, (ps) => ps.map(WhiskPackage.serdes.write(_)))
FilterEntityList.filter(packages, excludePrivate, additionalFilter = {
// additionally exclude bindings
_.fields.get(WhiskPackage.bindingFieldName) match {
case Some(JsBoolean(isbinding)) => !isbinding
case _ => false // exclude anything that does not conform
}
})
}
}
}
}
/**
* Validates that a referenced binding exists.
*/
private def checkBinding(binding: FullyQualifiedEntityName)(implicit transid: TransactionId): Future[Unit] = {
WhiskPackage.get(entityStore, binding.toDocId) recoverWith {
case t: NoDocumentException => Future.failed(RejectRequest(BadRequest, Messages.bindingDoesNotExist))
case t: DocumentTypeMismatchException =>
Future.failed(RejectRequest(Conflict, Messages.requestedBindingIsNotValid))
case t => Future.failed(RejectRequest(BadRequest, t))
} flatMap {
// trying to create a new package binding that refers to another binding
case provider if provider.binding.nonEmpty =>
Future.failed(RejectRequest(BadRequest, Messages.bindingCannotReferenceBinding))
// or creating a package binding that refers to a package
case _ => Future.successful({})
}
}
/**
* Creates a WhiskPackage from PUT content, generating default values where necessary.
* If this is a binding, confirm the referenced package exists.
*/
private def create(content: WhiskPackagePut, pkgName: FullyQualifiedEntityName)(
implicit transid: TransactionId): Future[WhiskPackage] = {
val validateBinding = content.binding map { b =>
checkBinding(b.fullyQualifiedName)
} getOrElse Future.successful({})
validateBinding map { _ =>
WhiskPackage(
pkgName.path,
pkgName.name,
content.binding,
content.parameters getOrElse Parameters(),
content.version getOrElse SemVer(),
content.publish getOrElse false,
// remove any binding annotation from PUT (always set by the controller)
(content.annotations getOrElse Parameters())
- WhiskPackage.bindingFieldName
++ bindingAnnotation(content.binding))
}
}
/** Updates a WhiskPackage from PUT content, merging old package/binding where necessary. */
private def update(content: WhiskPackagePut)(wp: WhiskPackage)(
implicit transid: TransactionId): Future[WhiskPackage] = {
val validateBinding = content.binding map { binding =>
wp.binding map {
// pre-existing entity is a binding, check that new binding is valid
b =>
checkBinding(b.fullyQualifiedName)
} getOrElse {
// pre-existing entity is a package, cannot make it a binding
Future.failed(RejectRequest(Conflict, Messages.packageCannotBecomeBinding))
}
} getOrElse Future.successful({})
validateBinding map { _ =>
WhiskPackage(
wp.namespace,
wp.name,
content.binding orElse wp.binding,
content.parameters getOrElse wp.parameters,
content.version getOrElse wp.version.upPatch,
content.publish getOrElse wp.publish,
// override any binding annotation from PUT (always set by the controller)
(content.annotations getOrElse wp.annotations)
- WhiskPackage.bindingFieldName
++ bindingAnnotation(content.binding orElse wp.binding)).revision[WhiskPackage](wp.docinfo.rev)
}
}
private def rewriteEntitlementFailure(failure: Throwable)(
implicit transid: TransactionId): RequestContext => Future[RouteResult] = {
logging.info(this, s"rewriting failure $failure")
failure match {
case RejectRequest(NotFound, _) => terminate(BadRequest, Messages.bindingDoesNotExist)
case RejectRequest(Conflict, _) => terminate(Conflict, Messages.requestedBindingIsNotValid)
case _ => super.handleEntitlementFailure(failure)
}
}
/**
* Constructs a "binding" annotation. This is redundant with the binding
* information available in WhiskPackage but necessary for some clients which
* fetch package lists but cannot determine which package may be bound. An
* alternative is to include the binding in the package list "view" but this
* will require an API change. So using an annotation instead.
*/
private def bindingAnnotation(binding: Option[Binding]): Parameters = {
binding map { b =>
Parameters(WhiskPackage.bindingFieldName, Binding.serdes.write(b))
} getOrElse Parameters()
}
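  // Illustrative shape only (assuming Binding.serdes emits the bound package's namespace and
  // name): the resulting annotation pairs WhiskPackage.bindingFieldName with the serialized
  // binding, roughly { "key": <bindingFieldName>, "value": { "namespace": "ns", "name": "pkg" } }.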
/**
* Constructs a WhiskPackage that is a merger of a package with its package binding (if any).
* If this is a binding, fetch package for binding, merge parameters then emit.
* Otherwise this is a package, emit it.
*/
private def mergePackageWithBinding(ref: Option[WhiskPackage] = None)(wp: WhiskPackage)(
implicit transid: TransactionId): RequestContext => Future[RouteResult] = {
wp.binding map {
case b: Binding =>
val docid = b.fullyQualifiedName.toDocId
logging.info(this, s"fetching package '$docid' for reference")
getEntity(WhiskPackage, entityStore, docid, Some {
mergePackageWithBinding(Some { wp }) _
})
} getOrElse {
val pkg = ref map { _ inherit wp.parameters } getOrElse wp
logging.info(this, s"fetching package actions in '${wp.fullPath}'")
val actions = WhiskAction.listCollectionInNamespace(entityStore, wp.fullPath, skip = 0, limit = 0) flatMap {
case Left(list) =>
Future.successful {
pkg withPackageActions (list map { o =>
WhiskPackageAction.serdes.read(o)
})
}
case t =>
Future.failed {
logging.error(this, "unexpected result in package action lookup: $t")
new IllegalStateException(s"unexpected result in package action lookup: $t")
}
}
onComplete(actions) {
case Success(p) =>
logging.info(this, s"[GET] entity success")
complete(OK, p)
case Failure(t) =>
logging.error(this, s"[GET] failed: ${t.getMessage}")
terminate(InternalServerError)
}
}
}
}
|
tysonnorris/openwhisk
|
core/controller/src/main/scala/whisk/core/controller/Packages.scala
|
Scala
|
apache-2.0
| 13,469 |
package rotationsymmetry.sxgboost
import org.scalatest.FunSuite
import rotationsymmetry.sxgboost.loss.SquareLoss
class LossAggregatorSuite extends FunSuite with TestData{
val loss = new SquareLoss()
val featureIndicesBundle = Array(Array(0, 1), Array(1, 2))
val metaData = new MetaData(3, Array(3, 4, 5))
val tree0 = new WorkingNode(0)
tree0.prediction = Some(0.4)
val workingModel = new WorkingModel(0, Array(tree0))
test("offsets should be of correct size and values") {
val currentRoot = new WorkingNode(0)
val agg = new LossAggregator(featureIndicesBundle, workingModel, currentRoot, metaData, loss)
assert(agg.offsets.length == featureIndicesBundle.length)
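    // Expected offsets: 3 stats are kept per bin (first- and second-order loss terms plus a
    // weight, as exercised in the "add treePoint" test below), so in bundle 0 feature 1 starts
    // after feature 0's 3 bins (3 * 3 = 9) and in bundle 1 feature 2 starts after 4 bins (12).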
assert(agg.offsets === Array(Array(0, 9), Array(0, 12)))
}
test("stats should be of correct size") {
val currentRoot = new WorkingNode(0)
val agg = new LossAggregator(featureIndicesBundle, workingModel, currentRoot, metaData, loss)
assert(agg.stats.length == featureIndicesBundle.length)
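    // Expected sizes: (3 + 4) bins * 3 stats = 21 for bundle 0 and (4 + 5) * 3 = 27 for
    // bundle 1, with bin counts taken from metaData.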
assert(agg.stats(0).length == 21)
assert(agg.stats(1).length == 27)
}
test("add treePoint will not update stats if the leaf node is not in the batch") {
val currentRoot = new WorkingNode(0)
val agg = new LossAggregator(featureIndicesBundle, workingModel, currentRoot, metaData, loss)
val treePoint = TreePoint(0.3, Array(0, 0, 1))
val stats0 = agg.stats.map(_.clone())
agg.add(treePoint)
assert(stats0 === agg.stats)
}
test("add treePoint will update stats correctly if the leaf node is in the batch") {
val currentRoot = new WorkingNode(0)
currentRoot.idxInBatch = Some(1)
val agg = new LossAggregator(featureIndicesBundle, workingModel, currentRoot, metaData, loss)
val treePoint = TreePoint(0.3, Array(0, 0, 1))
val stats0 = agg.stats.map(_.clone())
val diff1 = 2 * (0.4 - 0.3)
val diff2 = 2.0
val weight = 1.0
val statsSize = 3
stats0(1)(0) = diff1
stats0(1)(1) = diff2
stats0(1)(2) = weight
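    // The second feature of bundle 1 starts after feature 1's 4 bins (4 * statsSize = 12);
    // its bin value is 1, so its stats land at indices 15 to 17.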
stats0(1)(statsSize * 4 + statsSize * 1) = diff1
stats0(1)(statsSize * 4 + statsSize * 1 + 1) = diff2
stats0(1)(statsSize * 4 + statsSize * 1 + 2) = weight
agg.add(treePoint)
assert(stats0 === agg.stats)
}
test("merge stats correctly") {
val currentRoot = new WorkingNode(0)
currentRoot.idxInBatch = Some(0)
val agg = new LossAggregator(featureIndicesBundle, workingModel, currentRoot, metaData, loss)
val treePoint1 = TreePoint(0.3, Array(1, 0, 1))
val treePoint2 = TreePoint(0.4, Array(0, 2, 0))
agg.add(treePoint1)
agg.add(treePoint2)
val agg1 = new LossAggregator(featureIndicesBundle, workingModel, currentRoot, metaData, loss)
val agg2 = new LossAggregator(featureIndicesBundle, workingModel, currentRoot, metaData, loss)
agg1.add(treePoint1)
agg2.add(treePoint2)
agg1.merge(agg2)
assert(agg.stats === agg1.stats)
}
test("test") {
val currentRoot = new WorkingNode(0)
currentRoot.idxInBatch = Some(0)
val agg = new LossAggregator(Array(Array(0, 1)), new WorkingModel(0.35, Array[WorkingNode]()),
currentRoot, simpleMetaData, loss)
simpleBinnedData.foreach(tp => agg.add(tp))
}
}
|
rotationsymmetry/sparkxgboost
|
src/test/scala/rotationsymmetry/sxgboost/LossAggregatorSuite.scala
|
Scala
|
apache-2.0
| 3,178 |
package controllers
import play.api.mvc.Results._
import play.api.mvc._
import scala.concurrent.Future
object Authorized extends ActionFilter[AuthenticatedRequest] {
def filter[A](req: AuthenticatedRequest[A]) = Future.successful {
if (!req.user.admin)
Some(Unauthorized(views.html.defaultpages.unauthorized()))
else
None
}
}
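// Typically composed with an authenticating action builder, e.g. (hypothetical names):
//   AuthenticatedAction andThen Authorized   // yields an admin-only action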
|
gewoonrik/MAP
|
good/app/controllers/Authorized.scala
|
Scala
|
mit
| 353 |
package controllers.web
object Assets extends controllers.common.MyAssetsTrait
|
vtapadia/crickit
|
modules/web/app/controllers/web/Assets.scala
|
Scala
|
apache-2.0
| 79 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.tools.status
import com.beust.jcommander.{JCommander, Parameter, Parameters}
import org.locationtech.geomesa.tools.{AutocompleteInfo, Command, Runner}
class HelpCommand(runner: Runner, jc: JCommander) extends Command {
override val name: String = "help"
override val params = new HelpParameters
override def execute(): Unit = {
if (params.autocompleteInfo != null) {
val autocompleteInfo = AutocompleteInfo(params.autocompleteInfo.get(0), params.autocompleteInfo.get(1))
runner.autocompleteUsage(jc, autocompleteInfo)
Command.output.info(s"Wrote Autocomplete function to ${autocompleteInfo.path}.")
} else if (params.command == null || params.command.isEmpty) {
Command.output.info(s"${runner.usage(jc)}\\nTo see help for a specific command type: ${runner.name} help <command-name>\\n")
} else {
Command.output.info(runner.usage(jc, params.command.get(0)))
}
}
}
@Parameters(commandDescription = "Show help")
class HelpParameters {
@Parameter(description = "Help for a specific command", required = false)
val command: java.util.List[String] = null
@Parameter(names = Array("--autocomplete-function"), description = "Generates and outputs a bash function for " +
"autocompleting GeoMesa commandline commands and their parameters. First value is output path, second is command name",
required = false, hidden = true)
val autocompleteInfo: java.util.List[String] = null
}
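// Example invocations (command names are illustrative): "<tool> help" prints the global usage,
// "<tool> help export" prints usage for a single command, and
// "<tool> help --autocomplete-function /tmp/completions geomesa" writes a bash completion
// function for "geomesa" to /tmp/completions.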
|
ddseapy/geomesa
|
geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/status/HelpCommand.scala
|
Scala
|
apache-2.0
| 1,942 |
package com.pygmalios.rawKafkaCassandra
import akka.actor.{Actor, ActorSystem}
import com.typesafe.config.Config
import scala.collection.JavaConversions._
/**
* Configuration.
*/
trait GenericRawKafkaCassandraConfig {
def config: Config
// Kafka configuration
def kafkaConfig = config.getConfig("kafka")
def kafkaBrokersConfig: List[String] = kafkaConfig.getStringList("brokers").toList
def kafkaZooKeeperHostConfig = kafkaConfig.getString("zooKeeperHost")
def kafkaTopicsConfig: List[String] = kafkaConfig.getString("topics").split(",").toList
def kafkaConsumerGroupIdConfig = kafkaConfig.getString("consumerGroupId")
// Cassandra configuration
def cassandraConfig = config.getConfig("cassandra")
def cassandraPortConfig: Int = cassandraConfig.getInt("port")
def cassandraHostsConfig: List[String] = cassandraConfig.getStringList("hosts").toList
def cassandraKeyspace = cassandraConfig.getString("keyspace")
}
trait ActorSystemRawKafkaCassandraConfig extends GenericRawKafkaCassandraConfig {
def actorSystem: ActorSystem
override def config = actorSystem.settings.config.getConfig("raw-kafka-cassandra")
}
trait RawKafkaCassandraConfig extends ActorSystemRawKafkaCassandraConfig {
self: Actor =>
override def actorSystem: ActorSystem = context.system
}
class SimpleRawKafkaCassandraConfig(val actorSystem: ActorSystem) extends ActorSystemRawKafkaCassandraConfig
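// Illustrative HOCON layout read through ActorSystemRawKafkaCassandraConfig (key names come
// from the accessors above; the concrete values are made up):
//
//   raw-kafka-cassandra {
//     kafka {
//       brokers         = ["localhost:9092"]
//       zooKeeperHost   = "localhost:2181"
//       topics          = "topicA,topicB"   // comma-separated, split by kafkaTopicsConfig
//       consumerGroupId = "raw-kafka-cassandra"
//     }
//     cassandra {
//       port     = 9042
//       hosts    = ["localhost"]
//       keyspace = "raw"
//     }
//   }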
|
pygmalios/raw-kafka-cassandra
|
src/main/scala/com/pygmalios/rawKafkaCassandra/RawKafkaCassandraConfig.scala
|
Scala
|
apache-2.0
| 1,406 |
package mypipe.kafka
import mypipe._
import mypipe.api.event.Mutation
import mypipe.avro.{ AvroVersionedRecordDeserializer, InMemorySchemaRepo }
import mypipe.avro.schema.{ AvroSchemaUtils, ShortSchemaId, AvroSchema, GenericSchemaRepository }
import mypipe.mysql.MySQLBinaryLogConsumer
import mypipe.pipe.Pipe
import mypipe.producer.KafkaMutationSpecificAvroProducer
import org.apache.avro.Schema
import org.scalatest.BeforeAndAfterAll
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
import scala.concurrent.{ Await, Future }
import scala.reflect.runtime.universe._
class KafkaSpecificSpec extends UnitSpec with DatabaseSpec with ActorSystemSpec with BeforeAndAfterAll {
val log = LoggerFactory.getLogger(getClass)
@volatile var done = false
val kafkaProducer = new KafkaMutationSpecificAvroProducer(
conf.getConfig("mypipe.test.kafka-specific-producer"))
val binlogConsumer = MySQLBinaryLogConsumer(Queries.DATABASE.host, Queries.DATABASE.port, Queries.DATABASE.username, Queries.DATABASE.password)
val pipe = new Pipe("test-pipe-kafka-specific", List(binlogConsumer), kafkaProducer)
override def beforeAll() {
pipe.connect()
super.beforeAll()
while (!pipe.isConnected) { Thread.sleep(10) }
}
override def afterAll() {
pipe.disconnect()
super.afterAll()
}
"A specific Kafka Avro producer and consumer" should "properly produce and consume insert, update, and delete events" in withDatabase { db ⇒
val DATABASE = Queries.DATABASE.name
val TABLE = Queries.TABLE.name
val USERNAME = Queries.INSERT.username
val USERNAME2 = Queries.UPDATE.username
val LOGIN_COUNT = 5
val zkConnect = conf.getString("mypipe.test.kafka-specific-producer.zk-connect")
val kafkaConsumer = new KafkaMutationAvroConsumer[mypipe.kafka.UserInsert, mypipe.kafka.UserUpdate, mypipe.kafka.UserDelete, Short](
topic = KafkaUtil.specificTopic(DATABASE, TABLE),
zkConnect = zkConnect,
groupId = s"${DATABASE}_${TABLE}_specific_test-${System.currentTimeMillis()}",
schemaIdSizeInBytes = 2)(
insertCallback = { insertMutation ⇒
log.debug("consumed insert mutation: " + insertMutation)
try {
assert(insertMutation.getDatabase.toString == DATABASE)
assert(insertMutation.getTable.toString == TABLE)
assert(insertMutation.getUsername.toString == USERNAME)
assert(insertMutation.getLoginCount == LOGIN_COUNT)
}
true
},
updateCallback = { updateMutation ⇒
log.debug("consumed update mutation: " + updateMutation)
try {
assert(updateMutation.getDatabase.toString == DATABASE)
assert(updateMutation.getTable.toString == TABLE)
assert(updateMutation.getOldUsername.toString == USERNAME)
assert(updateMutation.getNewUsername.toString == USERNAME2)
assert(updateMutation.getOldLoginCount == LOGIN_COUNT)
assert(updateMutation.getNewLoginCount == LOGIN_COUNT + 1)
}
true
},
deleteCallback = { deleteMutation ⇒
log.debug("consumed delete mutation: " + deleteMutation)
try {
assert(deleteMutation.getDatabase.toString == DATABASE)
assert(deleteMutation.getTable.toString == TABLE)
assert(deleteMutation.getUsername.toString == USERNAME2)
assert(deleteMutation.getLoginCount == LOGIN_COUNT + 1)
}
done = true
true
},
implicitly[TypeTag[UserInsert]],
implicitly[TypeTag[UserUpdate]],
implicitly[TypeTag[UserDelete]]) {
protected val schemaRepoClient: GenericSchemaRepository[Short, Schema] = TestSchemaRepo
override def bytesToSchemaId(bytes: Array[Byte], offset: Int): Short = byteArray2Short(bytes, offset)
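    // Schema ids are serialized as two big-endian bytes (schemaIdSizeInBytes = 2 above);
    // byteArray2Short reassembles them into a Short.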
private def byteArray2Short(data: Array[Byte], offset: Int) = ((data(offset) << 8) | (data(offset + 1) & 0xff)).toShort
override protected def avroSchemaSubjectForMutationByte(byte: Byte): String = AvroSchemaUtils.specificSubject(DATABASE, TABLE, Mutation.byteToString(byte))
override val insertDeserializer: AvroVersionedRecordDeserializer[UserInsert] = new AvroVersionedRecordDeserializer[UserInsert]()
override val updateDeserializer: AvroVersionedRecordDeserializer[UserUpdate] = new AvroVersionedRecordDeserializer[UserUpdate]()
override val deleteDeserializer: AvroVersionedRecordDeserializer[UserDelete] = new AvroVersionedRecordDeserializer[UserDelete]()
}
val future = kafkaConsumer.start
Await.result(db.connection.sendQuery(Queries.INSERT.statement(loginCount = LOGIN_COUNT)), 2.seconds)
Await.result(db.connection.sendQuery(Queries.UPDATE.statement), 2.seconds)
Await.result(db.connection.sendQuery(Queries.DELETE.statement), 2.seconds)
Await.result(Future { while (!done) Thread.sleep(100) }, 20.seconds)
try {
kafkaConsumer.stop
Await.result(future, 5.seconds)
}
if (!done) assert(false)
}
}
object TestSchemaRepo extends InMemorySchemaRepo[Short, Schema] with ShortSchemaId with AvroSchema {
val DATABASE = "mypipe"
val TABLE = "user"
val insertSchemaId = registerSchema(AvroSchemaUtils.specificSubject(DATABASE, TABLE, Mutation.InsertString), new UserInsert().getSchema)
val updateSchemaId = registerSchema(AvroSchemaUtils.specificSubject(DATABASE, TABLE, Mutation.UpdateString), new UserUpdate().getSchema)
val deleteSchemaId = registerSchema(AvroSchemaUtils.specificSubject(DATABASE, TABLE, Mutation.DeleteString), new UserDelete().getSchema)
}
|
Asana/mypipe
|
mypipe-kafka/src/test/scala/mypipe/kafka/KafkaSpecificSpec.scala
|
Scala
|
apache-2.0
| 5,570 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.types
import scala.math.Ordering
import scala.reflect.runtime.universe.typeTag
import org.apache.spark.annotation.Stable
/**
* The data type representing `java.sql.Timestamp` values.
* Please use the singleton `DataTypes.TimestampType`.
*
* @since 1.3.0
*/
@Stable
class TimestampType private() extends AtomicType {
// The companion object and this class is separated so the companion object also subclasses
// this type. Otherwise, the companion object would be of type "TimestampType$" in byte code.
// Defined with a private constructor so the companion object is the only possible instantiation.
private[sql] type InternalType = Long
@transient private[sql] lazy val tag = typeTag[InternalType]
private[sql] val ordering = implicitly[Ordering[InternalType]]
/**
* The default size of a value of the TimestampType is 8 bytes.
*/
override def defaultSize: Int = 8
private[spark] override def asNullable: TimestampType = this
}
/**
* @since 1.3.0
*/
@Stable
case object TimestampType extends TimestampType
|
WindCanDie/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/types/TimestampType.scala
|
Scala
|
apache-2.0
| 1,877 |
// a.scala
package object pkg {
class AnyOps(val x: Any) extends AnyVal
def AnyOps(x: Any) = new AnyOps(x)
}
|
folone/dotty
|
tests/run/t8133/A_1.scala
|
Scala
|
bsd-3-clause
| 114 |
class SimpleResolve {
private def foo = 44
val x = /*ref*/foo
}
//true
|
LPTK/intellij-scala
|
testdata/checkers/checkPrivateAccess/simple/SimpleResolve.scala
|
Scala
|
apache-2.0
| 74 |
/*
* Copyright 2013-2015 Websudos, Limited.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Explicit consent must be obtained from the copyright owner, Websudos Limited before any redistribution is made.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.websudos.phantom.tables
import com.websudos.phantom.builder.query.InsertQuery
import com.websudos.phantom.dsl._
import com.websudos.phantom.testkit._
import org.joda.time.DateTime
case class Recipe(
url: String,
description: Option[String],
ingredients: List[String],
servings: Option[Int],
lastCheckedAt: DateTime,
props: Map[String, String],
uid: UUID
)
class Recipes extends CassandraTable[Recipes, Recipe] {
object url extends StringColumn(this) with PartitionKey[String]
object description extends OptionalStringColumn(this)
object ingredients extends ListColumn[Recipes, Recipe, String](this)
object servings extends OptionalIntColumn(this)
object last_checked_at extends DateTimeColumn(this)
object props extends MapColumn[Recipes, Recipe, String, String](this)
object uid extends UUIDColumn(this)
override def fromRow(r: Row): Recipe = {
Recipe(
url(r),
description(r),
ingredients(r),
servings(r),
last_checked_at(r),
props(r),
uid(r)
)
}
}
object Recipes extends Recipes with PhantomCassandraConnector {
override def tableName = "Recipes"
def store(recipe: Recipe): InsertQuery.Default[Recipes, Recipe] = {
insert
.value(_.url, recipe.url)
.value(_.description, recipe.description)
.value(_.ingredients, recipe.ingredients)
.value(_.last_checked_at, recipe.lastCheckedAt)
.value(_.props, recipe.props)
.value(_.uid, recipe.uid)
.value(_.servings, recipe.servings)
}
}
case class SampleEvent(id: UUID, map: Map[Long, DateTime])
sealed class Events extends CassandraTable[Events, SampleEvent] with PhantomCassandraConnector {
object id extends UUIDColumn(this) with PartitionKey[UUID]
object map extends MapColumn[Events, SampleEvent, Long, DateTime](this)
def fromRow(row: Row): SampleEvent = SampleEvent(
id(row),
map(row)
)
}
object Events extends Events {
def store(event: SampleEvent): InsertQuery.Default[Events, SampleEvent] = {
insert.value(_.id, event.id)
.value(_.map, event.map)
}
def getById(id: UUID) = {
select.where(_.id eqs id)
}
}
|
analytically/phantom
|
phantom-dsl/src/test/scala/com/websudos/phantom/tables/Recipes.scala
|
Scala
|
bsd-2-clause
| 3,659 |
/*
* Copyright 2016 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.xpath
import java.io.File
import kantan.xpath.laws.discipline.{DisciplineSuite, NodeDecoderTests, SerializableTests}
import kantan.xpath.laws.discipline.arbitrary._
class FileDecoderTests extends DisciplineSuite {
checkAll("NodeDecoder[File]", NodeDecoderTests[File].bijectiveDecoder[Int, Int])
checkAll("NodeDecoder[File]", SerializableTests[NodeDecoder[File]].serializable)
}
|
nrinaudo/kantan.xpath
|
core/src/test/scala/kantan/xpath/FileDecoderTests.scala
|
Scala
|
apache-2.0
| 999 |
package gr.cslab.ece.ntua.musqle.benchmarks.tpcds
class AllQueries
object AllQueries{
val rc = Array(1000000, 1000000, 1000000, 1000000, 1000000)
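  // Thresholds interpolated into q9 below: each value replaces the "count(*) > N" comparison
  // for one ss_quantity bucket.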
val tpcds1_4Queries = Seq(
("q1", """
| WITH customer_total_return AS
| (SELECT sr_customer_sk AS ctr_customer_sk, sr_store_sk AS ctr_store_sk,
| sum(sr_return_amt) AS ctr_total_return
| FROM store_returns, date_dim
| WHERE sr_returned_date_sk = d_date_sk AND d_year = 2000
| GROUP BY sr_customer_sk, sr_store_sk)
| SELECT c_customer_id
| FROM customer_total_return ctr1, store, customer
| WHERE ctr1.ctr_total_return >
| (SELECT avg(ctr_total_return)*1.2
| FROM customer_total_return ctr2
| WHERE ctr1.ctr_store_sk = ctr2.ctr_store_sk)
| AND s_store_sk = ctr1.ctr_store_sk
| AND s_state = 'TN'
| AND ctr1.ctr_customer_sk = c_customer_sk
| ORDER BY c_customer_id LIMIT 100
""".stripMargin),
("q2", """
| WITH wscs as
| (SELECT sold_date_sk, sales_price
| FROM (SELECT ws_sold_date_sk sold_date_sk, ws_ext_sales_price sales_price
| FROM web_sales) x
| UNION ALL
| (SELECT cs_sold_date_sk sold_date_sk, cs_ext_sales_price sales_price
| FROM catalog_sales)),
| wswscs AS
| (SELECT d_week_seq,
| sum(case when (d_day_name='Sunday') then sales_price else null end) sun_sales,
| sum(case when (d_day_name='Monday') then sales_price else null end) mon_sales,
| sum(case when (d_day_name='Tuesday') then sales_price else null end) tue_sales,
| sum(case when (d_day_name='Wednesday') then sales_price else null end) wed_sales,
| sum(case when (d_day_name='Thursday') then sales_price else null end) thu_sales,
| sum(case when (d_day_name='Friday') then sales_price else null end) fri_sales,
| sum(case when (d_day_name='Saturday') then sales_price else null end) sat_sales
| FROM wscs, date_dim
| WHERE d_date_sk = sold_date_sk
| GROUP BY d_week_seq)
| SELECT d_week_seq1
| ,round(sun_sales1/sun_sales2,2)
| ,round(mon_sales1/mon_sales2,2)
| ,round(tue_sales1/tue_sales2,2)
| ,round(wed_sales1/wed_sales2,2)
| ,round(thu_sales1/thu_sales2,2)
| ,round(fri_sales1/fri_sales2,2)
| ,round(sat_sales1/sat_sales2,2)
| FROM
| (SELECT wswscs.d_week_seq d_week_seq1
| ,sun_sales sun_sales1
| ,mon_sales mon_sales1
| ,tue_sales tue_sales1
| ,wed_sales wed_sales1
| ,thu_sales thu_sales1
| ,fri_sales fri_sales1
| ,sat_sales sat_sales1
| FROM wswscs,date_dim
| WHERE date_dim.d_week_seq = wswscs.d_week_seq AND d_year = 2001) y,
| (SELECT wswscs.d_week_seq d_week_seq2
| ,sun_sales sun_sales2
| ,mon_sales mon_sales2
| ,tue_sales tue_sales2
| ,wed_sales wed_sales2
| ,thu_sales thu_sales2
| ,fri_sales fri_sales2
| ,sat_sales sat_sales2
| FROM wswscs, date_dim
| WHERE date_dim.d_week_seq = wswscs.d_week_seq AND d_year = 2001 + 1) z
| WHERE d_week_seq1=d_week_seq2-53
| ORDER BY d_week_seq1
""".stripMargin),
("q3", """
| SELECT dt.d_year, item.i_brand_id brand_id, item.i_brand brand,SUM(ss_ext_sales_price) sum_agg
| FROM date_dim dt, store_sales, item
| WHERE dt.d_date_sk = store_sales.ss_sold_date_sk
| AND store_sales.ss_item_sk = item.i_item_sk
| AND item.i_manufact_id = 128
| AND dt.d_moy=11
| GROUP BY dt.d_year, item.i_brand, item.i_brand_id
| ORDER BY dt.d_year, sum_agg desc, brand_id
| LIMIT 100
""".stripMargin),
("q4", """
|WITH year_total AS (
| SELECT c_customer_id customer_id,
| c_first_name customer_first_name,
| c_last_name customer_last_name,
| c_preferred_cust_flag customer_preferred_cust_flag,
| c_birth_country customer_birth_country,
| c_login customer_login,
| c_email_address customer_email_address,
| d_year dyear,
| sum(((ss_ext_list_price-ss_ext_wholesale_cost-ss_ext_discount_amt)+ss_ext_sales_price)/2) year_total,
| 's' sale_type
| FROM customer, store_sales, date_dim
| WHERE c_customer_sk = ss_customer_sk AND ss_sold_date_sk = d_date_sk
| GROUP BY c_customer_id,
| c_first_name,
| c_last_name,
| c_preferred_cust_flag,
| c_birth_country,
| c_login,
| c_email_address,
| d_year
| UNION ALL
| SELECT c_customer_id customer_id,
| c_first_name customer_first_name,
| c_last_name customer_last_name,
| c_preferred_cust_flag customer_preferred_cust_flag,
| c_birth_country customer_birth_country,
| c_login customer_login,
| c_email_address customer_email_address,
| d_year dyear,
| sum((((cs_ext_list_price-cs_ext_wholesale_cost-cs_ext_discount_amt)+cs_ext_sales_price)/2) ) year_total,
| 'c' sale_type
| FROM customer, catalog_sales, date_dim
| WHERE c_customer_sk = cs_bill_customer_sk AND cs_sold_date_sk = d_date_sk
| GROUP BY c_customer_id,
| c_first_name,
| c_last_name,
| c_preferred_cust_flag,
| c_birth_country,
| c_login,
| c_email_address,
| d_year
| UNION ALL
| SELECT c_customer_id customer_id
| ,c_first_name customer_first_name
| ,c_last_name customer_last_name
| ,c_preferred_cust_flag customer_preferred_cust_flag
| ,c_birth_country customer_birth_country
| ,c_login customer_login
| ,c_email_address customer_email_address
| ,d_year dyear
| ,sum((((ws_ext_list_price-ws_ext_wholesale_cost-ws_ext_discount_amt)+ws_ext_sales_price)/2) ) year_total
| ,'w' sale_type
| FROM customer, web_sales, date_dim
| WHERE c_customer_sk = ws_bill_customer_sk AND ws_sold_date_sk = d_date_sk
| GROUP BY c_customer_id,
| c_first_name,
| c_last_name,
| c_preferred_cust_flag,
| c_birth_country,
| c_login,
| c_email_address,
| d_year)
| SELECT
| t_s_secyear.customer_id,
| t_s_secyear.customer_first_name,
| t_s_secyear.customer_last_name,
| t_s_secyear.customer_preferred_cust_flag,
| t_s_secyear.customer_birth_country,
| t_s_secyear.customer_login,
| t_s_secyear.customer_email_address
| FROM year_total t_s_firstyear, year_total t_s_secyear, year_total t_c_firstyear,
| year_total t_c_secyear, year_total t_w_firstyear, year_total t_w_secyear
| WHERE t_s_secyear.customer_id = t_s_firstyear.customer_id
| and t_s_firstyear.customer_id = t_c_secyear.customer_id
| and t_s_firstyear.customer_id = t_c_firstyear.customer_id
| and t_s_firstyear.customer_id = t_w_firstyear.customer_id
| and t_s_firstyear.customer_id = t_w_secyear.customer_id
| and t_s_firstyear.sale_type = 's'
| and t_c_firstyear.sale_type = 'c'
| and t_w_firstyear.sale_type = 'w'
| and t_s_secyear.sale_type = 's'
| and t_c_secyear.sale_type = 'c'
| and t_w_secyear.sale_type = 'w'
| and t_s_firstyear.dyear = 2001
| and t_s_secyear.dyear = 2001+1
| and t_c_firstyear.dyear = 2001
| and t_c_secyear.dyear = 2001+1
| and t_w_firstyear.dyear = 2001
| and t_w_secyear.dyear = 2001+1
| and t_s_firstyear.year_total > 0
| and t_c_firstyear.year_total > 0
| and t_w_firstyear.year_total > 0
| and case when t_c_firstyear.year_total > 0 then t_c_secyear.year_total / t_c_firstyear.year_total else null end
| > case when t_s_firstyear.year_total > 0 then t_s_secyear.year_total / t_s_firstyear.year_total else null end
| and case when t_c_firstyear.year_total > 0 then t_c_secyear.year_total / t_c_firstyear.year_total else null end
| > case when t_w_firstyear.year_total > 0 then t_w_secyear.year_total / t_w_firstyear.year_total else null end
| ORDER BY
| t_s_secyear.customer_id,
| t_s_secyear.customer_first_name,
| t_s_secyear.customer_last_name,
| t_s_secyear.customer_preferred_cust_flag,
| t_s_secyear.customer_birth_country,
| t_s_secyear.customer_login,
| t_s_secyear.customer_email_address
| LIMIT 100
""".stripMargin),
// Modifications: "+ days" -> date_add
// Modifications: "||" -> concat
("q5", """
| WITH ssr AS
| (SELECT s_store_id,
| sum(sales_price) as sales,
| sum(profit) as profit,
| sum(return_amt) as returns,
| sum(net_loss) as profit_loss
| FROM
| (SELECT ss_store_sk as store_sk,
| ss_sold_date_sk as date_sk,
| ss_ext_sales_price as sales_price,
| ss_net_profit as profit,
| cast(0 as decimal(7,2)) as return_amt,
| cast(0 as decimal(7,2)) as net_loss
| FROM store_sales
| UNION ALL
| SELECT sr_store_sk as store_sk,
| sr_returned_date_sk as date_sk,
| cast(0 as decimal(7,2)) as sales_price,
| cast(0 as decimal(7,2)) as profit,
| sr_return_amt as return_amt,
| sr_net_loss as net_loss
| FROM store_returns)
| salesreturns, date_dim, store
| WHERE date_sk = d_date_sk
| and d_date between cast('2000-08-23' as date)
| and ((cast('2000-08-23' as date) + interval 14 days))
| and store_sk = s_store_sk
| GROUP BY s_store_id),
| csr AS
| (SELECT cp_catalog_page_id,
| sum(sales_price) as sales,
| sum(profit) as profit,
| sum(return_amt) as returns,
| sum(net_loss) as profit_loss
| FROM
| (SELECT cs_catalog_page_sk as page_sk,
| cs_sold_date_sk as date_sk,
| cs_ext_sales_price as sales_price,
| cs_net_profit as profit,
| cast(0 as decimal(7,2)) as return_amt,
| cast(0 as decimal(7,2)) as net_loss
| FROM catalog_sales
| UNION ALL
| SELECT cr_catalog_page_sk as page_sk,
| cr_returned_date_sk as date_sk,
| cast(0 as decimal(7,2)) as sales_price,
| cast(0 as decimal(7,2)) as profit,
| cr_return_amount as return_amt,
| cr_net_loss as net_loss
| from catalog_returns
| ) salesreturns, date_dim, catalog_page
| WHERE date_sk = d_date_sk
| and d_date between cast('2000-08-23' as date)
| and ((cast('2000-08-23' as date) + interval 14 days))
| and page_sk = cp_catalog_page_sk
| GROUP BY cp_catalog_page_id)
| ,
| wsr AS
| (SELECT web_site_id,
| sum(sales_price) as sales,
| sum(profit) as profit,
| sum(return_amt) as returns,
| sum(net_loss) as profit_loss
| from
| (select ws_web_site_sk as wsr_web_site_sk,
| ws_sold_date_sk as date_sk,
| ws_ext_sales_price as sales_price,
| ws_net_profit as profit,
| cast(0 as decimal(7,2)) as return_amt,
| cast(0 as decimal(7,2)) as net_loss
| from web_sales
| union all
| select ws_web_site_sk as wsr_web_site_sk,
| wr_returned_date_sk as date_sk,
| cast(0 as decimal(7,2)) as sales_price,
| cast(0 as decimal(7,2)) as profit,
| wr_return_amt as return_amt,
| wr_net_loss as net_loss
| FROM web_returns LEFT OUTER JOIN web_sales on
| ( wr_item_sk = ws_item_sk
| and wr_order_number = ws_order_number)
| ) salesreturns, date_dim, web_site
| WHERE date_sk = d_date_sk
| and d_date between cast('2000-08-23' as date)
| and ((cast('2000-08-23' as date) + interval 14 days))
| and wsr_web_site_sk = web_site_sk
| GROUP BY web_site_id)
| SELECT channel,
| id,
| sum(sales) as sales,
| sum(returns) as returns,
| sum(profit) as profit
| from
| (select 'store channel' as channel,
| concat('store', s_store_id) as id,
| sales,
| returns,
| (profit - profit_loss) as profit
| FROM ssr
| UNION ALL
| select 'catalog channel' as channel,
| concat('catalog_page', cp_catalog_page_id) as id,
| sales,
| returns,
| (profit - profit_loss) as profit
| FROM csr
| UNION ALL
| SELECT 'web channel' as channel,
| concat('web_site', web_site_id) as id,
| sales,
| returns,
| (profit - profit_loss) as profit
| FROM wsr
| ) x
| GROUP BY ROLLUP (channel, id)
| ORDER BY channel, id
| LIMIT 100
""".stripMargin),
("q6", """
| SELECT a.ca_state state, count(*) cnt
| FROM
| customer_address a, customer c, store_sales s, date_dim d, item i
| WHERE a.ca_address_sk = c.c_current_addr_sk
| AND c.c_customer_sk = s.ss_customer_sk
| AND s.ss_sold_date_sk = d.d_date_sk
| AND s.ss_item_sk = i.i_item_sk
| AND d.d_month_seq =
| (SELECT distinct (d_month_seq) FROM date_dim
| WHERE d_year = 2000 AND d_moy = 1)
| AND i.i_current_price > 1.2 *
| (SELECT avg(j.i_current_price) FROM item j
| WHERE j.i_category = i.i_category)
| GROUP BY a.ca_state
| HAVING count(*) >= 10
| ORDER BY cnt LIMIT 100
""".stripMargin),
("q7", """
| SELECT i_item_id,
| avg(ss_quantity) agg1,
| avg(ss_list_price) agg2,
| avg(ss_coupon_amt) agg3,
| avg(ss_sales_price) agg4
| FROM store_sales, customer_demographics, date_dim, item, promotion
| WHERE ss_sold_date_sk = d_date_sk AND
| ss_item_sk = i_item_sk AND
| ss_cdemo_sk = cd_demo_sk AND
| ss_promo_sk = p_promo_sk AND
| cd_gender = 'M' AND
| cd_marital_status = 'S' AND
| cd_education_status = 'College' AND
| (p_channel_email = 'N' or p_channel_event = 'N') AND
| d_year = 2000
| GROUP BY i_item_id
| ORDER BY i_item_id LIMIT 100
""".stripMargin),
("q8", """
| select s_store_name, sum(ss_net_profit)
| from store_sales, date_dim, store,
| (SELECT ca_zip
| from (
| (SELECT substr(ca_zip,1,5) ca_zip FROM customer_address
| WHERE substr(ca_zip,1,5) IN (
| '24128','76232','65084','87816','83926','77556','20548',
| '26231','43848','15126','91137','61265','98294','25782',
| '17920','18426','98235','40081','84093','28577','55565',
| '17183','54601','67897','22752','86284','18376','38607',
| '45200','21756','29741','96765','23932','89360','29839',
| '25989','28898','91068','72550','10390','18845','47770',
| '82636','41367','76638','86198','81312','37126','39192',
| '88424','72175','81426','53672','10445','42666','66864',
| '66708','41248','48583','82276','18842','78890','49448',
| '14089','38122','34425','79077','19849','43285','39861',
| '66162','77610','13695','99543','83444','83041','12305',
| '57665','68341','25003','57834','62878','49130','81096',
| '18840','27700','23470','50412','21195','16021','76107',
| '71954','68309','18119','98359','64544','10336','86379',
| '27068','39736','98569','28915','24206','56529','57647',
| '54917','42961','91110','63981','14922','36420','23006',
| '67467','32754','30903','20260','31671','51798','72325',
| '85816','68621','13955','36446','41766','68806','16725',
| '15146','22744','35850','88086','51649','18270','52867',
| '39972','96976','63792','11376','94898','13595','10516',
| '90225','58943','39371','94945','28587','96576','57855',
| '28488','26105','83933','25858','34322','44438','73171',
| '30122','34102','22685','71256','78451','54364','13354',
| '45375','40558','56458','28286','45266','47305','69399',
| '83921','26233','11101','15371','69913','35942','15882',
| '25631','24610','44165','99076','33786','70738','26653',
| '14328','72305','62496','22152','10144','64147','48425',
| '14663','21076','18799','30450','63089','81019','68893',
| '24996','51200','51211','45692','92712','70466','79994',
| '22437','25280','38935','71791','73134','56571','14060',
| '19505','72425','56575','74351','68786','51650','20004',
| '18383','76614','11634','18906','15765','41368','73241',
| '76698','78567','97189','28545','76231','75691','22246',
| '51061','90578','56691','68014','51103','94167','57047',
| '14867','73520','15734','63435','25733','35474','24676',
| '94627','53535','17879','15559','53268','59166','11928',
| '59402','33282','45721','43933','68101','33515','36634',
| '71286','19736','58058','55253','67473','41918','19515',
| '36495','19430','22351','77191','91393','49156','50298',
| '87501','18652','53179','18767','63193','23968','65164',
| '68880','21286','72823','58470','67301','13394','31016',
| '70372','67030','40604','24317','45748','39127','26065',
| '77721','31029','31880','60576','24671','45549','13376',
| '50016','33123','19769','22927','97789','46081','72151',
| '15723','46136','51949','68100','96888','64528','14171',
| '79777','28709','11489','25103','32213','78668','22245',
| '15798','27156','37930','62971','21337','51622','67853',
| '10567','38415','15455','58263','42029','60279','37125',
| '56240','88190','50308','26859','64457','89091','82136',
| '62377','36233','63837','58078','17043','30010','60099',
| '28810','98025','29178','87343','73273','30469','64034',
| '39516','86057','21309','90257','67875','40162','11356',
| '73650','61810','72013','30431','22461','19512','13375',
| '55307','30625','83849','68908','26689','96451','38193',
| '46820','88885','84935','69035','83144','47537','56616',
| '94983','48033','69952','25486','61547','27385','61860',
| '58048','56910','16807','17871','35258','31387','35458',
| '35576'))
| INTERSECT
| (select ca_zip
| FROM
| (SELECT substr(ca_zip,1,5) ca_zip,count(*) cnt
| FROM customer_address, customer
| WHERE ca_address_sk = c_current_addr_sk and
| c_preferred_cust_flag='Y'
| group by ca_zip
| having count(*) > 10) A1)
| ) A2
| ) V1
| where ss_store_sk = s_store_sk
| and ss_sold_date_sk = d_date_sk
| and d_qoy = 2 and d_year = 1998
| and (substr(s_zip,1,2) = substr(V1.ca_zip,1,2))
| group by s_store_name
| order by s_store_name LIMIT 100
""".stripMargin),
("q9", s"""
|select case when (select count(*) from store_sales
| where ss_quantity between 1 and 20) > ${rc(0)}
| then (select avg(ss_ext_discount_amt) from store_sales
| where ss_quantity between 1 and 20)
| else (select avg(ss_net_paid) from store_sales
| where ss_quantity between 1 and 20) end bucket1 ,
| case when (select count(*) from store_sales
| where ss_quantity between 21 and 40) > ${rc(1)}
| then (select avg(ss_ext_discount_amt) from store_sales
| where ss_quantity between 21 and 40)
| else (select avg(ss_net_paid) from store_sales
| where ss_quantity between 21 and 40) end bucket2,
| case when (select count(*) from store_sales
| where ss_quantity between 41 and 60) > ${rc(2)}
| then (select avg(ss_ext_discount_amt) from store_sales
| where ss_quantity between 41 and 60)
| else (select avg(ss_net_paid) from store_sales
| where ss_quantity between 41 and 60) end bucket3,
| case when (select count(*) from store_sales
| where ss_quantity between 61 and 80) > ${rc(3)}
| then (select avg(ss_ext_discount_amt) from store_sales
| where ss_quantity between 61 and 80)
| else (select avg(ss_net_paid) from store_sales
| where ss_quantity between 61 and 80) end bucket4,
| case when (select count(*) from store_sales
| where ss_quantity between 81 and 100) > ${rc(4)}
| then (select avg(ss_ext_discount_amt) from store_sales
| where ss_quantity between 81 and 100)
| else (select avg(ss_net_paid) from store_sales
| where ss_quantity between 81 and 100) end bucket5
|from reason
|where r_reason_sk = 1
""".stripMargin),
("q10", """
| select
| cd_gender, cd_marital_status, cd_education_status, count(*) cnt1,
| cd_purchase_estimate, count(*) cnt2, cd_credit_rating, count(*) cnt3,
| cd_dep_count, count(*) cnt4, cd_dep_employed_count, count(*) cnt5,
| cd_dep_college_count, count(*) cnt6
| from
| customer c, customer_address ca, customer_demographics
| where
| c.c_current_addr_sk = ca.ca_address_sk and
| ca_county in ('Rush County','Toole County','Jefferson County',
| 'Dona Ana County','La Porte County') and
| cd_demo_sk = c.c_current_cdemo_sk AND
| exists (select * from store_sales, date_dim
| where c.c_customer_sk = ss_customer_sk AND
| ss_sold_date_sk = d_date_sk AND
| d_year = 2002 AND
| d_moy between 1 AND 1+3) AND
| (exists (select * from web_sales, date_dim
| where c.c_customer_sk = ws_bill_customer_sk AND
| ws_sold_date_sk = d_date_sk AND
| d_year = 2002 AND
| d_moy between 1 AND 1+3) or
| exists (select * from catalog_sales, date_dim
| where c.c_customer_sk = cs_ship_customer_sk AND
| cs_sold_date_sk = d_date_sk AND
| d_year = 2002 AND
| d_moy between 1 AND 1+3))
| group by cd_gender,
| cd_marital_status,
| cd_education_status,
| cd_purchase_estimate,
| cd_credit_rating,
| cd_dep_count,
| cd_dep_employed_count,
| cd_dep_college_count
| order by cd_gender,
| cd_marital_status,
| cd_education_status,
| cd_purchase_estimate,
| cd_credit_rating,
| cd_dep_count,
| cd_dep_employed_count,
| cd_dep_college_count
|LIMIT 100
""".stripMargin),
("q11", """
| with year_total as (
| select c_customer_id customer_id
| ,c_first_name customer_first_name
| ,c_last_name customer_last_name
| ,c_preferred_cust_flag customer_preferred_cust_flag
| ,c_birth_country customer_birth_country
| ,c_login customer_login
| ,c_email_address customer_email_address
| ,d_year dyear
| ,sum(ss_ext_list_price-ss_ext_discount_amt) year_total
| ,'s' sale_type
| from customer, store_sales, date_dim
| where c_customer_sk = ss_customer_sk
| and ss_sold_date_sk = d_date_sk
| group by c_customer_id
| ,c_first_name
| ,c_last_name
| ,d_year
| ,c_preferred_cust_flag
| ,c_birth_country
| ,c_login
| ,c_email_address
| ,d_year
| union all
| select c_customer_id customer_id
| ,c_first_name customer_first_name
| ,c_last_name customer_last_name
| ,c_preferred_cust_flag customer_preferred_cust_flag
| ,c_birth_country customer_birth_country
| ,c_login customer_login
| ,c_email_address customer_email_address
| ,d_year dyear
| ,sum(ws_ext_list_price-ws_ext_discount_amt) year_total
| ,'w' sale_type
| from customer, web_sales, date_dim
| where c_customer_sk = ws_bill_customer_sk
| and ws_sold_date_sk = d_date_sk
| group by
| c_customer_id, c_first_name, c_last_name, c_preferred_cust_flag, c_birth_country,
| c_login, c_email_address, d_year)
| select
| t_s_secyear.customer_preferred_cust_flag
| from year_total t_s_firstyear
| ,year_total t_s_secyear
| ,year_total t_w_firstyear
| ,year_total t_w_secyear
| where t_s_secyear.customer_id = t_s_firstyear.customer_id
| and t_s_firstyear.customer_id = t_w_secyear.customer_id
| and t_s_firstyear.customer_id = t_w_firstyear.customer_id
| and t_s_firstyear.sale_type = 's'
| and t_w_firstyear.sale_type = 'w'
| and t_s_secyear.sale_type = 's'
| and t_w_secyear.sale_type = 'w'
| and t_s_firstyear.dyear = 2001
| and t_s_secyear.dyear = 2001+1
| and t_w_firstyear.dyear = 2001
| and t_w_secyear.dyear = 2001+1
| and t_s_firstyear.year_total > 0
| and t_w_firstyear.year_total > 0
| and case when t_w_firstyear.year_total > 0 then t_w_secyear.year_total / t_w_firstyear.year_total else null end
| > case when t_s_firstyear.year_total > 0 then t_s_secyear.year_total / t_s_firstyear.year_total else null end
| order by t_s_secyear.customer_preferred_cust_flag
| LIMIT 100
""".stripMargin),
// Modifications: "+ days" -> date_add
("q12", """
| select
| i_item_desc, i_category, i_class, i_current_price,
| sum(ws_ext_sales_price) as itemrevenue,
| sum(ws_ext_sales_price)*100/sum(sum(ws_ext_sales_price)) over
| (partition by i_class) as revenueratio
| from
| web_sales, item, date_dim
| where
| ws_item_sk = i_item_sk
| and i_category in ('Sports', 'Books', 'Home')
| and ws_sold_date_sk = d_date_sk
| and d_date between cast('1999-02-22' as date)
| and (cast('1999-02-22' as date) + interval 30 days)
| group by
| i_item_id, i_item_desc, i_category, i_class, i_current_price
| order by
| i_category, i_class, i_item_id, i_item_desc, revenueratio
| LIMIT 100
""".stripMargin),
("q13", """
| select avg(ss_quantity)
| ,avg(ss_ext_sales_price)
| ,avg(ss_ext_wholesale_cost)
| ,sum(ss_ext_wholesale_cost)
| from store_sales
| ,store
| ,customer_demographics
| ,household_demographics
| ,customer_address
| ,date_dim
| where s_store_sk = ss_store_sk
| and ss_sold_date_sk = d_date_sk and d_year = 2001
| and((ss_hdemo_sk=hd_demo_sk
| and cd_demo_sk = ss_cdemo_sk
| and cd_marital_status = 'M'
| and cd_education_status = 'Advanced Degree'
| and ss_sales_price between 100.00 and 150.00
| and hd_dep_count = 3
| )or
| (ss_hdemo_sk=hd_demo_sk
| and cd_demo_sk = ss_cdemo_sk
| and cd_marital_status = 'S'
| and cd_education_status = 'College'
| and ss_sales_price between 50.00 and 100.00
| and hd_dep_count = 1
| ) or
| (ss_hdemo_sk=hd_demo_sk
| and cd_demo_sk = ss_cdemo_sk
| and cd_marital_status = 'W'
| and cd_education_status = '2 yr Degree'
| and ss_sales_price between 150.00 and 200.00
| and hd_dep_count = 1
| ))
| and((ss_addr_sk = ca_address_sk
| and ca_country = 'United States'
| and ca_state in ('TX', 'OH', 'TX')
| and ss_net_profit between 100 and 200
| ) or
| (ss_addr_sk = ca_address_sk
| and ca_country = 'United States'
| and ca_state in ('OR', 'NM', 'KY')
| and ss_net_profit between 150 and 300
| ) or
| (ss_addr_sk = ca_address_sk
| and ca_country = 'United States'
| and ca_state in ('VA', 'TX', 'MS')
| and ss_net_profit between 50 and 250
| ))
""".stripMargin),
("q14a", """
|with cross_items as
| (select i_item_sk ss_item_sk
| from item,
| (select iss.i_brand_id brand_id, iss.i_class_id class_id, iss.i_category_id category_id
| from store_sales, item iss, date_dim d1
| where ss_item_sk = iss.i_item_sk
        |   and ss_sold_date_sk = d1.d_date_sk
| and d1.d_year between 1999 AND 1999 + 2
| intersect
| select ics.i_brand_id, ics.i_class_id, ics.i_category_id
| from catalog_sales, item ics, date_dim d2
| where cs_item_sk = ics.i_item_sk
| and cs_sold_date_sk = d2.d_date_sk
| and d2.d_year between 1999 AND 1999 + 2
| intersect
| select iws.i_brand_id, iws.i_class_id, iws.i_category_id
| from web_sales, item iws, date_dim d3
| where ws_item_sk = iws.i_item_sk
| and ws_sold_date_sk = d3.d_date_sk
| and d3.d_year between 1999 AND 1999 + 2) x
| where i_brand_id = brand_id
| and i_class_id = class_id
| and i_category_id = category_id
|),
| avg_sales as
| (select avg(quantity*list_price) average_sales
| from (
| select ss_quantity quantity, ss_list_price list_price
| from store_sales, date_dim
| where ss_sold_date_sk = d_date_sk
| and d_year between 1999 and 2001
| union all
| select cs_quantity quantity, cs_list_price list_price
| from catalog_sales, date_dim
| where cs_sold_date_sk = d_date_sk
| and d_year between 1999 and 1999 + 2
| union all
| select ws_quantity quantity, ws_list_price list_price
| from web_sales, date_dim
| where ws_sold_date_sk = d_date_sk
| and d_year between 1999 and 1999 + 2) x)
| select channel, i_brand_id,i_class_id,i_category_id,sum(sales), sum(number_sales)
| from(
| select 'store' channel, i_brand_id,i_class_id
| ,i_category_id,sum(ss_quantity*ss_list_price) sales
| , count(*) number_sales
| from store_sales, item, date_dim
| where ss_item_sk in (select ss_item_sk from cross_items)
| and ss_item_sk = i_item_sk
| and ss_sold_date_sk = d_date_sk
| and d_year = 1999+2
| and d_moy = 11
| group by i_brand_id,i_class_id,i_category_id
| having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)
| union all
| select 'catalog' channel, i_brand_id,i_class_id,i_category_id, sum(cs_quantity*cs_list_price) sales, count(*) number_sales
| from catalog_sales, item, date_dim
| where cs_item_sk in (select ss_item_sk from cross_items)
| and cs_item_sk = i_item_sk
| and cs_sold_date_sk = d_date_sk
| and d_year = 1999+2
| and d_moy = 11
| group by i_brand_id,i_class_id,i_category_id
| having sum(cs_quantity*cs_list_price) > (select average_sales from avg_sales)
| union all
| select 'web' channel, i_brand_id,i_class_id,i_category_id, sum(ws_quantity*ws_list_price) sales , count(*) number_sales
| from web_sales, item, date_dim
| where ws_item_sk in (select ss_item_sk from cross_items)
| and ws_item_sk = i_item_sk
| and ws_sold_date_sk = d_date_sk
| and d_year = 1999+2
| and d_moy = 11
| group by i_brand_id,i_class_id,i_category_id
| having sum(ws_quantity*ws_list_price) > (select average_sales from avg_sales)
| ) y
| group by rollup (channel, i_brand_id,i_class_id,i_category_id)
| order by channel,i_brand_id,i_class_id,i_category_id
| limit 100
""".stripMargin),
("q14b", """
| with cross_items as
| (select i_item_sk ss_item_sk
| from item,
| (select iss.i_brand_id brand_id, iss.i_class_id class_id, iss.i_category_id category_id
| from store_sales, item iss, date_dim d1
| where ss_item_sk = iss.i_item_sk
| and ss_sold_date_sk = d1.d_date_sk
| and d1.d_year between 1999 AND 1999 + 2
| intersect
| select ics.i_brand_id, ics.i_class_id, ics.i_category_id
| from catalog_sales, item ics, date_dim d2
| where cs_item_sk = ics.i_item_sk
| and cs_sold_date_sk = d2.d_date_sk
| and d2.d_year between 1999 AND 1999 + 2
| intersect
| select iws.i_brand_id, iws.i_class_id, iws.i_category_id
| from web_sales, item iws, date_dim d3
| where ws_item_sk = iws.i_item_sk
| and ws_sold_date_sk = d3.d_date_sk
| and d3.d_year between 1999 AND 1999 + 2) x
| where i_brand_id = brand_id
| and i_class_id = class_id
| and i_category_id = category_id
| ),
| avg_sales as
| (select avg(quantity*list_price) average_sales
| from (select ss_quantity quantity, ss_list_price list_price
| from store_sales, date_dim
| where ss_sold_date_sk = d_date_sk and d_year between 1999 and 1999 + 2
| union all
| select cs_quantity quantity, cs_list_price list_price
| from catalog_sales, date_dim
| where cs_sold_date_sk = d_date_sk and d_year between 1999 and 1999 + 2
| union all
| select ws_quantity quantity, ws_list_price list_price
| from web_sales, date_dim
| where ws_sold_date_sk = d_date_sk and d_year between 1999 and 1999 + 2) x)
| select * from
| (select 'store' channel, i_brand_id,i_class_id,i_category_id
| ,sum(ss_quantity*ss_list_price) sales, count(*) number_sales
| from store_sales, item, date_dim
| where ss_item_sk in (select ss_item_sk from cross_items)
| and ss_item_sk = i_item_sk
| and ss_sold_date_sk = d_date_sk
| and d_week_seq = (select d_week_seq from date_dim
| where d_year = 1999 + 1 and d_moy = 12 and d_dom = 11)
| group by i_brand_id,i_class_id,i_category_id
| having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)) this_year,
| (select 'store' channel, i_brand_id,i_class_id
| ,i_category_id, sum(ss_quantity*ss_list_price) sales, count(*) number_sales
| from store_sales, item, date_dim
| where ss_item_sk in (select ss_item_sk from cross_items)
| and ss_item_sk = i_item_sk
| and ss_sold_date_sk = d_date_sk
| and d_week_seq = (select d_week_seq from date_dim
| where d_year = 1999 and d_moy = 12 and d_dom = 11)
| group by i_brand_id,i_class_id,i_category_id
| having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)) last_year
| where this_year.i_brand_id= last_year.i_brand_id
| and this_year.i_class_id = last_year.i_class_id
| and this_year.i_category_id = last_year.i_category_id
| order by this_year.channel, this_year.i_brand_id, this_year.i_class_id, this_year.i_category_id
| limit 100
""".stripMargin),
("q15", """
| select ca_zip, sum(cs_sales_price)
| from catalog_sales, customer, customer_address, date_dim
| where cs_bill_customer_sk = c_customer_sk
| and c_current_addr_sk = ca_address_sk
| and ( substr(ca_zip,1,5) in ('85669', '86197','88274','83405','86475',
| '85392', '85460', '80348', '81792')
| or ca_state in ('CA','WA','GA')
| or cs_sales_price > 500)
| and cs_sold_date_sk = d_date_sk
| and d_qoy = 2 and d_year = 2001
| group by ca_zip
| order by ca_zip
| limit 100
""".stripMargin),
// Modifications: " -> `
("q16", """
| select
| count(distinct cs_order_number) as `order count`,
| sum(cs_ext_ship_cost) as `total shipping cost`,
| sum(cs_net_profit) as `total net profit`
| from
| catalog_sales cs1, date_dim, customer_address, call_center
| where
| d_date between '2002-02-01' and (cast('2002-02-01' as date) + interval 60 days)
| and cs1.cs_ship_date_sk = d_date_sk
| and cs1.cs_ship_addr_sk = ca_address_sk
| and ca_state = 'GA'
| and cs1.cs_call_center_sk = cc_call_center_sk
| and cc_county in ('Williamson County','Williamson County','Williamson County','Williamson County', 'Williamson County')
| and exists (select *
| from catalog_sales cs2
| where cs1.cs_order_number = cs2.cs_order_number
| and cs1.cs_warehouse_sk <> cs2.cs_warehouse_sk)
| and not exists(select *
| from catalog_returns cr1
| where cs1.cs_order_number = cr1.cr_order_number)
| order by count(distinct cs_order_number)
| limit 100
""".stripMargin),
("q17", """
| select i_item_id
| ,i_item_desc
| ,s_state
| ,count(ss_quantity) as store_sales_quantitycount
| ,avg(ss_quantity) as store_sales_quantityave
| ,stddev_samp(ss_quantity) as store_sales_quantitystdev
| ,stddev_samp(ss_quantity)/avg(ss_quantity) as store_sales_quantitycov
        |        ,count(sr_return_quantity) as store_returns_quantitycount
        |        ,avg(sr_return_quantity) as store_returns_quantityave
        |        ,stddev_samp(sr_return_quantity) as store_returns_quantitystdev
| ,stddev_samp(sr_return_quantity)/avg(sr_return_quantity) as store_returns_quantitycov
| ,count(cs_quantity) as catalog_sales_quantitycount ,avg(cs_quantity) as catalog_sales_quantityave
        |        ,stddev_samp(cs_quantity) as catalog_sales_quantitystdev
| ,stddev_samp(cs_quantity)/avg(cs_quantity) as catalog_sales_quantitycov
| from store_sales, store_returns, catalog_sales, date_dim d1, date_dim d2, date_dim d3, store, item
| where d1.d_quarter_name = '2001Q1'
| and d1.d_date_sk = ss_sold_date_sk
| and i_item_sk = ss_item_sk
| and s_store_sk = ss_store_sk
| and ss_customer_sk = sr_customer_sk
| and ss_item_sk = sr_item_sk
| and ss_ticket_number = sr_ticket_number
| and sr_returned_date_sk = d2.d_date_sk
| and d2.d_quarter_name in ('2001Q1','2001Q2','2001Q3')
| and sr_customer_sk = cs_bill_customer_sk
| and sr_item_sk = cs_item_sk
| and cs_sold_date_sk = d3.d_date_sk
| and d3.d_quarter_name in ('2001Q1','2001Q2','2001Q3')
| group by i_item_id, i_item_desc, s_state
| order by i_item_id, i_item_desc, s_state
| limit 100
""".stripMargin),
// Modifications: "numeric" -> "decimal"
("q18", """
| select i_item_id,
| ca_country,
| ca_state,
| ca_county,
| avg( cast(cs_quantity as decimal(12,2))) agg1,
| avg( cast(cs_list_price as decimal(12,2))) agg2,
| avg( cast(cs_coupon_amt as decimal(12,2))) agg3,
| avg( cast(cs_sales_price as decimal(12,2))) agg4,
| avg( cast(cs_net_profit as decimal(12,2))) agg5,
| avg( cast(c_birth_year as decimal(12,2))) agg6,
| avg( cast(cd1.cd_dep_count as decimal(12,2))) agg7
| from catalog_sales, customer_demographics cd1,
| customer_demographics cd2, customer, customer_address, date_dim, item
| where cs_sold_date_sk = d_date_sk and
| cs_item_sk = i_item_sk and
| cs_bill_cdemo_sk = cd1.cd_demo_sk and
| cs_bill_customer_sk = c_customer_sk and
| cd1.cd_gender = 'F' and
| cd1.cd_education_status = 'Unknown' and
| c_current_cdemo_sk = cd2.cd_demo_sk and
| c_current_addr_sk = ca_address_sk and
| c_birth_month in (1,6,8,9,12,2) and
| d_year = 1998 and
| ca_state in ('MS','IN','ND','OK','NM','VA','MS')
| group by rollup (i_item_id, ca_country, ca_state, ca_county)
| order by ca_country, ca_state, ca_county, i_item_id
| LIMIT 100
""".stripMargin),
("q19", """
| select i_brand_id brand_id, i_brand brand, i_manufact_id, i_manufact,
| sum(ss_ext_sales_price) ext_price
| from date_dim, store_sales, item,customer,customer_address,store
| where d_date_sk = ss_sold_date_sk
| and ss_item_sk = i_item_sk
| and i_manager_id = 8
| and d_moy = 11
| and d_year = 1998
| and ss_customer_sk = c_customer_sk
| and c_current_addr_sk = ca_address_sk
| and substr(ca_zip,1,5) <> substr(s_zip,1,5)
| and ss_store_sk = s_store_sk
| group by i_brand, i_brand_id, i_manufact_id, i_manufact
| order by ext_price desc, brand, brand_id, i_manufact_id, i_manufact
| limit 100
""".stripMargin),
("q20", """
|select i_item_desc
| ,i_category
| ,i_class
| ,i_current_price
| ,sum(cs_ext_sales_price) as itemrevenue
| ,sum(cs_ext_sales_price)*100/sum(sum(cs_ext_sales_price)) over
| (partition by i_class) as revenueratio
| from catalog_sales, item, date_dim
| where cs_item_sk = i_item_sk
| and i_category in ('Sports', 'Books', 'Home')
| and cs_sold_date_sk = d_date_sk
| and d_date between cast('1999-02-22' as date)
| and (cast('1999-02-22' as date) + interval 30 days)
| group by i_item_id, i_item_desc, i_category, i_class, i_current_price
| order by i_category, i_class, i_item_id, i_item_desc, revenueratio
| limit 100
""".stripMargin),
    // Modifications: "+ N days" -> "+ interval N days"
("q21", """
| select * from(
| select w_warehouse_name, i_item_id,
| sum(case when (cast(d_date as date) < cast ('2000-03-11' as date))
| then inv_quantity_on_hand
| else 0 end) as inv_before,
| sum(case when (cast(d_date as date) >= cast ('2000-03-11' as date))
| then inv_quantity_on_hand
| else 0 end) as inv_after
| from inventory, warehouse, item, date_dim
| where i_current_price between 0.99 and 1.49
| and i_item_sk = inv_item_sk
| and inv_warehouse_sk = w_warehouse_sk
| and inv_date_sk = d_date_sk
| and d_date between (cast('2000-03-11' as date) - interval 30 days)
| and (cast('2000-03-11' as date) + interval 30 days)
| group by w_warehouse_name, i_item_id) x
| where (case when inv_before > 0
| then inv_after / inv_before
| else null
| end) between 2.0/3.0 and 3.0/2.0
| order by w_warehouse_name, i_item_id
| limit 100
""".stripMargin),
("q22", """
| select i_product_name, i_brand, i_class, i_category, avg(inv_quantity_on_hand) qoh
| from inventory, date_dim, item, warehouse
| where inv_date_sk=d_date_sk
| and inv_item_sk=i_item_sk
| and inv_warehouse_sk = w_warehouse_sk
| and d_month_seq between 1200 and 1200 + 11
| group by rollup(i_product_name, i_brand, i_class, i_category)
| order by qoh, i_product_name, i_brand, i_class, i_category
| limit 100
""".stripMargin),
("q23a", """
| with frequent_ss_items as
| (select substr(i_item_desc,1,30) itemdesc,i_item_sk item_sk,d_date solddate,count(*) cnt
| from store_sales, date_dim, item
| where ss_sold_date_sk = d_date_sk
| and ss_item_sk = i_item_sk
| and d_year in (2000, 2000+1, 2000+2,2000+3)
| group by substr(i_item_desc,1,30),i_item_sk,d_date
| having count(*) >4),
| max_store_sales as
| (select max(csales) tpcds_cmax
| from (select c_customer_sk,sum(ss_quantity*ss_sales_price) csales
| from store_sales, customer, date_dim
| where ss_customer_sk = c_customer_sk
| and ss_sold_date_sk = d_date_sk
| and d_year in (2000, 2000+1, 2000+2,2000+3)
| group by c_customer_sk) x),
| best_ss_customer as
| (select c_customer_sk,sum(ss_quantity*ss_sales_price) ssales
| from store_sales, customer
| where ss_customer_sk = c_customer_sk
| group by c_customer_sk
| having sum(ss_quantity*ss_sales_price) > (50/100.0) *
| (select * from max_store_sales))
| select sum(sales)
| from ((select cs_quantity*cs_list_price sales
| from catalog_sales, date_dim
| where d_year = 2000
| and d_moy = 2
| and cs_sold_date_sk = d_date_sk
| and cs_item_sk in (select item_sk from frequent_ss_items)
| and cs_bill_customer_sk in (select c_customer_sk from best_ss_customer))
| union all
| (select ws_quantity*ws_list_price sales
| from web_sales, date_dim
| where d_year = 2000
| and d_moy = 2
| and ws_sold_date_sk = d_date_sk
| and ws_item_sk in (select item_sk from frequent_ss_items)
| and ws_bill_customer_sk in (select c_customer_sk from best_ss_customer))) y
| limit 100
""".stripMargin),
("q23b", """
|
| with frequent_ss_items as
| (select substr(i_item_desc,1,30) itemdesc,i_item_sk item_sk,d_date solddate,count(*) cnt
| from store_sales, date_dim, item
| where ss_sold_date_sk = d_date_sk
| and ss_item_sk = i_item_sk
| and d_year in (2000, 2000+1, 2000+2,2000+3)
| group by substr(i_item_desc,1,30),i_item_sk,d_date
| having count(*) > 4),
| max_store_sales as
| (select max(csales) tpcds_cmax
| from (select c_customer_sk,sum(ss_quantity*ss_sales_price) csales
| from store_sales, customer, date_dim
| where ss_customer_sk = c_customer_sk
| and ss_sold_date_sk = d_date_sk
| and d_year in (2000, 2000+1, 2000+2,2000+3)
| group by c_customer_sk) x),
| best_ss_customer as
| (select c_customer_sk,sum(ss_quantity*ss_sales_price) ssales
| from store_sales
| ,customer
| where ss_customer_sk = c_customer_sk
| group by c_customer_sk
| having sum(ss_quantity*ss_sales_price) > (50/100.0) *
| (select * from max_store_sales))
| select c_last_name,c_first_name,sales
| from ((select c_last_name,c_first_name,sum(cs_quantity*cs_list_price) sales
| from catalog_sales, customer, date_dim
| where d_year = 2000
| and d_moy = 2
| and cs_sold_date_sk = d_date_sk
| and cs_item_sk in (select item_sk from frequent_ss_items)
| and cs_bill_customer_sk in (select c_customer_sk from best_ss_customer)
| and cs_bill_customer_sk = c_customer_sk
| group by c_last_name,c_first_name)
| union all
| (select c_last_name,c_first_name,sum(ws_quantity*ws_list_price) sales
| from web_sales, customer, date_dim
| where d_year = 2000
| and d_moy = 2
| and ws_sold_date_sk = d_date_sk
| and ws_item_sk in (select item_sk from frequent_ss_items)
| and ws_bill_customer_sk in (select c_customer_sk from best_ss_customer)
| and ws_bill_customer_sk = c_customer_sk
| group by c_last_name,c_first_name)) y
| order by c_last_name,c_first_name,sales
| limit 100
""".stripMargin),
("q24a", """
| with ssales as
| (select c_last_name, c_first_name, s_store_name, ca_state, s_state, i_color,
| i_current_price, i_manager_id, i_units, i_size, sum(ss_net_paid) netpaid
| from store_sales, store_returns, store, item, customer, customer_address
| where ss_ticket_number = sr_ticket_number
| and ss_item_sk = sr_item_sk
| and ss_customer_sk = c_customer_sk
| and ss_item_sk = i_item_sk
| and ss_store_sk = s_store_sk
| and c_birth_country = upper(ca_country)
| and s_zip = ca_zip
| and s_market_id = 8
| group by c_last_name, c_first_name, s_store_name, ca_state, s_state, i_color,
| i_current_price, i_manager_id, i_units, i_size)
| select c_last_name, c_first_name, s_store_name, sum(netpaid) paid
| from ssales
| where i_color = 'pale'
| group by c_last_name, c_first_name, s_store_name
| having sum(netpaid) > (select 0.05*avg(netpaid) from ssales)
""".stripMargin),
("q24b", """
| with ssales as
| (select c_last_name, c_first_name, s_store_name, ca_state, s_state, i_color,
| i_current_price, i_manager_id, i_units, i_size, sum(ss_net_paid) netpaid
| from store_sales, store_returns, store, item, customer, customer_address
| where ss_ticket_number = sr_ticket_number
| and ss_item_sk = sr_item_sk
| and ss_customer_sk = c_customer_sk
| and ss_item_sk = i_item_sk
| and ss_store_sk = s_store_sk
| and c_birth_country = upper(ca_country)
| and s_zip = ca_zip
| and s_market_id = 8
| group by c_last_name, c_first_name, s_store_name, ca_state, s_state,
| i_color, i_current_price, i_manager_id, i_units, i_size)
| select c_last_name, c_first_name, s_store_name, sum(netpaid) paid
| from ssales
| where i_color = 'chiffon'
| group by c_last_name, c_first_name, s_store_name
| having sum(netpaid) > (select 0.05*avg(netpaid) from ssales)
""".stripMargin),
("q25", """
| select i_item_id, i_item_desc, s_store_id, s_store_name,
| sum(ss_net_profit) as store_sales_profit,
| sum(sr_net_loss) as store_returns_loss,
| sum(cs_net_profit) as catalog_sales_profit
| from
| store_sales, store_returns, catalog_sales, date_dim d1, date_dim d2, date_dim d3,
| store, item
| where
| d1.d_moy = 4
| and d1.d_year = 2001
| and d1.d_date_sk = ss_sold_date_sk
| and i_item_sk = ss_item_sk
| and s_store_sk = ss_store_sk
| and ss_customer_sk = sr_customer_sk
| and ss_item_sk = sr_item_sk
| and ss_ticket_number = sr_ticket_number
| and sr_returned_date_sk = d2.d_date_sk
| and d2.d_moy between 4 and 10
| and d2.d_year = 2001
| and sr_customer_sk = cs_bill_customer_sk
| and sr_item_sk = cs_item_sk
| and cs_sold_date_sk = d3.d_date_sk
| and d3.d_moy between 4 and 10
| and d3.d_year = 2001
| group by
| i_item_id, i_item_desc, s_store_id, s_store_name
| order by
| i_item_id, i_item_desc, s_store_id, s_store_name
| limit 100
""".stripMargin),
("q26", """
| select i_item_id,
| avg(cs_quantity) agg1,
| avg(cs_list_price) agg2,
| avg(cs_coupon_amt) agg3,
| avg(cs_sales_price) agg4
| from catalog_sales, customer_demographics, date_dim, item, promotion
| where cs_sold_date_sk = d_date_sk and
| cs_item_sk = i_item_sk and
| cs_bill_cdemo_sk = cd_demo_sk and
| cs_promo_sk = p_promo_sk and
| cd_gender = 'M' and
| cd_marital_status = 'S' and
| cd_education_status = 'College' and
| (p_channel_email = 'N' or p_channel_event = 'N') and
| d_year = 2000
| group by i_item_id
| order by i_item_id
| limit 100
""".stripMargin),
("q27", """
| select i_item_id,
| s_state, grouping(s_state) g_state,
| avg(ss_quantity) agg1,
| avg(ss_list_price) agg2,
| avg(ss_coupon_amt) agg3,
| avg(ss_sales_price) agg4
| from store_sales, customer_demographics, date_dim, store, item
| where ss_sold_date_sk = d_date_sk and
| ss_item_sk = i_item_sk and
| ss_store_sk = s_store_sk and
| ss_cdemo_sk = cd_demo_sk and
| cd_gender = 'M' and
| cd_marital_status = 'S' and
| cd_education_status = 'College' and
| d_year = 2002 and
| s_state in ('TN','TN', 'TN', 'TN', 'TN', 'TN')
| group by rollup (i_item_id, s_state)
| order by i_item_id, s_state
| limit 100
""".stripMargin),
("q28", """
| select *
| from (select avg(ss_list_price) B1_LP
| ,count(ss_list_price) B1_CNT
| ,count(distinct ss_list_price) B1_CNTD
| from store_sales
| where ss_quantity between 0 and 5
| and (ss_list_price between 8 and 8+10
| or ss_coupon_amt between 459 and 459+1000
| or ss_wholesale_cost between 57 and 57+20)) B1,
| (select avg(ss_list_price) B2_LP
| ,count(ss_list_price) B2_CNT
| ,count(distinct ss_list_price) B2_CNTD
| from store_sales
| where ss_quantity between 6 and 10
| and (ss_list_price between 90 and 90+10
| or ss_coupon_amt between 2323 and 2323+1000
| or ss_wholesale_cost between 31 and 31+20)) B2,
| (select avg(ss_list_price) B3_LP
| ,count(ss_list_price) B3_CNT
| ,count(distinct ss_list_price) B3_CNTD
| from store_sales
| where ss_quantity between 11 and 15
| and (ss_list_price between 142 and 142+10
| or ss_coupon_amt between 12214 and 12214+1000
| or ss_wholesale_cost between 79 and 79+20)) B3,
| (select avg(ss_list_price) B4_LP
| ,count(ss_list_price) B4_CNT
| ,count(distinct ss_list_price) B4_CNTD
| from store_sales
| where ss_quantity between 16 and 20
| and (ss_list_price between 135 and 135+10
| or ss_coupon_amt between 6071 and 6071+1000
| or ss_wholesale_cost between 38 and 38+20)) B4,
| (select avg(ss_list_price) B5_LP
| ,count(ss_list_price) B5_CNT
| ,count(distinct ss_list_price) B5_CNTD
| from store_sales
| where ss_quantity between 21 and 25
| and (ss_list_price between 122 and 122+10
| or ss_coupon_amt between 836 and 836+1000
| or ss_wholesale_cost between 17 and 17+20)) B5,
| (select avg(ss_list_price) B6_LP
| ,count(ss_list_price) B6_CNT
| ,count(distinct ss_list_price) B6_CNTD
| from store_sales
| where ss_quantity between 26 and 30
| and (ss_list_price between 154 and 154+10
| or ss_coupon_amt between 7326 and 7326+1000
| or ss_wholesale_cost between 7 and 7+20)) B6
| limit 100
""".stripMargin),
("q29", """
| select
| i_item_id
| ,i_item_desc
| ,s_store_id
| ,s_store_name
| ,sum(ss_quantity) as store_sales_quantity
| ,sum(sr_return_quantity) as store_returns_quantity
| ,sum(cs_quantity) as catalog_sales_quantity
| from
| store_sales, store_returns, catalog_sales, date_dim d1, date_dim d2,
| date_dim d3, store, item
| where
| d1.d_moy = 9
| and d1.d_year = 1999
| and d1.d_date_sk = ss_sold_date_sk
| and i_item_sk = ss_item_sk
| and s_store_sk = ss_store_sk
| and ss_customer_sk = sr_customer_sk
| and ss_item_sk = sr_item_sk
| and ss_ticket_number = sr_ticket_number
| and sr_returned_date_sk = d2.d_date_sk
| and d2.d_moy between 9 and 9 + 3
| and d2.d_year = 1999
| and sr_customer_sk = cs_bill_customer_sk
| and sr_item_sk = cs_item_sk
| and cs_sold_date_sk = d3.d_date_sk
| and d3.d_year in (1999,1999+1,1999+2)
| group by
| i_item_id, i_item_desc, s_store_id, s_store_name
| order by
| i_item_id, i_item_desc, s_store_id, s_store_name
| limit 100
""".stripMargin),
("q30", """
| with customer_total_return as
| (select wr_returning_customer_sk as ctr_customer_sk
| ,ca_state as ctr_state,
| sum(wr_return_amt) as ctr_total_return
| from web_returns, date_dim, customer_address
| where wr_returned_date_sk = d_date_sk
| and d_year = 2002
| and wr_returning_addr_sk = ca_address_sk
| group by wr_returning_customer_sk,ca_state)
| select c_customer_id,c_salutation,c_first_name,c_last_name,c_preferred_cust_flag
| ,c_birth_day,c_birth_month,c_birth_year,c_birth_country,c_login,c_email_address
| ,c_last_review_date,ctr_total_return
| from customer_total_return ctr1, customer_address, customer
| where ctr1.ctr_total_return > (select avg(ctr_total_return)*1.2
| from customer_total_return ctr2
| where ctr1.ctr_state = ctr2.ctr_state)
| and ca_address_sk = c_current_addr_sk
| and ca_state = 'GA'
| and ctr1.ctr_customer_sk = c_customer_sk
| order by c_customer_id,c_salutation,c_first_name,c_last_name,c_preferred_cust_flag
| ,c_birth_day,c_birth_month,c_birth_year,c_birth_country,c_login,c_email_address
| ,c_last_review_date,ctr_total_return
| limit 100
""".stripMargin),
("q31", """
| with ss as
| (select ca_county,d_qoy, d_year,sum(ss_ext_sales_price) as store_sales
| from store_sales,date_dim,customer_address
| where ss_sold_date_sk = d_date_sk
| and ss_addr_sk=ca_address_sk
| group by ca_county,d_qoy, d_year),
| ws as
| (select ca_county,d_qoy, d_year,sum(ws_ext_sales_price) as web_sales
| from web_sales,date_dim,customer_address
| where ws_sold_date_sk = d_date_sk
| and ws_bill_addr_sk=ca_address_sk
| group by ca_county,d_qoy, d_year)
| select
| ss1.ca_county
| ,ss1.d_year
| ,ws2.web_sales/ws1.web_sales web_q1_q2_increase
| ,ss2.store_sales/ss1.store_sales store_q1_q2_increase
| ,ws3.web_sales/ws2.web_sales web_q2_q3_increase
| ,ss3.store_sales/ss2.store_sales store_q2_q3_increase
| from
| ss ss1, ss ss2, ss ss3, ws ws1, ws ws2, ws ws3
| where
| ss1.d_qoy = 1
| and ss1.d_year = 2000
| and ss1.ca_county = ss2.ca_county
| and ss2.d_qoy = 2
| and ss2.d_year = 2000
| and ss2.ca_county = ss3.ca_county
| and ss3.d_qoy = 3
| and ss3.d_year = 2000
| and ss1.ca_county = ws1.ca_county
| and ws1.d_qoy = 1
| and ws1.d_year = 2000
| and ws1.ca_county = ws2.ca_county
| and ws2.d_qoy = 2
| and ws2.d_year = 2000
| and ws1.ca_county = ws3.ca_county
| and ws3.d_qoy = 3
| and ws3.d_year = 2000
| and case when ws1.web_sales > 0 then ws2.web_sales/ws1.web_sales else null end
| > case when ss1.store_sales > 0 then ss2.store_sales/ss1.store_sales else null end
| and case when ws2.web_sales > 0 then ws3.web_sales/ws2.web_sales else null end
| > case when ss2.store_sales > 0 then ss3.store_sales/ss2.store_sales else null end
| order by ss1.ca_county
""".stripMargin),
// Modifications: " -> `
("q32", """
| select sum(cs_ext_discount_amt) as `excess discount amount`
| from
| catalog_sales, item, date_dim
| where
| i_manufact_id = 977
| and i_item_sk = cs_item_sk
| and d_date between '2000-01-27' and (cast('2000-01-27' as date) + interval 90 days)
| and d_date_sk = cs_sold_date_sk
| and cs_ext_discount_amt > (
| select 1.3 * avg(cs_ext_discount_amt)
| from catalog_sales, date_dim
| where cs_item_sk = i_item_sk
        |     and d_date between '2000-01-27' and (cast('2000-01-27' as date) + interval 90 days)
| and d_date_sk = cs_sold_date_sk)
|limit 100
""".stripMargin),
("q33", """
| with ss as (
| select
| i_manufact_id,sum(ss_ext_sales_price) total_sales
| from
| store_sales, date_dim, customer_address, item
| where
| i_manufact_id in (select i_manufact_id
| from item
| where i_category in ('Electronics'))
| and ss_item_sk = i_item_sk
| and ss_sold_date_sk = d_date_sk
| and d_year = 1998
| and d_moy = 5
| and ss_addr_sk = ca_address_sk
| and ca_gmt_offset = -5
| group by i_manufact_id), cs as
| (select i_manufact_id, sum(cs_ext_sales_price) total_sales
| from catalog_sales, date_dim, customer_address, item
| where
| i_manufact_id in (
| select i_manufact_id from item
| where
| i_category in ('Electronics'))
| and cs_item_sk = i_item_sk
| and cs_sold_date_sk = d_date_sk
| and d_year = 1998
| and d_moy = 5
| and cs_bill_addr_sk = ca_address_sk
| and ca_gmt_offset = -5
| group by i_manufact_id),
| ws as (
| select i_manufact_id,sum(ws_ext_sales_price) total_sales
| from
| web_sales, date_dim, customer_address, item
| where
| i_manufact_id in (select i_manufact_id from item
| where i_category in ('Electronics'))
| and ws_item_sk = i_item_sk
| and ws_sold_date_sk = d_date_sk
| and d_year = 1998
| and d_moy = 5
| and ws_bill_addr_sk = ca_address_sk
| and ca_gmt_offset = -5
| group by i_manufact_id)
| select i_manufact_id ,sum(total_sales) total_sales
| from (select * from ss
| union all
| select * from cs
| union all
| select * from ws) tmp1
| group by i_manufact_id
| order by total_sales
|limit 100
""".stripMargin),
("q34", """
| select c_last_name, c_first_name, c_salutation, c_preferred_cust_flag, ss_ticket_number,
| cnt
| FROM
| (select ss_ticket_number, ss_customer_sk, count(*) cnt
| from store_sales,date_dim,store,household_demographics
| where store_sales.ss_sold_date_sk = date_dim.d_date_sk
| and store_sales.ss_store_sk = store.s_store_sk
| and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
| and (date_dim.d_dom between 1 and 3 or date_dim.d_dom between 25 and 28)
| and (household_demographics.hd_buy_potential = '>10000' or
| household_demographics.hd_buy_potential = 'unknown')
| and household_demographics.hd_vehicle_count > 0
| and (case when household_demographics.hd_vehicle_count > 0
| then household_demographics.hd_dep_count/ household_demographics.hd_vehicle_count
| else null
| end) > 1.2
| and date_dim.d_year in (1999, 1999+1, 1999+2)
| and store.s_county in ('Williamson County','Williamson County','Williamson County','Williamson County',
| 'Williamson County','Williamson County','Williamson County','Williamson County')
| group by ss_ticket_number,ss_customer_sk) dn,customer
| where ss_customer_sk = c_customer_sk
| and cnt between 15 and 20
| order by c_last_name,c_first_name,c_salutation,c_preferred_cust_flag desc
""".stripMargin),
("q35", """
| select
| ca_state,
| cd_gender,
| cd_marital_status,
| count(*) cnt1,
| min(cd_dep_count),
| max(cd_dep_count),
| avg(cd_dep_count),
| cd_dep_employed_count,
| count(*) cnt2,
| min(cd_dep_employed_count),
| max(cd_dep_employed_count),
| avg(cd_dep_employed_count),
| cd_dep_college_count,
| count(*) cnt3,
| min(cd_dep_college_count),
| max(cd_dep_college_count),
| avg(cd_dep_college_count)
| from
| customer c,customer_address ca,customer_demographics
| where
| c.c_current_addr_sk = ca.ca_address_sk and
| cd_demo_sk = c.c_current_cdemo_sk and
| exists (select * from store_sales, date_dim
| where c.c_customer_sk = ss_customer_sk and
| ss_sold_date_sk = d_date_sk and
| d_year = 2002 and
| d_qoy < 4) and
| (exists (select * from web_sales, date_dim
| where c.c_customer_sk = ws_bill_customer_sk and
| ws_sold_date_sk = d_date_sk and
| d_year = 2002 and
| d_qoy < 4) or
| exists (select * from catalog_sales, date_dim
| where c.c_customer_sk = cs_ship_customer_sk and
| cs_sold_date_sk = d_date_sk and
| d_year = 2002 and
| d_qoy < 4))
| group by ca_state, cd_gender, cd_marital_status, cd_dep_count,
| cd_dep_employed_count, cd_dep_college_count
| order by ca_state, cd_gender, cd_marital_status, cd_dep_count,
| cd_dep_employed_count, cd_dep_college_count
| limit 100
""".stripMargin),
("q36", """
| select
| sum(ss_net_profit)/sum(ss_ext_sales_price) as gross_margin
| ,i_category
| ,i_class
| ,grouping(i_category)+grouping(i_class) as lochierarchy
| ,rank() over (
| partition by grouping(i_category)+grouping(i_class),
| case when grouping(i_class) = 0 then i_category end
| order by sum(ss_net_profit)/sum(ss_ext_sales_price) asc) as rank_within_parent
| from
| store_sales, date_dim d1, item, store
| where
| d1.d_year = 2001
| and d1.d_date_sk = ss_sold_date_sk
| and i_item_sk = ss_item_sk
| and s_store_sk = ss_store_sk
| and s_state in ('TN','TN','TN','TN','TN','TN','TN','TN')
| group by rollup(i_category,i_class)
| order by
| lochierarchy desc
| ,case when lochierarchy = 0 then i_category end
| ,rank_within_parent
| limit 100
""".stripMargin),
    // Modifications: "+ N days" -> "+ interval N days"
("q37", """
| select i_item_id, i_item_desc, i_current_price
| from item, inventory, date_dim, catalog_sales
| where i_current_price between 68 and 68 + 30
| and inv_item_sk = i_item_sk
| and d_date_sk=inv_date_sk
| and d_date between cast('2000-02-01' as date) and (cast('2000-02-01' as date) + interval 60 days)
| and i_manufact_id in (677,940,694,808)
| and inv_quantity_on_hand between 100 and 500
| and cs_item_sk = i_item_sk
| group by i_item_id,i_item_desc,i_current_price
| order by i_item_id
| limit 100
""".stripMargin),
("q38", """
| select count(*) from (
| select distinct c_last_name, c_first_name, d_date
| from store_sales, date_dim, customer
| where store_sales.ss_sold_date_sk = date_dim.d_date_sk
| and store_sales.ss_customer_sk = customer.c_customer_sk
| and d_month_seq between 1200 and 1200 + 11
| intersect
| select distinct c_last_name, c_first_name, d_date
| from catalog_sales, date_dim, customer
| where catalog_sales.cs_sold_date_sk = date_dim.d_date_sk
| and catalog_sales.cs_bill_customer_sk = customer.c_customer_sk
| and d_month_seq between 1200 and 1200 + 11
| intersect
| select distinct c_last_name, c_first_name, d_date
| from web_sales, date_dim, customer
| where web_sales.ws_sold_date_sk = date_dim.d_date_sk
| and web_sales.ws_bill_customer_sk = customer.c_customer_sk
| and d_month_seq between 1200 and 1200 + 11
| ) hot_cust
| limit 100
""".stripMargin),
("q39a", """
| with inv as
| (select w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy
| ,stdev,mean, case mean when 0 then null else stdev/mean end cov
| from(select w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy
| ,stddev_samp(inv_quantity_on_hand) stdev,avg(inv_quantity_on_hand) mean
| from inventory, item, warehouse, date_dim
| where inv_item_sk = i_item_sk
| and inv_warehouse_sk = w_warehouse_sk
| and inv_date_sk = d_date_sk
| and d_year = 2001
| group by w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy) foo
| where case mean when 0 then 0 else stdev/mean end > 1)
| select inv1.w_warehouse_sk,inv1.i_item_sk,inv1.d_moy,inv1.mean, inv1.cov
| ,inv2.w_warehouse_sk,inv2.i_item_sk,inv2.d_moy,inv2.mean, inv2.cov
| from inv inv1,inv inv2
| where inv1.i_item_sk = inv2.i_item_sk
| and inv1.w_warehouse_sk = inv2.w_warehouse_sk
| and inv1.d_moy=1
| and inv2.d_moy=1+1
| order by inv1.w_warehouse_sk,inv1.i_item_sk,inv1.d_moy,inv1.mean,inv1.cov
| ,inv2.d_moy,inv2.mean, inv2.cov
""".stripMargin),
("q39b", """
| with inv as
| (select w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy
| ,stdev,mean, case mean when 0 then null else stdev/mean end cov
| from(select w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy
| ,stddev_samp(inv_quantity_on_hand) stdev,avg(inv_quantity_on_hand) mean
| from inventory, item, warehouse, date_dim
| where inv_item_sk = i_item_sk
| and inv_warehouse_sk = w_warehouse_sk
| and inv_date_sk = d_date_sk
| and d_year = 2001
| group by w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy) foo
| where case mean when 0 then 0 else stdev/mean end > 1)
| select inv1.w_warehouse_sk,inv1.i_item_sk,inv1.d_moy,inv1.mean, inv1.cov
| ,inv2.w_warehouse_sk,inv2.i_item_sk,inv2.d_moy,inv2.mean, inv2.cov
| from inv inv1,inv inv2
| where inv1.i_item_sk = inv2.i_item_sk
| and inv1.w_warehouse_sk = inv2.w_warehouse_sk
| and inv1.d_moy=1
| and inv2.d_moy=1+1
| and inv1.cov > 1.5
| order by inv1.w_warehouse_sk,inv1.i_item_sk,inv1.d_moy,inv1.mean,inv1.cov
| ,inv2.d_moy,inv2.mean, inv2.cov
""".stripMargin),
    // Modifications: "+ N days" -> "+ interval N days"
("q40", """
| select
| w_state
| ,i_item_id
| ,sum(case when (cast(d_date as date) < cast('2000-03-11' as date))
| then cs_sales_price - coalesce(cr_refunded_cash,0) else 0 end) as sales_before
| ,sum(case when (cast(d_date as date) >= cast('2000-03-11' as date))
| then cs_sales_price - coalesce(cr_refunded_cash,0) else 0 end) as sales_after
| from
| catalog_sales left outer join catalog_returns on
| (cs_order_number = cr_order_number
| and cs_item_sk = cr_item_sk)
| ,warehouse, item, date_dim
| where
| i_current_price between 0.99 and 1.49
| and i_item_sk = cs_item_sk
| and cs_warehouse_sk = w_warehouse_sk
| and cs_sold_date_sk = d_date_sk
| and d_date between (cast('2000-03-11' as date) - interval 30 days)
| and (cast('2000-03-11' as date) + interval 30 days)
| group by w_state,i_item_id
| order by w_state,i_item_id
| limit 100
""".stripMargin),
("q41", """
| select distinct(i_product_name)
| from item i1
| where i_manufact_id between 738 and 738+40
| and (select count(*) as item_cnt
| from item
| where (i_manufact = i1.i_manufact and
| ((i_category = 'Women' and
| (i_color = 'powder' or i_color = 'khaki') and
| (i_units = 'Ounce' or i_units = 'Oz') and
| (i_size = 'medium' or i_size = 'extra large')
| ) or
| (i_category = 'Women' and
| (i_color = 'brown' or i_color = 'honeydew') and
| (i_units = 'Bunch' or i_units = 'Ton') and
| (i_size = 'N/A' or i_size = 'small')
| ) or
| (i_category = 'Men' and
| (i_color = 'floral' or i_color = 'deep') and
| (i_units = 'N/A' or i_units = 'Dozen') and
| (i_size = 'petite' or i_size = 'large')
| ) or
| (i_category = 'Men' and
| (i_color = 'light' or i_color = 'cornflower') and
| (i_units = 'Box' or i_units = 'Pound') and
| (i_size = 'medium' or i_size = 'extra large')
| ))) or
| (i_manufact = i1.i_manufact and
| ((i_category = 'Women' and
| (i_color = 'midnight' or i_color = 'snow') and
| (i_units = 'Pallet' or i_units = 'Gross') and
| (i_size = 'medium' or i_size = 'extra large')
| ) or
| (i_category = 'Women' and
| (i_color = 'cyan' or i_color = 'papaya') and
| (i_units = 'Cup' or i_units = 'Dram') and
| (i_size = 'N/A' or i_size = 'small')
| ) or
| (i_category = 'Men' and
| (i_color = 'orange' or i_color = 'frosted') and
| (i_units = 'Each' or i_units = 'Tbl') and
| (i_size = 'petite' or i_size = 'large')
| ) or
| (i_category = 'Men' and
| (i_color = 'forest' or i_color = 'ghost') and
| (i_units = 'Lb' or i_units = 'Bundle') and
| (i_size = 'medium' or i_size = 'extra large')
| )))) > 0
| order by i_product_name
| limit 100
""".stripMargin),
("q42", """
| select dt.d_year, item.i_category_id, item.i_category, sum(ss_ext_sales_price)
| from date_dim dt, store_sales, item
| where dt.d_date_sk = store_sales.ss_sold_date_sk
| and store_sales.ss_item_sk = item.i_item_sk
| and item.i_manager_id = 1
| and dt.d_moy=11
| and dt.d_year=2000
| group by dt.d_year
| ,item.i_category_id
| ,item.i_category
| order by sum(ss_ext_sales_price) desc,dt.d_year
| ,item.i_category_id
| ,item.i_category
| limit 100
""".stripMargin),
("q43", """
| select s_store_name, s_store_id,
| sum(case when (d_day_name='Sunday') then ss_sales_price else null end) sun_sales,
| sum(case when (d_day_name='Monday') then ss_sales_price else null end) mon_sales,
| sum(case when (d_day_name='Tuesday') then ss_sales_price else null end) tue_sales,
| sum(case when (d_day_name='Wednesday') then ss_sales_price else null end) wed_sales,
| sum(case when (d_day_name='Thursday') then ss_sales_price else null end) thu_sales,
| sum(case when (d_day_name='Friday') then ss_sales_price else null end) fri_sales,
| sum(case when (d_day_name='Saturday') then ss_sales_price else null end) sat_sales
| from date_dim, store_sales, store
| where d_date_sk = ss_sold_date_sk and
| s_store_sk = ss_store_sk and
| s_gmt_offset = -5 and
| d_year = 2000
| group by s_store_name, s_store_id
| order by s_store_name, s_store_id,sun_sales,mon_sales,tue_sales,wed_sales,
| thu_sales,fri_sales,sat_sales
| limit 100
""".stripMargin),
("q44", """
| select asceding.rnk, i1.i_product_name best_performing, i2.i_product_name worst_performing
| from(select *
| from (select item_sk,rank() over (order by rank_col asc) rnk
| from (select ss_item_sk item_sk,avg(ss_net_profit) rank_col
| from store_sales ss1
| where ss_store_sk = 4
| group by ss_item_sk
| having avg(ss_net_profit) > 0.9*(select avg(ss_net_profit) rank_col
| from store_sales
| where ss_store_sk = 4
| and ss_addr_sk is null
| group by ss_store_sk))V1)V11
| where rnk < 11) asceding,
| (select *
| from (select item_sk,rank() over (order by rank_col desc) rnk
| from (select ss_item_sk item_sk,avg(ss_net_profit) rank_col
| from store_sales ss1
| where ss_store_sk = 4
| group by ss_item_sk
| having avg(ss_net_profit) > 0.9*(select avg(ss_net_profit) rank_col
| from store_sales
| where ss_store_sk = 4
| and ss_addr_sk is null
| group by ss_store_sk))V2)V21
| where rnk < 11) descending,
| item i1, item i2
| where asceding.rnk = descending.rnk
| and i1.i_item_sk=asceding.item_sk
| and i2.i_item_sk=descending.item_sk
| order by asceding.rnk
| limit 100
""".stripMargin),
("q45", """
| select ca_zip, ca_city, sum(ws_sales_price)
| from web_sales, customer, customer_address, date_dim, item
| where ws_bill_customer_sk = c_customer_sk
| and c_current_addr_sk = ca_address_sk
| and ws_item_sk = i_item_sk
| and ( substr(ca_zip,1,5) in ('85669', '86197','88274','83405','86475', '85392', '85460', '80348', '81792')
| or
| i_item_id in (select i_item_id
| from item
| where i_item_sk in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29)
| )
| )
| and ws_sold_date_sk = d_date_sk
| and d_qoy = 2 and d_year = 2001
| group by ca_zip, ca_city
| order by ca_zip, ca_city
| limit 100
""".stripMargin),
("q46", """
| select c_last_name, c_first_name, ca_city, bought_city, ss_ticket_number, amt,profit
| from
| (select ss_ticket_number
| ,ss_customer_sk
| ,ca_city bought_city
| ,sum(ss_coupon_amt) amt
| ,sum(ss_net_profit) profit
| from store_sales, date_dim, store, household_demographics, customer_address
| where store_sales.ss_sold_date_sk = date_dim.d_date_sk
| and store_sales.ss_store_sk = store.s_store_sk
| and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
| and store_sales.ss_addr_sk = customer_address.ca_address_sk
| and (household_demographics.hd_dep_count = 4 or
| household_demographics.hd_vehicle_count= 3)
| and date_dim.d_dow in (6,0)
| and date_dim.d_year in (1999,1999+1,1999+2)
| and store.s_city in ('Fairview','Midway','Fairview','Fairview','Fairview')
| group by ss_ticket_number,ss_customer_sk,ss_addr_sk,ca_city) dn,customer,customer_address current_addr
| where ss_customer_sk = c_customer_sk
| and customer.c_current_addr_sk = current_addr.ca_address_sk
| and current_addr.ca_city <> bought_city
| order by c_last_name, c_first_name, ca_city, bought_city, ss_ticket_number
| limit 100
""".stripMargin),
("q47", """
| with v1 as(
| select i_category, i_brand,
| s_store_name, s_company_name,
| d_year, d_moy,
| sum(ss_sales_price) sum_sales,
| avg(sum(ss_sales_price)) over
| (partition by i_category, i_brand,
| s_store_name, s_company_name, d_year)
| avg_monthly_sales,
| rank() over
| (partition by i_category, i_brand,
| s_store_name, s_company_name
| order by d_year, d_moy) rn
| from item, store_sales, date_dim, store
| where ss_item_sk = i_item_sk and
| ss_sold_date_sk = d_date_sk and
| ss_store_sk = s_store_sk and
| (
| d_year = 1999 or
| ( d_year = 1999-1 and d_moy =12) or
| ( d_year = 1999+1 and d_moy =1)
| )
| group by i_category, i_brand,
| s_store_name, s_company_name,
| d_year, d_moy),
| v2 as(
| select v1.i_category, v1.i_brand, v1.s_store_name, v1.s_company_name, v1.d_year,
v1.d_moy, v1.avg_monthly_sales ,v1.sum_sales, v1_lag.sum_sales psum,
v1_lead.sum_sales nsum
| from v1, v1 v1_lag, v1 v1_lead
| where v1.i_category = v1_lag.i_category and
| v1.i_category = v1_lead.i_category and
| v1.i_brand = v1_lag.i_brand and
| v1.i_brand = v1_lead.i_brand and
| v1.s_store_name = v1_lag.s_store_name and
| v1.s_store_name = v1_lead.s_store_name and
| v1.s_company_name = v1_lag.s_company_name and
| v1.s_company_name = v1_lead.s_company_name and
| v1.rn = v1_lag.rn + 1 and
| v1.rn = v1_lead.rn - 1)
| select * from v2
| where d_year = 1999 and
| avg_monthly_sales > 0 and
| case when avg_monthly_sales > 0 then abs(sum_sales - avg_monthly_sales) / avg_monthly_sales else null end > 0.1
| order by sum_sales - avg_monthly_sales, 3
| limit 100
""".stripMargin),
("q48", """
| select sum (ss_quantity)
| from store_sales, store, customer_demographics, customer_address, date_dim
| where s_store_sk = ss_store_sk
| and ss_sold_date_sk = d_date_sk and d_year = 2001
| and
| (
| (
| cd_demo_sk = ss_cdemo_sk
| and
| cd_marital_status = 'M'
| and
| cd_education_status = '4 yr Degree'
| and
| ss_sales_price between 100.00 and 150.00
| )
| or
| (
| cd_demo_sk = ss_cdemo_sk
| and
| cd_marital_status = 'D'
| and
| cd_education_status = '2 yr Degree'
| and
| ss_sales_price between 50.00 and 100.00
| )
| or
| (
| cd_demo_sk = ss_cdemo_sk
| and
| cd_marital_status = 'S'
| and
| cd_education_status = 'College'
| and
| ss_sales_price between 150.00 and 200.00
| )
| )
| and
| (
| (
| ss_addr_sk = ca_address_sk
| and
| ca_country = 'United States'
| and
| ca_state in ('CO', 'OH', 'TX')
| and ss_net_profit between 0 and 2000
| )
| or
| (ss_addr_sk = ca_address_sk
| and
| ca_country = 'United States'
| and
| ca_state in ('OR', 'MN', 'KY')
| and ss_net_profit between 150 and 3000
| )
| or
| (ss_addr_sk = ca_address_sk
| and
| ca_country = 'United States'
| and
| ca_state in ('VA', 'CA', 'MS')
| and ss_net_profit between 50 and 25000
| )
| )
""".stripMargin),
// Modifications: "dec" -> "decimal"
("q49", """
| select 'web' as channel, web.item, web.return_ratio, web.return_rank, web.currency_rank
| from (
| select
| item, return_ratio, currency_ratio,
| rank() over (order by return_ratio) as return_rank,
| rank() over (order by currency_ratio) as currency_rank
| from
| ( select ws.ws_item_sk as item
| ,(cast(sum(coalesce(wr.wr_return_quantity,0)) as decimal(15,4))/
| cast(sum(coalesce(ws.ws_quantity,0)) as decimal(15,4) )) as return_ratio
| ,(cast(sum(coalesce(wr.wr_return_amt,0)) as decimal(15,4))/
| cast(sum(coalesce(ws.ws_net_paid,0)) as decimal(15,4) )) as currency_ratio
| from
| web_sales ws left outer join web_returns wr
| on (ws.ws_order_number = wr.wr_order_number and
| ws.ws_item_sk = wr.wr_item_sk)
| ,date_dim
| where
| wr.wr_return_amt > 10000
| and ws.ws_net_profit > 1
| and ws.ws_net_paid > 0
| and ws.ws_quantity > 0
| and ws_sold_date_sk = d_date_sk
| and d_year = 2001
| and d_moy = 12
| group by ws.ws_item_sk
| ) in_web
| ) web
| where (web.return_rank <= 10 or web.currency_rank <= 10)
| union
| select
| 'catalog' as channel, catalog.item, catalog.return_ratio,
| catalog.return_rank, catalog.currency_rank
| from (
| select
| item, return_ratio, currency_ratio,
| rank() over (order by return_ratio) as return_rank,
| rank() over (order by currency_ratio) as currency_rank
| from
| ( select
| cs.cs_item_sk as item
| ,(cast(sum(coalesce(cr.cr_return_quantity,0)) as decimal(15,4))/
| cast(sum(coalesce(cs.cs_quantity,0)) as decimal(15,4) )) as return_ratio
| ,(cast(sum(coalesce(cr.cr_return_amount,0)) as decimal(15,4))/
| cast(sum(coalesce(cs.cs_net_paid,0)) as decimal(15,4) )) as currency_ratio
| from
| catalog_sales cs left outer join catalog_returns cr
| on (cs.cs_order_number = cr.cr_order_number and
| cs.cs_item_sk = cr.cr_item_sk)
| ,date_dim
| where
| cr.cr_return_amount > 10000
| and cs.cs_net_profit > 1
| and cs.cs_net_paid > 0
| and cs.cs_quantity > 0
| and cs_sold_date_sk = d_date_sk
| and d_year = 2001
| and d_moy = 12
| group by cs.cs_item_sk
| ) in_cat
| ) catalog
| where (catalog.return_rank <= 10 or catalog.currency_rank <=10)
| union
| select
| 'store' as channel, store.item, store.return_ratio,
| store.return_rank, store.currency_rank
| from (
| select
| item, return_ratio, currency_ratio,
| rank() over (order by return_ratio) as return_rank,
| rank() over (order by currency_ratio) as currency_rank
| from
| ( select sts.ss_item_sk as item
| ,(cast(sum(coalesce(sr.sr_return_quantity,0)) as decimal(15,4))/
| cast(sum(coalesce(sts.ss_quantity,0)) as decimal(15,4) )) as return_ratio
| ,(cast(sum(coalesce(sr.sr_return_amt,0)) as decimal(15,4))/
| cast(sum(coalesce(sts.ss_net_paid,0)) as decimal(15,4) )) as currency_ratio
| from
| store_sales sts left outer join store_returns sr
| on (sts.ss_ticket_number = sr.sr_ticket_number and sts.ss_item_sk = sr.sr_item_sk)
| ,date_dim
| where
| sr.sr_return_amt > 10000
| and sts.ss_net_profit > 1
| and sts.ss_net_paid > 0
| and sts.ss_quantity > 0
| and ss_sold_date_sk = d_date_sk
| and d_year = 2001
| and d_moy = 12
| group by sts.ss_item_sk
| ) in_store
| ) store
| where (store.return_rank <= 10 or store.currency_rank <= 10)
| order by 1,4,5
| limit 100
""".stripMargin),
// Modifications: " -> `
("q50", """
| select
| s_store_name, s_company_id, s_street_number, s_street_name, s_street_type,
| s_suite_number, s_city, s_county, s_state, s_zip
| ,sum(case when (sr_returned_date_sk - ss_sold_date_sk <= 30 ) then 1 else 0 end) as `30 days`
| ,sum(case when (sr_returned_date_sk - ss_sold_date_sk > 30) and
| (sr_returned_date_sk - ss_sold_date_sk <= 60) then 1 else 0 end ) as `31-60 days`
| ,sum(case when (sr_returned_date_sk - ss_sold_date_sk > 60) and
| (sr_returned_date_sk - ss_sold_date_sk <= 90) then 1 else 0 end) as `61-90 days`
| ,sum(case when (sr_returned_date_sk - ss_sold_date_sk > 90) and
| (sr_returned_date_sk - ss_sold_date_sk <= 120) then 1 else 0 end) as `91-120 days`
| ,sum(case when (sr_returned_date_sk - ss_sold_date_sk > 120) then 1 else 0 end) as `>120 days`
| from
| store_sales, store_returns, store, date_dim d1, date_dim d2
| where
| d2.d_year = 2001
| and d2.d_moy = 8
| and ss_ticket_number = sr_ticket_number
| and ss_item_sk = sr_item_sk
| and ss_sold_date_sk = d1.d_date_sk
| and sr_returned_date_sk = d2.d_date_sk
| and ss_customer_sk = sr_customer_sk
| and ss_store_sk = s_store_sk
| group by
| s_store_name, s_company_id, s_street_number, s_street_name, s_street_type,
| s_suite_number, s_city, s_county, s_state, s_zip
| order by
| s_store_name, s_company_id, s_street_number, s_street_name, s_street_type,
| s_suite_number, s_city, s_county, s_state, s_zip
| limit 100
""".stripMargin),
("q51", """
| WITH web_v1 as (
| select
| ws_item_sk item_sk, d_date,
| sum(sum(ws_sales_price))
| over (partition by ws_item_sk order by d_date rows between unbounded preceding and current row) cume_sales
| from web_sales, date_dim
| where ws_sold_date_sk=d_date_sk
| and d_month_seq between 1200 and 1200+11
| and ws_item_sk is not NULL
| group by ws_item_sk, d_date),
| store_v1 as (
| select
| ss_item_sk item_sk, d_date,
| sum(sum(ss_sales_price))
| over (partition by ss_item_sk order by d_date rows between unbounded preceding and current row) cume_sales
| from store_sales, date_dim
| where ss_sold_date_sk=d_date_sk
| and d_month_seq between 1200 and 1200+11
| and ss_item_sk is not NULL
| group by ss_item_sk, d_date)
| select *
| from (select item_sk, d_date, web_sales, store_sales
| ,max(web_sales)
| over (partition by item_sk order by d_date rows between unbounded preceding and current row) web_cumulative
| ,max(store_sales)
| over (partition by item_sk order by d_date rows between unbounded preceding and current row) store_cumulative
| from (select case when web.item_sk is not null then web.item_sk else store.item_sk end item_sk
| ,case when web.d_date is not null then web.d_date else store.d_date end d_date
| ,web.cume_sales web_sales
| ,store.cume_sales store_sales
| from web_v1 web full outer join store_v1 store on (web.item_sk = store.item_sk
| and web.d_date = store.d_date)
| )x )y
| where web_cumulative > store_cumulative
| order by item_sk, d_date
| limit 100
""".stripMargin),
("q52", """
| select dt.d_year
| ,item.i_brand_id brand_id
| ,item.i_brand brand
| ,sum(ss_ext_sales_price) ext_price
| from date_dim dt, store_sales, item
| where dt.d_date_sk = store_sales.ss_sold_date_sk
| and store_sales.ss_item_sk = item.i_item_sk
| and item.i_manager_id = 1
| and dt.d_moy=11
| and dt.d_year=2000
| group by dt.d_year, item.i_brand, item.i_brand_id
| order by dt.d_year, ext_price desc, brand_id
|limit 100
""".stripMargin),
("q53", """
| select * from
| (select i_manufact_id,
| sum(ss_sales_price) sum_sales,
| avg(sum(ss_sales_price)) over (partition by i_manufact_id) avg_quarterly_sales
| from item, store_sales, date_dim, store
| where ss_item_sk = i_item_sk and
| ss_sold_date_sk = d_date_sk and
| ss_store_sk = s_store_sk and
| d_month_seq in (1200,1200+1,1200+2,1200+3,1200+4,1200+5,1200+6,
| 1200+7,1200+8,1200+9,1200+10,1200+11) and
| ((i_category in ('Books','Children','Electronics') and
| i_class in ('personal','portable','reference','self-help') and
| i_brand in ('scholaramalgamalg #14','scholaramalgamalg #7',
| 'exportiunivamalg #9','scholaramalgamalg #9'))
| or
| (i_category in ('Women','Music','Men') and
| i_class in ('accessories','classical','fragrances','pants') and
| i_brand in ('amalgimporto #1','edu packscholar #1','exportiimporto #1',
| 'importoamalg #1')))
| group by i_manufact_id, d_qoy ) tmp1
| where case when avg_quarterly_sales > 0
| then abs (sum_sales - avg_quarterly_sales)/ avg_quarterly_sales
| else null end > 0.1
| order by avg_quarterly_sales,
| sum_sales,
| i_manufact_id
| limit 100
""".stripMargin),
("q54", """
| with my_customers as (
| select distinct c_customer_sk
| , c_current_addr_sk
| from
| ( select cs_sold_date_sk sold_date_sk,
| cs_bill_customer_sk customer_sk,
| cs_item_sk item_sk
| from catalog_sales
| union all
| select ws_sold_date_sk sold_date_sk,
| ws_bill_customer_sk customer_sk,
| ws_item_sk item_sk
| from web_sales
| ) cs_or_ws_sales,
| item,
| date_dim,
| customer
| where sold_date_sk = d_date_sk
| and item_sk = i_item_sk
| and i_category = 'Women'
| and i_class = 'maternity'
| and c_customer_sk = cs_or_ws_sales.customer_sk
| and d_moy = 12
| and d_year = 1998
| )
| , my_revenue as (
| select c_customer_sk,
| sum(ss_ext_sales_price) as revenue
| from my_customers,
| store_sales,
| customer_address,
| store,
| date_dim
| where c_current_addr_sk = ca_address_sk
| and ca_county = s_county
| and ca_state = s_state
| and ss_sold_date_sk = d_date_sk
| and c_customer_sk = ss_customer_sk
| and d_month_seq between (select distinct d_month_seq+1
| from date_dim where d_year = 1998 and d_moy = 12)
| and (select distinct d_month_seq+3
| from date_dim where d_year = 1998 and d_moy = 12)
| group by c_customer_sk
| )
| , segments as
| (select cast((revenue/50) as int) as segment from my_revenue)
| select segment, count(*) as num_customers, segment*50 as segment_base
| from segments
| group by segment
| order by segment, num_customers
| limit 100
""".stripMargin),
("q55", """
|select i_brand_id brand_id, i_brand brand,
| sum(ss_ext_sales_price) ext_price
| from date_dim, store_sales, item
| where d_date_sk = ss_sold_date_sk
| and ss_item_sk = i_item_sk
| and i_manager_id=28
| and d_moy=11
| and d_year=1999
| group by i_brand, i_brand_id
| order by ext_price desc, brand_id
| limit 100
""".stripMargin),
("q56", """
| with ss as (
| select i_item_id,sum(ss_ext_sales_price) total_sales
| from
| store_sales, date_dim, customer_address, item
| where
| i_item_id in (select i_item_id from item where i_color in ('slate','blanched','burnished'))
| and ss_item_sk = i_item_sk
| and ss_sold_date_sk = d_date_sk
| and d_year = 2001
| and d_moy = 2
| and ss_addr_sk = ca_address_sk
| and ca_gmt_offset = -5
| group by i_item_id),
| cs as (
| select i_item_id,sum(cs_ext_sales_price) total_sales
| from
| catalog_sales, date_dim, customer_address, item
| where
| i_item_id in (select i_item_id from item where i_color in ('slate','blanched','burnished'))
| and cs_item_sk = i_item_sk
| and cs_sold_date_sk = d_date_sk
| and d_year = 2001
| and d_moy = 2
| and cs_bill_addr_sk = ca_address_sk
| and ca_gmt_offset = -5
| group by i_item_id),
| ws as (
| select i_item_id,sum(ws_ext_sales_price) total_sales
| from
| web_sales, date_dim, customer_address, item
| where
| i_item_id in (select i_item_id from item where i_color in ('slate','blanched','burnished'))
| and ws_item_sk = i_item_sk
| and ws_sold_date_sk = d_date_sk
| and d_year = 2001
| and d_moy = 2
| and ws_bill_addr_sk = ca_address_sk
| and ca_gmt_offset = -5
| group by i_item_id)
| select i_item_id ,sum(total_sales) total_sales
| from (select * from ss
| union all
| select * from cs
| union all
| select * from ws) tmp1
| group by i_item_id
| order by total_sales
| limit 100
""".stripMargin),
("q57", """
| with v1 as(
| select i_category, i_brand,
| cc_name,
| d_year, d_moy,
| sum(cs_sales_price) sum_sales,
| avg(sum(cs_sales_price)) over
| (partition by i_category, i_brand, cc_name, d_year)
| avg_monthly_sales,
| rank() over
| (partition by i_category, i_brand, cc_name
| order by d_year, d_moy) rn
| from item, catalog_sales, date_dim, call_center
| where cs_item_sk = i_item_sk and
| cs_sold_date_sk = d_date_sk and
| cc_call_center_sk= cs_call_center_sk and
| (
| d_year = 1999 or
| ( d_year = 1999-1 and d_moy =12) or
| ( d_year = 1999+1 and d_moy =1)
| )
| group by i_category, i_brand,
| cc_name , d_year, d_moy),
| v2 as(
| select v1.i_category, v1.i_brand, v1.cc_name, v1.d_year, v1.d_moy
| ,v1.avg_monthly_sales
| ,v1.sum_sales, v1_lag.sum_sales psum, v1_lead.sum_sales nsum
| from v1, v1 v1_lag, v1 v1_lead
| where v1.i_category = v1_lag.i_category and
| v1.i_category = v1_lead.i_category and
| v1.i_brand = v1_lag.i_brand and
| v1.i_brand = v1_lead.i_brand and
| v1. cc_name = v1_lag. cc_name and
| v1. cc_name = v1_lead. cc_name and
| v1.rn = v1_lag.rn + 1 and
| v1.rn = v1_lead.rn - 1)
| select * from v2
| where d_year = 1999 and
| avg_monthly_sales > 0 and
| case when avg_monthly_sales > 0 then abs(sum_sales - avg_monthly_sales) / avg_monthly_sales else null end > 0.1
| order by sum_sales - avg_monthly_sales, 3
| limit 100
""".stripMargin),
("q58", """
| with ss_items as
| (select i_item_id item_id, sum(ss_ext_sales_price) ss_item_rev
| from store_sales, item, date_dim
| where ss_item_sk = i_item_sk
| and d_date in (select d_date
| from date_dim
| where d_week_seq = (select d_week_seq
| from date_dim
| where d_date = '2000-01-03'))
| and ss_sold_date_sk = d_date_sk
| group by i_item_id),
| cs_items as
| (select i_item_id item_id
| ,sum(cs_ext_sales_price) cs_item_rev
| from catalog_sales, item, date_dim
| where cs_item_sk = i_item_sk
| and d_date in (select d_date
| from date_dim
| where d_week_seq = (select d_week_seq
| from date_dim
| where d_date = '2000-01-03'))
| and cs_sold_date_sk = d_date_sk
| group by i_item_id),
| ws_items as
| (select i_item_id item_id, sum(ws_ext_sales_price) ws_item_rev
| from web_sales, item, date_dim
| where ws_item_sk = i_item_sk
| and d_date in (select d_date
| from date_dim
| where d_week_seq =(select d_week_seq
| from date_dim
| where d_date = '2000-01-03'))
| and ws_sold_date_sk = d_date_sk
| group by i_item_id)
| select ss_items.item_id
| ,ss_item_rev
| ,ss_item_rev/(ss_item_rev+cs_item_rev+ws_item_rev)/3 * 100 ss_dev
| ,cs_item_rev
| ,cs_item_rev/(ss_item_rev+cs_item_rev+ws_item_rev)/3 * 100 cs_dev
| ,ws_item_rev
| ,ws_item_rev/(ss_item_rev+cs_item_rev+ws_item_rev)/3 * 100 ws_dev
| ,(ss_item_rev+cs_item_rev+ws_item_rev)/3 average
| from ss_items,cs_items,ws_items
| where ss_items.item_id=cs_items.item_id
| and ss_items.item_id=ws_items.item_id
| and ss_item_rev between 0.9 * cs_item_rev and 1.1 * cs_item_rev
| and ss_item_rev between 0.9 * ws_item_rev and 1.1 * ws_item_rev
| and cs_item_rev between 0.9 * ss_item_rev and 1.1 * ss_item_rev
| and cs_item_rev between 0.9 * ws_item_rev and 1.1 * ws_item_rev
| and ws_item_rev between 0.9 * ss_item_rev and 1.1 * ss_item_rev
| and ws_item_rev between 0.9 * cs_item_rev and 1.1 * cs_item_rev
| order by item_id, ss_item_rev
| limit 100
""".stripMargin),
("q59", """
| with wss as
| (select d_week_seq,
| ss_store_sk,
| sum(case when (d_day_name='Sunday') then ss_sales_price else null end) sun_sales,
| sum(case when (d_day_name='Monday') then ss_sales_price else null end) mon_sales,
| sum(case when (d_day_name='Tuesday') then ss_sales_price else null end) tue_sales,
| sum(case when (d_day_name='Wednesday') then ss_sales_price else null end) wed_sales,
| sum(case when (d_day_name='Thursday') then ss_sales_price else null end) thu_sales,
| sum(case when (d_day_name='Friday') then ss_sales_price else null end) fri_sales,
| sum(case when (d_day_name='Saturday') then ss_sales_price else null end) sat_sales
| from store_sales,date_dim
| where d_date_sk = ss_sold_date_sk
| group by d_week_seq,ss_store_sk
| )
| select s_store_name1,s_store_id1,d_week_seq1
| ,sun_sales1/sun_sales2,mon_sales1/mon_sales2
| ,tue_sales1/tue_sales2,wed_sales1/wed_sales2,thu_sales1/thu_sales2
| ,fri_sales1/fri_sales2,sat_sales1/sat_sales2
| from
| (select s_store_name s_store_name1,wss.d_week_seq d_week_seq1
| ,s_store_id s_store_id1,sun_sales sun_sales1
| ,mon_sales mon_sales1,tue_sales tue_sales1
| ,wed_sales wed_sales1,thu_sales thu_sales1
| ,fri_sales fri_sales1,sat_sales sat_sales1
| from wss,store,date_dim d
| where d.d_week_seq = wss.d_week_seq and
| ss_store_sk = s_store_sk and
| d_month_seq between 1212 and 1212 + 11) y,
| (select s_store_name s_store_name2,wss.d_week_seq d_week_seq2
| ,s_store_id s_store_id2,sun_sales sun_sales2
| ,mon_sales mon_sales2,tue_sales tue_sales2
| ,wed_sales wed_sales2,thu_sales thu_sales2
| ,fri_sales fri_sales2,sat_sales sat_sales2
| from wss,store,date_dim d
| where d.d_week_seq = wss.d_week_seq and
| ss_store_sk = s_store_sk and
| d_month_seq between 1212+ 12 and 1212 + 23) x
| where s_store_id1=s_store_id2
| and d_week_seq1=d_week_seq2-52
| order by s_store_name1,s_store_id1,d_week_seq1
| limit 100
""".stripMargin),
("q60", """
| with ss as (
| select i_item_id,sum(ss_ext_sales_price) total_sales
| from store_sales, date_dim, customer_address, item
| where
| i_item_id in (select i_item_id from item where i_category in ('Music'))
| and ss_item_sk = i_item_sk
| and ss_sold_date_sk = d_date_sk
| and d_year = 1998
| and d_moy = 9
| and ss_addr_sk = ca_address_sk
| and ca_gmt_offset = -5
| group by i_item_id),
| cs as (
| select i_item_id,sum(cs_ext_sales_price) total_sales
| from catalog_sales, date_dim, customer_address, item
| where
| i_item_id in (select i_item_id from item where i_category in ('Music'))
| and cs_item_sk = i_item_sk
| and cs_sold_date_sk = d_date_sk
| and d_year = 1998
| and d_moy = 9
| and cs_bill_addr_sk = ca_address_sk
| and ca_gmt_offset = -5
| group by i_item_id),
| ws as (
| select i_item_id,sum(ws_ext_sales_price) total_sales
| from web_sales, date_dim, customer_address, item
| where
| i_item_id in (select i_item_id from item where i_category in ('Music'))
| and ws_item_sk = i_item_sk
| and ws_sold_date_sk = d_date_sk
| and d_year = 1998
| and d_moy = 9
| and ws_bill_addr_sk = ca_address_sk
| and ca_gmt_offset = -5
| group by i_item_id)
| select i_item_id, sum(total_sales) total_sales
| from (select * from ss
| union all
| select * from cs
| union all
| select * from ws) tmp1
| group by i_item_id
| order by i_item_id, total_sales
| limit 100
""".stripMargin),
("q61", s"""
| select promotions,total,cast(promotions as decimal(15,4))/cast(total as decimal(15,4))*100
| from
| (select sum(ss_ext_sales_price) promotions
| from store_sales, store, promotion, date_dim, customer, customer_address, item
| where ss_sold_date_sk = d_date_sk
| and ss_store_sk = s_store_sk
| and ss_promo_sk = p_promo_sk
| and ss_customer_sk= c_customer_sk
| and ca_address_sk = c_current_addr_sk
| and ss_item_sk = i_item_sk
| and ca_gmt_offset = -5
| and i_category = 'Jewelry'
| and (p_channel_dmail = 'Y' or p_channel_email = 'Y' or p_channel_tv = 'Y')
| and s_gmt_offset = -5
| and d_year = 1998
| and d_moy = 11) promotional_sales,
| (select sum(ss_ext_sales_price) total
| from store_sales, store, date_dim, customer, customer_address, item
| where ss_sold_date_sk = d_date_sk
| and ss_store_sk = s_store_sk
| and ss_customer_sk= c_customer_sk
| and ca_address_sk = c_current_addr_sk
| and ss_item_sk = i_item_sk
| and ca_gmt_offset = -5
| and i_category = 'Jewelry'
| and s_gmt_offset = -5
| and d_year = 1998
| and d_moy = 11) all_sales
| order by promotions, total
| limit 100
""".stripMargin),
// Modifications: " -> `
("q62", """
| select
| substr(w_warehouse_name,1,20)
| ,sm_type
| ,web_name
| ,sum(case when (ws_ship_date_sk - ws_sold_date_sk <= 30 ) then 1 else 0 end) as `30 days`
| ,sum(case when (ws_ship_date_sk - ws_sold_date_sk > 30) and
| (ws_ship_date_sk - ws_sold_date_sk <= 60) then 1 else 0 end ) as `31-60 days`
| ,sum(case when (ws_ship_date_sk - ws_sold_date_sk > 60) and
| (ws_ship_date_sk - ws_sold_date_sk <= 90) then 1 else 0 end) as `61-90 days`
| ,sum(case when (ws_ship_date_sk - ws_sold_date_sk > 90) and
| (ws_ship_date_sk - ws_sold_date_sk <= 120) then 1 else 0 end) as `91-120 days`
| ,sum(case when (ws_ship_date_sk - ws_sold_date_sk > 120) then 1 else 0 end) as `>120 days`
| from
| web_sales, warehouse, ship_mode, web_site, date_dim
| where
| d_month_seq between 1200 and 1200 + 11
| and ws_ship_date_sk = d_date_sk
| and ws_warehouse_sk = w_warehouse_sk
| and ws_ship_mode_sk = sm_ship_mode_sk
| and ws_web_site_sk = web_site_sk
| group by
| substr(w_warehouse_name,1,20), sm_type, web_name
| order by
| substr(w_warehouse_name,1,20), sm_type, web_name
| limit 100
""".stripMargin),
("q63", """
| select *
| from (select i_manager_id
| ,sum(ss_sales_price) sum_sales
| ,avg(sum(ss_sales_price)) over (partition by i_manager_id) avg_monthly_sales
| from item
| ,store_sales
| ,date_dim
| ,store
| where ss_item_sk = i_item_sk
| and ss_sold_date_sk = d_date_sk
| and ss_store_sk = s_store_sk
| and d_month_seq in (1200,1200+1,1200+2,1200+3,1200+4,1200+5,1200+6,1200+7,
| 1200+8,1200+9,1200+10,1200+11)
| and (( i_category in ('Books','Children','Electronics')
| and i_class in ('personal','portable','refernece','self-help')
| and i_brand in ('scholaramalgamalg #14','scholaramalgamalg #7',
| 'exportiunivamalg #9','scholaramalgamalg #9'))
| or( i_category in ('Women','Music','Men')
| and i_class in ('accessories','classical','fragrances','pants')
| and i_brand in ('amalgimporto #1','edu packscholar #1','exportiimporto #1',
| 'importoamalg #1')))
| group by i_manager_id, d_moy) tmp1
| where case when avg_monthly_sales > 0 then abs (sum_sales - avg_monthly_sales) / avg_monthly_sales else null end > 0.1
| order by i_manager_id
| ,avg_monthly_sales
| ,sum_sales
| limit 100
""".stripMargin),
("q64", """
| with cs_ui as
| (select cs_item_sk
| ,sum(cs_ext_list_price) as sale,sum(cr_refunded_cash+cr_reversed_charge+cr_store_credit) as refund
| from catalog_sales
| ,catalog_returns
| where cs_item_sk = cr_item_sk
| and cs_order_number = cr_order_number
| group by cs_item_sk
| having sum(cs_ext_list_price)>2*sum(cr_refunded_cash+cr_reversed_charge+cr_store_credit)),
| cross_sales as
| (select i_product_name product_name, i_item_sk item_sk, s_store_name store_name, s_zip store_zip,
| ad1.ca_street_number b_street_number, ad1.ca_street_name b_streen_name, ad1.ca_city b_city,
| ad1.ca_zip b_zip, ad2.ca_street_number c_street_number, ad2.ca_street_name c_street_name,
| ad2.ca_city c_city, ad2.ca_zip c_zip, d1.d_year as syear, d2.d_year as fsyear, d3.d_year s2year,
| count(*) cnt, sum(ss_wholesale_cost) s1, sum(ss_list_price) s2, sum(ss_coupon_amt) s3
| FROM store_sales, store_returns, cs_ui, date_dim d1, date_dim d2, date_dim d3,
| store, customer, customer_demographics cd1, customer_demographics cd2,
| promotion, household_demographics hd1, household_demographics hd2,
| customer_address ad1, customer_address ad2, income_band ib1, income_band ib2, item
| WHERE ss_store_sk = s_store_sk AND
| ss_sold_date_sk = d1.d_date_sk AND
| ss_customer_sk = c_customer_sk AND
| ss_cdemo_sk= cd1.cd_demo_sk AND
| ss_hdemo_sk = hd1.hd_demo_sk AND
| ss_addr_sk = ad1.ca_address_sk and
| ss_item_sk = i_item_sk and
| ss_item_sk = sr_item_sk and
| ss_ticket_number = sr_ticket_number and
| ss_item_sk = cs_ui.cs_item_sk and
| c_current_cdemo_sk = cd2.cd_demo_sk AND
| c_current_hdemo_sk = hd2.hd_demo_sk AND
| c_current_addr_sk = ad2.ca_address_sk and
| c_first_sales_date_sk = d2.d_date_sk and
| c_first_shipto_date_sk = d3.d_date_sk and
| ss_promo_sk = p_promo_sk and
| hd1.hd_income_band_sk = ib1.ib_income_band_sk and
| hd2.hd_income_band_sk = ib2.ib_income_band_sk and
| cd1.cd_marital_status <> cd2.cd_marital_status and
| i_color in ('purple','burlywood','indian','spring','floral','medium') and
| i_current_price between 64 and 64 + 10 and
| i_current_price between 64 + 1 and 64 + 15
| group by i_product_name, i_item_sk, s_store_name, s_zip, ad1.ca_street_number,
| ad1.ca_street_name, ad1.ca_city, ad1.ca_zip, ad2.ca_street_number,
| ad2.ca_street_name, ad2.ca_city, ad2.ca_zip, d1.d_year, d2.d_year, d3.d_year
| )
| select cs1.product_name, cs1.store_name, cs1.store_zip, cs1.b_street_number,
| cs1.b_streen_name, cs1.b_city, cs1.b_zip, cs1.c_street_number, cs1.c_street_name,
| cs1.c_city, cs1.c_zip, cs1.syear, cs1.cnt, cs1.s1, cs1.s2, cs1.s3, cs2.s1,
| cs2.s2, cs2.s3, cs2.syear, cs2.cnt
| from cross_sales cs1,cross_sales cs2
| where cs1.item_sk=cs2.item_sk and
| cs1.syear = 1999 and
| cs2.syear = 1999 + 1 and
| cs2.cnt <= cs1.cnt and
| cs1.store_name = cs2.store_name and
| cs1.store_zip = cs2.store_zip
| order by cs1.product_name, cs1.store_name, cs2.cnt
""".stripMargin),
("q65", """
| select
| s_store_name, i_item_desc, sc.revenue, i_current_price, i_wholesale_cost, i_brand
| from store, item,
| (select ss_store_sk, avg(revenue) as ave
| from
| (select ss_store_sk, ss_item_sk,
| sum(ss_sales_price) as revenue
| from store_sales, date_dim
| where ss_sold_date_sk = d_date_sk and d_month_seq between 1176 and 1176+11
| group by ss_store_sk, ss_item_sk) sa
| group by ss_store_sk) sb,
| (select ss_store_sk, ss_item_sk, sum(ss_sales_price) as revenue
| from store_sales, date_dim
| where ss_sold_date_sk = d_date_sk and d_month_seq between 1176 and 1176+11
| group by ss_store_sk, ss_item_sk) sc
| where sb.ss_store_sk = sc.ss_store_sk and
| sc.revenue <= 0.1 * sb.ave and
| s_store_sk = sc.ss_store_sk and
| i_item_sk = sc.ss_item_sk
| order by s_store_name, i_item_desc
| limit 100
""".stripMargin),
// Modifications: "||" -> concat
("q66", """
| select w_warehouse_name, w_warehouse_sq_ft, w_city, w_county, w_state, w_country,
| ship_carriers, year
| ,sum(jan_sales) as jan_sales
| ,sum(feb_sales) as feb_sales
| ,sum(mar_sales) as mar_sales
| ,sum(apr_sales) as apr_sales
| ,sum(may_sales) as may_sales
| ,sum(jun_sales) as jun_sales
| ,sum(jul_sales) as jul_sales
| ,sum(aug_sales) as aug_sales
| ,sum(sep_sales) as sep_sales
| ,sum(oct_sales) as oct_sales
| ,sum(nov_sales) as nov_sales
| ,sum(dec_sales) as dec_sales
| ,sum(jan_sales/w_warehouse_sq_ft) as jan_sales_per_sq_foot
| ,sum(feb_sales/w_warehouse_sq_ft) as feb_sales_per_sq_foot
| ,sum(mar_sales/w_warehouse_sq_ft) as mar_sales_per_sq_foot
| ,sum(apr_sales/w_warehouse_sq_ft) as apr_sales_per_sq_foot
| ,sum(may_sales/w_warehouse_sq_ft) as may_sales_per_sq_foot
| ,sum(jun_sales/w_warehouse_sq_ft) as jun_sales_per_sq_foot
| ,sum(jul_sales/w_warehouse_sq_ft) as jul_sales_per_sq_foot
| ,sum(aug_sales/w_warehouse_sq_ft) as aug_sales_per_sq_foot
| ,sum(sep_sales/w_warehouse_sq_ft) as sep_sales_per_sq_foot
| ,sum(oct_sales/w_warehouse_sq_ft) as oct_sales_per_sq_foot
| ,sum(nov_sales/w_warehouse_sq_ft) as nov_sales_per_sq_foot
| ,sum(dec_sales/w_warehouse_sq_ft) as dec_sales_per_sq_foot
| ,sum(jan_net) as jan_net
| ,sum(feb_net) as feb_net
| ,sum(mar_net) as mar_net
| ,sum(apr_net) as apr_net
| ,sum(may_net) as may_net
| ,sum(jun_net) as jun_net
| ,sum(jul_net) as jul_net
| ,sum(aug_net) as aug_net
| ,sum(sep_net) as sep_net
| ,sum(oct_net) as oct_net
| ,sum(nov_net) as nov_net
| ,sum(dec_net) as dec_net
| from (
| (select
| w_warehouse_name, w_warehouse_sq_ft, w_city, w_county, w_state, w_country
| ,concat('DHL', ',', 'BARIAN') as ship_carriers
| ,d_year as year
| ,sum(case when d_moy = 1 then ws_ext_sales_price * ws_quantity else 0 end) as jan_sales
| ,sum(case when d_moy = 2 then ws_ext_sales_price * ws_quantity else 0 end) as feb_sales
| ,sum(case when d_moy = 3 then ws_ext_sales_price * ws_quantity else 0 end) as mar_sales
| ,sum(case when d_moy = 4 then ws_ext_sales_price * ws_quantity else 0 end) as apr_sales
| ,sum(case when d_moy = 5 then ws_ext_sales_price * ws_quantity else 0 end) as may_sales
| ,sum(case when d_moy = 6 then ws_ext_sales_price * ws_quantity else 0 end) as jun_sales
| ,sum(case when d_moy = 7 then ws_ext_sales_price * ws_quantity else 0 end) as jul_sales
| ,sum(case when d_moy = 8 then ws_ext_sales_price * ws_quantity else 0 end) as aug_sales
| ,sum(case when d_moy = 9 then ws_ext_sales_price * ws_quantity else 0 end) as sep_sales
| ,sum(case when d_moy = 10 then ws_ext_sales_price * ws_quantity else 0 end) as oct_sales
| ,sum(case when d_moy = 11 then ws_ext_sales_price * ws_quantity else 0 end) as nov_sales
| ,sum(case when d_moy = 12 then ws_ext_sales_price * ws_quantity else 0 end) as dec_sales
| ,sum(case when d_moy = 1 then ws_net_paid * ws_quantity else 0 end) as jan_net
| ,sum(case when d_moy = 2 then ws_net_paid * ws_quantity else 0 end) as feb_net
| ,sum(case when d_moy = 3 then ws_net_paid * ws_quantity else 0 end) as mar_net
| ,sum(case when d_moy = 4 then ws_net_paid * ws_quantity else 0 end) as apr_net
| ,sum(case when d_moy = 5 then ws_net_paid * ws_quantity else 0 end) as may_net
| ,sum(case when d_moy = 6 then ws_net_paid * ws_quantity else 0 end) as jun_net
| ,sum(case when d_moy = 7 then ws_net_paid * ws_quantity else 0 end) as jul_net
| ,sum(case when d_moy = 8 then ws_net_paid * ws_quantity else 0 end) as aug_net
| ,sum(case when d_moy = 9 then ws_net_paid * ws_quantity else 0 end) as sep_net
| ,sum(case when d_moy = 10 then ws_net_paid * ws_quantity else 0 end) as oct_net
| ,sum(case when d_moy = 11 then ws_net_paid * ws_quantity else 0 end) as nov_net
| ,sum(case when d_moy = 12 then ws_net_paid * ws_quantity else 0 end) as dec_net
| from
| web_sales, warehouse, date_dim, time_dim, ship_mode
| where
| ws_warehouse_sk = w_warehouse_sk
| and ws_sold_date_sk = d_date_sk
| and ws_sold_time_sk = t_time_sk
| and ws_ship_mode_sk = sm_ship_mode_sk
| and d_year = 2001
| and t_time between 30838 and 30838+28800
| and sm_carrier in ('DHL','BARIAN')
| group by
| w_warehouse_name, w_warehouse_sq_ft, w_city, w_county, w_state, w_country, d_year)
| union all
| (select w_warehouse_name, w_warehouse_sq_ft, w_city, w_county, w_state, w_country
| ,concat('DHL', ',', 'BARIAN') as ship_carriers
| ,d_year as year
| ,sum(case when d_moy = 1 then cs_sales_price * cs_quantity else 0 end) as jan_sales
| ,sum(case when d_moy = 2 then cs_sales_price * cs_quantity else 0 end) as feb_sales
| ,sum(case when d_moy = 3 then cs_sales_price * cs_quantity else 0 end) as mar_sales
| ,sum(case when d_moy = 4 then cs_sales_price * cs_quantity else 0 end) as apr_sales
| ,sum(case when d_moy = 5 then cs_sales_price * cs_quantity else 0 end) as may_sales
| ,sum(case when d_moy = 6 then cs_sales_price * cs_quantity else 0 end) as jun_sales
| ,sum(case when d_moy = 7 then cs_sales_price * cs_quantity else 0 end) as jul_sales
| ,sum(case when d_moy = 8 then cs_sales_price * cs_quantity else 0 end) as aug_sales
| ,sum(case when d_moy = 9 then cs_sales_price * cs_quantity else 0 end) as sep_sales
| ,sum(case when d_moy = 10 then cs_sales_price * cs_quantity else 0 end) as oct_sales
| ,sum(case when d_moy = 11 then cs_sales_price * cs_quantity else 0 end) as nov_sales
| ,sum(case when d_moy = 12 then cs_sales_price * cs_quantity else 0 end) as dec_sales
| ,sum(case when d_moy = 1 then cs_net_paid_inc_tax * cs_quantity else 0 end) as jan_net
| ,sum(case when d_moy = 2 then cs_net_paid_inc_tax * cs_quantity else 0 end) as feb_net
| ,sum(case when d_moy = 3 then cs_net_paid_inc_tax * cs_quantity else 0 end) as mar_net
| ,sum(case when d_moy = 4 then cs_net_paid_inc_tax * cs_quantity else 0 end) as apr_net
| ,sum(case when d_moy = 5 then cs_net_paid_inc_tax * cs_quantity else 0 end) as may_net
| ,sum(case when d_moy = 6 then cs_net_paid_inc_tax * cs_quantity else 0 end) as jun_net
| ,sum(case when d_moy = 7 then cs_net_paid_inc_tax * cs_quantity else 0 end) as jul_net
| ,sum(case when d_moy = 8 then cs_net_paid_inc_tax * cs_quantity else 0 end) as aug_net
| ,sum(case when d_moy = 9 then cs_net_paid_inc_tax * cs_quantity else 0 end) as sep_net
| ,sum(case when d_moy = 10 then cs_net_paid_inc_tax * cs_quantity else 0 end) as oct_net
| ,sum(case when d_moy = 11 then cs_net_paid_inc_tax * cs_quantity else 0 end) as nov_net
| ,sum(case when d_moy = 12 then cs_net_paid_inc_tax * cs_quantity else 0 end) as dec_net
| from
| catalog_sales, warehouse, date_dim, time_dim, ship_mode
| where
| cs_warehouse_sk = w_warehouse_sk
| and cs_sold_date_sk = d_date_sk
| and cs_sold_time_sk = t_time_sk
| and cs_ship_mode_sk = sm_ship_mode_sk
| and d_year = 2001
| and t_time between 30838 AND 30838+28800
| and sm_carrier in ('DHL','BARIAN')
| group by
| w_warehouse_name, w_warehouse_sq_ft, w_city, w_county, w_state, w_country, d_year
| )
| ) x
| group by
| w_warehouse_name, w_warehouse_sq_ft, w_city, w_county, w_state, w_country,
| ship_carriers, year
| order by w_warehouse_name
| limit 100
""".stripMargin),
("q67", """
| select * from
| (select i_category, i_class, i_brand, i_product_name, d_year, d_qoy, d_moy, s_store_id,
| sumsales, rank() over (partition by i_category order by sumsales desc) rk
| from
| (select i_category, i_class, i_brand, i_product_name, d_year, d_qoy, d_moy,
| s_store_id, sum(coalesce(ss_sales_price*ss_quantity,0)) sumsales
| from store_sales, date_dim, store, item
| where ss_sold_date_sk=d_date_sk
| and ss_item_sk=i_item_sk
| and ss_store_sk = s_store_sk
| and d_month_seq between 1200 and 1200+11
| group by rollup(i_category, i_class, i_brand, i_product_name, d_year, d_qoy,
| d_moy,s_store_id))dw1) dw2
| where rk <= 100
| order by
| i_category, i_class, i_brand, i_product_name, d_year,
| d_qoy, d_moy, s_store_id, sumsales, rk
| limit 100
""".stripMargin),
("q68", """
| select
| c_last_name, c_first_name, ca_city, bought_city, ss_ticket_number, extended_price,
| extended_tax, list_price
| from (select
| ss_ticket_number, ss_customer_sk, ca_city bought_city,
| sum(ss_ext_sales_price) extended_price,
| sum(ss_ext_list_price) list_price,
| sum(ss_ext_tax) extended_tax
| from store_sales, date_dim, store, household_demographics, customer_address
| where store_sales.ss_sold_date_sk = date_dim.d_date_sk
| and store_sales.ss_store_sk = store.s_store_sk
| and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
| and store_sales.ss_addr_sk = customer_address.ca_address_sk
| and date_dim.d_dom between 1 and 2
| and (household_demographics.hd_dep_count = 4 or
| household_demographics.hd_vehicle_count = 3)
| and date_dim.d_year in (1999,1999+1,1999+2)
| and store.s_city in ('Midway','Fairview')
| group by ss_ticket_number, ss_customer_sk, ss_addr_sk,ca_city) dn,
| customer,
| customer_address current_addr
| where ss_customer_sk = c_customer_sk
| and customer.c_current_addr_sk = current_addr.ca_address_sk
| and current_addr.ca_city <> bought_city
| order by c_last_name, ss_ticket_number
| limit 100
""".stripMargin),
("q69", """
| select
| cd_gender, cd_marital_status, cd_education_status, count(*) cnt1,
| cd_purchase_estimate, count(*) cnt2, cd_credit_rating, count(*) cnt3
| from
| customer c,customer_address ca,customer_demographics
| where
| c.c_current_addr_sk = ca.ca_address_sk and
| ca_state in ('KY', 'GA', 'NM') and
| cd_demo_sk = c.c_current_cdemo_sk and
| exists (select * from store_sales, date_dim
| where c.c_customer_sk = ss_customer_sk and
| ss_sold_date_sk = d_date_sk and
| d_year = 2001 and
| d_moy between 4 and 4+2) and
| (not exists (select * from web_sales, date_dim
| where c.c_customer_sk = ws_bill_customer_sk and
| ws_sold_date_sk = d_date_sk and
| d_year = 2001 and
| d_moy between 4 and 4+2) and
| not exists (select * from catalog_sales, date_dim
| where c.c_customer_sk = cs_ship_customer_sk and
| cs_sold_date_sk = d_date_sk and
| d_year = 2001 and
| d_moy between 4 and 4+2))
| group by cd_gender, cd_marital_status, cd_education_status,
| cd_purchase_estimate, cd_credit_rating
| order by cd_gender, cd_marital_status, cd_education_status,
| cd_purchase_estimate, cd_credit_rating
| limit 100
""".stripMargin),
("q70", """
| select
| sum(ss_net_profit) as total_sum, s_state, s_county
| ,grouping(s_state)+grouping(s_county) as lochierarchy
| ,rank() over (
| partition by grouping(s_state)+grouping(s_county),
| case when grouping(s_county) = 0 then s_state end
| order by sum(ss_net_profit) desc) as rank_within_parent
| from
| store_sales, date_dim d1, store
| where
| d1.d_month_seq between 1200 and 1200+11
| and d1.d_date_sk = ss_sold_date_sk
| and s_store_sk = ss_store_sk
| and s_state in
| (select s_state from
| (select s_state as s_state,
| rank() over ( partition by s_state order by sum(ss_net_profit) desc) as ranking
| from store_sales, store, date_dim
| where d_month_seq between 1200 and 1200+11
| and d_date_sk = ss_sold_date_sk
| and s_store_sk = ss_store_sk
| group by s_state) tmp1
| where ranking <= 5)
| group by rollup(s_state,s_county)
| order by
| lochierarchy desc
| ,case when lochierarchy = 0 then s_state end
| ,rank_within_parent
| limit 100
""".stripMargin),
("q71", """
| select i_brand_id brand_id, i_brand brand,t_hour,t_minute,
| sum(ext_price) ext_price
| from item,
| (select
| ws_ext_sales_price as ext_price,
| ws_sold_date_sk as sold_date_sk,
| ws_item_sk as sold_item_sk,
| ws_sold_time_sk as time_sk
| from web_sales, date_dim
| where d_date_sk = ws_sold_date_sk
| and d_moy=11
| and d_year=1999
| union all
| select
| cs_ext_sales_price as ext_price,
| cs_sold_date_sk as sold_date_sk,
| cs_item_sk as sold_item_sk,
| cs_sold_time_sk as time_sk
| from catalog_sales, date_dim
| where d_date_sk = cs_sold_date_sk
| and d_moy=11
| and d_year=1999
| union all
| select
| ss_ext_sales_price as ext_price,
| ss_sold_date_sk as sold_date_sk,
| ss_item_sk as sold_item_sk,
| ss_sold_time_sk as time_sk
| from store_sales,date_dim
| where d_date_sk = ss_sold_date_sk
| and d_moy=11
| and d_year=1999
| ) as tmp, time_dim
| where
| sold_item_sk = i_item_sk
| and i_manager_id=1
| and time_sk = t_time_sk
| and (t_meal_time = 'breakfast' or t_meal_time = 'dinner')
| group by i_brand, i_brand_id,t_hour,t_minute
| order by ext_price desc, brand_id
""".stripMargin),
// Modifications: "+ days" -> date_add
("q72", """
| select i_item_desc
| ,w_warehouse_name
| ,d1.d_week_seq
| ,count(case when p_promo_sk is null then 1 else 0 end) no_promo
| ,count(case when p_promo_sk is not null then 1 else 0 end) promo
| ,count(*) total_cnt
| from catalog_sales
| join inventory on (cs_item_sk = inv_item_sk)
| join warehouse on (w_warehouse_sk=inv_warehouse_sk)
| join item on (i_item_sk = cs_item_sk)
| join customer_demographics on (cs_bill_cdemo_sk = cd_demo_sk)
| join household_demographics on (cs_bill_hdemo_sk = hd_demo_sk)
| join date_dim d1 on (cs_sold_date_sk = d1.d_date_sk)
| join date_dim d2 on (inv_date_sk = d2.d_date_sk)
| join date_dim d3 on (cs_ship_date_sk = d3.d_date_sk)
| left outer join promotion on (cs_promo_sk=p_promo_sk)
| left outer join catalog_returns on (cr_item_sk = cs_item_sk and cr_order_number = cs_order_number)
| where d1.d_week_seq = d2.d_week_seq
| and inv_quantity_on_hand < cs_quantity
| and d3.d_date > (cast(d1.d_date AS DATE) + interval 5 days)
| and hd_buy_potential = '>10000'
| and d1.d_year = 1999
| and hd_buy_potential = '>10000'
| and cd_marital_status = 'D'
| and d1.d_year = 1999
| group by i_item_desc,w_warehouse_name,d1.d_week_seq
| order by total_cnt desc, i_item_desc, w_warehouse_name, d_week_seq
| limit 100
""".stripMargin),
("q73", """
| select
| c_last_name, c_first_name, c_salutation, c_preferred_cust_flag,
| ss_ticket_number, cnt from
| (select ss_ticket_number, ss_customer_sk, count(*) cnt
| from store_sales,date_dim,store,household_demographics
| where store_sales.ss_sold_date_sk = date_dim.d_date_sk
| and store_sales.ss_store_sk = store.s_store_sk
| and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
| and date_dim.d_dom between 1 and 2
| and (household_demographics.hd_buy_potential = '>10000' or
| household_demographics.hd_buy_potential = 'unknown')
| and household_demographics.hd_vehicle_count > 0
| and case when household_demographics.hd_vehicle_count > 0 then
| household_demographics.hd_dep_count/ household_demographics.hd_vehicle_count else null end > 1
| and date_dim.d_year in (1999,1999+1,1999+2)
| and store.s_county in ('Williamson County','Franklin Parish','Bronx County','Orange County')
| group by ss_ticket_number,ss_customer_sk) dj,customer
| where ss_customer_sk = c_customer_sk
| and cnt between 1 and 5
| order by cnt desc
""".stripMargin),
("q74", """
| with year_total as (
| select
| c_customer_id customer_id, c_first_name customer_first_name,
| c_last_name customer_last_name, d_year as year,
| sum(ss_net_paid) year_total, 's' sale_type
| from
| customer, store_sales, date_dim
| where c_customer_sk = ss_customer_sk
| and ss_sold_date_sk = d_date_sk
| and d_year in (2001,2001+1)
| group by
| c_customer_id, c_first_name, c_last_name, d_year
| union all
| select
| c_customer_id customer_id, c_first_name customer_first_name,
| c_last_name customer_last_name, d_year as year,
| sum(ws_net_paid) year_total, 'w' sale_type
| from
| customer, web_sales, date_dim
| where c_customer_sk = ws_bill_customer_sk
| and ws_sold_date_sk = d_date_sk
| and d_year in (2001,2001+1)
| group by
| c_customer_id, c_first_name, c_last_name, d_year)
| select
| t_s_secyear.customer_id, t_s_secyear.customer_first_name, t_s_secyear.customer_last_name
| from
| year_total t_s_firstyear, year_total t_s_secyear,
| year_total t_w_firstyear, year_total t_w_secyear
| where t_s_secyear.customer_id = t_s_firstyear.customer_id
| and t_s_firstyear.customer_id = t_w_secyear.customer_id
| and t_s_firstyear.customer_id = t_w_firstyear.customer_id
| and t_s_firstyear.sale_type = 's'
| and t_w_firstyear.sale_type = 'w'
| and t_s_secyear.sale_type = 's'
| and t_w_secyear.sale_type = 'w'
| and t_s_firstyear.year = 2001
| and t_s_secyear.year = 2001+1
| and t_w_firstyear.year = 2001
| and t_w_secyear.year = 2001+1
| and t_s_firstyear.year_total > 0
| and t_w_firstyear.year_total > 0
| and case when t_w_firstyear.year_total > 0 then t_w_secyear.year_total / t_w_firstyear.year_total else null end
| > case when t_s_firstyear.year_total > 0 then t_s_secyear.year_total / t_s_firstyear.year_total else null end
| order by 1, 1, 1
| limit 100
""".stripMargin),
("q75", """
| WITH all_sales AS (
| SELECT
| d_year, i_brand_id, i_class_id, i_category_id, i_manufact_id,
| SUM(sales_cnt) AS sales_cnt, SUM(sales_amt) AS sales_amt
| FROM (
| SELECT
| d_year, i_brand_id, i_class_id, i_category_id, i_manufact_id,
| cs_quantity - COALESCE(cr_return_quantity,0) AS sales_cnt,
| cs_ext_sales_price - COALESCE(cr_return_amount,0.0) AS sales_amt
| FROM catalog_sales
| JOIN item ON i_item_sk=cs_item_sk
| JOIN date_dim ON d_date_sk=cs_sold_date_sk
| LEFT JOIN catalog_returns ON (cs_order_number=cr_order_number
| AND cs_item_sk=cr_item_sk)
| WHERE i_category='Books'
| UNION
| SELECT
| d_year, i_brand_id, i_class_id, i_category_id, i_manufact_id,
| ss_quantity - COALESCE(sr_return_quantity,0) AS sales_cnt,
| ss_ext_sales_price - COALESCE(sr_return_amt,0.0) AS sales_amt
| FROM store_sales
| JOIN item ON i_item_sk=ss_item_sk
| JOIN date_dim ON d_date_sk=ss_sold_date_sk
| LEFT JOIN store_returns ON (ss_ticket_number=sr_ticket_number
| AND ss_item_sk=sr_item_sk)
| WHERE i_category='Books'
| UNION
| SELECT
| d_year, i_brand_id, i_class_id, i_category_id, i_manufact_id,
| ws_quantity - COALESCE(wr_return_quantity,0) AS sales_cnt,
| ws_ext_sales_price - COALESCE(wr_return_amt,0.0) AS sales_amt
| FROM web_sales
| JOIN item ON i_item_sk=ws_item_sk
| JOIN date_dim ON d_date_sk=ws_sold_date_sk
| LEFT JOIN web_returns ON (ws_order_number=wr_order_number
| AND ws_item_sk=wr_item_sk)
| WHERE i_category='Books') sales_detail
| GROUP BY d_year, i_brand_id, i_class_id, i_category_id, i_manufact_id)
| SELECT
| prev_yr.d_year AS prev_year, curr_yr.d_year AS year, curr_yr.i_brand_id,
| curr_yr.i_class_id, curr_yr.i_category_id, curr_yr.i_manufact_id,
| prev_yr.sales_cnt AS prev_yr_cnt, curr_yr.sales_cnt AS curr_yr_cnt,
| curr_yr.sales_cnt-prev_yr.sales_cnt AS sales_cnt_diff,
| curr_yr.sales_amt-prev_yr.sales_amt AS sales_amt_diff
| FROM all_sales curr_yr, all_sales prev_yr
| WHERE curr_yr.i_brand_id=prev_yr.i_brand_id
| AND curr_yr.i_class_id=prev_yr.i_class_id
| AND curr_yr.i_category_id=prev_yr.i_category_id
| AND curr_yr.i_manufact_id=prev_yr.i_manufact_id
| AND curr_yr.d_year=2002
| AND prev_yr.d_year=2002-1
| AND CAST(curr_yr.sales_cnt AS DECIMAL(17,2))/CAST(prev_yr.sales_cnt AS DECIMAL(17,2))<0.9
| ORDER BY sales_cnt_diff
| LIMIT 100
""".stripMargin),
("q76", """
| SELECT
| channel, col_name, d_year, d_qoy, i_category, COUNT(*) sales_cnt,
| SUM(ext_sales_price) sales_amt
| FROM(
| SELECT
| 'store' as channel, ss_store_sk col_name, d_year, d_qoy, i_category,
| ss_ext_sales_price ext_sales_price
| FROM store_sales, item, date_dim
| WHERE ss_store_sk IS NULL
| AND ss_sold_date_sk=d_date_sk
| AND ss_item_sk=i_item_sk
| UNION ALL
| SELECT
| 'web' as channel, ws_ship_customer_sk col_name, d_year, d_qoy, i_category,
| ws_ext_sales_price ext_sales_price
| FROM web_sales, item, date_dim
| WHERE ws_ship_customer_sk IS NULL
| AND ws_sold_date_sk=d_date_sk
| AND ws_item_sk=i_item_sk
| UNION ALL
| SELECT
| 'catalog' as channel, cs_ship_addr_sk col_name, d_year, d_qoy, i_category,
| cs_ext_sales_price ext_sales_price
| FROM catalog_sales, item, date_dim
| WHERE cs_ship_addr_sk IS NULL
| AND cs_sold_date_sk=d_date_sk
| AND cs_item_sk=i_item_sk) foo
| GROUP BY channel, col_name, d_year, d_qoy, i_category
| ORDER BY channel, col_name, d_year, d_qoy, i_category
| limit 100
""".stripMargin),
// Modifications: "+ days" -> date_add
("q77", """
| with ss as
| (select s_store_sk, sum(ss_ext_sales_price) as sales, sum(ss_net_profit) as profit
| from store_sales, date_dim, store
| where ss_sold_date_sk = d_date_sk
| and d_date between cast('2000-08-03' as date) and
| (cast('2000-08-03' as date) + interval 30 days)
| and ss_store_sk = s_store_sk
| group by s_store_sk),
| sr as
| (select s_store_sk, sum(sr_return_amt) as returns, sum(sr_net_loss) as profit_loss
| from store_returns, date_dim, store
| where sr_returned_date_sk = d_date_sk
| and d_date between cast('2000-08-03' as date) and
| (cast('2000-08-03' as date) + interval 30 days)
| and sr_store_sk = s_store_sk
| group by s_store_sk),
| cs as
| (select cs_call_center_sk, sum(cs_ext_sales_price) as sales, sum(cs_net_profit) as profit
| from catalog_sales, date_dim
| where cs_sold_date_sk = d_date_sk
| and d_date between cast('2000-08-03' as date) and
| (cast('2000-08-03' as date) + interval 30 days)
| group by cs_call_center_sk),
| cr as
| (select sum(cr_return_amount) as returns, sum(cr_net_loss) as profit_loss
| from catalog_returns, date_dim
| where cr_returned_date_sk = d_date_sk
| and d_date between cast('2000-08-03' as date) and
|                  (cast('2000-08-03' as date) + interval 30 days)),
| ws as
| (select wp_web_page_sk, sum(ws_ext_sales_price) as sales, sum(ws_net_profit) as profit
| from web_sales, date_dim, web_page
| where ws_sold_date_sk = d_date_sk
| and d_date between cast('2000-08-03' as date) and
| (cast('2000-08-03' as date) + interval 30 days)
| and ws_web_page_sk = wp_web_page_sk
| group by wp_web_page_sk),
| wr as
| (select wp_web_page_sk, sum(wr_return_amt) as returns, sum(wr_net_loss) as profit_loss
| from web_returns, date_dim, web_page
| where wr_returned_date_sk = d_date_sk
| and d_date between cast('2000-08-03' as date) and
| (cast('2000-08-03' as date) + interval 30 days)
| and wr_web_page_sk = wp_web_page_sk
| group by wp_web_page_sk)
| select channel, id, sum(sales) as sales, sum(returns) as returns, sum(profit) as profit
| from
| (select
| 'store channel' as channel, ss.s_store_sk as id, sales,
| coalesce(returns, 0) as returns, (profit - coalesce(profit_loss,0)) as profit
| from ss left join sr
| on ss.s_store_sk = sr.s_store_sk
| union all
| select
| 'catalog channel' as channel, cs_call_center_sk as id, sales,
| returns, (profit - profit_loss) as profit
| from cs, cr
| union all
| select
| 'web channel' as channel, ws.wp_web_page_sk as id, sales,
| coalesce(returns, 0) returns, (profit - coalesce(profit_loss,0)) as profit
| from ws left join wr
| on ws.wp_web_page_sk = wr.wp_web_page_sk
| ) x
| group by rollup(channel, id)
| order by channel, id
| limit 100
""".stripMargin),
("q78", """
| with ws as
| (select d_year AS ws_sold_year, ws_item_sk,
| ws_bill_customer_sk ws_customer_sk,
| sum(ws_quantity) ws_qty,
| sum(ws_wholesale_cost) ws_wc,
| sum(ws_sales_price) ws_sp
| from web_sales
| left join web_returns on wr_order_number=ws_order_number and ws_item_sk=wr_item_sk
| join date_dim on ws_sold_date_sk = d_date_sk
| where wr_order_number is null
| group by d_year, ws_item_sk, ws_bill_customer_sk
| ),
| cs as
| (select d_year AS cs_sold_year, cs_item_sk,
| cs_bill_customer_sk cs_customer_sk,
| sum(cs_quantity) cs_qty,
| sum(cs_wholesale_cost) cs_wc,
| sum(cs_sales_price) cs_sp
| from catalog_sales
| left join catalog_returns on cr_order_number=cs_order_number and cs_item_sk=cr_item_sk
| join date_dim on cs_sold_date_sk = d_date_sk
| where cr_order_number is null
| group by d_year, cs_item_sk, cs_bill_customer_sk
| ),
| ss as
| (select d_year AS ss_sold_year, ss_item_sk,
| ss_customer_sk,
| sum(ss_quantity) ss_qty,
| sum(ss_wholesale_cost) ss_wc,
| sum(ss_sales_price) ss_sp
| from store_sales
| left join store_returns on sr_ticket_number=ss_ticket_number and ss_item_sk=sr_item_sk
| join date_dim on ss_sold_date_sk = d_date_sk
| where sr_ticket_number is null
| group by d_year, ss_item_sk, ss_customer_sk
| )
| select
| round(ss_qty/(coalesce(ws_qty+cs_qty,1)),2) ratio,
| ss_qty store_qty, ss_wc store_wholesale_cost, ss_sp store_sales_price,
| coalesce(ws_qty,0)+coalesce(cs_qty,0) other_chan_qty,
| coalesce(ws_wc,0)+coalesce(cs_wc,0) other_chan_wholesale_cost,
| coalesce(ws_sp,0)+coalesce(cs_sp,0) other_chan_sales_price
| from ss
| left join ws on (ws_sold_year=ss_sold_year and ws_item_sk=ss_item_sk and ws_customer_sk=ss_customer_sk)
| left join cs on (cs_sold_year=ss_sold_year and ss_item_sk=cs_item_sk and cs_customer_sk=ss_customer_sk)
| where coalesce(ws_qty,0)>0 and coalesce(cs_qty, 0)>0 and ss_sold_year=2000
| order by
| ratio,
| ss_qty desc, ss_wc desc, ss_sp desc,
| other_chan_qty,
| other_chan_wholesale_cost,
| other_chan_sales_price,
| round(ss_qty/(coalesce(ws_qty+cs_qty,1)),2)
| limit 100
""".stripMargin),
("q79", """
| select
| c_last_name,c_first_name,substr(s_city,1,30),ss_ticket_number,amt,profit
| from
| (select ss_ticket_number
| ,ss_customer_sk
| ,store.s_city
| ,sum(ss_coupon_amt) amt
| ,sum(ss_net_profit) profit
| from store_sales,date_dim,store,household_demographics
| where store_sales.ss_sold_date_sk = date_dim.d_date_sk
| and store_sales.ss_store_sk = store.s_store_sk
| and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
| and (household_demographics.hd_dep_count = 6 or
| household_demographics.hd_vehicle_count > 2)
| and date_dim.d_dow = 1
| and date_dim.d_year in (1999,1999+1,1999+2)
| and store.s_number_employees between 200 and 295
| group by ss_ticket_number,ss_customer_sk,ss_addr_sk,store.s_city) ms,customer
| where ss_customer_sk = c_customer_sk
| order by c_last_name,c_first_name,substr(s_city,1,30), profit
| limit 100
""".stripMargin),
// Modifications: "+ days" -> date_add
// Modifications: "||" -> "concat"
("q80", """
| with ssr as
| (select s_store_id as store_id,
| sum(ss_ext_sales_price) as sales,
| sum(coalesce(sr_return_amt, 0)) as returns,
| sum(ss_net_profit - coalesce(sr_net_loss, 0)) as profit
| from store_sales left outer join store_returns on
| (ss_item_sk = sr_item_sk and ss_ticket_number = sr_ticket_number),
| date_dim, store, item, promotion
| where ss_sold_date_sk = d_date_sk
| and d_date between cast('2000-08-23' as date)
| and (cast('2000-08-23' as date) + interval 30 days)
| and ss_store_sk = s_store_sk
| and ss_item_sk = i_item_sk
| and i_current_price > 50
| and ss_promo_sk = p_promo_sk
| and p_channel_tv = 'N'
| group by s_store_id),
| csr as
| (select cp_catalog_page_id as catalog_page_id,
| sum(cs_ext_sales_price) as sales,
| sum(coalesce(cr_return_amount, 0)) as returns,
| sum(cs_net_profit - coalesce(cr_net_loss, 0)) as profit
| from catalog_sales left outer join catalog_returns on
| (cs_item_sk = cr_item_sk and cs_order_number = cr_order_number),
| date_dim, catalog_page, item, promotion
| where cs_sold_date_sk = d_date_sk
| and d_date between cast('2000-08-23' as date)
| and (cast('2000-08-23' as date) + interval 30 days)
| and cs_catalog_page_sk = cp_catalog_page_sk
| and cs_item_sk = i_item_sk
| and i_current_price > 50
| and cs_promo_sk = p_promo_sk
| and p_channel_tv = 'N'
| group by cp_catalog_page_id),
| wsr as
| (select web_site_id,
| sum(ws_ext_sales_price) as sales,
| sum(coalesce(wr_return_amt, 0)) as returns,
| sum(ws_net_profit - coalesce(wr_net_loss, 0)) as profit
| from web_sales left outer join web_returns on
| (ws_item_sk = wr_item_sk and ws_order_number = wr_order_number),
| date_dim, web_site, item, promotion
| where ws_sold_date_sk = d_date_sk
| and d_date between cast('2000-08-23' as date)
| and (cast('2000-08-23' as date) + interval 30 days)
| and ws_web_site_sk = web_site_sk
| and ws_item_sk = i_item_sk
| and i_current_price > 50
| and ws_promo_sk = p_promo_sk
| and p_channel_tv = 'N'
| group by web_site_id)
| select channel, id, sum(sales) as sales, sum(returns) as returns, sum(profit) as profit
| from (select
| 'store channel' as channel, concat('store', store_id) as id, sales, returns, profit
| from ssr
| union all
| select
| 'catalog channel' as channel, concat('catalog_page', catalog_page_id) as id,
| sales, returns, profit
| from csr
| union all
| select
| 'web channel' as channel, concat('web_site', web_site_id) as id, sales, returns, profit
| from wsr) x
| group by rollup (channel, id)
| order by channel, id
| limit 100
""".stripMargin),
("q81", """
| with customer_total_return as
| (select
| cr_returning_customer_sk as ctr_customer_sk, ca_state as ctr_state,
| sum(cr_return_amt_inc_tax) as ctr_total_return
| from catalog_returns, date_dim, customer_address
| where cr_returned_date_sk = d_date_sk
| and d_year = 2000
| and cr_returning_addr_sk = ca_address_sk
| group by cr_returning_customer_sk, ca_state )
| select
| c_customer_id,c_salutation,c_first_name,c_last_name,ca_street_number,ca_street_name,
| ca_street_type,ca_suite_number,ca_city,ca_county,ca_state,ca_zip,ca_country,
| ca_gmt_offset,ca_location_type,ctr_total_return
| from customer_total_return ctr1, customer_address, customer
| where ctr1.ctr_total_return > (select avg(ctr_total_return)*1.2
| from customer_total_return ctr2
| where ctr1.ctr_state = ctr2.ctr_state)
| and ca_address_sk = c_current_addr_sk
| and ca_state = 'GA'
| and ctr1.ctr_customer_sk = c_customer_sk
| order by c_customer_id,c_salutation,c_first_name,c_last_name,ca_street_number,ca_street_name
| ,ca_street_type,ca_suite_number,ca_city,ca_county,ca_state,ca_zip,ca_country,ca_gmt_offset
| ,ca_location_type,ctr_total_return
| limit 100
""".stripMargin),
("q82", """
| select i_item_id, i_item_desc, i_current_price
| from item, inventory, date_dim, store_sales
| where i_current_price between 62 and 62+30
| and inv_item_sk = i_item_sk
| and d_date_sk=inv_date_sk
| and d_date between cast('2000-05-25' as date) and (cast('2000-05-25' as date) + interval 60 days)
| and i_manufact_id in (129, 270, 821, 423)
| and inv_quantity_on_hand between 100 and 500
| and ss_item_sk = i_item_sk
| group by i_item_id,i_item_desc,i_current_price
| order by i_item_id
| limit 100
""".stripMargin),
("q83", """
| with sr_items as
| (select i_item_id item_id, sum(sr_return_quantity) sr_item_qty
| from store_returns, item, date_dim
| where sr_item_sk = i_item_sk
| and d_date in (select d_date from date_dim where d_week_seq in
| (select d_week_seq from date_dim where d_date in ('2000-06-30','2000-09-27','2000-11-17')))
| and sr_returned_date_sk = d_date_sk
| group by i_item_id),
| cr_items as
| (select i_item_id item_id, sum(cr_return_quantity) cr_item_qty
| from catalog_returns, item, date_dim
| where cr_item_sk = i_item_sk
| and d_date in (select d_date from date_dim where d_week_seq in
| (select d_week_seq from date_dim where d_date in ('2000-06-30','2000-09-27','2000-11-17')))
| and cr_returned_date_sk = d_date_sk
| group by i_item_id),
| wr_items as
| (select i_item_id item_id, sum(wr_return_quantity) wr_item_qty
| from web_returns, item, date_dim
| where wr_item_sk = i_item_sk and d_date in
| (select d_date from date_dim where d_week_seq in
| (select d_week_seq from date_dim where d_date in ('2000-06-30','2000-09-27','2000-11-17')))
| and wr_returned_date_sk = d_date_sk
| group by i_item_id)
| select sr_items.item_id
| ,sr_item_qty
| ,sr_item_qty/(sr_item_qty+cr_item_qty+wr_item_qty)/3.0 * 100 sr_dev
| ,cr_item_qty
| ,cr_item_qty/(sr_item_qty+cr_item_qty+wr_item_qty)/3.0 * 100 cr_dev
| ,wr_item_qty
| ,wr_item_qty/(sr_item_qty+cr_item_qty+wr_item_qty)/3.0 * 100 wr_dev
| ,(sr_item_qty+cr_item_qty+wr_item_qty)/3.0 average
| from sr_items, cr_items, wr_items
| where sr_items.item_id=cr_items.item_id
| and sr_items.item_id=wr_items.item_id
| order by sr_items.item_id, sr_item_qty
| limit 100
""".stripMargin),
// Modifications: "||" -> concat
("q84", """
| select c_customer_id as customer_id
| ,concat(c_last_name, ', ', c_first_name) as customername
| from customer
| ,customer_address
| ,customer_demographics
| ,household_demographics
| ,income_band
| ,store_returns
| where ca_city = 'Edgewood'
| and c_current_addr_sk = ca_address_sk
| and ib_lower_bound >= 38128
| and ib_upper_bound <= 38128 + 50000
| and ib_income_band_sk = hd_income_band_sk
| and cd_demo_sk = c_current_cdemo_sk
| and hd_demo_sk = c_current_hdemo_sk
| and sr_cdemo_sk = cd_demo_sk
| order by c_customer_id
| limit 100
""".stripMargin),
("q85", """
| select
| substr(r_reason_desc,1,20), avg(ws_quantity), avg(wr_refunded_cash), avg(wr_fee)
| from web_sales, web_returns, web_page, customer_demographics cd1,
| customer_demographics cd2, customer_address, date_dim, reason
| where ws_web_page_sk = wp_web_page_sk
| and ws_item_sk = wr_item_sk
| and ws_order_number = wr_order_number
| and ws_sold_date_sk = d_date_sk and d_year = 2000
| and cd1.cd_demo_sk = wr_refunded_cdemo_sk
| and cd2.cd_demo_sk = wr_returning_cdemo_sk
| and ca_address_sk = wr_refunded_addr_sk
| and r_reason_sk = wr_reason_sk
| and
| (
| (
| cd1.cd_marital_status = 'M'
| and
| cd1.cd_marital_status = cd2.cd_marital_status
| and
| cd1.cd_education_status = 'Advanced Degree'
| and
| cd1.cd_education_status = cd2.cd_education_status
| and
| ws_sales_price between 100.00 and 150.00
| )
| or
| (
| cd1.cd_marital_status = 'S'
| and
| cd1.cd_marital_status = cd2.cd_marital_status
| and
| cd1.cd_education_status = 'College'
| and
| cd1.cd_education_status = cd2.cd_education_status
| and
| ws_sales_price between 50.00 and 100.00
| )
| or
| (
| cd1.cd_marital_status = 'W'
| and
| cd1.cd_marital_status = cd2.cd_marital_status
| and
| cd1.cd_education_status = '2 yr Degree'
| and
| cd1.cd_education_status = cd2.cd_education_status
| and
| ws_sales_price between 150.00 and 200.00
| )
| )
| and
| (
| (
| ca_country = 'United States'
| and
| ca_state in ('IN', 'OH', 'NJ')
| and ws_net_profit between 100 and 200
| )
| or
| (
| ca_country = 'United States'
| and
| ca_state in ('WI', 'CT', 'KY')
| and ws_net_profit between 150 and 300
| )
| or
| (
| ca_country = 'United States'
| and
| ca_state in ('LA', 'IA', 'AR')
| and ws_net_profit between 50 and 250
| )
| )
| group by r_reason_desc
| order by substr(r_reason_desc,1,20)
| ,avg(ws_quantity)
| ,avg(wr_refunded_cash)
| ,avg(wr_fee)
| limit 100
""".stripMargin),
("q86", """
| select sum(ws_net_paid) as total_sum, i_category, i_class,
| grouping(i_category)+grouping(i_class) as lochierarchy,
| rank() over (
| partition by grouping(i_category)+grouping(i_class),
| case when grouping(i_class) = 0 then i_category end
| order by sum(ws_net_paid) desc) as rank_within_parent
| from
| web_sales, date_dim d1, item
| where
| d1.d_month_seq between 1200 and 1200+11
| and d1.d_date_sk = ws_sold_date_sk
| and i_item_sk = ws_item_sk
| group by rollup(i_category,i_class)
| order by
| lochierarchy desc,
| case when lochierarchy = 0 then i_category end,
| rank_within_parent
| limit 100
""".stripMargin),
("q87", """
| select count(*)
| from ((select distinct c_last_name, c_first_name, d_date
| from store_sales, date_dim, customer
| where store_sales.ss_sold_date_sk = date_dim.d_date_sk
| and store_sales.ss_customer_sk = customer.c_customer_sk
| and d_month_seq between 1200 and 1200+11)
| except
| (select distinct c_last_name, c_first_name, d_date
| from catalog_sales, date_dim, customer
| where catalog_sales.cs_sold_date_sk = date_dim.d_date_sk
| and catalog_sales.cs_bill_customer_sk = customer.c_customer_sk
| and d_month_seq between 1200 and 1200+11)
| except
| (select distinct c_last_name, c_first_name, d_date
| from web_sales, date_dim, customer
| where web_sales.ws_sold_date_sk = date_dim.d_date_sk
| and web_sales.ws_bill_customer_sk = customer.c_customer_sk
| and d_month_seq between 1200 and 1200+11)
|) cool_cust
""".stripMargin),
("q88", """
| select *
| from
| (select count(*) h8_30_to_9
| from store_sales, household_demographics , time_dim, store
| where ss_sold_time_sk = time_dim.t_time_sk
| and ss_hdemo_sk = household_demographics.hd_demo_sk
| and ss_store_sk = s_store_sk
| and time_dim.t_hour = 8
| and time_dim.t_minute >= 30
| and ((household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or
| (household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or
| (household_demographics.hd_dep_count = 0 and household_demographics.hd_vehicle_count<=0+2))
| and store.s_store_name = 'ese') s1,
| (select count(*) h9_to_9_30
| from store_sales, household_demographics , time_dim, store
| where ss_sold_time_sk = time_dim.t_time_sk
| and ss_hdemo_sk = household_demographics.hd_demo_sk
| and ss_store_sk = s_store_sk
| and time_dim.t_hour = 9
| and time_dim.t_minute < 30
| and ((household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or
| (household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or
| (household_demographics.hd_dep_count = 0 and household_demographics.hd_vehicle_count<=0+2))
| and store.s_store_name = 'ese') s2,
| (select count(*) h9_30_to_10
| from store_sales, household_demographics , time_dim, store
| where ss_sold_time_sk = time_dim.t_time_sk
| and ss_hdemo_sk = household_demographics.hd_demo_sk
| and ss_store_sk = s_store_sk
| and time_dim.t_hour = 9
| and time_dim.t_minute >= 30
| and ((household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or
| (household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or
| (household_demographics.hd_dep_count = 0 and household_demographics.hd_vehicle_count<=0+2))
| and store.s_store_name = 'ese') s3,
| (select count(*) h10_to_10_30
| from store_sales, household_demographics , time_dim, store
| where ss_sold_time_sk = time_dim.t_time_sk
| and ss_hdemo_sk = household_demographics.hd_demo_sk
| and ss_store_sk = s_store_sk
| and time_dim.t_hour = 10
| and time_dim.t_minute < 30
| and ((household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or
| (household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or
| (household_demographics.hd_dep_count = 0 and household_demographics.hd_vehicle_count<=0+2))
| and store.s_store_name = 'ese') s4,
| (select count(*) h10_30_to_11
| from store_sales, household_demographics , time_dim, store
| where ss_sold_time_sk = time_dim.t_time_sk
| and ss_hdemo_sk = household_demographics.hd_demo_sk
| and ss_store_sk = s_store_sk
| and time_dim.t_hour = 10
| and time_dim.t_minute >= 30
| and ((household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or
| (household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or
| (household_demographics.hd_dep_count = 0 and household_demographics.hd_vehicle_count<=0+2))
| and store.s_store_name = 'ese') s5,
| (select count(*) h11_to_11_30
| from store_sales, household_demographics , time_dim, store
| where ss_sold_time_sk = time_dim.t_time_sk
| and ss_hdemo_sk = household_demographics.hd_demo_sk
| and ss_store_sk = s_store_sk
| and time_dim.t_hour = 11
| and time_dim.t_minute < 30
| and ((household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or
| (household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or
| (household_demographics.hd_dep_count = 0 and household_demographics.hd_vehicle_count<=0+2))
| and store.s_store_name = 'ese') s6,
| (select count(*) h11_30_to_12
| from store_sales, household_demographics , time_dim, store
| where ss_sold_time_sk = time_dim.t_time_sk
| and ss_hdemo_sk = household_demographics.hd_demo_sk
| and ss_store_sk = s_store_sk
| and time_dim.t_hour = 11
| and time_dim.t_minute >= 30
| and ((household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or
| (household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or
| (household_demographics.hd_dep_count = 0 and household_demographics.hd_vehicle_count<=0+2))
| and store.s_store_name = 'ese') s7,
| (select count(*) h12_to_12_30
| from store_sales, household_demographics , time_dim, store
| where ss_sold_time_sk = time_dim.t_time_sk
| and ss_hdemo_sk = household_demographics.hd_demo_sk
| and ss_store_sk = s_store_sk
| and time_dim.t_hour = 12
| and time_dim.t_minute < 30
| and ((household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or
| (household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or
| (household_demographics.hd_dep_count = 0 and household_demographics.hd_vehicle_count<=0+2))
| and store.s_store_name = 'ese') s8
""".stripMargin),
("q89", """
| select *
| from(
| select i_category, i_class, i_brand,
| s_store_name, s_company_name,
| d_moy,
| sum(ss_sales_price) sum_sales,
| avg(sum(ss_sales_price)) over
| (partition by i_category, i_brand, s_store_name, s_company_name)
| avg_monthly_sales
| from item, store_sales, date_dim, store
| where ss_item_sk = i_item_sk and
| ss_sold_date_sk = d_date_sk and
| ss_store_sk = s_store_sk and
| d_year in (1999) and
| ((i_category in ('Books','Electronics','Sports') and
| i_class in ('computers','stereo','football'))
| or (i_category in ('Men','Jewelry','Women') and
| i_class in ('shirts','birdal','dresses')))
| group by i_category, i_class, i_brand,
| s_store_name, s_company_name, d_moy) tmp1
| where case when (avg_monthly_sales <> 0) then (abs(sum_sales - avg_monthly_sales) / avg_monthly_sales) else null end > 0.1
| order by sum_sales - avg_monthly_sales, s_store_name
| limit 100
""".stripMargin),
("q90", """
| select cast(amc as decimal(15,4))/cast(pmc as decimal(15,4)) am_pm_ratio
| from ( select count(*) amc
| from web_sales, household_demographics , time_dim, web_page
| where ws_sold_time_sk = time_dim.t_time_sk
| and ws_ship_hdemo_sk = household_demographics.hd_demo_sk
| and ws_web_page_sk = web_page.wp_web_page_sk
| and time_dim.t_hour between 8 and 8+1
| and household_demographics.hd_dep_count = 6
| and web_page.wp_char_count between 5000 and 5200) at,
| ( select count(*) pmc
| from web_sales, household_demographics , time_dim, web_page
| where ws_sold_time_sk = time_dim.t_time_sk
| and ws_ship_hdemo_sk = household_demographics.hd_demo_sk
| and ws_web_page_sk = web_page.wp_web_page_sk
| and time_dim.t_hour between 19 and 19+1
| and household_demographics.hd_dep_count = 6
| and web_page.wp_char_count between 5000 and 5200) pt
| order by am_pm_ratio
| limit 100
""".stripMargin),
("q91", """
| select
| cc_call_center_id Call_Center, cc_name Call_Center_Name, cc_manager Manager,
| sum(cr_net_loss) Returns_Loss
| from
| call_center, catalog_returns, date_dim, customer, customer_address,
| customer_demographics, household_demographics
| where
| cr_call_center_sk = cc_call_center_sk
| and cr_returned_date_sk = d_date_sk
| and cr_returning_customer_sk = c_customer_sk
| and cd_demo_sk = c_current_cdemo_sk
| and hd_demo_sk = c_current_hdemo_sk
| and ca_address_sk = c_current_addr_sk
| and d_year = 1998
| and d_moy = 11
| and ( (cd_marital_status = 'M' and cd_education_status = 'Unknown')
| or(cd_marital_status = 'W' and cd_education_status = 'Advanced Degree'))
| and hd_buy_potential like 'Unknown%'
| and ca_gmt_offset = -7
| group by cc_call_center_id,cc_name,cc_manager,cd_marital_status,cd_education_status
| order by sum(cr_net_loss) desc
""".stripMargin),
// Modifications: "+ days" -> date_add
// Modifications: " -> `
("q92", """
| select sum(ws_ext_discount_amt) as `Excess Discount Amount`
| from web_sales, item, date_dim
| where i_manufact_id = 350
| and i_item_sk = ws_item_sk
| and d_date between '2000-01-27' and (cast('2000-01-27' as date) + interval 90 days)
| and d_date_sk = ws_sold_date_sk
| and ws_ext_discount_amt >
| (
| SELECT 1.3 * avg(ws_ext_discount_amt)
| FROM web_sales, date_dim
| WHERE ws_item_sk = i_item_sk
| and d_date between '2000-01-27' and (cast('2000-01-27' as date) + interval 90 days)
| and d_date_sk = ws_sold_date_sk
| )
| order by sum(ws_ext_discount_amt)
| limit 100
""".stripMargin),
("q93", """
| select ss_customer_sk, sum(act_sales) sumsales
| from (select
| ss_item_sk, ss_ticket_number, ss_customer_sk,
| case when sr_return_quantity is not null then (ss_quantity-sr_return_quantity)*ss_sales_price
| else (ss_quantity*ss_sales_price) end act_sales
| from store_sales
| left outer join store_returns
| on (sr_item_sk = ss_item_sk and sr_ticket_number = ss_ticket_number),
| reason
| where sr_reason_sk = r_reason_sk and r_reason_desc = 'reason 28') t
| group by ss_customer_sk
| order by sumsales, ss_customer_sk
| limit 100
""".stripMargin),
// Modifications: "+ days" -> date_add
// Modifications: " -> `
("q94", """
| select
| count(distinct ws_order_number) as `order count`
| ,sum(ws_ext_ship_cost) as `total shipping cost`
| ,sum(ws_net_profit) as `total net profit`
| from
| web_sales ws1, date_dim, customer_address, web_site
| where
| d_date between '1999-02-01' and
| (cast('1999-02-01' as date) + interval 60 days)
| and ws1.ws_ship_date_sk = d_date_sk
| and ws1.ws_ship_addr_sk = ca_address_sk
| and ca_state = 'IL'
| and ws1.ws_web_site_sk = web_site_sk
| and web_company_name = 'pri'
| and exists (select *
| from web_sales ws2
| where ws1.ws_order_number = ws2.ws_order_number
| and ws1.ws_warehouse_sk <> ws2.ws_warehouse_sk)
| and not exists(select *
| from web_returns wr1
| where ws1.ws_order_number = wr1.wr_order_number)
| order by count(distinct ws_order_number)
| limit 100
""".stripMargin),
// Modifications: "+ days" -> date_add
("q95", """
| with ws_wh as
| (select ws1.ws_order_number,ws1.ws_warehouse_sk wh1,ws2.ws_warehouse_sk wh2
| from web_sales ws1,web_sales ws2
| where ws1.ws_order_number = ws2.ws_order_number
| and ws1.ws_warehouse_sk <> ws2.ws_warehouse_sk)
| select
| count(distinct ws_order_number) as `order count`
| ,sum(ws_ext_ship_cost) as `total shipping cost`
| ,sum(ws_net_profit) as `total net profit`
| from
| web_sales ws1, date_dim, customer_address, web_site
| where
| d_date between '1999-02-01' and
| (cast('1999-02-01' as date) + interval 60 days)
| and ws1.ws_ship_date_sk = d_date_sk
| and ws1.ws_ship_addr_sk = ca_address_sk
| and ca_state = 'IL'
| and ws1.ws_web_site_sk = web_site_sk
| and web_company_name = 'pri'
| and ws1.ws_order_number in (select ws_order_number
| from ws_wh)
| and ws1.ws_order_number in (select wr_order_number
| from web_returns,ws_wh
| where wr_order_number = ws_wh.ws_order_number)
| order by count(distinct ws_order_number)
| limit 100
""".stripMargin),
("q96", """
| select count(*)
| from store_sales, household_demographics, time_dim, store
| where ss_sold_time_sk = time_dim.t_time_sk
| and ss_hdemo_sk = household_demographics.hd_demo_sk
| and ss_store_sk = s_store_sk
| and time_dim.t_hour = 20
| and time_dim.t_minute >= 30
| and household_demographics.hd_dep_count = 7
| and store.s_store_name = 'ese'
| order by count(*)
| limit 100
""".stripMargin),
("q97", """
| with ssci as (
| select ss_customer_sk customer_sk, ss_item_sk item_sk
| from store_sales,date_dim
| where ss_sold_date_sk = d_date_sk
| and d_month_seq between 1200 and 1200 + 11
| group by ss_customer_sk, ss_item_sk),
| csci as(
| select cs_bill_customer_sk customer_sk, cs_item_sk item_sk
| from catalog_sales,date_dim
| where cs_sold_date_sk = d_date_sk
| and d_month_seq between 1200 and 1200 + 11
| group by cs_bill_customer_sk, cs_item_sk)
| select sum(case when ssci.customer_sk is not null and csci.customer_sk is null then 1 else 0 end) store_only
| ,sum(case when ssci.customer_sk is null and csci.customer_sk is not null then 1 else 0 end) catalog_only
| ,sum(case when ssci.customer_sk is not null and csci.customer_sk is not null then 1 else 0 end) store_and_catalog
| from ssci full outer join csci on (ssci.customer_sk=csci.customer_sk
| and ssci.item_sk = csci.item_sk)
| limit 100
""".stripMargin),
// Modifications: "+ days" -> date_add
("q98", """
|select i_item_desc, i_category, i_class, i_current_price
| ,sum(ss_ext_sales_price) as itemrevenue
| ,sum(ss_ext_sales_price)*100/sum(sum(ss_ext_sales_price)) over
| (partition by i_class) as revenueratio
|from
| store_sales, item, date_dim
|where
| ss_item_sk = i_item_sk
| and i_category in ('Sports', 'Books', 'Home')
| and ss_sold_date_sk = d_date_sk
| and d_date between cast('1999-02-22' as date)
| and (cast('1999-02-22' as date) + interval 30 days)
|group by
| i_item_id, i_item_desc, i_category, i_class, i_current_price
|order by
| i_category, i_class, i_item_id, i_item_desc, revenueratio
""".stripMargin),
// Modifications: " -> `
("q99", """
| select
| substr(w_warehouse_name,1,20), sm_type, cc_name
| ,sum(case when (cs_ship_date_sk - cs_sold_date_sk <= 30 ) then 1 else 0 end) as `30 days`
| ,sum(case when (cs_ship_date_sk - cs_sold_date_sk > 30) and
| (cs_ship_date_sk - cs_sold_date_sk <= 60) then 1 else 0 end ) as `31-60 days`
| ,sum(case when (cs_ship_date_sk - cs_sold_date_sk > 60) and
| (cs_ship_date_sk - cs_sold_date_sk <= 90) then 1 else 0 end) as `61-90 days`
| ,sum(case when (cs_ship_date_sk - cs_sold_date_sk > 90) and
| (cs_ship_date_sk - cs_sold_date_sk <= 120) then 1 else 0 end) as `91-120 days`
| ,sum(case when (cs_ship_date_sk - cs_sold_date_sk > 120) then 1 else 0 end) as `>120 days`
| from
| catalog_sales, warehouse, ship_mode, call_center, date_dim
| where
| d_month_seq between 1200 and 1200 + 11
| and cs_ship_date_sk = d_date_sk
| and cs_warehouse_sk = w_warehouse_sk
| and cs_ship_mode_sk = sm_ship_mode_sk
| and cs_call_center_sk = cc_call_center_sk
| group by
| substr(w_warehouse_name,1,20), sm_type, cc_name
| order by substr(w_warehouse_name,1,20), sm_type, cc_name
| limit 100
""".stripMargin),
("qSsMax",
"""
|select
| count(*) as total,
| count(ss_sold_date_sk) as not_null_total,
| count(distinct ss_sold_date_sk) as unique_days,
| max(ss_sold_date_sk) as max_ss_sold_date_sk,
| max(ss_sold_time_sk) as max_ss_sold_time_sk,
| max(ss_item_sk) as max_ss_item_sk,
| max(ss_customer_sk) as max_ss_customer_sk,
| max(ss_cdemo_sk) as max_ss_cdemo_sk,
| max(ss_hdemo_sk) as max_ss_hdemo_sk,
| max(ss_addr_sk) as max_ss_addr_sk,
| max(ss_store_sk) as max_ss_store_sk,
| max(ss_promo_sk) as max_ss_promo_sk
|from store_sales
""".stripMargin)
)
}
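// Usage sketch (not part of the original file; names are assumptions): each entry
// above is a (name, sqlText) pair, so the whole benchmark could be driven against a
// SparkSession along the lines of
//   queries.foreach { case (name, sql) => spark.sql(sql).collect() }
// where `queries` stands in for the value defined above and `spark` is an active
// session with the TPC-DS tables registered.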
|
gsvic/MuSQLE
|
src/main/scala/gr/cslab/ece/ntua/musqle/benchmarks/tpcds/AllQueries.scala
|
Scala
|
apache-2.0
| 211,380 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.kafka010
import java.{util => ju}
import java.io._
import java.nio.charset.StandardCharsets
import scala.collection.JavaConverters._
import org.apache.commons.io.IOUtils
import org.apache.spark.SparkEnv
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.ExecutorCacheTaskLocation
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.sql.execution.streaming.{HDFSMetadataLog, SerializedOffset}
import org.apache.spark.sql.kafka010.KafkaSourceProvider.{INSTRUCTION_FOR_FAIL_ON_DATA_LOSS_FALSE, INSTRUCTION_FOR_FAIL_ON_DATA_LOSS_TRUE}
import org.apache.spark.sql.sources.v2.DataSourceOptions
import org.apache.spark.sql.sources.v2.reader.{InputPartition, InputPartitionReader, SupportsScanUnsafeRow}
import org.apache.spark.sql.sources.v2.reader.streaming.{MicroBatchReader, Offset}
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.UninterruptibleThread
/**
* A [[MicroBatchReader]] that reads data from Kafka.
*
* The [[KafkaSourceOffset]] is the custom [[Offset]] defined for this source that contains
* a map of TopicPartition -> offset. Note that this offset is 1 + (available offset). For
* example if the last record in a Kafka topic "t", partition 2 is offset 5, then
* KafkaSourceOffset will contain TopicPartition("t", 2) -> 6. This is done to keep it consistent
* with the semantics of `KafkaConsumer.position()`.
*
* Zero data loss is not guaranteed when topics are deleted. If zero data loss is critical, the user
* must make sure all messages in a topic have been processed before deleting the topic.
*
* There is a known issue caused by KAFKA-1894: a query that uses Kafka may not be able to stop.
* To avoid this issue, make sure the query is stopped before stopping the Kafka brokers,
* and do not use incorrect broker addresses.
*/
private[kafka010] class KafkaMicroBatchReader(
kafkaOffsetReader: KafkaOffsetReader,
executorKafkaParams: ju.Map[String, Object],
options: DataSourceOptions,
metadataPath: String,
startingOffsets: KafkaOffsetRangeLimit,
failOnDataLoss: Boolean)
extends MicroBatchReader with SupportsScanUnsafeRow with Logging {
private var startPartitionOffsets: PartitionOffsetMap = _
private var endPartitionOffsets: PartitionOffsetMap = _
private val pollTimeoutMs = options.getLong(
"kafkaConsumer.pollTimeoutMs",
SparkEnv.get.conf.getTimeAsMs("spark.network.timeout", "120s"))
private val maxOffsetsPerTrigger =
Option(options.get("maxOffsetsPerTrigger").orElse(null)).map(_.toLong)
private val rangeCalculator = KafkaOffsetRangeCalculator(options)
/**
* Lazily initialize `initialPartitionOffsets` to make sure that `KafkaConsumer.poll` is only
* called in StreamExecutionThread. Otherwise, interrupting a thread while running
* `KafkaConsumer.poll` may hang forever (KAFKA-1894).
*/
private lazy val initialPartitionOffsets = getOrCreateInitialPartitionOffsets()
override def setOffsetRange(start: ju.Optional[Offset], end: ju.Optional[Offset]): Unit = {
// Make sure initialPartitionOffsets is initialized
initialPartitionOffsets
startPartitionOffsets = Option(start.orElse(null))
.map(_.asInstanceOf[KafkaSourceOffset].partitionToOffsets)
.getOrElse(initialPartitionOffsets)
endPartitionOffsets = Option(end.orElse(null))
.map(_.asInstanceOf[KafkaSourceOffset].partitionToOffsets)
.getOrElse {
val latestPartitionOffsets = kafkaOffsetReader.fetchLatestOffsets()
maxOffsetsPerTrigger.map { maxOffsets =>
rateLimit(maxOffsets, startPartitionOffsets, latestPartitionOffsets)
}.getOrElse {
latestPartitionOffsets
}
}
}
override def planUnsafeInputPartitions(): ju.List[InputPartition[UnsafeRow]] = {
// Find the new partitions, and get their earliest offsets
val newPartitions = endPartitionOffsets.keySet.diff(startPartitionOffsets.keySet)
val newPartitionInitialOffsets = kafkaOffsetReader.fetchEarliestOffsets(newPartitions.toSeq)
if (newPartitionInitialOffsets.keySet != newPartitions) {
// We could not fetch the starting ("from") offsets for some partitions, which means they were deleted.
val deletedPartitions = newPartitions.diff(newPartitionInitialOffsets.keySet)
reportDataLoss(
s"Cannot find earliest offsets of ${deletedPartitions}. Some data may have been missed")
}
logInfo(s"Partitions added: $newPartitionInitialOffsets")
newPartitionInitialOffsets.filter(_._2 != 0).foreach { case (p, o) =>
reportDataLoss(
s"Added partition $p starts from $o instead of 0. Some data may have been missed")
}
// Find deleted partitions, and report data loss if required
val deletedPartitions = startPartitionOffsets.keySet.diff(endPartitionOffsets.keySet)
if (deletedPartitions.nonEmpty) {
reportDataLoss(s"$deletedPartitions are gone. Some data may have been missed")
}
// Use the end partitions to calculate offset ranges to ignore partitions that have
// been deleted
val topicPartitions = endPartitionOffsets.keySet.filter { tp =>
// Ignore partitions whose starting ("from") offsets we don't know.
newPartitionInitialOffsets.contains(tp) || startPartitionOffsets.contains(tp)
}.toSeq
logDebug("TopicPartitions: " + topicPartitions.mkString(", "))
// Calculate offset ranges
val offsetRanges = rangeCalculator.getRanges(
fromOffsets = startPartitionOffsets ++ newPartitionInitialOffsets,
untilOffsets = endPartitionOffsets,
executorLocations = getSortedExecutorList())
// Reuse Kafka consumers only when all the offset ranges have distinct TopicPartitions,
// that is, concurrent tasks will not read the same TopicPartitions.
val reuseKafkaConsumer = offsetRanges.map(_.topicPartition).toSet.size == offsetRanges.size
// Generate factories based on the offset ranges
val factories = offsetRanges.map { range =>
new KafkaMicroBatchDataReaderFactory(
range, executorKafkaParams, pollTimeoutMs, failOnDataLoss, reuseKafkaConsumer)
}
factories.map(_.asInstanceOf[InputPartition[UnsafeRow]]).asJava
}
override def getStartOffset: Offset = {
KafkaSourceOffset(startPartitionOffsets)
}
override def getEndOffset: Offset = {
KafkaSourceOffset(endPartitionOffsets)
}
override def deserializeOffset(json: String): Offset = {
KafkaSourceOffset(JsonUtils.partitionOffsets(json))
}
override def readSchema(): StructType = KafkaOffsetReader.kafkaSchema
override def commit(end: Offset): Unit = {}
override def stop(): Unit = {
kafkaOffsetReader.close()
}
override def toString(): String = s"KafkaV2[$kafkaOffsetReader]"
/**
* Read initial partition offsets from the checkpoint, or decide the offsets and write them to
* the checkpoint.
*/
private def getOrCreateInitialPartitionOffsets(): PartitionOffsetMap = {
// Make sure that `KafkaConsumer.poll` is only called in StreamExecutionThread.
// Otherwise, interrupting a thread while running `KafkaConsumer.poll` may hang forever
// (KAFKA-1894).
assert(Thread.currentThread().isInstanceOf[UninterruptibleThread])
// SparkSession is required for getting Hadoop configuration for writing to checkpoints
assert(SparkSession.getActiveSession.nonEmpty)
val metadataLog =
new KafkaSourceInitialOffsetWriter(SparkSession.getActiveSession.get, metadataPath)
metadataLog.get(0).getOrElse {
val offsets = startingOffsets match {
case EarliestOffsetRangeLimit =>
KafkaSourceOffset(kafkaOffsetReader.fetchEarliestOffsets())
case LatestOffsetRangeLimit =>
KafkaSourceOffset(kafkaOffsetReader.fetchLatestOffsets())
case SpecificOffsetRangeLimit(p) =>
kafkaOffsetReader.fetchSpecificOffsets(p, reportDataLoss)
}
metadataLog.add(0, offsets)
logInfo(s"Initial offsets: $offsets")
offsets
}.partitionToOffsets
}
/** Proportionally distribute limit number of offsets among topicpartitions */
private def rateLimit(
limit: Long,
from: PartitionOffsetMap,
until: PartitionOffsetMap): PartitionOffsetMap = {
val fromNew = kafkaOffsetReader.fetchEarliestOffsets(until.keySet.diff(from.keySet).toSeq)
val sizes = until.flatMap {
case (tp, end) =>
// If begin isn't defined, something's wrong, but let alert logic in getBatch handle it
from.get(tp).orElse(fromNew.get(tp)).flatMap { begin =>
val size = end - begin
logDebug(s"rateLimit $tp size is $size")
if (size > 0) Some(tp -> size) else None
}
}
val total = sizes.values.sum.toDouble
if (total < 1) {
until
} else {
until.map {
case (tp, end) =>
tp -> sizes.get(tp).map { size =>
val begin = from.get(tp).getOrElse(fromNew(tp))
val prorate = limit * (size / total)
// Don't completely starve small topicpartitions
val off = begin + (if (prorate < 1) Math.ceil(prorate) else Math.floor(prorate)).toLong
// Paranoia, make sure not to return an offset that's past end
Math.min(end, off)
}.getOrElse(end)
}
}
}
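// Worked example for rateLimit above (hypothetical numbers, not in the original):
// with limit = 10 and two partitions whose available sizes are 30 and 10 records
// (total = 40), the proration gives 10 * 30/40 = 7.5 and 10 * 10/40 = 2.5, floored
// to 7 and 2, so each partition's "until" offset advances by roughly that many
// records; a prorated share below 1 is rounded up instead, and the result is always
// capped at the partition's real end offset.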
private def getSortedExecutorList(): Array[String] = {
def compare(a: ExecutorCacheTaskLocation, b: ExecutorCacheTaskLocation): Boolean = {
if (a.host == b.host) {
a.executorId > b.executorId
} else {
a.host > b.host
}
}
val bm = SparkEnv.get.blockManager
bm.master.getPeers(bm.blockManagerId).toArray
.map(x => ExecutorCacheTaskLocation(x.host, x.executorId))
.sortWith(compare)
.map(_.toString)
}
/**
* If `failOnDataLoss` is true, this method will throw an `IllegalStateException`.
* Otherwise, just log a warning.
*/
private def reportDataLoss(message: String): Unit = {
if (failOnDataLoss) {
throw new IllegalStateException(message + s". $INSTRUCTION_FOR_FAIL_ON_DATA_LOSS_TRUE")
} else {
logWarning(message + s". $INSTRUCTION_FOR_FAIL_ON_DATA_LOSS_FALSE")
}
}
/** A version of [[HDFSMetadataLog]] specialized for saving the initial offsets. */
class KafkaSourceInitialOffsetWriter(sparkSession: SparkSession, metadataPath: String)
extends HDFSMetadataLog[KafkaSourceOffset](sparkSession, metadataPath) {
val VERSION = 1
override def serialize(metadata: KafkaSourceOffset, out: OutputStream): Unit = {
out.write(0) // A zero byte is written to support Spark 2.1.0 (SPARK-19517)
val writer = new BufferedWriter(new OutputStreamWriter(out, StandardCharsets.UTF_8))
writer.write("v" + VERSION + "\\n")
writer.write(metadata.json)
writer.flush
}
override def deserialize(in: InputStream): KafkaSourceOffset = {
in.read() // A zero byte is read to support Spark 2.1.0 (SPARK-19517)
val content = IOUtils.toString(new InputStreamReader(in, StandardCharsets.UTF_8))
// HDFSMetadataLog guarantees that it never creates a partial file.
assert(content.length != 0)
if (content(0) == 'v') {
val indexOfNewLine = content.indexOf("\n")
if (indexOfNewLine > 0) {
val version = parseVersion(content.substring(0, indexOfNewLine), VERSION)
KafkaSourceOffset(SerializedOffset(content.substring(indexOfNewLine + 1)))
} else {
throw new IllegalStateException(
s"Log file was malformed: failed to detect the log file version line.")
}
} else {
// The log was generated by Spark 2.1.0
KafkaSourceOffset(SerializedOffset(content))
}
}
}
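// Illustration of the on-disk layout produced by the writer above (derived from the
// code, example values only): a single 0x00 byte, then the text "v1" and a newline,
// then the offsets JSON, e.g.
//   0x00 v1\n {"topic-a":{"0":123,"1":456}}
// deserialize() skips the leading byte, requires the 'v' version prefix, and parses
// the JSON after the newline; content without the prefix is treated as a log written
// by Spark 2.1.0.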
}
/** An [[InputPartition]] for reading Kafka data in a micro-batch streaming query. */
private[kafka010] case class KafkaMicroBatchDataReaderFactory(
offsetRange: KafkaOffsetRange,
executorKafkaParams: ju.Map[String, Object],
pollTimeoutMs: Long,
failOnDataLoss: Boolean,
reuseKafkaConsumer: Boolean) extends InputPartition[UnsafeRow] {
override def preferredLocations(): Array[String] = offsetRange.preferredLoc.toArray
override def createPartitionReader(): InputPartitionReader[UnsafeRow] =
new KafkaMicroBatchInputPartitionReader(offsetRange, executorKafkaParams, pollTimeoutMs,
failOnDataLoss, reuseKafkaConsumer)
}
/** An [[InputPartitionReader]] for reading Kafka data in a micro-batch streaming query. */
private[kafka010] case class KafkaMicroBatchInputPartitionReader(
offsetRange: KafkaOffsetRange,
executorKafkaParams: ju.Map[String, Object],
pollTimeoutMs: Long,
failOnDataLoss: Boolean,
reuseKafkaConsumer: Boolean) extends InputPartitionReader[UnsafeRow] with Logging {
private val consumer = KafkaDataConsumer.acquire(
offsetRange.topicPartition, executorKafkaParams, reuseKafkaConsumer)
private val rangeToRead = resolveRange(offsetRange)
private val converter = new KafkaRecordToUnsafeRowConverter
private var nextOffset = rangeToRead.fromOffset
private var nextRow: UnsafeRow = _
override def next(): Boolean = {
if (nextOffset < rangeToRead.untilOffset) {
val record = consumer.get(nextOffset, rangeToRead.untilOffset, pollTimeoutMs, failOnDataLoss)
if (record != null) {
nextRow = converter.toUnsafeRow(record)
true
} else {
false
}
} else {
false
}
}
override def get(): UnsafeRow = {
assert(nextRow != null)
nextOffset += 1
nextRow
}
override def close(): Unit = {
consumer.release()
}
private def resolveRange(range: KafkaOffsetRange): KafkaOffsetRange = {
if (range.fromOffset < 0 || range.untilOffset < 0) {
// Late bind the offset range
val availableOffsetRange = consumer.getAvailableOffsetRange()
val fromOffset = if (range.fromOffset < 0) {
assert(range.fromOffset == KafkaOffsetRangeLimit.EARLIEST,
s"earliest offset ${range.fromOffset} does not equal ${KafkaOffsetRangeLimit.EARLIEST}")
availableOffsetRange.earliest
} else {
range.fromOffset
}
val untilOffset = if (range.untilOffset < 0) {
assert(range.untilOffset == KafkaOffsetRangeLimit.LATEST,
s"latest offset ${range.untilOffset} does not equal ${KafkaOffsetRangeLimit.LATEST}")
availableOffsetRange.latest
} else {
range.untilOffset
}
KafkaOffsetRange(range.topicPartition, fromOffset, untilOffset, None)
} else {
range
}
}
}
|
szhem/spark
|
external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchReader.scala
|
Scala
|
apache-2.0
| 15,420 |
package com.ronyhe
package object sweeper {
type Coord = (Int, Int) // row, col
}
|
ronyhe/sweeper
|
src/main/scala/com/ronyhe/sweeper/package.scala
|
Scala
|
mit
| 86 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.controller.inject
import scala.concurrent.duration._
import io.gatling.BaseSpec
import io.gatling.commons.util.Collections._
class InjectionStepSpec extends BaseSpec {
private def scheduling(steps: InjectionStep*): List[FiniteDuration] =
steps.reverse.foldLeft[Iterator[FiniteDuration]](Iterator.empty) { (it, step) =>
step.chain(it)
}.toList
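// Note (added for illustration, inferred from the tests below): each step's `chain`
// emits that step's own start times and shifts the already-chained iterator by the
// step's duration, so folding over the reversed steps yields a single increasing
// sequence. For example, RampInjection(2, 2 seconds) schedules users at 0s and 1s,
// and a NothingForInjection(5 seconds) placed after it delays whatever is chained
// next to 7 seconds.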
"RampInjection" should "return the correct number of users" in {
RampInjection(5, 1 second).users shouldBe 5
}
it should "return the correct injection duration" in {
RampInjection(5, 1 second).duration shouldBe (1 second)
}
it should "schedule with a correct interval" in {
val ramp = RampInjection(5, 1 second)
val rampScheduling = scheduling(ramp)
val interval0 = rampScheduling(1) - rampScheduling.head
val interval1 = rampScheduling(2) - rampScheduling(1)
rampScheduling.length shouldBe ramp.users
interval0 shouldBe interval1
interval0 shouldBe (200 milliseconds)
}
it should "schedule the correct number of users" in {
val step = RampInjection(3, 8 seconds)
step.users shouldBe 3
scheduling(step).size shouldBe 3
}
it should "the first and the last users should be correctly scheduled" in {
val rampScheduling = scheduling(RampInjection(5, 1 second))
val first = rampScheduling.head
val last = rampScheduling.last
first shouldBe Duration.Zero
last shouldBe <(1 second)
rampScheduling shouldBe sorted
}
"ConstantRateInjection" should "return the correct number of users" in {
ConstantRateInjection(1.0, 5 seconds).users shouldBe 5
ConstantRateInjection(0.4978, 100 seconds).users shouldBe 50
}
"NothingForInjection" should "return the correct number of users" in {
NothingForInjection(1 second).users shouldBe 0
}
it should "return the correct injection duration" in {
NothingForInjection(1 second).duration shouldBe (1 second)
}
it should "return the correct injection scheduling" in {
NothingForInjection(1 second).chain(Iterator.empty) shouldBe empty
}
"AtOnceInjection" should "return the correct number of users" in {
AtOnceInjection(4).users shouldBe 4
}
it should "return the correct injection duration" in {
scheduling(AtOnceInjection(4)).max shouldBe Duration.Zero
}
it should "return the correct injection scheduling" in {
val peak = AtOnceInjection(4)
val atOnceScheduling = scheduling(peak)
val uniqueScheduling = atOnceScheduling.toSet
uniqueScheduling should contain(Duration.Zero)
atOnceScheduling should have length peak.users
}
"RampRateInjection" should "return the correct injection duration" in {
RampRateInjection(2, 4, 10 seconds).duration shouldBe (10 seconds)
}
it should "return the correct number of users" in {
RampRateInjection(2, 4, 10 seconds).users shouldBe 30
}
it should "provides an injection scheduling with the correct number of elements" in {
val rampRate = RampRateInjection(2, 4, 10 seconds)
val rampRateScheduling = scheduling(rampRate)
rampRateScheduling.length shouldBe rampRate.users
}
it should "provides an injection scheduling with the correct values" in {
val rampRateScheduling = scheduling(RampRateInjection(2, 4, 10 seconds))
rampRateScheduling.head shouldBe Duration.Zero
rampRateScheduling(1) shouldBe (500 milliseconds)
}
it should "return the correct injection duration when the acceleration is null" in {
RampRateInjection(1.0, 1.0, 10 seconds).duration shouldBe (10 seconds)
}
it should "return the correct number of users when the acceleration is null" in {
RampRateInjection(1.0, 1.0, 10 seconds).users shouldBe 10
}
it should "return a scheduling of constant step when the acceleration is null" in {
val constantRampScheduling = scheduling(RampRateInjection(1.0, 1.0, 10 seconds))
val steps = constantRampScheduling.zip(constantRampScheduling.drop(1)).map {
case (i1, i2) => i2 - i1
}.toSet[FiniteDuration]
constantRampScheduling shouldBe sorted
steps.size shouldBe 1
constantRampScheduling.last shouldBe <(10 seconds)
}
"SplitInjection" should "provide an appropriate injection scheduling and ignore extra users" in {
val scheduling = SplitInjection(6, RampInjection(2, 2 seconds), NothingForInjection(5 seconds)).chain(Iterator.empty).toList
scheduling shouldBe List(
Duration.Zero, 1 second, // 1st ramp
7 seconds, 8 seconds, // 2nd ramp after a pause
14 seconds, 15 seconds
) // 3rd ramp after a pause
}
it should "provide an appropriate injection scheduling when there is only one split" in {
val scheduling = SplitInjection(1, AtOnceInjection(1), NothingForInjection(5 seconds)).chain(Iterator.empty).toList
scheduling.length shouldBe 1
val schedulingWithInjectionInSeparator = SplitInjection(1, AtOnceInjection(1), AtOnceInjection(1)).chain(Iterator.empty).toList
schedulingWithInjectionInSeparator.length shouldBe 1
}
it should "should schedule the first and last user through the 'into' injection step" in {
val scheduling = SplitInjection(5, RampInjection(2, 2 seconds), AtOnceInjection(1)).chain(AtOnceInjection(1).chain(Iterator.empty)).toList
scheduling shouldBe List(
Duration.Zero, 1 second, // 1st ramp
2 seconds, // at once in between
2 seconds, 3 seconds, // 2nd ramp until reaching 5 users
4 seconds
) // at once from the chained injection
}
val heavisideScheduling = HeavisideInjection(100, 5 seconds).chain(Iterator.empty).toList
"HeavisideInjection" should "provide an appropriate number of users" in {
heavisideScheduling.length shouldBe 100
}
it should "provide correct values" in {
heavisideScheduling(1) shouldBe (291 milliseconds)
heavisideScheduling shouldBe sorted
heavisideScheduling.last shouldBe <(5 seconds)
}
it should "have most of the scheduling values close to half of the duration" in {
val l = heavisideScheduling.count((t) => (t > (1.5 seconds)) && (t < (3.5 seconds)))
l shouldBe 67 // two thirds
}
"Injection chaining" should "provide a monotonically increasing series of durations" in {
val scheduling = RampInjection(3, 2 seconds).chain(RampInjection(3, 2 seconds).chain(Iterator.empty)).toVector
scheduling shouldBe sorted
}
"Poisson injection" should "inject constant users at approximately the right rate" in {
// Inject 1000 users per second for 60 seconds
val inject = PoissonInjection(60 seconds, 1000.0, 1000.0, seed = 0L) // Seed with 0, to ensure tests are deterministic
val scheduling = inject.chain(Iterator(0.seconds)).toVector // Chain to an injector with a zero timer
scheduling.size shouldBe (inject.users + 1)
scheduling.size shouldBe 60001 +- 200 // 60000 for the users injected by PoissonInjection, plus the 0 second one
scheduling.last shouldBe (60 seconds)
scheduling(scheduling.size - 2).toMillis shouldBe 60000L +- 5L
scheduling.head.toMillis shouldBe 0L +- 5L
scheduling(30000).toMillis shouldBe 30000L +- 1000L // Half-way through we should have injected half of the users
}
it should "inject ramped users at approximately the right rate" in {
// ramp from 0 to 1000 users per second over 60 seconds
val inject = PoissonInjection(60.seconds, 0.0, 1000.0, seed = 0L) // Seed with 0, to ensure tests are deterministic
val scheduling = inject.chain(Iterator(0.seconds)).toVector // Chain to an injector with a zero timer
scheduling.size shouldBe (inject.users + 1)
scheduling.size shouldBe 30001 +- 500 // 30000 for the users injected by PoissonInjection, plus the 0 second one
scheduling.last shouldBe (60 seconds)
scheduling(scheduling.size - 2).toMillis shouldBe 60000L +- 5L
scheduling.head.toMillis shouldBe 0L +- 200L
scheduling(7500).toMillis shouldBe 30000L +- 1000L // Half-way through ramp-up we should have run a quarter of users
}
"Chain steps" should "inject the expected number of users" in {
val steps = Vector(
RampInjection(50, 9 minutes),
NothingForInjection(1 minute),
RampInjection(50, 1 minute),
NothingForInjection(9 minutes),
RampInjection(50, 1 minute),
NothingForInjection(9 minutes),
RampInjection(50, 1 minute),
NothingForInjection(9 minutes),
RampInjection(50, 1 minute),
NothingForInjection(9 minutes)
)
scheduling(steps: _*).size shouldBe steps.sumBy(_.users)
}
}
|
MykolaB/gatling
|
gatling-core/src/test/scala/io/gatling/core/controller/inject/InjectionStepSpec.scala
|
Scala
|
apache-2.0
| 9,131 |
package ignition.jobs
import ignition.core.testsupport.spark.SharedSparkContext
import org.scalatest.{ShouldMatchers, FlatSpec}
class WordCountSpec extends FlatSpec with ShouldMatchers with SharedSparkContext {
"WordCount" should "count words correctly" in {
val text =
"""
|She sells sea-shells on the sea-shore.
|The shells she sells are sea-shells, I'm sure.
|For if she sells sea-shells on the sea-shore
|Then I'm sure she sells sea-shore shells.
""".stripMargin
val lines = sc.parallelize(text.split("\\n"))
val wordCount = WordCountJob.wc(lines)
val expectedResult = Map(("are",1), ("sea-shells",3), ("if",1), ("on",2), ("shells",2),
("then",1), ("sure",2), ("she",4), ("for",1), ("i'm",2),
("sea-shore",3), ("sells",4), ("the",3))
wordCount.collect().toMap shouldBe expectedResult
}
}
|
chaordic/ignition-template
|
src/test/scala/ignition/jobs/WordCountSpec.scala
|
Scala
|
mit
| 876 |
package org.apache.spark.examples.mllib
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.mllib.clustering.KMeans
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions._
import org.apache.spark.sql.SQLContext
import org.apache.spark.mllib.linalg.Vectors
// The case class must be defined before it is used (outside the method)
case class CC1(ID: String, LABEL: String, RTN5: Double, FIVE_DAY_GL: Double, CLOSE: Double, RSI2: Double, RSI_CLOSE_3: Double, PERCENT_RANK_100: Double, RSI_STREAK_2: Double, CRSI: Double)
object KMeansModel {
def main(args: Array[String]) {
val conf = new SparkConf().setAppName("test").setMaster("local")
val sc = new SparkContext(conf)
val data = sc.textFile("../data/mllib/spykmeans.csv")
val header = data.first
// The CSV header row is garbled, so drop the first line; opening it in Excel reported an error, other tools untested
val rows = data.filter(l => l != header)
// define case class
// split on the comma separator
val allSplit = rows.map(line => line.split(","))
// map the split parts to the case class
val allData = allSplit.map(p => CC1(p(0).toString, p(1).toString, p(2).trim.toDouble, p(3).trim.toDouble, p(4).trim.toDouble, p(5).trim.toDouble, p(6).trim.toDouble, p(7).trim.toDouble, p(8).trim.toDouble, p(9).trim.toDouble))
val sqlContext = new SQLContext(sc)
// convert the RDD to a DataFrame
import sqlContext.implicits._
import sqlContext._
val allDF = allData.toDF()
// convert back to an RDD and cache the data
val rowsRDD = allDF.rdd.map(r => (r.getString(0), r.getString(1), r.getDouble(2), r.getDouble(3), r.getDouble(4), r.getDouble(5), r.getDouble(6), r.getDouble(7), r.getDouble(8), r.getDouble(9)))
rowsRDD.cache()
// convert the data to an RDD that will be passed to KMeans, and cache it. We pass RSI2, RSI_CLOSE_3, PERCENT_RANK_100, RSI_STREAK_2 and CRSI to KMeans; these are the attributes used to assign each instance to a cluster
val vectors = allDF.rdd.map(r => Vectors.dense(r.getDouble(5), r.getDouble(6), r.getDouble(7), r.getDouble(8), r.getDouble(9)))
vectors.cache()
// KMeans model with 2 clusters and 20 iterations
val kMeansModel = KMeans.train(vectors, 2, 20)
// Print the center of each cluster
kMeansModel.clusterCenters.foreach(println)
// Get the prediction from the model together with the ID so we can link it back to the other information
val predictions = rowsRDD.map { r => (r._1, kMeansModel.predict(Vectors.dense(r._6, r._7, r._8, r._9, r._10))) }
// convert the RDD to a DataFrame
val predDF = predictions.toDF("ID", "CLUSTER")
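// Hypothetical follow-up (not part of the original example): the cluster assignments
// could be inspected or joined back to the input at this point, e.g. predDF.show()
// or predDF.join(allDF, "ID"); the original example simply ends here.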
}
}
|
tophua/spark1.52
|
examples/src/main/scala/org/apache/spark/examples/mllib/KMeansModel.scala
|
Scala
|
apache-2.0
| 3,066 |
/*
* Copyright 2015-2020 Noel Welsh
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package doodle
package algebra
package generic
import cats.data.State
import doodle.core._
import doodle.core.{Transform => Tx}
import scala.annotation.tailrec
trait GenericPath[F[_]] extends Path[Finalized[F, ?]] {
trait PathApi {
def closedPath(tx: Tx,
fill: Option[Fill],
stroke: Option[Stroke],
elements: List[PathElement]): F[Unit]
def openPath(tx: Tx,
fill: Option[Fill],
stroke: Option[Stroke],
elements: List[PathElement]): F[Unit]
}
def PathApi: PathApi
def path(path: ClosedPath): Finalized[F, Unit] =
Finalized.leaf { dc =>
val elements = path.elements
val strokeWidth = dc.strokeWidth.getOrElse(0.0)
val bb = boundingBox(elements).expand(strokeWidth)
(bb,
State.inspect(tx =>
PathApi.closedPath(tx, dc.fill, dc.stroke, elements)))
}
def path(path: OpenPath): Finalized[F, Unit] =
Finalized.leaf { dc =>
val elements = path.elements
val strokeWidth = dc.strokeWidth.getOrElse(0.0)
val bb = boundingBox(elements).expand(strokeWidth)
(bb,
State.inspect(tx => PathApi.openPath(tx, dc.fill, dc.stroke, elements)))
}
def boundingBox(elements: List[PathElement]): BoundingBox = {
import PathElement._
// This implementation should avoid allocation
var minX: Double = 0.0
var minY: Double = 0.0
var maxX: Double = 0.0
var maxY: Double = 0.0
@tailrec
def iter(elts: List[PathElement]): Unit =
elts match {
case hd :: tl =>
hd match {
case MoveTo(pos) =>
minX = pos.x min minX
minY = pos.y min minY
maxX = pos.x max maxX
maxY = pos.y max maxY
case LineTo(pos) =>
minX = pos.x min minX
minY = pos.y min minY
maxX = pos.x max maxX
maxY = pos.y max maxY
case BezierCurveTo(cp1, cp2, pos) =>
// The control points form a bounding box around a bezier curve,
// but this may not be a tight bounding box.
// It's an acceptable solution for now but in the future
// we may wish to generate a tighter bounding box.
minX = pos.x min cp2.x min cp1.x min minX
minY = pos.y min cp2.y min cp1.y min minY
maxX = pos.x max cp2.x max cp1.x max maxX
maxY = pos.y max cp2.y max cp1.y max maxY
}
iter(tl)
case Seq() => ()
}
iter(elements)
BoundingBox(minX, maxY, maxX, minY)
}
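// For illustration (not in the original): a path of MoveTo(Point(0, 0)) and
// LineTo(Point(100, 50)) yields BoundingBox(0, 50, 100, 0), i.e. left = 0, top = 50,
// right = 100, bottom = 0, before the stroke-width expansion applied by the callers
// above.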
}
|
underscoreio/doodle
|
core/shared/src/main/scala/doodle/algebra/generic/GenericPath.scala
|
Scala
|
apache-2.0
| 3,248 |
package com.sksamuel.elastic4s.requests.searches.queries.funcscorer
sealed trait CombineFunction
object CombineFunction {
def valueOf(str: String): CombineFunction = str.toLowerCase match {
case "avg" => Avg
case "max" => Max
case "min" => Min
case "replace" => Replace
case "sum" => Sum
case "multiply" => Multiply
}
case object Avg extends CombineFunction
case object Min extends CombineFunction
case object Max extends CombineFunction
case object Sum extends CombineFunction
case object Multiply extends CombineFunction
case object Replace extends CombineFunction
}
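// Usage sketch (added for illustration): CombineFunction.valueOf("avg") returns
// CombineFunction.Avg. The match above is not exhaustive over arbitrary strings, so
// an unrecognised value such as "median" throws a scala.MatchError.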
|
sksamuel/elastic4s
|
elastic4s-domain/src/main/scala/com/sksamuel/elastic4s/requests/searches/queries/funcscorer/CombineFunction.scala
|
Scala
|
apache-2.0
| 654 |
package org.jetbrains.plugins.scala
package annotator
import com.intellij.codeInsight.intention.IntentionAction
import com.intellij.codeInspection.ProblemHighlightType
import com.intellij.lang.annotation.{Annotation, AnnotationHolder}
import com.intellij.openapi.util.TextRange
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.annotator.quickfix.ReportHighlightingErrorQuickFix
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScBlockExpr, ScExpression}
import org.jetbrains.plugins.scala.lang.psi.types.ScTypeExt
import org.jetbrains.plugins.scala.lang.psi.types.api.{ScTypePresentation, TypeSystem}
/**
* @author Aleksander Podkhalyuzin
* Date: 25.03.2009
*/
private[annotator] object AnnotatorUtils {
def proccessError(error: String, element: PsiElement, holder: AnnotationHolder, fixes: IntentionAction*) {
proccessError(error, element.getTextRange, holder, fixes: _*)
}
def proccessError(error: String, range: TextRange, holder: AnnotationHolder, fixes: IntentionAction*) {
val annotation = holder.createErrorAnnotation(range, error)
annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
for (fix <- fixes) annotation.registerFix(fix)
}
def proccessWarning(error: String, element: PsiElement, holder: AnnotationHolder, fixes: IntentionAction*) {
proccessWarning(error, element.getTextRange, holder, fixes: _*)
}
def proccessWarning(error: String, range: TextRange, holder: AnnotationHolder, fixes: IntentionAction*) {
val annotation: Annotation = holder.createWarningAnnotation(range, error)
annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
for (fix <- fixes) annotation.registerFix(fix)
}
def checkConformance(expression: ScExpression, typeElement: ScTypeElement, holder: AnnotationHolder)
(implicit typeSystem: TypeSystem) {
expression.getTypeAfterImplicitConversion().tr.foreach {actual =>
val expected = typeElement.calcType
if (!actual.conforms(expected)) {
val expr = expression match {
case b: ScBlockExpr => b.getRBrace.map(_.getPsi).getOrElse(b)
case _ => expression
}
val (actualText, expText) = ScTypePresentation.different(actual, expected)
val annotation = holder.createErrorAnnotation(expr,
ScalaBundle.message("type.mismatch.found.required", actualText, expText))
annotation.registerFix(ReportHighlightingErrorQuickFix)
}
}
}
}
|
katejim/intellij-scala
|
src/org/jetbrains/plugins/scala/annotator/AnnotatorUtils.scala
|
Scala
|
apache-2.0
| 2,573 |