| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| string (5–1M chars) | string (5–109 chars) | string (6–208 chars) | 1 class | 15 classes | int64 (5–1M) |
package cmd
import inputdata.{DataHolder, MovieLensDataHolder, NetflixInManyFilesDataHolder, NetflixInOneFileDataHolder}
/**
* Created by Ondra Fiedler on 27.8.14.
*/
trait DataHolderFactoryFromConf extends NameAndDescription {
/**
* Returns an instance of DataHolder
* @param conf Instance of Conf with parsed command line arguments
* @return DataHolder instance
*/
def getDataHolderInstance(conf: Conf): DataHolder
}
object DataHolderFactoryFromConf {
/**
* List of all objects that extend DataHolderFactoryFromConf. A DataHolderFactoryFromConf object must be listed here in order to be usable through the command line interface.
*/
val dataHolderFactories: List[DataHolderFactoryFromConf] = List(MovieLensDataHolderFactory, NetflixInOneFileDataHolderFactory, NetflixInManyFilesDataHolderFactory)
}
//Factories for datasets:
object MovieLensDataHolderFactory extends DataHolderFactoryFromConf {
override def getName = "movieLens"
override def getDescription = "MovieLens data - http://grouplens.org/datasets/movielens/\\" +
"Ratings are stored in ratings.dat and movie titles in movies.dat"
override def getDataHolderInstance(conf: Conf) = new MovieLensDataHolder(conf.dir())
}
object NetflixInOneFileDataHolderFactory extends DataHolderFactoryFromConf {
override def getName = "netflix"
override def getDescription = "Netflix data - http://www.netflixprize.com/\\" +
"All ratings in file ratings.txt. Each line has format:\n<movieID>,<userID>,<rating>,<date>\\" +
"Movie titles stored in movie_titles.txt"
override def getDataHolderInstance(conf: Conf) = new NetflixInOneFileDataHolder(conf.dir())
}
object NetflixInManyFilesDataHolderFactory extends DataHolderFactoryFromConf {
override def getName = "netflixInManyFiles"
override def getDescription = "Netflix data - original data from Netflix Prize"
override def getDataHolderInstance(conf: Conf) = new NetflixInManyFilesDataHolder(conf.dir())
}
| OndraFiedler/spark-recommender | src/main/scala/cmd/DataHolderFactoryFromConf.scala | Scala | mit | 1,967 |
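The factory pattern above is extensible: per the comment in the companion object, a new dataset is wired in by implementing `DataHolderFactoryFromConf` and adding the object to `dataHolderFactories`. A minimal sketch, assuming the `Conf` and `DataHolder` APIs behave as used above; the `movieLensCopy` name is invented for illustration:

```scala
package cmd

import inputdata.{DataHolder, MovieLensDataHolder}

// Hypothetical factory: exposes the MovieLens loader under a second CLI name.
object MovieLensCopyDataHolderFactory extends DataHolderFactoryFromConf {
  override def getName = "movieLensCopy"
  override def getDescription = "Alternative entry point for MovieLens-formatted data"
  // conf.dir() is assumed to return the data directory, as in the factories above.
  override def getDataHolderInstance(conf: Conf): DataHolder = new MovieLensDataHolder(conf.dir())
}

// To be reachable from the command line it would also have to be appended to
// DataHolderFactoryFromConf.dataHolderFactories.
```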
package dawn.flow
class ModelHook[A] {
var model: Option[A] = None
def setModel(m: A) =
model = Some(m)
}
object ModelHook {
def apply[A] = new ModelHook[A]()
}
object RequireModel {
def isRequiring(x: Any) = x match {
case x: RequireModel[_] => true
case x: NamedFunction[_, _] => x.requireModel
case _ => false
}
}
trait RequireModel[M] {
def modelHook: ModelHook[M]
def model = modelHook.model
}
| rubenfiszel/scala-flow | core/src/main/scala/ModelHook.scala | Scala | mit | 436 |
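A small illustrative sketch of how `ModelHook` and `RequireModel` could be wired together; the `Pendulum` model and estimator below are invented for the example and are not part of scala-flow:

```scala
package dawn.flow

case class Pendulum(length: Double)

// Declares its model dependency through RequireModel; `model` is None until injected.
class PendulumEstimator(val modelHook: ModelHook[Pendulum]) extends RequireModel[Pendulum] {
  def naturalFrequency: Option[Double] = model.map(p => math.sqrt(9.81 / p.length))
}

object ModelHookDemo extends App {
  val hook = ModelHook[Pendulum]
  val estimator = new PendulumEstimator(hook)
  println(estimator.naturalFrequency)          // None: no model set yet
  hook.setModel(Pendulum(length = 2.0))
  println(estimator.naturalFrequency)          // Some(2.21...) once the model is injected
  println(RequireModel.isRequiring(estimator)) // true, via the RequireModel[_] case
}
```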
object ADTWithArray1 {
case class A(x: Int)
case class B(content: Array[A]) {
require(content.length > 0)
def contains(y: Int): Boolean = {
require(content.length > 0)
content(0).x == y
} ensuring(res => res)
}
}
| regb/leon | src/test/resources/regression/verification/purescala/invalid/ADTWithArray1.scala | Scala | gpl-3.0 | 247 |
import org.velocity4s.ScalaVelocityEngine
import java.io.StringWriter
import org.apache.velocity.VelocityContext
import org.apache.velocity.app.VelocityEngine
import org.apache.velocity.runtime.RuntimeConstants
import org.apache.velocity.runtime.resource.loader.StringResourceLoader
object Example {
def main(args: Array[String]): Unit = {
usage1()
usage2()
}
def usage1(): Unit = {
case class Person(name: String, age: Int)
val engine = ScalaVelocityEngine.create
engine.addProperty(RuntimeConstants.RESOURCE_LOADER, "string")
engine.addProperty("string.resource.loader.class", classOf[StringResourceLoader].getName)
engine.init()
val templateAsString = """|$person.name
|$person.age
|
|#foreach ($e in $list)
|$e
|#end
|
|#foreach ($k in $map.keys())
|$map[$k]
|#end
|
|#foreach ($v in $some)
|some($v)
|#end
|#foreach ($v in $none)
|none
|#end
|
|""".stripMargin
val templateName = "template.vm"
StringResourceLoader.getRepository.putStringResource(templateName, templateAsString)
val context = new VelocityContext
context.put("person", Person("Taro", 20))
context.put("list", List("Java", "Scala", "Groovy", "Clojure"))
context.put("map", Map("key1" -> "value1", "key2" -> "value2"))
context.put("some", Some("hello"))
context.put("none", None)
val template = engine.getTemplate(templateName)
val writer = new StringWriter
template.merge(context, writer)
println(writer)
}
def usage2(): Unit = {
val engine = new VelocityEngine("examples/src/main/resources/velocity.properties")
engine.addProperty(RuntimeConstants.RESOURCE_LOADER, "string")
engine.addProperty("string.resource.loader.class", classOf[StringResourceLoader].getName)
engine.init()
val templateAsString = """|#foreach ($e in $list)
|$e
|#end""".stripMargin
val templateName = "template.vm"
StringResourceLoader.getRepository.putStringResource(templateName, templateAsString)
val context = new VelocityContext
context.put("list", List("Java", "Scala", "Groovy", "Clojure"))
val template = engine.getTemplate(templateName)
val writer = new StringWriter
template.merge(context, writer)
println(writer)
}
}
| kazuhira-r/velocity4s | examples/src/main/scala/Example.scala | Scala | apache-2.0 | 2,831 |
package com.twitter.finagle.service
import com.twitter.conversions.time._
import com.twitter.finagle._
import com.twitter.finagle.client.Transporter
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.util.{DefaultLogger, Updater}
import com.twitter.logging.Level
import com.twitter.util.{Future, Duration, Time, Throw, Return, Timer, TimerTask}
import java.util.logging.Logger
object FailFastFactory {
private sealed trait State
private case object Ok extends State
private case class Retrying(
since: Time,
task: TimerTask,
ntries: Int,
backoffs: Stream[Duration])
extends State
private val url = "https://twitter.github.io/finagle/guide/FAQ.html#why-do-clients-see-com-twitter-finagle-failedfastexception-s"
private object Observation extends Enumeration {
type t = Value
val Success, Fail, Timeout, TimeoutFail, Close = Value
}
// Put a reasonably sized cap on the number of jittered backoffs so that a
// "permanently" dead host doesn't create a space leak: each new backoff that is
// taken would otherwise be held onto by this global Stream. The trailing
// `constant` avoids that issue.
private val defaultBackoffs: Stream[Duration] =
Backoff.exponentialJittered(1.second, 32.seconds).take(16) ++ Backoff.constant(32.seconds)
val role = Stack.Role("FailFast")
/**
* For details on why clients see [[FailedFastException]]s see the
* [[https://twitter.github.io/finagle/guide/FAQ.html#why-do-clients-see-com-twitter-finagle-failedfastexception-s FAQ]]
*
* @see The [[https://twitter.github.io/finagle/guide/Clients.html#fail-fast user guide]]
* for more details.
*/
case class FailFast(enabled: Boolean) {
def mk(): (FailFast, Stack.Param[FailFast]) =
(this, FailFast.param)
}
object FailFast {
implicit val param = Stack.Param(FailFast(enabled = true))
}
/**
* Creates a [[com.twitter.finagle.Stackable]] [[FailFastFactory]] when enabled.
*/
def module[Req, Rep]: Stackable[ServiceFactory[Req, Rep]] =
new Stack.Module6[
FailFast,
param.Stats,
param.Timer,
param.Label,
param.Logger,
Transporter.EndpointAddr,
ServiceFactory[Req, Rep]] {
val role = FailFastFactory.role
val description = "Backoff exponentially from hosts to which we cannot establish a connection"
def make(
failFast: FailFast,
_stats: param.Stats,
_timer: param.Timer,
_label: param.Label,
_logger: param.Logger,
_endpoint: Transporter.EndpointAddr,
next: ServiceFactory[Req, Rep]
) = {
failFast match {
case FailFast(false) =>
next
case FailFast(true) =>
val param.Stats(statsReceiver) = _stats
val param.Timer(timer) = _timer
val param.Label(label) = _label
val param.Logger(logger) = _logger
val Transporter.EndpointAddr(endpoint) = _endpoint
new FailFastFactory(next, statsReceiver.scope("failfast"), timer, label, logger, endpoint)
}
}
}
}
/**
* A fail-fast factory that attempts to reduce the number of requests dispatched
* to endpoints that will anyway fail. It works by marking a host dead on
* failure, launching a background process that attempts to reestablish the
* connection with the given backoff schedule. At this time, the factory is
* marked unavailable (and thus the load balancer above it will avoid its
* use). The factory becomes available again on success or when the backoff
* schedule runs out.
*
* Inflight attempts to connect will continue uninterrupted. However, trying to
* connect *after* being marked dead will fail fast until the background process
* is able to establish a connection.
*
* @see The [[https://twitter.github.io/finagle/guide/Clients.html#fail-fast user guide]]
* for more details.
*/
private[finagle] class FailFastFactory[Req, Rep](
underlying: ServiceFactory[Req, Rep],
statsReceiver: StatsReceiver,
timer: Timer,
label: String,
logger: Logger = DefaultLogger,
endpoint: Address = Address.failing,
backoffs: Stream[Duration] = FailFastFactory.defaultBackoffs)
extends ServiceFactoryProxy(underlying) {
import FailFastFactory._
private[this] val exc = new FailedFastException(s"Endpoint $label is marked down. For more details see: $url")
private[this] val futureExc = Future.exception(exc)
private[this] val markedAvailableCounter = statsReceiver.counter("marked_available")
private[this] val markedDeadCounter = statsReceiver.counter("marked_dead")
private[this] val unhealthyForMsGauge =
statsReceiver.addGauge("unhealthy_for_ms") {
state match {
case r: Retrying => r.since.untilNow.inMilliseconds
case _ => 0
}
}
private[this] val unhealthyNumRetriesGauge =
statsReceiver.addGauge("unhealthy_num_tries") {
state match {
case r: Retrying => r.ntries
case _ => 0
}
}
@volatile private[this] var state: State = Ok
private[this] val update = new Updater[Observation.t] {
def preprocess(elems: Seq[Observation.t]) = elems
def handle(o: Observation.t) = o match {
case Observation.Success if state != Ok =>
val Retrying(_, task, _, _) = state
task.cancel()
markedAvailableCounter.incr()
state = Ok
case Observation.Fail if state == Ok =>
val (wait, rest) = backoffs match {
case Stream.Empty => (Duration.Zero, Stream.empty[Duration])
case wait #:: rest => (wait, rest)
}
val now = Time.now
val task = timer.schedule(now + wait) { this.apply(Observation.Timeout) }
markedDeadCounter.incr()
if (logger.isLoggable(Level.DEBUG))
logger.log(Level.DEBUG, s"""FailFastFactory marking connection to "$label" as dead. Remote Address: ${endpoint.toString}""")
state = Retrying(now, task, 0, rest)
case Observation.Timeout if state != Ok =>
underlying(ClientConnection.nil).respond {
case Throw(_) => this.apply(Observation.TimeoutFail)
case Return(service) =>
this.apply(Observation.Success)
service.close()
}
case Observation.TimeoutFail if state != Ok =>
state match {
case Retrying(_, task, _, Stream.Empty) =>
task.cancel()
// Backoff schedule exhausted. Optimistically become available in
// order to continue trying.
state = Ok
case Retrying(since, task, ntries, wait #:: rest) =>
task.cancel()
val newTask = timer.schedule(Time.now + wait) { this.apply(Observation.Timeout) }
state = Retrying(since, newTask, ntries+1, rest)
case Ok => assert(false)
}
case Observation.Close =>
val oldState = state
state = Ok
oldState match {
case Retrying(_, task, _, _) =>
task.cancel()
case _ =>
}
case _ => ()
}
}
override def apply(conn: ClientConnection): Future[Service[Req, Rep]] = {
if (state != Ok) futureExc else {
underlying(conn).respond {
case Throw(_) => update(Observation.Fail)
case Return(_) if state != Ok => update(Observation.Success)
case _ =>
}
}
}
override def status = state match {
case Ok => underlying.status
case _: Retrying => Status.Busy
}
override val toString = "fail_fast_%s".format(underlying.toString)
override def close(deadline: Time): Future[Unit] = {
update(Observation.Close)
underlying.close(deadline)
}
}
| koshelev/finagle | finagle-core/src/main/scala/com/twitter/finagle/service/FailFastFactory.scala | Scala | apache-2.0 | 7,723 |
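The `FailFast` stack param above is the switch for this module. As a hedged sketch, assuming the standard Finagle `Http.client` configuration API rather than anything specific to this fork, a client with a single backend might turn it off so that the `FailFast(false)` branch of `module` leaves the stack untouched:

```scala
import com.twitter.finagle.Http
import com.twitter.finagle.service.FailFastFactory.FailFast

// With fail-fast disabled, module() returns `next` unchanged, so a lone endpoint
// is never preemptively marked dead between retries.
val client = Http.client
  .configured(FailFast(enabled = false))
  .newService("localhost:8080")
```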
package com.github.chengpohi.parser.impl
/**
* seccrawler
* Created by chengpohi on 12/26/15.
*/
object IteratorHelper {
implicit class PredicateSliceIterator(it: Iterator[String]) {
def sliceByPredicate(start: String => Boolean, end: String => Boolean): Iterator[String] = {
val self = it.buffered
new Iterator[String] {
var sb = new StringBuilder("")
private def findCloseTag(): String = {
sb = new StringBuilder("")
while (self.hasNext && !start(self.head))
self.next()
while (self.hasNext && !end(self.head))
sb.append(self.next())
sb.toString() + self.next()
}
def hasNext = {
while (self.hasNext && !start(self.head))
self.next()
self.hasNext
}
def next() = {
findCloseTag()
}
}
}
}
}
| chengpohi/secer | parser/src/main/scala/com/github/chengpohi/parser/impl/IteratorHelper.scala | Scala | apache-2.0 | 888 |
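A brief usage sketch of `sliceByPredicate`: each call to `next()` concatenates everything from a start marker through the matching end marker into one string. The `<doc>` markers below are illustrative:

```scala
import com.github.chengpohi.parser.impl.IteratorHelper._

val lines = Iterator("noise", "<doc>", "alpha", "beta", "</doc>", "skip", "<doc>", "gamma", "</doc>")

// Lines outside <doc>...</doc> blocks are dropped by hasNext/findCloseTag.
val blocks = lines.sliceByPredicate(_ == "<doc>", _ == "</doc>")
blocks.foreach(println)
// <doc>alphabeta</doc>
// <doc>gamma</doc>
```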
package support
case class State(
data: Data,
logs: List[Log]
)
object State {
def load(): State =
State(
data = Data.load(),
logs = Logger.read())
}
| loicknuchel/scala-class | src/main/scala/support/State.scala | Scala | unlicense | 222 |
package ru.zconstz.simuctor
import akka.actor.{ActorRef, Actor}
import scala.collection.mutable
trait WorldObject
case object Rock extends WorldObject
case object Pit extends WorldObject
trait ActiveWorldObject extends WorldObject {
def actor: ActorRef
}
sealed trait WorldMessage
case class MoveObject(from: (Int, Int), to: (Int, Int)) extends WorldMessage
case class RemoveObject(at: (Int, Int)) extends WorldMessage
case class AddObject(obj: WorldObject, at: (Int, Int)) extends WorldMessage
case object GetWorldStateAsString extends WorldMessage
case class WorldStateAsString(str: String)
class World extends Actor {
val objects: mutable.Map[(Int, Int), WorldObject] = new mutable.HashMap[(Int, Int), WorldObject]()
def receive = {
case AddObject(obj, at) => objects += ((at, obj))
case RemoveObject(at) => objects -= at
case MoveObject(from, to) => for (obj <- objects.remove(from)) objects += ((to, obj))
case GetWorldStateAsString => sender ! WorldStateAsString(objects.toString())
}
}
| konstantin-zlobin/simuctor | src/main/scala/ru/zconstz/simuctor/World.scala | Scala | apache-2.0 | 1,031 |
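A hedged sketch of driving the `World` actor locally; it assumes a reasonably recent Akka (for `system.terminate()`) and uses the message protocol defined above:

```scala
import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.Await
import scala.concurrent.duration._
import ru.zconstz.simuctor._

object WorldDemo extends App {
  implicit val timeout: Timeout = Timeout(3.seconds)
  val system = ActorSystem("sim")
  val world = system.actorOf(Props[World], "world")

  world ! AddObject(Rock, (0, 0))
  world ! AddObject(Pit, (1, 1))
  world ! MoveObject(from = (0, 0), to = (2, 2))

  // GetWorldStateAsString replies with WorldStateAsString(objects.toString())
  val state = Await.result((world ? GetWorldStateAsString).mapTo[WorldStateAsString], 3.seconds)
  println(state.str)
  system.terminate()
}
```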
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.scala.batch.sql
import java.sql.Timestamp
import org.apache.flink.api.scala._
import org.apache.flink.table.api.TableException
import org.apache.flink.table.api.scala._
import org.apache.flink.table.plan.logical._
import org.apache.flink.table.utils.TableTestBase
import org.apache.flink.table.utils.TableTestUtil._
import org.junit.Test
class WindowAggregateTest extends TableTestBase {
@Test
def testNonPartitionedTumbleWindow(): Unit = {
val util = batchTestUtil()
util.addTable[(Int, Long, String, Timestamp)]("T", 'a, 'b, 'c, 'ts)
val sqlQuery =
"SELECT SUM(a) AS sumA, COUNT(b) AS cntB FROM T GROUP BY TUMBLE(ts, INTERVAL '2' HOUR)"
val expected =
unaryNode(
"DataSetWindowAggregate",
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "ts, a, b")
),
term("window", EventTimeTumblingGroupWindow('w$, 'ts, 7200000.millis)),
term("select", "SUM(a) AS sumA, COUNT(b) AS cntB")
)
util.verifySql(sqlQuery, expected)
}
@Test
def testPartitionedTumbleWindow(): Unit = {
val util = batchTestUtil()
util.addTable[(Int, Long, String, Timestamp)]("T", 'a, 'b, 'c, 'ts)
val sqlQuery =
"SELECT " +
" TUMBLE_START(ts, INTERVAL '4' MINUTE), " +
" TUMBLE_END(ts, INTERVAL '4' MINUTE), " +
" c, " +
" SUM(a) AS sumA, " +
" MIN(b) AS minB " +
"FROM T " +
"GROUP BY TUMBLE(ts, INTERVAL '4' MINUTE), c"
val expected =
unaryNode(
"DataSetCalc",
unaryNode(
"DataSetWindowAggregate",
batchTableNode(0),
term("groupBy", "c"),
term("window", EventTimeTumblingGroupWindow('w$, 'ts, 240000.millis)),
term("select", "c, SUM(a) AS sumA, MIN(b) AS minB, " +
"start('w$) AS w$start, end('w$) AS w$end")
),
term("select", "CAST(w$start) AS w$start, CAST(w$end) AS w$end, c, sumA, minB")
)
util.verifySql(sqlQuery, expected)
}
@Test
def testNonPartitionedHopWindow(): Unit = {
val util = batchTestUtil()
util.addTable[(Int, Long, String, Timestamp)]("T", 'a, 'b, 'c, 'ts)
val sqlQuery =
"SELECT SUM(a) AS sumA, COUNT(b) AS cntB " +
"FROM T " +
"GROUP BY HOP(ts, INTERVAL '15' MINUTE, INTERVAL '90' MINUTE)"
val expected =
unaryNode(
"DataSetWindowAggregate",
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "ts, a, b")
),
term("window",
EventTimeSlidingGroupWindow('w$, 'ts, 5400000.millis, 900000.millis)),
term("select", "SUM(a) AS sumA, COUNT(b) AS cntB")
)
util.verifySql(sqlQuery, expected)
}
@Test
def testPartitionedHopWindow(): Unit = {
val util = batchTestUtil()
util.addTable[(Int, Long, String, Long, Timestamp)]("T", 'a, 'b, 'c, 'd, 'ts)
val sqlQuery =
"SELECT " +
" c, " +
" HOP_END(ts, INTERVAL '1' HOUR, INTERVAL '3' HOUR), " +
" HOP_START(ts, INTERVAL '1' HOUR, INTERVAL '3' HOUR), " +
" SUM(a) AS sumA, " +
" AVG(b) AS avgB " +
"FROM T " +
"GROUP BY HOP(ts, INTERVAL '1' HOUR, INTERVAL '3' HOUR), d, c"
val expected =
unaryNode(
"DataSetCalc",
unaryNode(
"DataSetWindowAggregate",
batchTableNode(0),
term("groupBy", "c, d"),
term("window",
EventTimeSlidingGroupWindow('w$, 'ts, 10800000.millis, 3600000.millis)),
term("select", "c, d, SUM(a) AS sumA, AVG(b) AS avgB, " +
"start('w$) AS w$start, end('w$) AS w$end")
),
term("select", "c, CAST(w$end) AS w$end, CAST(w$start) AS w$start, sumA, avgB")
)
util.verifySql(sqlQuery, expected)
}
@Test
def testNonPartitionedSessionWindow(): Unit = {
val util = batchTestUtil()
util.addTable[(Int, Long, String, Timestamp)]("T", 'a, 'b, 'c, 'ts)
val sqlQuery =
"SELECT COUNT(*) AS cnt FROM T GROUP BY SESSION(ts, INTERVAL '30' MINUTE)"
val expected =
unaryNode(
"DataSetWindowAggregate",
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "ts")
),
term("window", EventTimeSessionGroupWindow('w$, 'ts, 1800000.millis)),
term("select", "COUNT(*) AS cnt")
)
util.verifySql(sqlQuery, expected)
}
@Test
def testPartitionedSessionWindow(): Unit = {
val util = batchTestUtil()
util.addTable[(Int, Long, String, Int, Timestamp)]("T", 'a, 'b, 'c, 'd, 'ts)
val sqlQuery =
"SELECT " +
" c, d, " +
" SESSION_START(ts, INTERVAL '12' HOUR), " +
" SESSION_END(ts, INTERVAL '12' HOUR), " +
" SUM(a) AS sumA, " +
" MIN(b) AS minB " +
"FROM T " +
"GROUP BY SESSION(ts, INTERVAL '12' HOUR), c, d"
val expected =
unaryNode(
"DataSetCalc",
unaryNode(
"DataSetWindowAggregate",
batchTableNode(0),
term("groupBy", "c, d"),
term("window", EventTimeSessionGroupWindow('w$, 'ts, 43200000.millis)),
term("select", "c, d, SUM(a) AS sumA, MIN(b) AS minB, " +
"start('w$) AS w$start, end('w$) AS w$end")
),
term("select", "c, d, CAST(w$start) AS w$start, CAST(w$end) AS w$end, sumA, minB")
)
util.verifySql(sqlQuery, expected)
}
@Test(expected = classOf[TableException])
def testTumbleWindowNoOffset(): Unit = {
val util = batchTestUtil()
util.addTable[(Int, Long, String, Timestamp)]("T", 'a, 'b, 'c, 'ts)
val sqlQuery =
"SELECT SUM(a) AS sumA, COUNT(b) AS cntB " +
"FROM T " +
"GROUP BY TUMBLE(ts, INTERVAL '2' HOUR, TIME '10:00:00')"
util.verifySql(sqlQuery, "n/a")
}
@Test(expected = classOf[TableException])
def testHopWindowNoOffset(): Unit = {
val util = batchTestUtil()
util.addTable[(Int, Long, String, Timestamp)]("T", 'a, 'b, 'c, 'ts)
val sqlQuery =
"SELECT SUM(a) AS sumA, COUNT(b) AS cntB " +
"FROM T " +
"GROUP BY HOP(ts, INTERVAL '1' HOUR, INTERVAL '2' HOUR, TIME '10:00:00')"
util.verifySql(sqlQuery, "n/a")
}
@Test(expected = classOf[TableException])
def testSessionWindowNoOffset(): Unit = {
val util = batchTestUtil()
util.addTable[(Int, Long, String, Timestamp)]("T", 'a, 'b, 'c, 'ts)
val sqlQuery =
"SELECT SUM(a) AS sumA, COUNT(b) AS cntB " +
"FROM T " +
"GROUP BY SESSION(ts, INTERVAL '2' HOUR, TIME '10:00:00')"
util.verifySql(sqlQuery, "n/a")
}
@Test(expected = classOf[TableException])
def testVariableWindowSize() = {
val util = batchTestUtil()
util.addTable[(Int, Long, String, Timestamp)]("T", 'a, 'b, 'c, 'ts)
val sql = "SELECT COUNT(*) " +
"FROM T " +
"GROUP BY TUMBLE(proctime(), b * INTERVAL '1' MINUTE)"
util.verifySql(sql, "n/a")
}
}
| hwstreaming/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/scala/batch/sql/WindowAggregateTest.scala | Scala | apache-2.0 | 7,823 |
package com.github.akileev.akka.serial.io
import akka.actor._
import akka.util.ByteString
/**
* Serial port extension based on the rxtx library for the akka IO layer.
*/
object Serial extends ExtensionId[SerialExt] with ExtensionIdProvider {
override def lookup = Serial
override def createExtension(system: ExtendedActorSystem): SerialExt = new SerialExt(system)
/** Messages used by the serial IO. */
sealed trait Message
/** Messages that are sent to the serial port. */
sealed trait Command extends Message
/** Messages received from the serial port. */
sealed trait Event extends Message
case class CommandFailed(command: Command, reason: Throwable)
// Communication with manager
/** Command that may be sent to the manager actor. */
sealed trait ManagerCommand extends Command
/** Command that may be sent to the operator actor. */
sealed trait OperatorCommand extends Command
sealed trait DataBits
object DataBits8 extends DataBits
object DataBits7 extends DataBits
object DataBits6 extends DataBits
object DataBits5 extends DataBits
sealed trait Parity
object NoParity extends Parity
object EvenParity extends Parity
object OddParity extends Parity
object MarkParity extends Parity
object SpaceParity extends Parity
sealed trait StopBits
object OneStopBit extends StopBits
object TwoStopBits extends StopBits
object OneAndHalfStopBits extends StopBits
sealed trait FlowControl
object NoFlowControl extends FlowControl
object RtsFlowControl extends FlowControl
object XonXoffFlowControl extends FlowControl
/** Open a serial port. Response: Opened | CommandFailed */
case class Open(handler: ActorRef,
port: String,
baudRate: Int,
dataBits: DataBits = DataBits8,
parity: Parity = NoParity,
stopBits: StopBits = OneStopBit,
flowControl: FlowControl = NoFlowControl) extends ManagerCommand
/**
* Serial port is now open.
* Communication is handled by the operator actor.
* The sender of the Open message will now receive incoming communication from the
* serial port.
*/
case class Opened(port: String) extends Event
/** List all available serial ports. Response: Ports | CommandFailed */
case object ListPorts extends ManagerCommand
/** Available serial ports. */
case class Ports(ports: Vector[String]) extends Event
// Communication with Operator
/** Request that the operator should close the port. Response: Closed */
case object Close extends OperatorCommand
/** The port was closed. Either by request or by an external event (i.e. unplugging) */
case object Closed extends Event
/** Data was received on the serial port. */
case class Received(data: ByteString) extends Event
/** Write data on the serial port. Response: ack (if ack != NoAck) */
case class Write(data: ByteString, ack: AckEvent = NoAck) extends OperatorCommand
/** Ack for a write. */
trait AckEvent extends Event
/** Special ack event (is not sent). */
object NoAck extends AckEvent
class SerialException(message: String) extends Exception(message)
}
class SerialExt(system: ExtendedActorSystem) extends akka.io.IO.Extension {
lazy val manager = system.actorOf(Props(classOf[SerialManager]), name = "IO-SERIAL")
}
| akileev/akka-serial-io | src/main/scala/com/github/akileev/akka/serial/io/Serial.scala | Scala | apache-2.0 | 3,366 |
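A hedged sketch of a handler actor talking to the extension's manager; the device path and baud rate are placeholders, and the manager/operator behaviour is assumed from the message documentation above:

```scala
import akka.actor.{Actor, ActorSystem, Props}
import akka.io.IO
import com.github.akileev.akka.serial.io.Serial
import com.github.akileev.akka.serial.io.Serial._

class SerialHandler extends Actor {
  // Response per the docs above: Opened | CommandFailed. Incoming data then arrives as Received.
  IO(Serial)(context.system) ! Open(handler = self, port = "/dev/ttyUSB0", baudRate = 115200)

  def receive = {
    case Opened(port)            => println(s"$port is open; the sender is the operator")
    case Received(data)          => println(s"got ${data.length} bytes")
    case CommandFailed(cmd, why) => println(s"$cmd failed: $why"); context.stop(self)
    case Closed                  => context.stop(self)
  }
}

object SerialDemo extends App {
  val system = ActorSystem("serial")
  system.actorOf(Props[SerialHandler], "handler")
}
```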
package dhg.ccg.tag.learn
import dhg.util._
import scalaz._
import scalaz.Scalaz._
import dhg.ccg.prob._
import dhg.ccg.tagdict.TagDictionary
import com.typesafe.scalalogging.slf4j.{ StrictLogging => Logging }
/**
* Turn raw sentences and a tagdict into P(Tag->Tag)
*/
trait TransitionInitializer[Tag] {
type Word = String
final def fromRaw(sentences: Vector[Vector[Word]], initialTagdict: TagDictionary[Tag]): ConditionalLogProbabilityDistribution[Tag, Tag] = {
val tagdict = initialTagdict.withWords(sentences.flatten.toSet)
fromKnownSupertagSets(sentences.map(_.mapTo(tagdict.entries.getOrElse(_, Set.empty))), tagdict)
}
/**
* Each token associated with its set of possible supertags, if such a set is KNOWN; if the set is unknown, it will be EMPTY.
*/
def fromKnownSupertagSets(sentences: Vector[Vector[(Word, Set[Tag])]], initialTagdict: TagDictionary[Tag]): ConditionalLogProbabilityDistribution[Tag, Tag]
}
class TrUniform[Tag]() extends TransitionInitializer[Tag] {
def fromKnownSupertagSets(sentences: Vector[Vector[(Word, Set[Tag])]], initialTagdict: TagDictionary[Tag]) = {
val allTags = Some(initialTagdict.allTags)
val startTag = Some(initialTagdict.excludedTags + initialTagdict.startTag)
new SimpleConditionalLogProbabilityDistribution(
Map(
initialTagdict.startTag -> new LaplaceLogProbabilityDistribution(Map.empty[Tag, LogDouble], allTags, Some(initialTagdict.excludedTags + initialTagdict.startTag + initialTagdict.endTag), LogDouble(1.0)),
initialTagdict.endTag -> LogProbabilityDistribution.empty[Tag]),
new LaplaceLogProbabilityDistribution(Map.empty[Tag, LogDouble], Some(initialTagdict.allTags + initialTagdict.endTag), startTag, LogDouble(1.0)),
Some(initialTagdict.excludedTags + initialTagdict.endTag))
}
override def toString = f"TrUniform()"
}
class TrCheat[Tag](
taggedSentences: Vector[Vector[(String, Tag)]],
distributioner: TransitionDistributioner[Tag]) extends TransitionInitializer[Tag] {
def fromKnownSupertagSets(sentences: Vector[Vector[(Word, Set[Tag])]], initialTagdict: TagDictionary[Tag]) = {
val tagdict =
initialTagdict
.withWords(sentences.flatten.map(_._1).toSet ++ taggedSentences.flatten.map(_._1))
.withTags(sentences.flatten.flatMap(_._2).toSet ++ taggedSentences.flatten.map(_._2).toSet)
// ignore `sentences` since, presumably, it's either included (in tagged form) in `taggedSentences` or of no use.
distributioner(taggedSentences, tagdict)
}
override def toString = f"TrCheat($distributioner)"
}
/**
* tdcutoff = 0.1, lambda = 0.13
* tdcutoff = 0.01, lambda = 0.07
* tdcutoff = 0.001, lambda = 0.06
* tdcutoff = 0.0, lambda = 0.05
*/
class TrTagDictEntriesPossibilities[Tag](distributioner: TransitionDistributioner[Tag]) extends TransitionInitializer[Tag] /*with Logging*/ {
def fromKnownSupertagSets(sentences: Vector[Vector[(Word, Set[Tag])]], initialTagdict: TagDictionary[Tag]) = {
val tagdict = initialTagdict.withWords(sentences.flatten.map(_._1).toSet).withTags(sentences.flatten.flatMap(_._2).toSet)
//logger.info("TrTagDictEntriesPossibilities => sentenceSets")
val sentenceSets = sentences.map(_.map(_._2))
//logger.info("TrTagDictEntriesPossibilities => trCounts")
val potentialTransitions =
for {
s <- sentenceSets
(as, bs) <- (Set(tagdict.startTag) +: s :+ Set(tagdict.endTag)).sliding2
c = 1.0 / (as.size * bs.size)
a <- as; b <- bs
} yield {
(a, (b, c))
}
val trCounts = potentialTransitions.groupByKey.mapVals(_.groupByKey.mapVals(_.sum))
//logger.info("TrTagDictEntriesPossibilities => tagCounts")
val tagCounts = sentenceSets.flatten.flatMap(ts => ts.mapToVal(1.0 / ts.size)).groupByKey.mapVals(_.sum)
//logger.info("TrTagDictEntriesPossibilities => make distribution")
distributioner.make(trCounts.mapVals(_.mapVals(LogDouble(_))), tagCounts.mapVals(LogDouble(_)), tagdict)
}
override def toString = f"TrTagDictEntriesPossibilities($distributioner)"
}
/**
*
*/
class InterpolatingTransitionInitializer[Tag](delegates: Vector[(TransitionInitializer[Tag], Double)]) extends TransitionInitializer[Tag] {
def fromKnownSupertagSets(sentences: Vector[Vector[(Word, Set[Tag])]], initialTagdict: TagDictionary[Tag]) = {
val tagdict = initialTagdict.withWords(sentences.flatten.map(_._1).toSet).withTags(sentences.flatten.flatMap(_._2).toSet)
new InterpolatingConditionalLogProbabilityDistribution(
delegates.map { case (i, p) => i.fromKnownSupertagSets(sentences, tagdict) -> LogDouble(p) })
}
override def toString = f"InterpolatingInitializer(${delegates.map { case (i, w) => f"$i -> $w" }.mkString(", ")})"
}
//
//
//
trait EmissionInitializer[Tag] {
type Word = String
final def fromRaw(sentences: Vector[Vector[Word]], initialTagdict: TagDictionary[Tag]): ConditionalLogProbabilityDistribution[Tag, Word] = {
val tagdict = initialTagdict.withWords(sentences.flatten.toSet)
fromKnownSupertagSets(sentences.map(_.mapTo(tagdict.entries.getOrElse(_, Set.empty))), tagdict)
}
/**
* Each token associated with its set of possible supertags, if such a set is KNOWN; if the set is unknown, it will be EMPTY.
*/
def fromKnownSupertagSets(sentences: Vector[Vector[(Word, Set[Tag])]], initialTagdict: TagDictionary[Tag]): ConditionalLogProbabilityDistribution[Tag, Word]
}
class EmUniform[Tag]() extends EmissionInitializer[Tag] {
def fromKnownSupertagSets(sentences: Vector[Vector[(Word, Set[Tag])]], initialTagdict: TagDictionary[Tag]): ConditionalLogProbabilityDistribution[Tag, Word] = {
val tagdict = initialTagdict.withWords(sentences.flatten.map(_._1).toSet).withTags(sentences.flatten.flatMap(_._2).toSet)
val knownWordsForTag = tagdict.entries.ungroup.map(_.swap).groupByKey.mapVals(_.toSet).withDefaultValue(Set.empty)
val knownWords = knownWordsForTag.flatMap(_._2).toSet
val allWordsSet = Some(tagdict.allWords)
new SimpleConditionalLogProbabilityDistribution(
tagdict.allTags.mapTo(t => new LaplaceLogProbabilityDistribution(Map(), allWordsSet, /*Some(knownWords -- knownWordsForTag(t) +*/ Some(Set(tagdict.startWord, tagdict.endWord)), LogDouble(1.0))).toMap +
(tagdict.startTag -> new SimpleLogProbabilityDistribution(Map(tagdict.startWord -> LogDouble.one))) +
(tagdict.endTag -> new SimpleLogProbabilityDistribution(Map(tagdict.endWord -> LogDouble.one))),
new LaplaceLogProbabilityDistribution(Map(), allWordsSet, /*Some(knownWords +*/ Some(Set(tagdict.startWord, tagdict.endWord)), LogDouble(1.0)),
Some(tagdict.excludedTags))
}
override def toString = f"EmUniform()"
}
class EmCheat[Tag](
taggedSentences: Vector[Vector[(String, Tag)]],
val distributioner: EmissionDistributioner[Tag])
extends EmissionInitializer[Tag] {
def fromKnownSupertagSets(sentences: Vector[Vector[(Word, Set[Tag])]], initialTagdict: TagDictionary[Tag]) = {
val tagdict =
initialTagdict
.withWords(sentences.flatten.map(_._1).toSet ++ taggedSentences.flatten.map(_._1))
.withTags(sentences.flatten.flatMap(_._2).toSet ++ taggedSentences.flatten.map(_._2).toSet)
// ignore `sentences` since, presumably, it's included (in tagged form) in `taggedSentences`
distributioner(taggedSentences, tagdict)
}
override def toString = f"EmCheat($distributioner)"
}
class TagDictionaryEstimateTagPriorInitializer[Tag](tdCountLambda: Double = 0.26) extends TagPriorInitializer[Tag] {
def fromKnownSupertagSets(sentences: Vector[Vector[(Word, Set[Tag])]], initialTagdict: TagDictionary[Tag]) = {
val tagdict = initialTagdict.withWords(sentences.flatten.map(_._1).toSet).withTags(sentences.flatten.flatMap(_._2).toSet)
// TODO: BEGIN DUPLICATED
val sentenceCk = sentences.flatten.flatMap {
case (w, ts) =>
val partialCounts = 1.0 / ts.size.toDouble
ts.mapTo(t => w -> partialCounts)
}
val smoothingCk = tagdict.entries.toVector.flatMap {
case (w, ts) =>
val partialCounts = tdCountLambda / ts.size.toDouble
ts.mapTo(t => w -> partialCounts)
}
val C_k = (sentenceCk ++ smoothingCk).groupByKey.mapVals(_.groupByKey.mapVals(_.sum))
// TODO: END DUPLICATED
// p(t) = sum_w' C_k(t,w') / Z
new LaplaceLogProbabilityDistribution(tagdict.allTags.mapTo(t => LogDouble(C_k(t).values.sum)).toMap, None, Some(tagdict.excludedTags), LogDouble(0.0))
}
}
class EmTagDictionaryEstimateFromTagPrior[Tag](tagPrior: LogProbabilityDistribution[Tag], lambda: Double = 0.04, tdCountLambda: Double = 0.26, combineKU: Boolean = false) extends EmissionInitializer[Tag] {
def fromKnownSupertagSets(sentences: Vector[Vector[(Word, Set[Tag])]], initialTagdict: TagDictionary[Tag]): SimpleConditionalLogProbabilityDistribution[Tag, Word] = {
val tagdict = initialTagdict.withWords(sentences.flatten.map(_._1).toSet).withTags(sentences.flatten.flatMap(_._2).toSet)
/* We normally smooth emissions from C(t,w)
* p(w|t) = C(t,w) / C(t)
*
* C(t,w) comes in two varieties:
*
* 1. if w is in the TD: C_k(t,w)
* - if t in TD(w): C_k(t,w) = C(w) / |TD(w)|
* else: C_k(t,w) = 0
*
* 2. if w is not in the TD: C_u(t,w) (td-unknown)
* - C_u(t,w) = C(w) * p(t|unk) (divide the counts of w among all tags according
* to likelihood of t given that w is unknown)
* - p(t|unk) = p(unk|t) * p(t) / Z (Bayes rule)
* - p(t) = sum_w' C_k(t,w') / Z
* - p(unk|t) = C(t,unk) / Z XX
* = |TD(t)| / (sum_t' |TD(t')| for known words only)
*
* p(w|t) = (C_k(t,w) + C_u(t,w)) / Z
*
*/
val rawSentences = sentences.map(_.map(_._1))
val sentenceWords = rawSentences.flatten.toSet
val knownWordsForTag = tagdict.knownWordsForTag
val knownWords = knownWordsForTag.flatMap(_._2).toSet
val C = rawSentences.flatten.counts // C(w)
// C_k(t)(w)
// TODO: BEGIN DUPLICATED
val sentenceCk = sentences.flatten.flatMap {
case (w, ts) =>
val partialCounts = 1.0 / ts.size.toDouble
ts.mapTo(t => w -> partialCounts)
}
val smoothingCk = tagdict.entries.toVector.flatMap {
case (w, ts) =>
val partialCounts = tdCountLambda / ts.size.toDouble
ts.mapTo(t => w -> partialCounts)
}
val C_k = (sentenceCk ++ smoothingCk).groupByKey.mapVals(_.groupByKey.mapVals(cs => LogDouble(cs.sum)))
// TODO: END DUPLICATED
val td_unknown_words = sentenceWords -- knownWords
// p(t)
val p = tagPrior
// p(unk|t) = |TD(t)| / (sum_t' |TD(t')| for known words only)
val `p(unk|t)` = tagdict.allTags.mapTo(t => LogDouble(knownWordsForTag(t).size + 0.001)).toMap.normalizeValues
// p(t|unk) = p(unk|t) * p(t) / Z
val `p(t|unk)` = tagdict.allTags.mapTo(t => `p(unk|t)`(t) * p(t)).toMap.normalizeValues
// C_u(t)(w) = C(w) * p(t|unk)
val C_u =
tagdict.allTags.mapTo { t =>
td_unknown_words.mapTo { w =>
LogDouble(C(w)) * `p(t|unk)`(t)
}.toMap
}.toMap
val C_ku = tagdict.allTags.mapTo { t =>
val cut = C_u(t)
val ckt = C_k(t)
if (combineKU)
cut |+| ckt
else
cut ++ ckt
}.toMap
//TODO: CHECK THAT EVERY TD-VALID ENTRY HAS A NON-ZERO PROBABILITY
for (w <- sentenceWords; t <- tagdict(w)) {
assert(C_ku.contains(t), f"C_ku doesn't contain t=$t")
assert(C_ku(t).contains(w), f"C_ku(t=$t) doesn't contain w=$w")
assert(C_ku(t)(w) > LogDouble.zero, f"C_ku(t=$t)(w=$w) = ${C_ku(t)(w)}")
}
val allWordsSet = Some(tagdict.allWords)
new SimpleConditionalLogProbabilityDistribution(
C_ku.mapt((t, counts) => t -> new LaplaceLogProbabilityDistribution(counts.mapVals(LogDouble(_)), allWordsSet, /*Some(knownWords -- knownWordsForTag(t) +*/ Some(Set(tagdict.startWord, tagdict.endWord)), LogDouble(lambda))).toMap +
(tagdict.startTag -> new SimpleLogProbabilityDistribution(Map(tagdict.startWord -> LogDouble.one))) +
(tagdict.endTag -> new SimpleLogProbabilityDistribution(Map(tagdict.endWord -> LogDouble.one))),
new LaplaceLogProbabilityDistribution(C_ku.values.reduce(_ |+| _).mapVals(LogDouble(_)), allWordsSet, /*Some(knownWords*/ Some(Set(tagdict.startWord, tagdict.endWord)), LogDouble(lambda)),
Some(tagdict.excludedTags))
}
override def toString = f"EmTagDictionaryEstimate($lambda, $tdCountLambda, $combineKU)"
}
class EmTagDictionaryEstimate[Tag](tagPriorInit: TagPriorInitializer[Tag], lambda: Double = 0.04, tdCountLambda: Double = 0.26, combineKU: Boolean = false) extends EmissionInitializer[Tag] {
def fromKnownSupertagSets(sentences: Vector[Vector[(Word, Set[Tag])]], initialTagdict: TagDictionary[Tag]): SimpleConditionalLogProbabilityDistribution[Tag, Word] = {
val tagdict = initialTagdict.withWords(sentences.flatten.map(_._1).toSet).withTags(sentences.flatten.flatMap(_._2).toSet)
val tagPrior = tagPriorInit.fromKnownSupertagSets(sentences, tagdict)
val emPriorInit = new EmTagDictionaryEstimateFromTagPrior(tagPrior, lambda, tdCountLambda, combineKU)
emPriorInit.fromKnownSupertagSets(sentences, tagdict)
}
override def toString = f"EmTagDictionaryEstimate($lambda, $tdCountLambda, $combineKU)"
}
| dhgarrette/2015-ccg-parsing | src/main/scala/dhg/ccg/tag/learn/TypesupHmmInitialization.scala | Scala | apache-2.0 | 13,429 |
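As a hedged sketch of how the pieces above compose, a transition distribution could be initialized by interpolating the tag-dictionary-based estimate with a uniform fallback; the `TransitionDistributioner` and `TagDictionary` instances are assumed to come from elsewhere in the project:

```scala
import dhg.ccg.prob.ConditionalLogProbabilityDistribution
import dhg.ccg.tag.learn._
import dhg.ccg.tagdict.TagDictionary

def initTransitions(
    rawSentences: Vector[Vector[String]],
    tagdict: TagDictionary[String],
    distributioner: TransitionDistributioner[String]): ConditionalLogProbabilityDistribution[String, String] = {
  // 90% weight on tag-dictionary co-occurrence possibilities, 10% on the uniform prior.
  val init = new InterpolatingTransitionInitializer[String](Vector(
    new TrTagDictEntriesPossibilities(distributioner) -> 0.9,
    new TrUniform[String]() -> 0.1))
  init.fromRaw(rawSentences, tagdict)
}
```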
/* Title: Pure/Tools/spell_checker.scala
Author: Makarius
Spell checker with completion, based on JOrtho (see
https://sourceforge.net/projects/jortho).
*/
package isabelle
import java.lang.Class
import scala.collection.mutable
import scala.annotation.tailrec
import scala.collection.immutable.SortedMap
object Spell_Checker
{
/* words within text */
def marked_words(base: Text.Offset, text: String, mark: Text.Info[String] => Boolean)
: List[Text.Info[String]] =
{
val result = new mutable.ListBuffer[Text.Info[String]]
var offset = 0
def apostrophe(c: Int): Boolean =
c == '\'' && (offset + 1 == text.length || text(offset + 1) != '\'')
@tailrec def scan(pred: Int => Boolean)
{
if (offset < text.length) {
val c = text.codePointAt(offset)
if (pred(c)) {
offset += Character.charCount(c)
scan(pred)
}
}
}
while (offset < text.length) {
scan(c => !Character.isLetter(c))
val start = offset
scan(c => Character.isLetterOrDigit(c) || apostrophe(c))
val stop = offset
if (stop - start >= 2) {
val info = Text.Info(Text.Range(base + start, base + stop), text.substring(start, stop))
if (mark(info)) result += info
}
}
result.toList
}
def current_word(rendering: Rendering, range: Text.Range): Option[Text.Info[String]] =
{
for {
spell_range <- rendering.spell_checker_point(range)
text <- rendering.model.get_text(spell_range)
info <- marked_words(spell_range.start, text, info => info.range.overlaps(range)).headOption
} yield info
}
/* dictionaries */
class Dictionary private[Spell_Checker](val path: Path)
{
val lang = path.drop_ext.file_name
val user_path = Path.explode("$ISABELLE_HOME_USER/dictionaries") + Path.basic(lang)
override def toString: String = lang
}
private object Decl
{
def apply(name: String, include: Boolean): String =
if (include) name else "-" + name
def unapply(decl: String): Option[(String, Boolean)] =
{
val decl1 = decl.trim
if (decl1 == "" || decl1.startsWith("#")) None
else
Library.try_unprefix("-", decl1.trim) match {
case None => Some((decl1, true))
case Some(decl2) => Some((decl2, false))
}
}
}
def dictionaries(): List[Dictionary] =
for {
path <- Path.split(Isabelle_System.getenv("JORTHO_DICTIONARIES"))
if path.is_file
} yield new Dictionary(path)
/* create spell checker */
def apply(dictionary: Dictionary): Spell_Checker = new Spell_Checker(dictionary)
private sealed case class Update(include: Boolean, permanent: Boolean)
}
class Spell_Checker private(dictionary: Spell_Checker.Dictionary)
{
override def toString: String = dictionary.toString
/* main dictionary content */
private var dict = new Object
private var updates = SortedMap.empty[String, Spell_Checker.Update]
private def included_iterator(): Iterator[String] =
for {
(word, upd) <- updates.iterator
if upd.include
} yield word
private def excluded(word: String): Boolean =
updates.get(word) match {
case Some(upd) => !upd.include
case None => false
}
private def load()
{
val main_dictionary = split_lines(File.read_gzip(dictionary.path))
val permanent_updates =
if (dictionary.user_path.is_file)
for {
Spell_Checker.Decl(word, include) <- split_lines(File.read(dictionary.user_path))
} yield (word, Spell_Checker.Update(include, true))
else Nil
updates =
updates -- (for ((name, upd) <- updates.iterator; if upd.permanent) yield name) ++
permanent_updates
val factory_class = Class.forName("com.inet.jortho.DictionaryFactory")
val factory_cons = factory_class.getConstructor()
factory_cons.setAccessible(true)
val factory = factory_cons.newInstance()
val add = Untyped.method(factory_class, "add", classOf[String])
for {
word <- main_dictionary.iterator ++ included_iterator()
if !excluded(word)
} add.invoke(factory, word)
dict = Untyped.method(factory_class, "create").invoke(factory)
}
load()
private def save()
{
val permanent_decls =
(for {
(word, upd) <- updates.iterator
if upd.permanent
} yield Spell_Checker.Decl(word, upd.include)).toList
if (permanent_decls.nonEmpty || dictionary.user_path.is_file) {
val header = """# User updates for spell-checker dictionary
#
# * each line contains at most one word
# * extra blanks are ignored
# * lines starting with "#" are stripped
# * lines starting with "-" indicate excluded words
#
#:mode=text:encoding=UTF-8:
"""
Isabelle_System.mkdirs(dictionary.user_path.expand.dir)
File.write(dictionary.user_path, header + cat_lines(permanent_decls))
}
}
def update(word: String, include: Boolean, permanent: Boolean)
{
updates += (word -> Spell_Checker.Update(include, permanent))
if (include) {
if (permanent) save()
Untyped.method(dict.getClass, "add", classOf[String]).invoke(dict, word)
}
else { save(); load() }
}
def reset()
{
updates = SortedMap.empty
load()
}
def reset_enabled(): Int =
updates.valuesIterator.filter(upd => !upd.permanent).length
/* check known words */
def contains(word: String): Boolean =
Untyped.method(dict.getClass.getSuperclass, "exist", classOf[String]).
invoke(dict, word).asInstanceOf[java.lang.Boolean].booleanValue
def check(word: String): Boolean =
word match {
case Word.Case(c) if c != Word.Lowercase =>
contains(word) || contains(Word.lowercase(word))
case _ =>
contains(word)
}
def marked_words(base: Text.Offset, text: String): List[Text.Info[String]] =
Spell_Checker.marked_words(base, text, info => !check(info.info))
/* completion: suggestions for unknown words */
private def suggestions(word: String): Option[List[String]] =
{
val res =
Untyped.method(dict.getClass.getSuperclass, "searchSuggestions", classOf[String]).
invoke(dict, word).asInstanceOf[java.util.List[AnyRef]].toArray.toList.map(_.toString)
if (res.isEmpty) None else Some(res)
}
def complete(word: String): List[String] =
if (check(word)) Nil
else {
val word_case = Word.Case.unapply(word)
def recover_case(s: String) =
word_case match {
case Some(c) => Word.Case(c, s)
case None => s
}
val result =
word_case match {
case Some(c) if c != Word.Lowercase =>
suggestions(word) orElse suggestions(Word.lowercase(word))
case _ =>
suggestions(word)
}
result.getOrElse(Nil).map(recover_case)
}
def completion(rendering: Rendering, caret: Text.Offset): Option[Completion.Result] =
{
val caret_range = rendering.before_caret_range(caret)
for {
word <- Spell_Checker.current_word(rendering, caret_range)
words = complete(word.info)
if words.nonEmpty
descr = "(from dictionary " + quote(dictionary.toString) + ")"
items =
words.map(w => Completion.Item(word.range, word.info, "", List(w, descr), w, 0, false))
} yield Completion.Result(word.range, word.info, false, items)
}
}
class Spell_Checker_Variable
{
private val no_spell_checker: (String, Option[Spell_Checker]) = ("", None)
private var current_spell_checker = no_spell_checker
def get: Option[Spell_Checker] = synchronized { current_spell_checker._2 }
def update(options: Options): Unit = synchronized {
if (options.bool("spell_checker")) {
val lang = options.string("spell_checker_dictionary")
if (current_spell_checker._1 != lang) {
Spell_Checker.dictionaries.find(_.lang == lang) match {
case Some(dictionary) =>
val spell_checker =
Exn.capture { Spell_Checker(dictionary) } match {
case Exn.Res(spell_checker) => Some(spell_checker)
case Exn.Exn(_) => None
}
current_spell_checker = (lang, spell_checker)
case None =>
current_spell_checker = no_spell_checker
}
}
}
else current_spell_checker = no_spell_checker
}
}
| larsrh/libisabelle | modules/pide/2019-RC4/src/main/scala/Tools/spell_checker.scala | Scala | apache-2.0 | 8,377 |
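A small sketch of the pure helper `Spell_Checker.marked_words`, which needs no dictionary; the word set below stands in for a `check`-style predicate, and `Text.Info`/`Text.Range` come from the surrounding Isabelle/PIDE codebase:

```scala
// Marks every word (length >= 2) that the predicate flags as unknown.
val text = "speling errors aren't alwayss obvious"
val known = Set("errors", "aren't", "obvious")

val marked = Spell_Checker.marked_words(0, text, info => !known(info.info))
marked.foreach(w => println(w.range.toString + ": " + w.info))
// expected to flag "speling" and "alwayss" together with their offsets
```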
package refpay
//
//import unfiltered.filter.Plan
//import unfiltered.request._
//import unfiltered.response._
/**
*
*/
//class RefPayPlan extends Plan{
//
// override def intent = {
// case r@_ => {
// Ok ~> Html5(<p>Her kommer en ny tjeneste for dommere</p>)
// }
// }
//}
| magott/refpay | src/main/scala/refpay/RefPayPlan.scala | Scala | apache-2.0 | 293 |
class Test {
object Foo { def unapply(x: Int) = if (x == 2) Some(x.toString) else None }
def test: Unit = {
val a: PartialFunction[Int, String] = { case Foo(x) => x }
val b: PartialFunction[Int, String] = { case x => x.toString }
val e: PartialFunction[String, String] = { case x @ "abc" => x }
val f: PartialFunction[String, String] = x => x match { case "abc" => x }
val g: PartialFunction[String, String] = x => x match { case "abc" if x.isEmpty => x }
type P = PartialFunction[String,String]
val h: P = { case x => x.toString }
val i: PartialFunction[Int, Int] = { x => x match { case x => x } }
}
}
| som-snytt/dotty | tests/pos/i4177.scala | Scala | apache-2.0 | 647 |
package io.fintrospect
import com.twitter.finagle.http.Status
import com.twitter.io.Buf.ByteArray.Shared.extract
import io.fintrospect.formats.{Argo, JsonLibrary}
import io.fintrospect.parameters.BodySpec
import scala.util.Try
import scala.xml.Elem
/**
* Defines a potential response from a route, with a possible example
*/
class ResponseSpec private[fintrospect](statusAndDescription: (Status, String), val example: Option[String] = None) {
val status = statusAndDescription._1
val description = statusAndDescription._2
}
object ResponseSpec {
def json[T](statusAndDescription: (Status, String), example: T, jsonLib: JsonLibrary[T, _] = Argo): ResponseSpec =
ResponseSpec(statusAndDescription, example, BodySpec.json(jsonLib))
def xml(statusAndDescription: (Status, String), example: Elem): ResponseSpec =
ResponseSpec(statusAndDescription, example, BodySpec.xml())
def apply(statusAndDescription: (Status, String)): ResponseSpec = new ResponseSpec(statusAndDescription)
def apply[T](statusAndDescription: (Status, String), example: T, bodySpec: BodySpec[T]): ResponseSpec =
new ResponseSpec(statusAndDescription, Try(new String(extract(bodySpec.serialize(example)))).toOption)
}
| daviddenton/fintrospect | core/src/main/scala/io/fintrospect/ResponseSpec.scala | Scala | apache-2.0 | 1,216 |
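A hedged usage sketch: declaring the documented responses of a route. The status/description pairs and the XML payload are placeholders:

```scala
import com.twitter.finagle.http.Status
import io.fintrospect.ResponseSpec

// The example body is serialized through BodySpec.xml() and kept as an Option[String].
val ok       = ResponseSpec.xml(Status.Ok -> "widget found", <widget id="1"/>)
val notFound = ResponseSpec(Status.NotFound -> "no such widget")

println(s"${ok.status} ${ok.description} ${ok.example}")
println(notFound.example) // None: no example supplied
```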
package kvstore
import akka.actor.Props
import akka.actor.Actor
import akka.actor.ActorRef
import scala.concurrent.duration._
import akka.actor.ReceiveTimeout
object Replicator {
case class Replicate(key: String, valueOption: Option[String], id: Long)
case class Replicated(key: String, id: Long)
case class Snapshot(key: String, valueOption: Option[String], seq: Long)
case class SnapshotAck(key: String, seq: Long)
def props(replica: ActorRef): Props = Props(new Replicator(replica))
}
class Replicator(val replica: ActorRef) extends Actor {
import Replicator._
import Replica._
import context.dispatcher
/*
* The contents of this actor is just a suggestion, you can implement it in any way you like.
*/
// map from sequence number to pair of sender and request
var acks = Map.empty[Long, (ActorRef, Replicate)]
// a sequence of not-yet-sent snapshots (you can disregard this if not implementing batching)
var pending = Vector.empty[Snapshot]
context.setReceiveTimeout(200 milliseconds)
var _seqCounter = 0L
def nextSeq = {
val ret = _seqCounter
_seqCounter += 1
ret
}
/* TODO Behavior for the Replicator. */
def receive: Receive = {
case Replicate(key: String, valueOption: Option[String], id: Long) => {
val seq = nextSeq
replica ! Snapshot(key, valueOption, seq)
acks += seq -> (sender, Replicate(key, valueOption, id))
}
case ReceiveTimeout => {
// resend all unacks
acks map {
case (seq, (sender, req)) => replica ! Snapshot(req.key, req.valueOption, seq)
}
}
case SnapshotAck(key: String, seq: Long) => {
println("snapshotAck: by seq: " + seq)
val pair = acks(seq)
pair._1 ! Replicated(key, pair._2.id)
acks -= seq
context.setReceiveTimeout(200 milliseconds)
}
case _ =>
}
}
| ByzanTine/Coursera-Scala | kvstore/src/main/scala/kvstore/Replicator.scala | Scala | mit | 1,880 |
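A hedged sketch exercising the Replicator in isolation: the stand-in replica below immediately acknowledges every `Snapshot`, so the primary gets a `Replicated` back; the real `Replica` actor from the kvstore assignment is not shown:

```scala
import akka.actor.{Actor, ActorSystem, Props}
import kvstore.Replicator
import kvstore.Replicator._

// Stand-in for the real Replica: acks every snapshot straight away.
class AckingReplica extends Actor {
  def receive = {
    case Snapshot(key, _, seq) => sender ! SnapshotAck(key, seq)
  }
}

class Primary extends Actor {
  private val replica = context.actorOf(Props[AckingReplica])
  private val replicator = context.actorOf(Replicator.props(replica))
  replicator ! Replicate("k1", Some("v1"), id = 42L)

  def receive = {
    case Replicated(key, id) => println(s"replicated $key (id=$id)")
  }
}

object ReplicatorDemo extends App {
  ActorSystem("kv").actorOf(Props[Primary])
}
```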
/* Copyright 2014, 2015 Richard Wiedenhöft <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xyz.wiedenhoeft.scalacrypt
import scala.util.{ Try, Success, Failure }
import iteratees._
import scala.concurrent.{ Promise, Future }
/** Base class for Keyed hash (Message Authentication Code) implementations. */
trait KeyedHash[KeyType <: Key] {
/** Returns an iteratee calculating the MAC. */
def apply(key: KeyType): Try[Iteratee[Seq[Byte], Seq[Byte]]]
/** Calculates the MAC. */
def apply(key: KeyType, data: Seq[Byte]): Try[Seq[Byte]] = apply(key) flatMap {
_.fold(Element(data)).run
}
/** Takes an iterator of data and returns a future containing the hash and an identical iterator */
def apply(key: KeyType, data: Iterator[Seq[Byte]]): Try[(Iterator[Seq[Byte]], Future[Seq[Byte]])] = {
val promise = Promise[Seq[Byte]]
val iteratorTry = apply(key) map { initIteratee ⇒
new Iterator[Seq[Byte]] {
var iteratee = initIteratee
def hasNext = data.hasNext
def next = {
val chunk = data.next
iteratee = iteratee.fold(Element(chunk))
if (!data.hasNext) {
promise.complete(iteratee.run)
}
chunk
}
}
}
iteratorTry map { iterator ⇒
(iterator, promise.future)
}
}
def verify(key: KeyType, hash: Seq[Byte]): Try[Iteratee[Seq[Byte], Boolean]]
def verify(key: KeyType, data: Seq[Byte], hash: Seq[Byte]): Try[Boolean] = verify(key, hash) flatMap {
_.fold(Element(data)).run
}
/** The length in bytes of the MAC. */
def length: Int
}
| Richard-W/scalacrypt | src/main/scala/KeyedHash.scala | Scala | apache-2.0 | 2,144 |
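A hedged sketch of consuming the trait generically; the concrete MAC (for example an HMAC implementation elsewhere in scalacrypt) and its key type are assumed rather than shown:

```scala
import scala.util.Try
import xyz.wiedenhoeft.scalacrypt._

// Compute a MAC with the convenience overload, then check it with verify.
def macAndCheck[K <: Key](mac: KeyedHash[K], key: K, data: Seq[Byte]): Try[Boolean] =
  for {
    tag <- mac(key, data)             // Try[Seq[Byte]]
    ok  <- mac.verify(key, data, tag) // true for an untampered message
  } yield ok
```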
package org.biosys.affy.models
import play.api.libs.json.Json
case class AffyDbRef(db:String, acc:String) {
}
object AffyDbRef {
implicit val affyDbRefFormat = Json.format[AffyDbRef]
}
| sdor/biosys | affy/src/main/scala/org/biosys/affy/models/AffyDbRef.scala | Scala | gpl-2.0 | 191 |
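The implicit `Json.format` above gives a JSON round trip for free; the database and accession values here are placeholders:

```scala
import play.api.libs.json.Json
import org.biosys.affy.models.AffyDbRef

val ref = AffyDbRef(db = "GenBank", acc = "X12345")
val js = Json.toJson(ref)        // {"db":"GenBank","acc":"X12345"}
println(js.as[AffyDbRef] == ref) // true: the same Format reads it back
```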
/*
* Copyright 2013 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.core
import com.typesafe.scalalogging.slf4j.Logging
import org.apache.accumulo.core.data.{Key, Value}
import org.geotools.factory.Hints.{ClassKey, IntegerKey}
import org.geotools.filter.identity.FeatureIdImpl
import org.geotools.geometry.jts.ReferencedEnvelope
import org.joda.time.DateTime
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.simple.SimpleFeatureType
import org.opengis.filter.identity.FeatureId
/**
* These are package-wide constants.
*/
package object index {
val MIN_DATE = new DateTime(Long.MinValue)
val MAX_DATE = new DateTime(Long.MaxValue)
val SF_PROPERTY_GEOMETRY = "geomesa_index_geometry"
val SF_PROPERTY_START_TIME = "geomesa_index_start_time"
val SF_PROPERTY_END_TIME = "geomesa_index_end_time"
// wrapping function in option to protect against incorrect values in SF_PROPERTY_START_TIME
def getDtgFieldName(sft: SimpleFeatureType) =
for {
nameFromUserData <- Option(sft.getUserData.get(SF_PROPERTY_START_TIME)).map { _.toString }
if Option(sft.getDescriptor(nameFromUserData)).isDefined
} yield nameFromUserData
// wrapping function in option to protect against incorrect values in SF_PROPERTY_START_TIME
def getDtgDescriptor(sft: SimpleFeatureType) = getDtgFieldName(sft).flatMap{name => Option(sft.getDescriptor(name))}
val spec = "geom:Geometry:srid=4326,dtg:Date,dtg_end_time:Date"
val indexSFT = SimpleFeatureTypes.createType("geomesa-idx", spec)
implicit def string2id(s: String): FeatureId = new FeatureIdImpl(s)
type KeyValuePair = (Key, Value)
object QueryHints {
val DENSITY_KEY = new ClassKey(classOf[java.lang.Boolean])
val WIDTH_KEY = new IntegerKey(256)
val HEIGHT_KEY = new IntegerKey(256)
val BBOX_KEY = new ClassKey(classOf[ReferencedEnvelope])
}
type ExplainerOutputType = ( => String) => Unit
object ExplainPrintln extends ExplainerOutputType {
override def apply(v1: => String): Unit = println(v1)
}
trait ExplainingLogging extends Logging {
def log(stringFnx: => String) = {
lazy val s: String = stringFnx
logger.trace(s)
}
}
}
| nhambletCCRI/geomesa | geomesa-core/src/main/scala/org/locationtech/geomesa/core/index/index.scala | Scala | apache-2.0 | 2,795 |
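A hedged sketch of the `getDtgFieldName` helper above: the date-time attribute is advertised through the `SF_PROPERTY_START_TIME` user-data key, and the Option is only defined when that attribute really exists on the type. The feature type spec is illustrative:

```scala
import org.locationtech.geomesa.core.index._
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes

val sft = SimpleFeatureTypes.createType("obs", "name:String,dtg:Date,geom:Point:srid=4326")
sft.getUserData.put(SF_PROPERTY_START_TIME, "dtg")

println(getDtgFieldName(sft))  // Some(dtg): the user-data value names an existing attribute
println(getDtgDescriptor(sft)) // Some(<descriptor for dtg>)
```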
/*
*
* * Copyright 2020 Lenses.io.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package com.datamountaineer.streamreactor.connect.jms.source
import com.datamountaineer.streamreactor.common.utils.{JarManifest, ProgressCounter}
import com.datamountaineer.streamreactor.connect.jms.config.{JMSConfig, JMSConfigConstants, JMSSettings}
import com.datamountaineer.streamreactor.connect.jms.source.readers.JMSReader
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.connect.source.{SourceRecord, SourceTask}
import java.util
import java.util.Collections
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicLong
import java.util.function.BiConsumer
import javax.jms.Message
import scala.collection.JavaConverters._
import scala.concurrent.duration.{FiniteDuration, _}
import scala.util.{Failure, Success, Try}
/**
* Created by [email protected] on 10/03/2017.
* stream-reactor
*/
class JMSSourceTask extends SourceTask with StrictLogging {
private var reader: JMSReader = _
private val progressCounter = new ProgressCounter
private var enableProgress: Boolean = false
private val pollingTimeout: AtomicLong = new AtomicLong(0L)
private val recordsToCommit = new ConcurrentHashMap[SourceRecord, MessageAndTimestamp]()
private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)
private val EmptyRecords = Collections.emptyList[SourceRecord]()
private var lastEvictedTimestamp: FiniteDuration = FiniteDuration(System.currentTimeMillis(), MILLISECONDS)
private var evictInterval: Int = 0
private var evictThreshold: Int = 0
override def start(props: util.Map[String, String]): Unit = {
logger.info(scala.io.Source.fromInputStream(getClass.getResourceAsStream("/jms-source-ascii.txt")).mkString + s" $version")
logger.info(manifest.printManifest())
val conf = if (context.configs().isEmpty) props else context.configs()
JMSConfig.config.parse(conf)
val config = new JMSConfig(conf)
val settings = JMSSettings(config, sink = false)
reader = JMSReader(settings)
enableProgress = config.getBoolean(JMSConfigConstants.PROGRESS_COUNTER_ENABLED)
pollingTimeout.set(settings.pollingTimeout)
evictInterval = settings.evictInterval
evictThreshold = settings.evictThreshold
}
override def stop(): Unit = {
logger.info("Stopping JMS readers")
synchronized {
this.notifyAll()
}
reader.stop match {
case Failure(t) => logger.error(s"Error encountered while stopping JMS Source Task. $t")
case Success(_) => logger.info("Successfully stopped JMS Source Task.")
}
}
override def poll(): util.List[SourceRecord] = {
val polled = reader.poll()
if (polled.isEmpty) {
synchronized {
this.wait(pollingTimeout.get())
}
if (enableProgress) {
progressCounter.update(Vector.empty)
}
EmptyRecords
} else {
val timestamp = System.currentTimeMillis()
val records = polled.map { case (msg, record) =>
recordsToCommit.put(record, MessageAndTimestamp(msg, FiniteDuration(timestamp, MILLISECONDS)))
record
}
if (enableProgress) {
progressCounter.update(records)
}
records.asJava
}
}
private def evictUncommittedMessages(): Unit = {
val current = FiniteDuration(System.currentTimeMillis(), MILLISECONDS)
if ((current - lastEvictedTimestamp).toMinutes > evictInterval) {
recordsToCommit.forEach(new BiConsumer[SourceRecord, MessageAndTimestamp] {
override def accept(t: SourceRecord, u: MessageAndTimestamp): Unit = evictIfApplicable(t, u, current)
})
}
lastEvictedTimestamp = current
}
private def evictIfApplicable(record: SourceRecord, msg: MessageAndTimestamp, now: FiniteDuration): Unit = {
if ((now - msg.timestamp).toMinutes > evictThreshold) {
recordsToCommit.remove(record)
}
}
override def commitRecord(record: SourceRecord): Unit = {
Option(recordsToCommit.remove(record)).foreach { case MessageAndTimestamp(msg, _) =>
Try(msg.acknowledge())
}
evictUncommittedMessages()
}
override def version: String = manifest.version()
}
case class MessageAndTimestamp(msg: Message, timestamp: FiniteDuration)
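// A standalone sketch (not part of the connector) of the eviction bookkeeping used by
// JMSSourceTask above: pending entries older than a threshold are dropped so that
// unacknowledged records cannot accumulate forever. The name PendingCommits and the
// minute-based threshold are illustrative only; everything it uses is already imported
// in this file.
class PendingCommits[K](evictThresholdMinutes: Long) {
  private val pending = new ConcurrentHashMap[K, FiniteDuration]()
  private def now: FiniteDuration = FiniteDuration(System.currentTimeMillis(), MILLISECONDS)
  def track(key: K): Unit = { pending.put(key, now); () }
  def complete(key: K): Unit = { pending.remove(key); () }
  // Mirrors evictUncommittedMessages/evictIfApplicable: drop anything pending for too long.
  def evictStale(): Unit = {
    val current = now
    pending.asScala.foreach { case (key, added) =>
      if ((current - added).toMinutes > evictThresholdMinutes) pending.remove(key)
    }
  }
}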
|
datamountaineer/stream-reactor
|
kafka-connect-jms/src/main/scala/com/datamountaineer/streamreactor/connect/jms/source/JMSSourceTask.scala
|
Scala
|
apache-2.0
| 4,850 |
object Test extends dotty.runtime.LegacyApp {
import scala.tools.nsc.io._
val dir: VirtualDirectory = new VirtualDirectory("foo", None)
dir.subdirectoryNamed("foo")
assert(dir.lookupName("foo", true) != null)
}
|
yusuke2255/dotty
|
tests/run/t1618.scala
|
Scala
|
bsd-3-clause
| 224 |
/**
* Copyright 2009 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.appjet.oui;
import net.appjet.bodylock.{BodyLock, Executable};
import java.io.File;
import java.util.{Properties, Date};
import java.lang.annotation.Annotation;
import java.text.SimpleDateFormat;
import scala.collection.mutable.{HashMap, SynchronizedMap, HashSet};
import scala.collection.jcl.{IterableWrapper, Conversions};
import org.mortbay.thread.QueuedThreadPool;
import org.mortbay.jetty.servlet.{Context, HashSessionIdManager, FilterHolder, ServletHolder};
import org.mortbay.jetty.handler.{HandlerCollection, RequestLogHandler, HandlerList};
import org.mortbay.jetty.{Server, NCSARequestLog, Request, Response};
import org.mortbay.servlet.GzipFilter;
import com.oreilly.servlet.MultipartFilter;
import net.appjet.common.util.{BetterFile, HttpServletRequestFactory};
import net.appjet.common.cli._;
import net.appjet.bodylock.JSCompileException;
import Util.enumerationToRichEnumeration;
object main {
val startTime = new java.util.Date();
def quit(status: Int) {
java.lang.Runtime.getRuntime().halt(status);
}
def setupFilesystem() {
val logdir = new File(config.logDir+"/backend/access");
if (! logdir.isDirectory())
if (! logdir.mkdirs())
quit(1);
}
val options =
for (m <- config.allProperties if (m.getAnnotation(classOf[ConfigParam]) != null)) yield {
val cp = m.getAnnotation(classOf[ConfigParam])
new CliOption(m.getName(), cp.value(), if (cp.argName().length > 0) Some(cp.argName()) else None);
}
def printUsage() {
println("\\n--------------------------------------------------------------------------------");
println("usage:");
println((new CliParser(options)).usage);
println("--------------------------------------------------------------------------------\\n");
}
def extractOptions(args: Array[String]) {
val parser = new CliParser(options);
val opts =
try {
parser.parseOptions(args)._1;
} catch {
case e: ParseException => {
println("error: "+e.getMessage());
printUsage();
System.exit(1);
null;
}
}
if (opts.contains("configFile")) {
val p = new Properties();
p.load(new java.io.FileInputStream(opts("configFile")));
extractOptions(p);
}
for ((k, v) <- opts) {
config.values(k) = v;
}
}
def extractOptions(props: Properties) {
for (k <- for (o <- props.propertyNames()) yield o.asInstanceOf[String]) {
config.values(k) = props.getProperty(k);
}
}
val startupExecutable = (new FixedDiskLibrary(new SpecialJarOrNotFile(config.ajstdlibHome, "onstartup.js"))).executable;
def runOnStartup() {
execution.runOutOfBand(startupExecutable, "Startup", None, { error =>
error match {
case e: JSCompileException => { }
case e: Throwable => { e.printStackTrace(); }
case (sc: Int, msg: String) => { println(msg); }
case x => println(x);
}
System.exit(1);
});
}
lazy val shutdownExecutable = (new FixedDiskLibrary(new SpecialJarOrNotFile(config.ajstdlibHome, "onshutdown.js"))).executable;
def runOnShutdown() {
execution.runOutOfBand(shutdownExecutable, "Shutdown", None, { error =>
error match {
case e: JSCompileException => { }
case e: Throwable => { }
case (sc: Int, msg: String) => { println(msg); }
case x => println(x);
}
});
}
def runOnSars(q: String) = {
val ec = execution.runOutOfBand(execution.sarsExecutable, "SARS", Some(Map("sarsRequest" -> q)), { error =>
error match {
case e: JSCompileException => { throw e; }
case e: Throwable => { exceptionlog(e); throw e; }
case (sc: Int, msg: String) => { println(msg); throw new RuntimeException(""+sc+": "+msg) }
case x => { println(x); throw new RuntimeException(x.toString()) }
}
});
ec.attributes.get("sarsResponse").map(_.toString());
}
def stfu() {
System.setProperty("org.mortbay.log.class", "net.appjet.oui.STFULogger");
System.setProperty("com.mchange.v2.log.MLog", "com.mchange.v2.log.FallbackMLog");
System.setProperty("com.mchange.v2.log.FallbackMLog.DEFAULT_CUTOFF_LEVEL", "OFF");
}
var server: Server = null;
var sarsServer: net.appjet.common.sars.SarsServer = null;
var loggers = new HashSet[GenericLogger];
def main(args: Array[String]) {
val etherpadProperties = getClass.getResource("/etherpad.properties");
if (etherpadProperties != null) {
val p = new Properties();
p.load(etherpadProperties.openStream);
extractOptions(p);
}
extractOptions(args);
if (! config.verbose)
stfu();
setupFilesystem();
if (config.devMode)
config.print;
if (config.profile)
profiler.start();
if (config.listenMonitoring != "0:0")
monitoring.startMonitoringServer();
// this needs a better place.
if (config.devMode)
BodyLock.map = Some(new HashMap[String, String] with SynchronizedMap[String, String]);
server = new Server();
if (config.maxThreads > 0)
server.setThreadPool(new QueuedThreadPool(config.maxThreads));
else
server.setThreadPool(new QueuedThreadPool());
// set up socket connectors
val nioconnector = new CometSelectChannelConnector;
var sslconnector: CometSslSelectChannelConnector = null;
nioconnector.setPort(config.listenPort);
nioconnector.setForwarded(true);
if (config.listenHost.length > 0)
nioconnector.setHost(config.listenHost);
if (config.listenSecurePort == 0) {
server.setConnectors(Array(nioconnector));
} else {
sslconnector = new CometSslSelectChannelConnector;
sslconnector.setForwarded(true);
sslconnector.setPort(config.listenSecurePort);
if (config.listenSecureHost.length > 0)
sslconnector.setHost(config.listenSecureHost);
if (! config.sslKeyStore_isSet) {
val url = getClass.getResource("/mirror/snakeoil-ssl-cert");
if (url != null)
sslconnector.setKeystore(url.toString());
else
sslconnector.setKeystore(config.sslKeyStore);
} else {
sslconnector.setKeystore(config.sslKeyStore);
}
sslconnector.setPassword(config.sslStorePassword);
sslconnector.setKeyPassword(config.sslKeyPassword);
sslconnector.setTrustPassword(config.sslStorePassword);
sslconnector.setExcludeCipherSuites(Array[String](
"SSL_RSA_WITH_3DES_EDE_CBC_SHA",
"SSL_DHE_RSA_WITH_DES_CBC_SHA",
"SSL_DHE_DSS_WITH_DES_CBC_SHA",
"SSL_DHE_RSA_WITH_3DES_EDE_CBC_SHA",
"SSL_DHE_DSS_WITH_3DES_EDE_CBC_SHA",
"SSL_RSA_WITH_DES_CBC_SHA",
"SSL_RSA_EXPORT_WITH_RC4_40_MD5",
"SSL_RSA_EXPORT_WITH_DES40_CBC_SHA",
"SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA",
"SSL_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA",
"SSL_RSA_WITH_NULL_MD5",
"SSL_RSA_WITH_NULL_SHA",
"SSL_DH_anon_WITH_3DES_EDE_CBC_SHA",
"SSL_DH_anon_WITH_DES_CBC_SHA",
"SSL_DH_anon_EXPORT_WITH_RC4_40_MD5",
"SSL_DH_anon_EXPORT_WITH_DES40_CBC_SHA"));
server.setConnectors(Array(nioconnector, sslconnector));
}
// set up Context and Servlet
val handler = new Context(server, "/", Context.NO_SESSIONS | Context.NO_SECURITY);
handler.addServlet(new ServletHolder(new OuiServlet), "/");
val filterHolder = new FilterHolder(new MultipartFilter());
filterHolder.setInitParameter("uploadDir", System.getProperty("java.io.tmpdir"));
handler.addFilter(filterHolder, "/*", 1);
global.context = handler;
// set up apache-style logging
val requestLogHandler = new RequestLogHandler();
val requestLog = new NCSARequestLog(config.logDir+"/backend/access/access-yyyy_mm_dd.request.log") {
override def log(req: Request, res: Response) {
try {
if (config.devMode || config.specialDebug)
super.log(req, res);
else if (res.getStatus() != 200 || config.transportPrefix == null || ! req.getRequestURI().startsWith(config.transportPrefix))
super.log(req, res);
val d = new Date();
appstats.stati.foreach(_(if (res.getStatus() < 0) 404 else res.getStatus()).hit(d));
} catch {
case e => { exceptionlog("Error writing to log?"); exceptionlog(e); }
}
}
};
requestLog.setRetainDays(365);
requestLog.setAppend(true);
requestLog.setExtended(true);
requestLog.setLogServer(true);
requestLog.setLogLatency(true);
requestLog.setLogTimeZone("PST");
requestLogHandler.setRequestLog(requestLog);
// set handlers with server
val businessHandlers = new HandlerList();
businessHandlers.setHandlers(Array(handler));
val allHandlers = new HandlerCollection();
allHandlers.setHandlers(Array(businessHandlers, requestLogHandler));
server.setHandler(allHandlers);
// fix slow startup bug
server.setSessionIdManager(new HashSessionIdManager(new java.util.Random()));
// run the onStartup script.
runOnStartup();
// preload some runners, if necessary.
if (config.preloadRunners > 0) {
val b = new java.util.concurrent.CountDownLatch(config.preloadRunners);
for (i <- 0 until config.preloadRunners)
(new Thread {
ScopeReuseManager.freeRunner(ScopeReuseManager.newRunner);
b.countDown();
}).start();
while (b.getCount() > 0) {
b.await();
}
println("Preloaded "+config.preloadRunners+" runners.");
}
// start SARS server.
if (config.listenSarsPort > 0) {
try {
import net.appjet.common.sars._;
sarsServer = new SarsServer(config.sarsAuthKey,
new SarsMessageHandler { override def handle(q: String) = runOnSars(q) },
if (config.listenSarsHost.length > 0) Some(config.listenSarsHost) else None,
config.listenSarsPort);
sarsServer.daemon = true;
sarsServer.start();
} catch {
case e: java.net.SocketException => {
println("SARS: A socket exception occurred: "+e.getMessage()+" on SARS server at "+config.listenSarsHost+":"+config.listenSarsPort);
java.lang.Runtime.getRuntime().halt(1);
}
}
}
// start server
java.lang.Runtime.getRuntime().addShutdownHook(new Thread() {
override def run() {
val df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSSZ");
def printts(str: String) {
println("["+df.format(new Date())+"]: "+str);
}
printts("Shutting down...");
handler.setShutdown(true);
Thread.sleep(if (config.devMode) 500 else 3000);
printts("...done, running onshutdown.");
runOnShutdown();
printts("...done, stopping server.");
server.stop();
server.join();
printts("...done, flushing logs.");
for (l <- loggers) { l.flush(); l.close(); }
printts("...done.");
}
});
def socketError(c: org.mortbay.jetty.Connector, e: java.net.SocketException) {
var msg = e.getMessage();
println("SOCKET ERROR: "+msg+" - "+(c match {
case null => "(unknown socket)";
case x => {
(x.getHost() match {
case null => "localhost";
case y => y;
})+":"+x.getPort();
}
}));
if (msg.contains("Address already in use")) {
println("Did you make sure that ports "+config.listenPort+" and "+config.listenSecurePort+" are not in use?");
}
if (msg.contains("Permission denied")) {
println("Perhaps you need to run as the root user or as an Administrator?");
}
}
var c: org.mortbay.jetty.Connector = null;
try {
c = nioconnector;
c.open();
if (sslconnector != null) {
c = sslconnector;
c.open();
}
c = null;
allHandlers.start();
server.start();
} catch {
case e: java.net.SocketException => {
socketError(c, e);
java.lang.Runtime.getRuntime().halt(1);
}
case e: org.mortbay.util.MultiException => {
println("SERVER ERROR: Couldn't start server; multiple errors.");
for (i <- new IterableWrapper[Throwable] { override val underlying = e.getThrowables.asInstanceOf[java.util.List[Throwable]] }) {
i match {
case se: java.net.SocketException => {
socketError(c, se);
}
case e =>
println("SERVER ERROR: Couldn't start server: "+i.getMessage());
}
}
java.lang.Runtime.getRuntime().halt(1);
}
case e => {
println("SERVER ERROR: Couldn't start server: "+e.getMessage());
java.lang.Runtime.getRuntime().halt(1);
}
}
println("HTTP server listening on http://"+
(if (config.listenHost.length > 0) config.listenHost else "localhost")+
":"+config.listenPort+"/");
if (config.listenSecurePort > 0)
println("HTTPS server listening on https://"+
(if (config.listenSecureHost.length > 0) config.listenSecureHost else "localhost")+
":"+config.listenSecurePort+"/");
if (config.listenSarsPort > 0)
println("SARS server listening on "+
(if (config.listenSarsHost.length > 0) config.listenSarsHost else "localhost")+
":"+config.listenSarsPort);
}
}
|
titanpad/titanpad
|
infrastructure/net.appjet.oui/main.scala
|
Scala
|
apache-2.0
| 14,117 |
package org.xarcher.ea.macros.common
import scala.reflect.macros.blackbox.Context
import scala.language.experimental.macros
/**
* Created by djx314 on 15-5-24.
*/
trait MacroUtils {
val c: Context
import c.universe._
def typeFromParamTree(tree: Tree) = c.typecheck(tree.duplicate, c.TYPEmode).tpe
def extractTermName(methodSymbol: Name) = {
val TermName(s) = methodSymbol
s
}
}
|
scalax/slickea
|
src/main/scala/org/xarcher/ea/macros/common/MacroUtils.scala
|
Scala
|
mit
| 403 |
package io.buoyant.linkerd
import com.fasterxml.jackson.annotation.JsonSubTypes.Type
import com.fasterxml.jackson.annotation.{JsonIgnore, JsonSubTypes}
import com.fasterxml.jackson.databind.annotation.JsonDeserialize
import com.twitter.conversions.DurationOps._
import com.twitter.finagle.Stack
import com.twitter.finagle.loadbalancer.LoadBalancerFactory.EnableProbation
import com.twitter.finagle.loadbalancer.buoyant.DeregisterLoadBalancerFactory
import com.twitter.finagle.loadbalancer.{Balancers, LoadBalancerFactory}
import io.buoyant.config.PolymorphicConfig
@JsonSubTypes(Array(
new Type(value = classOf[P2C], name = "p2c"),
new Type(value = classOf[P2CEwma], name = "ewma"),
new Type(value = classOf[Aperture], name = "aperture"),
new Type(value = classOf[Heap], name = "heap"),
new Type(value = classOf[RoundRobin], name = "roundRobin")
))
abstract class LoadBalancerConfig extends PolymorphicConfig {
val factory: LoadBalancerFactory
val enableProbation: Option[Boolean] = None
@JsonIgnore
def clientParams = Stack.Params.empty +
LoadBalancerFactory.Param(new DeregisterLoadBalancerFactory(factory)) +
LoadBalancerFactory.EnableProbation(enableProbation.getOrElse(false))
}
case class P2C(maxEffort: Option[Int]) extends LoadBalancerConfig {
@JsonIgnore
val factory = Balancers.p2c(maxEffort.getOrElse(Balancers.MaxEffort))
}
case class P2CEwma(decayTimeMs: Option[Int], maxEffort: Option[Int]) extends LoadBalancerConfig {
@JsonIgnore
val factory = Balancers.p2cPeakEwma(
decayTime = decayTimeMs.map(_.millis).getOrElse(10.seconds),
maxEffort = maxEffort.getOrElse(Balancers.MaxEffort)
)
}
case class Aperture(
smoothWindowMs: Option[Int],
maxEffort: Option[Int],
@JsonDeserialize(contentAs = classOf[java.lang.Double]) lowLoad: Option[Double],
@JsonDeserialize(contentAs = classOf[java.lang.Double]) highLoad: Option[Double],
minAperture: Option[Int]
) extends LoadBalancerConfig {
@JsonIgnore
val factory = Balancers.aperture(
smoothWin = smoothWindowMs.map(_.millis).getOrElse(5.seconds),
maxEffort = maxEffort.getOrElse(Balancers.MaxEffort),
lowLoad = lowLoad.getOrElse(0.5),
highLoad = highLoad.getOrElse(2.0),
minAperture = minAperture.getOrElse(1)
)
}
class Heap extends LoadBalancerConfig {
@JsonIgnore
val factory = Balancers.heap()
}
case class RoundRobin(maxEffort: Option[Int]) extends LoadBalancerConfig {
@JsonIgnore
val factory = Balancers.roundRobin(
maxEffort = maxEffort.getOrElse(Balancers.MaxEffort)
)
}
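// Hedged sketch, not part of linkerd: shows how the case classes above are used once
// Jackson has deserialized a `loadBalancer` section (field values here are placeholders).
object LoadBalancerConfigExample extends App {
  // Roughly what a `kind: ewma, decayTimeMs: 10000` block deserializes into.
  val ewma = P2CEwma(decayTimeMs = Some(10000), maxEffort = None)
  println(ewma.clientParams) // Stack.Params carrying the wrapped, deregisterable factory
  // Unset fields fall back to the defaults wired into each factory above.
  val aperture = Aperture(None, None, None, None, minAperture = Some(2))
  println(aperture.factory)
}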
|
linkerd/linkerd
|
linkerd/core/src/main/scala/io/buoyant/linkerd/LoadBalancerConfig.scala
|
Scala
|
apache-2.0
| 2,538 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.metrics.sink
import java.util.Properties
import scala.collection.JavaConverters._
import com.codahale.metrics._
import org.apache.spark.{SecurityManager, SparkConf, SparkFunSuite}
class GraphiteSinkSuite extends SparkFunSuite {
test("GraphiteSink with default MetricsFilter") {
val props = new Properties
props.put("host", "127.0.0.1")
props.put("port", "54321")
val registry = new MetricRegistry
val securityMgr = new SecurityManager(new SparkConf(false))
val sink = new GraphiteSink(props, registry, securityMgr)
val gauge = new Gauge[Double] {
override def getValue: Double = 1.23
}
sink.registry.register("gauge", gauge)
sink.registry.register("anothergauge", gauge)
sink.registry.register("streaminggauge", gauge)
val metricKeys = sink.registry.getGauges(sink.filter).keySet.asScala
assert(metricKeys.equals(Set("gauge", "anothergauge", "streaminggauge")),
"Should contain all metrics registered")
}
test("GraphiteSink with regex MetricsFilter") {
val props = new Properties
props.put("host", "127.0.0.1")
props.put("port", "54321")
props.put("regex", "local-[0-9]+.driver.(CodeGenerator|BlockManager)")
val registry = new MetricRegistry
val securityMgr = new SecurityManager(new SparkConf(false))
val sink = new GraphiteSink(props, registry, securityMgr)
val gauge = new Gauge[Double] {
override def getValue: Double = 1.23
}
sink.registry.register("gauge", gauge)
sink.registry.register("anothergauge", gauge)
sink.registry.register("streaminggauge", gauge)
sink.registry.register("local-1563838109260.driver.CodeGenerator.generatedMethodSize", gauge)
sink.registry.register("local-1563838109260.driver.BlockManager.disk.diskSpaceUsed_MB", gauge)
sink.registry.register("local-1563813796998.driver.spark.streaming.nicklocal.latency", gauge)
sink.registry.register("myapp.driver.CodeGenerator.generatedMethodSize", gauge)
sink.registry.register("myapp.driver.BlockManager.disk.diskSpaceUsed_MB", gauge)
val metricKeys = sink.registry.getGauges(sink.filter).keySet.asScala
val filteredMetricKeys = Set(
"local-1563838109260.driver.CodeGenerator.generatedMethodSize",
"local-1563838109260.driver.BlockManager.disk.diskSpaceUsed_MB"
)
assert(metricKeys.equals(filteredMetricKeys),
"Should contain only metrics matches regex filter")
}
}
|
pgandhi999/spark
|
core/src/test/scala/org/apache/spark/metrics/sink/GraphiteSinkSuite.scala
|
Scala
|
apache-2.0
| 3,263 |
package com.artclod.mathml.scalar.concept
import com.artclod.math.TrigonometryFix
import com.artclod.mathml.scalar._
object Trigonometry {
def cos(v: Double) : Double = TrigonometryFix.cos0(v)
def sin(v: Double) : Double = TrigonometryFix.sin0(v)
def tan(v: Double) : Double = TrigonometryFix.tan0(v)
def csc(v: Double) : Double = TrigonometryFix.csc0(v)
def sec(v: Double) : Double = TrigonometryFix.sec0(v)
def cot(v: Double) : Double = TrigonometryFix.cot0(v)
def sin(value: Constant) = value match {
case c: ConstantInteger => Cn(TrigonometryFix.sin0(c.v.doubleValue))
case c: ConstantDecimal => Cn(TrigonometryFix.sin0(c.v.doubleValue))
}
def cos(value: Constant) = value match {
case c: ConstantInteger => Cn(TrigonometryFix.cos0(c.v.doubleValue))
case c: ConstantDecimal => Cn(TrigonometryFix.cos0(c.v.doubleValue))
}
def tan(value: Constant) = value match {
case c: ConstantInteger => Cn(TrigonometryFix.tan0(c.v.doubleValue))
case c: ConstantDecimal => Cn(TrigonometryFix.tan0(c.v.doubleValue))
}
def csc(value: Constant) : Constant = value match {
case c: ConstantInteger => Cn(TrigonometryFix.csc0(c.v.doubleValue))
case c: ConstantDecimal => Cn(TrigonometryFix.csc0(c.v.doubleValue))
}
def sec(value: Constant) : Constant = value match {
case c: ConstantInteger => Cn(TrigonometryFix.sec0(c.v.doubleValue))
case c: ConstantDecimal => Cn(TrigonometryFix.sec0(c.v.doubleValue))
}
def cot(value: Constant) : Constant = value match {
case c: ConstantInteger => Cn(TrigonometryFix.cot0(c.v.doubleValue))
case c: ConstantDecimal => Cn(TrigonometryFix.cot0(c.v.doubleValue))
}
}
|
kristiankime/calc-tutor
|
app/com/artclod/mathml/scalar/concept/Trigonometry.scala
|
Scala
|
mit
| 1,649 |
package mesosphere.marathon.core.task.update.impl
import javax.inject.Inject
import com.google.inject.name.Names
import mesosphere.marathon.MarathonSchedulerDriverHolder
import mesosphere.marathon.core.base.Clock
import mesosphere.marathon.core.task.bus.MarathonTaskStatus
import mesosphere.marathon.core.task.tracker.{ TaskStateOpProcessor, TaskTracker }
import mesosphere.marathon.core.task.update.TaskStatusUpdateProcessor
import mesosphere.marathon.core.task.{ Task, TaskStateOp }
import mesosphere.marathon.metrics.Metrics.Timer
import mesosphere.marathon.metrics.{ MetricPrefixes, Metrics }
import org.apache.mesos.{ Protos => MesosProtos }
import org.slf4j.LoggerFactory
import scala.concurrent.Future
object TaskStatusUpdateProcessorImpl {
lazy val name = Names.named(getClass.getSimpleName)
}
/**
* Executes the given TaskStatusUpdateSteps for every update.
*/
class TaskStatusUpdateProcessorImpl @Inject() (
metrics: Metrics,
clock: Clock,
taskTracker: TaskTracker,
stateOpProcessor: TaskStateOpProcessor,
driverHolder: MarathonSchedulerDriverHolder) extends TaskStatusUpdateProcessor {
import scala.concurrent.ExecutionContext.Implicits.global
private[this] val log = LoggerFactory.getLogger(getClass)
private[this] val publishFutureTimer: Timer =
metrics.timer(metrics.name(MetricPrefixes.SERVICE, getClass, "publishFuture"))
private[this] val killUnknownTaskTimer: Timer =
metrics.timer(metrics.name(MetricPrefixes.SERVICE, getClass, "killUnknownTask"))
log.info("Started status update processor")
override def publish(status: MesosProtos.TaskStatus): Future[Unit] = publishFutureTimer.timeFuture {
val now = clock.now()
val taskId = Task.Id(status.getTaskId)
taskTracker.task(taskId).flatMap {
case _ if status.getState == MesosProtos.TaskState.TASK_KILLING =>
// introduced in Mesos 0.28.0, not yet processed
log.info("Ignoring TASK_KILLING update for {}", taskId)
acknowledge(status)
case Some(task) if task.launched.isDefined =>
val taskStateOp = TaskStateOp.MesosUpdate(task, MarathonTaskStatus(status), now)
stateOpProcessor.process(taskStateOp).flatMap(_ => acknowledge(status))
case _ =>
killUnknownTaskTimer {
if (status.getState != MesosProtos.TaskState.TASK_LOST) {
          // If we kill an unknown task, we will get another TASK_LOST notification which leads to an endless
// stream of kills and TASK_LOST updates.
killTask(taskId.mesosTaskId)
}
acknowledge(status)
}
}
}
private[this] def acknowledge(taskStatus: MesosProtos.TaskStatus): Future[Unit] = {
driverHolder.driver.foreach(_.acknowledgeStatusUpdate(taskStatus))
Future.successful(())
}
private[this] def killTask(taskId: MesosProtos.TaskID): Unit = {
driverHolder.driver.foreach(_.killTask(taskId))
}
}
|
vivekjuneja/marathon
|
src/main/scala/mesosphere/marathon/core/task/update/impl/TaskStatusUpdateProcessorImpl.scala
|
Scala
|
apache-2.0
| 2,919 |
package korolev.http
import java.net.SocketAddress
import java.nio.channels.AsynchronousChannelGroup
import korolev.data.BytesLike
import korolev.data.syntax._
import korolev.effect.io.ServerSocket
import korolev.effect.syntax._
import korolev.effect.{Decoder, Effect, Stream}
import korolev.http.protocol.Http11
import korolev.web.{Request, Response}
import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal
object HttpServer {
/**
* @see [[ServerSocket.bind]]
*/
def apply[F[_]: Effect, B: BytesLike](address: SocketAddress,
backlog: Int = 0,
bufferSize: Int = 8096,
group: AsynchronousChannelGroup = null,
gracefulShutdown: Boolean = false)
(f: Request[Stream[F, B]] => F[Response[Stream[F, B]]])
(implicit ec: ExecutionContext): F[ServerSocket.ServerSocketHandler[F]] = {
val InternalServerErrorMessage = BytesLike[B].ascii("Internal server error")
val http11 = new Http11[B]
ServerSocket.accept[F, B](address, backlog, bufferSize, group, gracefulShutdown) { client =>
http11
.decodeRequest(Decoder(client.stream))
.foreach { request =>
for {
response <- f(request).recoverF {
case NonFatal(error) =>
ec.reportFailure(error)
Stream(InternalServerErrorMessage).mat() map { body =>
Response(Response.Status.InternalServerError, body, Nil, Some(InternalServerErrorMessage.length))
}
}
byteStream <- http11.renderResponse(response)
_ <- byteStream.foreach(client.write)
} yield ()
}
}
}
}
|
fomkin/korolev
|
modules/http/src/main/scala/korolev/http/HttpServer.scala
|
Scala
|
apache-2.0
| 1,863 |
package com.atomist.source
import org.scalatest.{FlatSpec, Matchers}
class ArtifactSourceExceptionTest extends FlatSpec with Matchers {
it should "create new ArtifactSourceAccessException with message" in {
val e = new ArtifactSourceException("message")
e.getMessage should equal("message")
e.getCause should be(null)
}
it should "create new ArtifactSourceCreationException with statusCode and message" in {
val e = new ArtifactSourceException("message")
e.getMessage should equal("message")
e.getCause should be(null)
}
it should "create new ArtifactSourceUpdateException with message and throwable" in {
val iae = new IllegalArgumentException("illegal argument")
val e = new ArtifactSourceException("message", iae)
e.getMessage should equal("message")
e.getCause should not be null
e.getCause.getMessage should equal("illegal argument")
}
}
|
atomist/artifact-source
|
src/test/scala/com/atomist/source/ArtifactSourceExceptionTest.scala
|
Scala
|
gpl-3.0
| 904 |
import play.api._
import play.api.mvc._
import play.filters.csrf._
object Global extends WithFilters(CSRFFilter()) with GlobalSettings {
}
|
MJCallahanPage/CsrfDemoApp
|
app/Global.scala
|
Scala
|
mit
| 139 |
package com.aurelpaulovic.transaction
trait Context {
}
|
AurelPaulovic/transactions-api
|
src/main/scala/com/aurelpaulovic/transaction/Context.scala
|
Scala
|
apache-2.0
| 57 |
package kafka.consumer
import com.softwaremill.react.kafka.ConsumerProperties
import kafka.serializer.DefaultDecoder
import kafka.utils.Logging
import scala.concurrent.duration._
import scala.language.postfixOps
/**
* Copied from https://github.com/stealthly/scala-kafka, 0.8.2-beta (not released at the moment)
*/
class KafkaConsumer[T](val props: ConsumerProperties[T]) extends Logging {
val connector = Consumer.create(props.toConsumerConfig)
val filterSpec = new Whitelist(props.topic)
logger.info(s"setup:start topic=${props.topic} for zk=${props.zookeeperConnect} and groupId=${props.groupId}")
val stream = connector.createMessageStreamsByFilter(filterSpec, 1, new DefaultDecoder(), props.decoder).head
logger.info(s"setup:complete topic=${props.topic} for zk=${props.zookeeperConnect} and groupId=${props.groupId}")
def iterator() = stream.iterator()
def close(): Unit = {
connector.shutdown()
}
def commitInterval = props.commitInterval.getOrElse(KafkaConsumer.DefaultCommitInterval)
def kafkaOffsetStorage = props.kafkaOffsetStorage
}
object KafkaConsumer {
val DefaultCommitInterval = 30 seconds
}
|
kali786516/reactive-kafka
|
core/src/main/scala/kafka/consumer/KafkaConsumer.scala
|
Scala
|
apache-2.0
| 1,146 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import scala.collection.JavaConversions._
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.ql.metadata.{Partition => HivePartition}
import org.apache.hadoop.hive.serde.serdeConstants
import org.apache.hadoop.hive.serde2.objectinspector._
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.execution._
import org.apache.spark.sql.hive._
import org.apache.spark.sql.types.{BooleanType, DataType}
/**
* The Hive table scan operator. Column and partition pruning are both handled.
*
* @param requestedAttributes Attributes to be fetched from the Hive table.
 * @param relation The Hive table to be scanned.
* @param partitionPruningPred An optional partition pruning predicate for partitioned table.
*/
private[hive]
case class HiveTableScan(
requestedAttributes: Seq[Attribute],
relation: MetastoreRelation,
partitionPruningPred: Option[Expression])(
@transient val context: HiveContext)
extends LeafNode {
require(partitionPruningPred.isEmpty || relation.hiveQlTable.isPartitioned,
"Partition pruning predicates only supported for partitioned tables.")
// Retrieve the original attributes based on expression ID so that capitalization matches.
val attributes = requestedAttributes.map(relation.attributeMap)
// Bind all partition key attribute references in the partition pruning predicate for later
// evaluation.
private[this] val boundPruningPred = partitionPruningPred.map { pred =>
require(
pred.dataType == BooleanType,
s"Data type of predicate $pred must be BooleanType rather than ${pred.dataType}.")
BindReferences.bindReference(pred, relation.partitionKeys)
}
  // Create a local copy of hiveconf, so that scan-specific modifications do not impact
  // other queries.
@transient
private[this] val hiveExtraConf = new HiveConf(context.hiveconf)
  // Append column ids and names before broadcast.
addColumnMetadataToConf(hiveExtraConf)
@transient
private[this] val hadoopReader =
new HadoopTableReader(attributes, relation, context, hiveExtraConf)
private[this] def castFromString(value: String, dataType: DataType) = {
Cast(Literal(value), dataType).eval(null)
}
private def addColumnMetadataToConf(hiveConf: HiveConf) {
// Specifies needed column IDs for those non-partitioning columns.
val neededColumnIDs = attributes.flatMap(relation.columnOrdinals.get).map(o => o: Integer)
HiveShim.appendReadColumns(hiveConf, neededColumnIDs, attributes.map(_.name))
val tableDesc = relation.tableDesc
val deserializer = tableDesc.getDeserializerClass.newInstance
deserializer.initialize(hiveConf, tableDesc.getProperties)
// Specifies types and object inspectors of columns to be scanned.
val structOI = ObjectInspectorUtils
.getStandardObjectInspector(
deserializer.getObjectInspector,
ObjectInspectorCopyOption.JAVA)
.asInstanceOf[StructObjectInspector]
val columnTypeNames = structOI
.getAllStructFieldRefs
.map(_.getFieldObjectInspector)
.map(TypeInfoUtils.getTypeInfoFromObjectInspector(_).getTypeName)
.mkString(",")
hiveConf.set(serdeConstants.LIST_COLUMN_TYPES, columnTypeNames)
hiveConf.set(serdeConstants.LIST_COLUMNS, relation.attributes.map(_.name).mkString(","))
}
/**
 * Prunes partitions not involved in the query plan.
*
* @param partitions All partitions of the relation.
* @return Partitions that are involved in the query plan.
*/
private[hive] def prunePartitions(partitions: Seq[HivePartition]) = {
boundPruningPred match {
case None => partitions
case Some(shouldKeep) => partitions.filter { part =>
val dataTypes = relation.partitionKeys.map(_.dataType)
val castedValues = for ((value, dataType) <- part.getValues.zip(dataTypes)) yield {
castFromString(value, dataType)
}
// Only partitioned values are needed here, since the predicate has already been bound to
// partition key attribute references.
val row = new GenericRow(castedValues.toArray)
shouldKeep.eval(row).asInstanceOf[Boolean]
}
}
}
override def execute() = if (!relation.hiveQlTable.isPartitioned) {
hadoopReader.makeRDDForTable(relation.hiveQlTable)
} else {
hadoopReader.makeRDDForPartitionedTable(prunePartitions(relation.hiveQlPartitions))
}
override def output = attributes
}
|
hengyicai/OnlineAggregationUCAS
|
sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScan.scala
|
Scala
|
apache-2.0
| 5,460 |
package org.alitouka.spark.dbscan.util.commandLine
private [dbscan] trait NumberOfBucketsArgParsing [C <: CommonArgs with NumberOfBucketsArg]
extends CommonArgsParser[C] {
opt[Int] ("numBuckets")
.foreach { args.numberOfBuckets = _ }
.valueName("<numBuckets>")
.text("Number of buckets in a histogram")
}
|
isaacboucinha/CardioStream
|
web-app/src/main/scala/org/alitouka/spark/dbscan/util/commandLine/NumberOfBucketsArgParsing.scala
|
Scala
|
apache-2.0
| 323 |
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.kudu.backup
import com.google.common.collect.ImmutableList
import org.apache.kudu.client.CreateTableOptions
import org.apache.kudu.client.KuduTable
import org.apache.kudu.test.ClientTestUtil.getBasicSchema
import org.apache.kudu.test.KuduTestHarness
import org.junit.Assert._
import org.junit.Before
import org.junit.Rule
import org.junit.Test
import org.slf4j.Logger
import org.slf4j.LoggerFactory
import scala.annotation.meta.getter
class TestBackupGraph {
val log: Logger = LoggerFactory.getLogger(getClass)
var tableName: String = "TestBackupGraph"
var table: KuduTable = _
@(Rule @getter)
val harness = new KuduTestHarness
@Before
def setUp(): Unit = {
// Create the test table.
val builder = new CreateTableOptions().setNumReplicas(3)
builder.setRangePartitionColumns(ImmutableList.of("key"))
table = harness.getClient.createTable(tableName, getBasicSchema, builder)
}
@Test
def testSimpleBackupGraph() {
val graph = new BackupGraph(table.getTableId)
val full = createBackupVertex(table, 0, 1)
graph.addBackup(full)
// Validate a graph with only a single full backup.
assertEquals(1, graph.fullBackups.size)
assertEquals(1, graph.backupPaths.size)
val fullPath = graph.backupPaths.head
assertEquals("0", fullPath.pathString)
// Validate a graph with a couple incremental backups.
val inc1 = createBackupVertex(table, 1, 2)
graph.addBackup(inc1)
val inc2 = createBackupVertex(table, 2, 3)
graph.addBackup(inc2)
assertEquals(1, graph.fullBackups.size)
assertEquals(1, graph.backupPaths.size)
val incPath = graph.backupPaths.head
assertEquals("0 -> 1 -> 2", incPath.pathString)
}
@Test
def testForkingBackupGraph() {
val graph = new BackupGraph(table.getTableId)
val full = createBackupVertex(table, 0, 1)
graph.addBackup(full)
// Duplicate fromMs of 1 creates a fork in the graph.
val inc1 = createBackupVertex(table, 1, 2)
graph.addBackup(inc1)
val inc2 = createBackupVertex(table, 1, 4)
graph.addBackup(inc2)
val inc3 = createBackupVertex(table, 2, 3)
graph.addBackup(inc3)
val inc4 = createBackupVertex(table, 4, 5)
graph.addBackup(inc4)
assertEquals(1, graph.fullBackups.size)
assertEquals(2, graph.backupPaths.size)
val path1 = graph.backupPaths.head
assertEquals("0 -> 1 -> 2", path1.pathString)
val path2 = graph.backupPaths.last
assertEquals("0 -> 1 -> 4", path2.pathString)
// Ensure the most recent incremental is used for a backup base and restore path.
assertEquals(5, graph.backupBase.metadata.getToMs)
assertEquals(5, graph.restorePath.toMs)
}
@Test
def testMultiFullBackupGraph() {
val graph = new BackupGraph(table.getTableId)
val full1 = createBackupVertex(table, 0, 1)
graph.addBackup(full1)
val inc1 = createBackupVertex(table, 1, 2)
graph.addBackup(inc1)
val inc2 = createBackupVertex(table, 2, 4)
graph.addBackup(inc2)
// Add a second full backup.
val full2 = createBackupVertex(table, 0, 4)
graph.addBackup(full2)
val inc3 = createBackupVertex(table, 4, 5)
graph.addBackup(inc3)
assertEquals(2, graph.fullBackups.size)
assertEquals(2, graph.backupPaths.size)
val path1 = graph.backupPaths.head
assertEquals("0 -> 1 -> 2 -> 4", path1.pathString)
val path2 = graph.backupPaths.last
assertEquals("0 -> 4", path2.pathString)
// Ensure the most recent incremental is used for a backup base and restore path.
assertEquals(5, graph.backupBase.metadata.getToMs)
assertEquals(5, graph.restorePath.toMs)
}
@Test
def testFilterByTime() {
val graph = new BackupGraph(table.getName)
val full1 = createBackupVertex(table, 0, 1)
graph.addBackup(full1)
val inc1 = createBackupVertex(table, 1, 2)
graph.addBackup(inc1)
val inc2 = createBackupVertex(table, 2, 4)
graph.addBackup(inc2)
// Add a second full backup.
val full2 = createBackupVertex(table, 0, 4)
graph.addBackup(full2)
val inc3 = createBackupVertex(table, 4, 5)
graph.addBackup(inc3)
val newGraph = graph.filterByTime(2)
assertEquals(1, newGraph.fullBackups.size)
assertEquals(1, newGraph.backupPaths.size)
}
private def createBackupVertex(table: KuduTable, fromMs: Long, toMs: Long): BackupNode = {
val metadata = TableMetadata.getTableMetadata(table, fromMs, toMs, "parquet")
BackupNode(null, metadata)
}
}
|
helifu/kudu
|
java/kudu-backup-common/src/test/scala/org/apache/kudu/backup/TestBackupGraph.scala
|
Scala
|
apache-2.0
| 5,289 |
package visitor
case class StyleVisitor() extends Visitor {
private lazy val ParentWidth = 58
private lazy val ElWidth = 46
override def visit(element: HtmlElement): Unit = applyWidth(element, ElWidth)
override def visit(parentElement: HtmlParentElement): Unit = applyWidth(parentElement, ParentWidth)
private def applyWidth(el: HtmlTag, w: Int): Unit = {
el.setStartTag(el.getStartTag.filter(_ != '>') + s" style='width:${w}px;'>")
}
}
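// Minimal, self-contained sketch of the visitor pattern StyleVisitor relies on. The real
// Html* hierarchy in this repository differs; the Sketch* types below are purely
// illustrative stand-ins showing the double dispatch.
trait SketchNode { def accept(v: SketchVisitor): Unit }
final class SketchLeaf(var style: String = "") extends SketchNode {
  def accept(v: SketchVisitor): Unit = v.visit(this)
}
final class SketchBranch(var style: String = "") extends SketchNode {
  def accept(v: SketchVisitor): Unit = v.visit(this)
}
trait SketchVisitor {
  def visit(leaf: SketchLeaf): Unit
  def visit(branch: SketchBranch): Unit
}
// Mirrors StyleVisitor: the behaviour is chosen by the concrete element type.
final class SketchWidthVisitor extends SketchVisitor {
  def visit(leaf: SketchLeaf): Unit = leaf.style = "width:46px;"
  def visit(branch: SketchBranch): Unit = branch.style = "width:58px;"
}
object SketchWidthVisitorDemo extends App {
  val nodes: List[SketchNode] = List(new SketchLeaf, new SketchBranch)
  val widths = new SketchWidthVisitor
  nodes.foreach(_.accept(widths))
  nodes.foreach {
    case l: SketchLeaf   => println(s"leaf   -> ${l.style}")
    case b: SketchBranch => println(s"branch -> ${b.style}")
  }
}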
|
BBK-PiJ-2015-67/sdp-portfolio
|
exercises/week11/src/main/scala/visitor/StyleVisitor.scala
|
Scala
|
unlicense
| 456 |
/*
* The MIT License
*
* Copyright (c) 2016 Fulcrum Genomics LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package dagr.tasks.samtools
import java.nio.file.Path
import dagr.core.tasksystem.FixedResources
import dagr.tasks.DagrDef
import DagrDef.{PathToBam, PathToFasta, PathToIntervals}
import scala.collection.mutable.ListBuffer
/**
* Runs samtools pileup.
*/
class SamtoolsPileup(ref: PathToFasta,
regions: Option[PathToIntervals] = None,
bam: PathToBam,
out: Option[Path],
maxDepth: Int = 5000,
minMappingQuality: Int = 1,
minBaseQuality: Int = 13)
extends SamtoolsTask("mpileup") with FixedResources {
override def addSubcommandArgs(buffer: ListBuffer[Any]): Unit = {
buffer.append("--fasta-ref", ref.toString)
regions.foreach(r => buffer.append("--positions", r.toString))
out.foreach(f => buffer.append("--output", f.toString))
buffer.append("--max-depth", maxDepth.toString)
buffer.append("--min-MQ", minMappingQuality.toString)
buffer.append("--min-BQ", minBaseQuality.toString)
buffer.append(bam)
}
}
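// Hedged usage sketch (not part of dagr): the paths are placeholders and it assumes
// PathToFasta/PathToBam resolve to java.nio.file.Path as defined in DagrDef. Arguments
// that are left out keep the defaults declared in the constructor above.
object SamtoolsPileupExample {
  val pileup = new SamtoolsPileup(
    ref  = java.nio.file.Paths.get("/refs/hg38.fa"),
    bam  = java.nio.file.Paths.get("/data/sample.bam"),
    out  = Some(java.nio.file.Paths.get("/data/sample.pileup")),
    minBaseQuality = 20)
}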
|
fulcrumgenomics/dagr
|
tasks/src/main/scala/dagr/tasks/samtools/SamtoolsPileup.scala
|
Scala
|
mit
| 2,219 |
package p04Euler
import p05Func.{ListFunc, Prime}
object HighDivTriNum {
def main() = {
val ln: Stream[Long] = ListFunc.listTriangularNumber
println(ln.find(Prime.nDivisor(_) > 500))
}
}
|
vkubicki/ScalaTest
|
src/main/scala/p04Euler/012 - HighDivTriNum.scala
|
Scala
|
mit
| 208 |
package com.github.diegopacheco.scala.sandbox.fp.cats.fun
object EQMain extends App {
import java.util.Date
import cats.Eq
import cats.syntax.eq._
import cats.instances.long._
implicit val dateEq:Eq[Date] =
Eq.instance[Date] { (date1,date2) =>
date1.getTime === date2.getTime
}
val x = new Date()
Thread.sleep(1000)
val y = new Date()
println( x === x )
println( y === x )
}
|
diegopacheco/scala-playground
|
cats-scala-fp/src/main/scala/com/github/diegopacheco/scala/sandbox/fp/cats/fun/EQMain.scala
|
Scala
|
unlicense
| 431 |
package example
class ValPattern {
val (left, right) = (1, 2)
val Some(number1) =
Some(1)
var (leftVar, rightVar) = (1, 2)
var Some(number1Var) =
Some(1)
def app(): Unit = {
println(
(
number1,
left,
right,
number1Var,
leftVar,
rightVar
)
)
locally {
val (left, right) = (1, 2)
val Some(number1) =
Some(1)
var (leftVar, rightVar) = (1, 2)
var Some(number1Var) =
Some(1)
println(
(
number1,
left,
right,
number1Var,
leftVar,
rightVar
)
)
}
}
}
|
scalameta/scalameta
|
semanticdb/integration/src/main/scala/example/ValPattern.scala
|
Scala
|
bsd-3-clause
| 673 |
trait Monad[T <: Bound[T], MyType[x <: Bound[x]], Bound[_]] {
def flatMap[S <: RBound[S], RContainer[x <: RBound[x]], RBound[_],
Result[x <: RBound[x]] <: Monad[x, RContainer, RBound]]
(f: T => Result[S]): Result[S]
def flatMap[S <: RBound[S], RContainer[x <: RBound[x]], RBound[_],
Result[x <: RBound[x]] <: Monad[x, RContainer, RBound]]
(f: T => Result[S], foo: String): Result[S]
def flatMap[S <: Bound[S]]
(f: T => MyType[S], foo: Int): MyType[S]
}
trait Test {
def moo: MList[Int]
class MList[T](el: T) extends Monad[T, List, Any] {
def flatMap[S <: RBound[S], RContainer[x <: RBound[x]], RBound[_],
Result[x <: RBound[x]] <: Monad[x, RContainer, RBound]]
(f: T => Result[S]): Result[S] = sys.error("foo")
def flatMap[S <: RBound[S], RContainer[x <: RBound[x]], RBound[_],
Result[x <: RBound[x]] <: Monad[x, RContainer, RBound]]
(f: T => Result[S], foo: String): Result[S] = sys.error("foo")
def flatMap[S]
(f: T => List[S], foo: Int): List[S] = sys.error("foo")
}
val l: MList[String] = moo.flatMap[String, List, Any, MList]((x: Int) => new MList("String"))
}
|
yusuke2255/dotty
|
tests/pending/pos/tcpoly_overloaded.scala
|
Scala
|
bsd-3-clause
| 1,225 |
package is.hail.expr.ir
import scala.collection.mutable
object RefEquality {
def apply[T <: AnyRef](t: T): RefEquality[T] = new RefEquality[T](t)
}
class RefEquality[+T <: AnyRef](val t: T) {
override def equals(obj: scala.Any): Boolean = obj match {
case r: RefEquality[T] => t.eq(r.t)
case _ => false
}
override def hashCode(): Int = System.identityHashCode(t)
override def toString: String = s"RefEquality($t)"
}
object Memo {
def empty[T]: Memo[T] = new Memo[T](new mutable.HashMap[RefEquality[BaseIR], T])
}
class Memo[T] private(val m: mutable.HashMap[RefEquality[BaseIR], T]) {
def bind(ir: BaseIR, t: T): Memo[T] = bind(RefEquality(ir), t)
def bind(ir: RefEquality[BaseIR], t: T): Memo[T] = {
if (m.contains(ir))
throw new RuntimeException(s"IR already in memo: ${ ir.t }")
m += ir -> t
this
}
def contains(ir: BaseIR): Boolean = contains(RefEquality(ir))
def contains(ir: RefEquality[BaseIR]): Boolean = m.contains(ir)
def lookup(ir: BaseIR): T = lookup(RefEquality(ir))
def lookup(ir: RefEquality[BaseIR]): T = m(ir)
def apply(ir: BaseIR): T = lookup(ir)
def update(ir: BaseIR, t: => T): Unit = m.update(RefEquality(ir), t)
def get(ir: BaseIR): Option[T] = get(RefEquality(ir))
def get(ir: RefEquality[BaseIR]): Option[T] = m.get(ir)
def getOrElse(ir: BaseIR, default: => T): T = m.getOrElse(RefEquality(ir), default)
def getOrElseUpdate(ir: BaseIR, t: => T): T = m.getOrElseUpdate(RefEquality(ir), t)
def getOrElseUpdate(ir: RefEquality[BaseIR], t: => T): T = m.getOrElseUpdate(ir, t)
def delete(ir: BaseIR): Unit = delete(RefEquality(ir))
def delete(ir: RefEquality[BaseIR]): Unit = m -= ir
}
object HasIRSharing {
def apply(ir: BaseIR): Boolean = {
val m = mutable.HashSet.empty[RefEquality[BaseIR]]
def recur(x: BaseIR): Boolean = {
val re = RefEquality(x)
if (m.contains(re))
true
else {
m.add(re)
x.children.exists(recur)
}
}
recur(ir)
}
}
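// Illustrative sketch (not part of Hail): RefEquality keys by object identity, so two
// structurally equal values land on different entries. CaseValue is a hypothetical
// stand-in for an IR node.
object RefEqualityDemo extends App {
  final case class CaseValue(name: String)
  val a = CaseValue("x")
  val b = CaseValue("x")                    // equal by value, distinct by reference
  println(a == b)                           // true
  println(RefEquality(a) == RefEquality(b)) // false: identity, not structure
  val m = mutable.HashMap[RefEquality[CaseValue], Int]()
  m(RefEquality(a)) = 1
  println(m.contains(RefEquality(a)))       // true  (same underlying instance)
  println(m.contains(RefEquality(b)))       // false (different instance)
}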
|
danking/hail
|
hail/src/main/scala/is/hail/expr/ir/RefEquality.scala
|
Scala
|
mit
| 2,015 |
/*
* @author Philip Stutz
*
* Copyright 2013 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.signalcollect.storage
import com.signalcollect.interfaces.Storage
import com.signalcollect.interfaces.VertexStore
import com.signalcollect.Vertex
class JavaMapVertexStorage[Id, Signal] extends Storage[Id, Signal] {
val vertices = vertexStoreFactory
protected def vertexStoreFactory: VertexStore[Id, Signal] = new JavaVertexMap[Id, Signal]
def updateStateOfVertex(vertex: Vertex[Id, _, Id, Signal]): Unit = Unit
def close(): Unit = Unit
val toCollect = vertexSignalFactory //holds all signals that are not collected yet
protected def vertexSignalFactory = new JavaVertexMap[Id, Signal]
val toSignal = vertexSetFactory //holds all vertex ids that need to signal
protected def vertexSetFactory = new JavaVertexMap[Id, Signal]
}
|
hicolour/signal-collect
|
src/main/scala/com/signalcollect/storage/JavaMapVertexStorage.scala
|
Scala
|
apache-2.0
| 1,402 |
/*
* Copyright 2014 okumin.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package akka.persistence.snapshot.sqlasync
import akka.persistence.helper.PostgreSQLInitializer
import akka.persistence.snapshot.SnapshotStoreSpec
import com.typesafe.config.ConfigFactory
class PostgreSQLSnapshotStoreSpec
extends SnapshotStoreSpec(ConfigFactory.load("postgresql-application.conf"))
with PostgreSQLInitializer
|
okumin/akka-persistence-sql-async
|
core/src/test/scala/akka/persistence/snapshot/sqlasync/PostgreSQLSnapshotStoreSpec.scala
|
Scala
|
apache-2.0
| 931 |
/*
* =========================================================================================
* Copyright © 2013-2017 the kamon project <http://kamon.io/>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
* =========================================================================================
*/
package kamon.netty.util
import java.util
import io.netty.channel.EventLoop
import kamon.Kamon
import kamon.context.{Context, HasContext}
import kamon.netty.Metrics
import kamon.netty.Metrics.EventLoopMetrics
import kamon.netty.util.EventLoopUtils.name
import kamon.util.Clock
class MonitoredQueue(eventLoop:EventLoop, underlying:util.Queue[Runnable]) extends QueueWrapperAdapter[Runnable](underlying) {
import MonitoredQueue._
implicit lazy val eventLoopMetrics: EventLoopMetrics = Metrics.forEventLoop(name(eventLoop))
override def add(runnable: Runnable): Boolean = {
eventLoopMetrics.taskQueueSize.increment()
underlying.add(new TimedTask(runnable))
}
override def offer(runnable: Runnable): Boolean = {
eventLoopMetrics.taskQueueSize.increment()
underlying.offer(new TimedTask(runnable))
}
override def remove(): Runnable = {
val runnable = underlying.remove()
eventLoopMetrics.taskQueueSize.decrement()
eventLoopMetrics.taskWaitingTime.record(timeInQueue(runnable))
runnable
}
override def poll(): Runnable = {
val runnable = underlying.poll()
if(runnable != null) {
eventLoopMetrics.taskQueueSize.decrement()
eventLoopMetrics.taskWaitingTime.record(timeInQueue(runnable))
}
runnable
}
}
object MonitoredQueue {
def apply(eventLoop: EventLoop, underlying: util.Queue[Runnable]): MonitoredQueue =
new MonitoredQueue(eventLoop, underlying)
def timeInQueue(runnable: Runnable):Long =
runnable.asInstanceOf[TimedTask].timeInQueue
}
private[this] class TimedTask(underlying:Runnable)(implicit metrics: EventLoopMetrics) extends Runnable with HasContext {
val startTime:Long = Clock.relativeNanoTimestamp()
val context: Context = Kamon.currentContext()
override def run(): Unit =
Kamon.withContext(context) {
Latency.measure(metrics.taskProcessingTime)(underlying.run())
}
def timeInQueue: Long =
Clock.relativeNanoTimestamp() - startTime
}
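// Standalone sketch of the same wrap-and-measure idea without Netty or Kamon: each task is
// wrapped with its enqueue timestamp so the time it spent waiting in the queue can be read
// on dequeue. QueueTimingSketch and TimedRunnable are illustrative names only.
object QueueTimingSketch {
  class TimedRunnable(underlying: Runnable) extends Runnable {
    val enqueuedAt: Long = System.nanoTime()
    def nanosInQueue: Long = System.nanoTime() - enqueuedAt
    override def run(): Unit = underlying.run()
  }
  def main(args: Array[String]): Unit = {
    val queue = new java.util.ArrayDeque[TimedRunnable]()
    queue.add(new TimedRunnable(new Runnable { def run(): Unit = println("task ran") }))
    Thread.sleep(5)
    val task = queue.poll()
    println(s"waited ~${task.nanosInQueue / 1000000} ms in the queue")
    task.run()
  }
}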
|
kamon-io/kamon-netty
|
src/main/scala/kamon/netty/util/MonitoredQueue.scala
|
Scala
|
apache-2.0
| 2,783 |
package com.twitter.scalding.typed.memory_backend
import org.scalatest.FunSuite
import org.scalatest.prop.PropertyChecks
import com.twitter.scalding.{ TypedPipe, Execution, Config, Local }
import com.twitter.scalding.typed.TypedPipeGen
class MemoryTest extends FunSuite with PropertyChecks {
private def mapMatch[K, V](ex: Execution[Iterable[(K, V)]]) = {
val mm = MemoryMode.empty
val mkv = ex.waitFor(Config.empty, mm)
val lkv = ex.waitFor(Config.empty, Local(true))
assert(mkv.get.toMap == lkv.get.toMap)
}
private def timeit[A](msg: String, a: => A): A = {
val start = System.nanoTime()
val res = a
val diff = System.nanoTime() - start
val ms = diff / 1e6
// uncomment this for some poor version of benchmarking,
// but scalding in-memory mode seems about 3-100x faster
//
// println(s"$msg: $ms ms")
res
}
private def sortMatch[A: Ordering](ex: Execution[Iterable[A]]) = {
val mm = MemoryMode.empty
val mkv = timeit("scalding", ex.waitFor(Config.empty, mm))
val lkv = timeit("cascading", ex.waitFor(Config.empty, Local(true)))
assert(mkv.get.toList.sorted == lkv.get.toList.sorted)
}
test("basic word count") {
val x = TypedPipe.from(0 until 100)
.groupBy(_ % 2)
.sum
.toIterableExecution
mapMatch(x)
}
test("mapGroup works") {
val x = TypedPipe.from(0 until 100)
.groupBy(_ % 2)
.mapGroup { (k, vs) => Iterator.single(vs.foldLeft(k)(_ + _)) }
.toIterableExecution
mapMatch(x)
}
test("hashJoin works") {
val input = TypedPipe.from(0 until 100)
val left = input.map { k => (k, k % 2) }
val right = input.map { k => (k, k % 3) }
mapMatch(left.hashJoin(right).toIterableExecution)
}
test("join works") {
val input = TypedPipe.from(0 until 100)
val left = input.map { k => (k, k % 2) }
val right = input.map { k => (k, k % 3) }
mapMatch(left.join(right).toIterableExecution)
}
test("scalding memory mode matches cascading local mode") {
import TypedPipeGen.genWithIterableSources
implicit val generatorDrivenConfig: PropertyCheckConfiguration = PropertyCheckConfiguration(minSuccessful = 50)
forAll(genWithIterableSources) { pipe => sortMatch(pipe.toIterableExecution) }
}
test("writing gives the same result as toIterableExecution") {
import TypedPipeGen.genWithIterableSources
// we can afford to test a lot more in just memory mode because it is faster than cascading
implicit val generatorDrivenConfig: PropertyCheckConfiguration = PropertyCheckConfiguration(minSuccessful = 500)
forAll(genWithIterableSources) { pipe =>
val sink = new MemorySink.LocalVar[Int]
val ex1 = pipe.writeExecution(SinkT("my_sink"))
val ex2 = pipe.toIterableExecution
val mm = MemoryMode.empty.addSink(SinkT("my_sink"), sink)
val res1 = ex1.waitFor(Config.empty, mm)
val res2 = ex2.waitFor(Config.empty, MemoryMode.empty)
assert(sink.reset().get.toList.sorted == res2.get.toList.sorted)
}
}
test("using sources work") {
val srctag = SourceT[Int]("some_source")
val job = TypedPipe.from(srctag).map { i => (i % 31, i) }.sumByKey.toIterableExecution
val jobRes = job.waitFor(Config.empty, MemoryMode.empty.addSourceIterable(srctag, (0 to 10000)))
val expected = (0 to 10000).groupBy(_ % 31).mapValues(_.sum).toList.sorted
assert(jobRes.get.toList.sorted == expected)
}
}
|
jzmq/scalding
|
scalding-core/src/test/scala/com/twitter/scalding/typed/memory_backend/MemoryTest.scala
|
Scala
|
apache-2.0
| 3,463 |
package org.apache.kafka.streams.processor.internals.assignment
import com.twitter.inject.Test
// Currently empty as Kafka 2.5 doesn't need any test and BUILD requires a
// non-empty set of source files.
class StaticPartitioningTest extends Test {}
|
twitter/finatra
|
kafka-streams/kafka-streams-static-partitioning/src/test/scala-kafka2.5/StaticPartitioningTest.scala
|
Scala
|
apache-2.0
| 251 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.impl.datasources
import slamdata.Predef._
import quasar.{RateLimiting, RenderTreeT}
import quasar.api.datasource._
import quasar.api.datasource.DatasourceError._
import quasar.api.resource._
import quasar.impl.{DatasourceModule, QuasarDatasource}
import quasar.impl.IncompatibleModuleException.linkDatasource
import quasar.connector.{MonadResourceErr, QueryResult}
import quasar.connector.datasource.Reconfiguration
import quasar.qscript.MonadPlannerErr
import scala.concurrent.ExecutionContext
import argonaut.Json
import argonaut.Argonaut.jEmptyObject
import cats.{Monad, MonadError}
import cats.effect.{Resource, ConcurrentEffect, ContextShift, Timer, Bracket}
import cats.implicits._
import cats.kernel.Hash
import fs2.Stream
import matryoshka.{BirecursiveT, EqualT, ShowT}
import scalaz.{ISet, EitherT, -\\/, \\/-}
import shims.{monadToScalaz, monadToCats}
trait DatasourceModules[T[_[_]], F[_], G[_], H[_], I, C, R, P <: ResourcePathType] { self =>
def create(i: I, ref: DatasourceRef[C])
: EitherT[Resource[F, ?], CreateError[C], QuasarDatasource[T, G, H, R, P]]
def sanitizeRef(inp: DatasourceRef[C]): DatasourceRef[C]
def supportedTypes: F[ISet[DatasourceType]]
def reconfigureRef(original: DatasourceRef[C], patch: C)
: Either[CreateError[C], (Reconfiguration, DatasourceRef[C])]
def withMiddleware[HH[_], S, Q <: ResourcePathType](
f: (I, QuasarDatasource[T, G, H, R, P]) => F[QuasarDatasource[T, G, HH, S, Q]])(
implicit
AF: Monad[F])
: DatasourceModules[T, F, G, HH, I, C, S, Q] =
new DatasourceModules[T, F, G, HH, I, C, S, Q] {
def create(i: I, ref: DatasourceRef[C])
: EitherT[Resource[F, ?], CreateError[C], QuasarDatasource[T, G, HH, S, Q]] =
self.create(i, ref) flatMap { (mds: QuasarDatasource[T, G, H, R, P]) =>
EitherT.rightT(Resource.liftF(f(i, mds)))
}
def sanitizeRef(inp: DatasourceRef[C]): DatasourceRef[C] =
self.sanitizeRef(inp)
def supportedTypes: F[ISet[DatasourceType]] =
self.supportedTypes
def reconfigureRef(original: DatasourceRef[C], patch: C)
: Either[CreateError[C], (Reconfiguration, DatasourceRef[C])] =
self.reconfigureRef(original, patch)
}
def withFinalizer(
f: (I, QuasarDatasource[T, G, H, R, P]) => F[Unit])(
implicit F: Monad[F])
: DatasourceModules[T, F, G, H, I, C, R, P] =
new DatasourceModules[T, F, G, H, I, C, R, P] {
def create(i: I, ref: DatasourceRef[C])
: EitherT[Resource[F, ?], CreateError[C], QuasarDatasource[T, G, H, R, P]] =
self.create(i, ref) flatMap { (mds: QuasarDatasource[T, G, H, R, P]) =>
EitherT.rightT(Resource.make(mds.pure[F])(x => f(i, x)))
}
def sanitizeRef(inp: DatasourceRef[C]): DatasourceRef[C] =
self.sanitizeRef(inp)
def supportedTypes: F[ISet[DatasourceType]] =
self.supportedTypes
def reconfigureRef(original: DatasourceRef[C], patch: C)
: Either[CreateError[C], (Reconfiguration, DatasourceRef[C])] =
self.reconfigureRef(original, patch)
}
def widenPathType[PP >: P <: ResourcePathType](implicit AF: Monad[F])
: DatasourceModules[T, F, G, H, I, C, R, PP] =
new DatasourceModules[T, F, G, H, I, C, R, PP] {
def create(i: I, ref: DatasourceRef[C])
: EitherT[Resource[F, ?], CreateError[C], QuasarDatasource[T, G, H, R, PP]] =
self.create(i, ref) map { QuasarDatasource.widenPathType[T, G, H, R, P, PP](_) }
def sanitizeRef(inp: DatasourceRef[C]): DatasourceRef[C] =
self.sanitizeRef(inp)
def supportedTypes: F[ISet[DatasourceType]] =
self.supportedTypes
def reconfigureRef(original: DatasourceRef[C], patch: C)
: Either[CreateError[C], (Reconfiguration, DatasourceRef[C])] =
self.reconfigureRef(original, patch)
}
}
object DatasourceModules {
type Modules[T[_[_]], F[_], I] =
DatasourceModules[T, F, Resource[F, ?], Stream[F, ?], I, Json, QueryResult[F], ResourcePathType.Physical]
type MDS[T[_[_]], F[_]] =
QuasarDatasource[T, Resource[F, ?], Stream[F, ?], QueryResult[F], ResourcePathType.Physical]
private[impl] def apply[
T[_[_]]: BirecursiveT: EqualT: ShowT: RenderTreeT,
F[_]: ConcurrentEffect: ContextShift: Timer: MonadResourceErr: MonadPlannerErr,
I, A: Hash](
modules: List[DatasourceModule],
rateLimiting: RateLimiting[F, A],
byteStores: ByteStores[F, I])(
implicit
ec: ExecutionContext)
: Modules[T, F, I] = {
lazy val moduleSet: ISet[DatasourceType] =
ISet.fromList(modules.map(_.kind))
lazy val moduleMap: Map[DatasourceType, DatasourceModule] =
Map(modules.map(ds => (ds.kind, ds)): _*)
new DatasourceModules[T, F, Resource[F, ?], Stream[F, ?], I, Json, QueryResult[F], ResourcePathType.Physical] {
def create(i: I, ref: DatasourceRef[Json])
: EitherT[Resource[F, ?], CreateError[Json], MDS[T, F]] =
moduleMap.get(ref.kind) match {
case None =>
EitherT.pureLeft(DatasourceUnsupported(ref.kind, moduleSet))
case Some(module) =>
EitherT.rightU[CreateError[Json]](Resource.liftF(byteStores.get(i))) flatMap { store =>
module match {
case DatasourceModule.Lightweight(lw) =>
handleInitErrors(module.kind, lw.lightweightDatasource[F, A](ref.config, rateLimiting, store))
.map(QuasarDatasource.lightweight[T](_))
case DatasourceModule.Heavyweight(hw) =>
handleInitErrors(module.kind, hw.heavyweightDatasource[T, F](ref.config, store))
.map(QuasarDatasource.heavyweight(_))
}
}
}
def sanitizeRef(inp: DatasourceRef[Json]): DatasourceRef[Json] =
moduleMap.get(inp.kind) match {
case None => inp.copy(config = jEmptyObject)
case Some(x) => inp.copy(config = x.sanitizeConfig(inp.config))
}
def supportedTypes: F[ISet[DatasourceType]] =
moduleSet.pure[F]
def reconfigureRef(original: DatasourceRef[Json], patch: Json)
: Either[CreateError[Json], (Reconfiguration, DatasourceRef[Json])] =
moduleMap.get(original.kind) match {
case None =>
Left(datasourceUnsupported[CreateError[Json]]((original.kind, moduleSet)))
case Some(ds) =>
ds.reconfigure(original.config, patch)
.map(_.map(c => original.copy(config = c)))
}
}
}
private def handleInitErrors[F[_]: Bracket[?[_], Throwable]: MonadError[?[_], Throwable], A](
kind: DatasourceType,
res: => Resource[F, Either[InitializationError[Json], A]])
: EitherT[Resource[F, ?], CreateError[Json], A] = EitherT {
linkDatasource(kind, res) map {
      case Right(a) => \/-(a)
      case Left(x) => -\/(x)
}
}
}
|
djspiewak/quasar
|
impl/src/main/scala/quasar/impl/datasources/DatasourceModules.scala
|
Scala
|
apache-2.0
| 7,548 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.testkit
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicInteger
import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.{Config, ConfigFactory, ConfigObject}
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.{JMX, Unicomplex, UnicomplexBoot}
import org.squbs.util.ConfigUtil._
import scala.concurrent.Await
import scala.concurrent.duration.FiniteDuration
import scala.collection.JavaConverters._
import scala.util.Try
object CustomTestKit {
private[testkit] val actorSystems = collection.concurrent.TrieMap.empty[String, ActorSystem]
private[testkit] def checkInit(actorSystem: ActorSystem): Unit = {
if (actorSystems.putIfAbsent(actorSystem.name, actorSystem).isEmpty)
sys.addShutdownHook {
val stopTimeoutInMs = actorSystem.settings.config.getDuration("squbs.default-stop-timeout", TimeUnit.MILLISECONDS)
Await.ready(actorSystem.terminate(), FiniteDuration(stopTimeoutInMs, TimeUnit.MILLISECONDS))
}
}
  // JUnit creates a new object for each @Test method. To prevent actor system name collisions, an integer is appended
  // to the actor system name.
val counter = new AtomicInteger(0)
val stackTraceDepth =
if (util.Properties.versionNumberString.startsWith("2.12")) 5 // scala 2.12 - CustomTestKit$ x 2 -> Option -> CustomTestKit$ -> CustomTestKit -> Spec
else 6 // scala 2.11 - CustomTestKit$ x 3 -> Option -> CustomTestKit$ -> CustomTestKit -> Spec
/* Example stack trace:
java.lang.Exception
at org.squbs.testkit.CustomTestKit$.defaultActorSystemName(CustomTestKit.scala:52)
at org.squbs.testkit.CustomTestKit$.$anonfun$boot$1(CustomTestKit.scala:109)
at scala.Option.getOrElse(Option.scala:121)
at org.squbs.testkit.CustomTestKit$.boot(CustomTestKit.scala:109)
at org.squbs.testkit.CustomTestKit.<init>(CustomTestKit.scala:128)
at org.squbs.testkit.CustomTestKitDefaultSpec.<init>(CustomTestKitSpec.scala:60)
at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
at java.lang.Class.newInstance(Class.java:442)
*/
def defaultActorSystemName =
s"${actorSystemNameFrom((new Exception).getStackTrace.apply(stackTraceDepth).getClassName)}-${counter.getAndIncrement()}"
def actorSystemNameFrom(className: String): String =
className
.replace('.', '-')
.replace('_', '-')
.filter(_ != '$')
/**
* Detects default resources for this test. These are usually at two locations:
* <ul>
* <li>$project-path/target/scala-2.11/classes/META-INF/squbs-meta.conf</li>
* <li>$project-path/target/scala-2.11/test-classes/META-INF/squbs-meta.conf</li>
* </ul>
* @return The list of detected resources
*/
val defaultResources: Seq[String] = {
val loader = getClass.getClassLoader
val resourceHome = loader.getResource("").getPath
val lastSlashOption = Try {
if (resourceHome endsWith "/") resourceHome.lastIndexOf("/", resourceHome.length - 2)
else resourceHome.lastIndexOf("/")
} .toOption.filter { _ > 0 }
val targetPathOption = lastSlashOption map { lastSlash => resourceHome.substring(0, lastSlash + 1) }
targetPathOption map { targetPath =>
Seq("conf", "json", "properties")
.flatMap { ext => loader.getResources(s"META-INF/squbs-meta.$ext").asScala }
.map { _.getPath}
.filter { _.startsWith(targetPath) }
} getOrElse Seq.empty
}
def defaultConfig(actorSystemName: String): Config = {
val baseConfig = ConfigFactory.load()
val listeners = baseConfig.root.asScala.toSeq.collect {
case (n, v: ConfigObject) if v.toConfig.getOption[String]("type").contains("squbs.listener") => n
}
val portOverrides = listeners.map { listener => s"$listener.bind-port = 0" } .mkString("\n")
ConfigFactory.parseString(
s"""
|squbs {
| actorsystem-name = $actorSystemName
| ${JMX.prefixConfig} = true
|}
""".stripMargin + portOverrides
)
}
def boot(actorSystemName: Option[String] = None,
config: Option[Config] = None,
resources: Option[Seq[String]] = None,
withClassPath: Option[Boolean] = None): UnicomplexBoot = {
val baseConfig = defaultConfig(actorSystemName.getOrElse(defaultActorSystemName))
boot(config.map(_.withFallback(baseConfig)).getOrElse(baseConfig), resources, withClassPath)
}
private def boot(config: Config, resources: Option[Seq[String]], withClassPath: Option[Boolean]): UnicomplexBoot =
UnicomplexBoot(config)
.createUsing {(name, config) => ActorSystem(name, config)}
.scanResources(withClassPath.getOrElse(false), resources.getOrElse(defaultResources):_*)
.initExtensions.start()
}
/**
* The custom test kit allows custom configuration of the Unicomplex before boot. It also does not require the test
 * to run in a separate process and allows for parallel tests.
*/
abstract class CustomTestKit(val boot: UnicomplexBoot) extends TestKit(boot.actorSystem)
with DebugTiming with ImplicitSender with Suite with BeforeAndAfterAll with PortGetter {
def this() {
this(CustomTestKit.boot())
}
def this(actorSystemName: String) {
this(CustomTestKit.boot(Option(actorSystemName)))
}
def this(config: Config) {
this(CustomTestKit.boot(config = Option(config)))
}
def this(withClassPath: Boolean) {
this(CustomTestKit.boot(withClassPath = Option(withClassPath)))
}
def this(resources: Seq[String], withClassPath: Boolean) {
this(CustomTestKit.boot(resources = Option(resources), withClassPath = Option(withClassPath)))
}
def this(actorSystemName: String, resources: Seq[String], withClassPath: Boolean) {
this(CustomTestKit.boot(Option(actorSystemName), resources = Option(resources), withClassPath = Option(withClassPath)))
}
def this(config: Config, resources: Seq[String], withClassPath: Boolean) {
this(CustomTestKit.boot(config = Option(config), resources = Option(resources), withClassPath = Option(withClassPath)))
}
override protected def beforeAll(): Unit = {
CustomTestKit.checkInit(system)
}
override protected def afterAll(): Unit = {
Unicomplex(system).uniActor ! GracefulStop
}
}
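// Illustrative sketch (not part of the original source): boot a Unicomplex directly with an
// extra config overlay, which can then be passed to a CustomTestKit subclass constructor.
// The actor system name and config value below are made-up example values.
object CustomTestKitBootSketch {
  def bootWithOverrides(): UnicomplexBoot =
    CustomTestKit.boot(
      actorSystemName = Some("custom-testkit-boot-sketch"),
      config = Some(ConfigFactory.parseString("akka.loglevel = WARNING")))
}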
|
anilgursel/squbs
|
squbs-testkit/src/main/scala/org/squbs/testkit/CustomTestKit.scala
|
Scala
|
apache-2.0
| 7,245 |
package org.scalatra
import java.net.URI
import javax.servlet.http.{ HttpServletRequest, HttpServletResponse }
import org.scalatra.servlet.ServletApiImplicits
/**
* Redirects unsecured requests to the corresponding secure URL.
*/
trait SslRequirement extends Handler with ServletApiImplicits {
abstract override def handle(req: HttpServletRequest, res: HttpServletResponse) {
if (!req.isSecure) {
val oldUri = req.uri
val port = securePortMap.lift(oldUri.getPort) getOrElse 443
val uri = new URI(
"https",
oldUri.getRawUserInfo,
oldUri.getHost,
port,
oldUri.getPath,
oldUri.getQuery,
oldUri.getFragment
).toString
res.redirect(uri)
} else {
super.handle(req, res)
}
}
/**
* Maps unsecured ports to secure ports.
* By default, 80 redirects to 443, and 8080 to 8443.
*/
protected def securePortMap: PartialFunction[Int, Int] = {
Map(80 -> 443, 8080 -> 8443)
}
}
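// Illustrative sketch (not part of the original source). SslRequirement is a stackable trait,
// so it needs a concrete handle() below it in the linearization; the no-op base handler and
// the extra 9080 -> 9443 port mapping below are made-up examples.
private[scalatra] class NoOpHandler extends Handler {
  def handle(req: HttpServletRequest, res: HttpServletResponse): Unit = ()
}
private[scalatra] class SslEnforcedHandler extends NoOpHandler with SslRequirement {
  // Redirect an additional non-standard port pair on top of the defaults.
  override protected def securePortMap: PartialFunction[Int, Int] =
    Map(9080 -> 9443) orElse super.securePortMap
}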
|
lightvector/scalatra
|
core/src/main/scala/org/scalatra/SslRequirement.scala
|
Scala
|
bsd-2-clause
| 995 |
// Copyright 2014 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package au.com.cba.omnia.maestro.core
package clean
import au.com.cba.omnia.maestro.core.data._
case class Clean(run: (Field[_, _], String) => String) {
  /** Returns a new Clean instance that applies the current clean action ONLY if the condition evaluates to true. */
def applyTo(conditionFn: Field[_, _] => Boolean): Clean =
Clean((field, data) => if (conditionFn(field)) run(field, data) else data)
}
object Clean {
def all(cleans: Clean*): Clean =
Clean((field, data) => cleans.foldLeft(data)((acc, clean) => clean.run(field, acc)))
def trim: Clean =
Clean((_, data) => data.trim)
def removeNonPrintables: Clean =
Clean((_, data) => data.replaceAll("[^\\p{Print}]", ""))
def default: Clean =
Clean((_, data) => data.trim.replaceAll("[^\\p{Print}]", ""))
/** Allow users to apply cleaners on selected fields */
def applyTo(conditionFn: Field[_, _] => Boolean, cleaner: Clean): Clean =
cleaner.applyTo(conditionFn)
}
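// Illustrative sketch (not part of the original source): compose cleaners and restrict one of
// them to a single column. The "customer_name" field name is a made-up example, and the
// sketch assumes Field exposes its column name via `name`.
object CleanExample {
  // Trim and strip non-printable characters from every field.
  val cleanEverything: Clean = Clean.all(Clean.trim, Clean.removeNonPrintables)
  // Only trim the field called "customer_name"; leave all other fields untouched.
  val trimCustomerNameOnly: Clean = Clean.applyTo(_.name == "customer_name", Clean.trim)
}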
|
toddmowen/maestro
|
maestro-core/src/main/scala/au/com/cba/omnia/maestro/core/clear/Clean.scala
|
Scala
|
apache-2.0
| 1,591 |
package com.twitter.finatra.tests.json.internal
import com.fasterxml.jackson.annotation.{JsonIgnore, JsonIgnoreProperties, JsonProperty}
import com.fasterxml.jackson.databind.JsonNode
import com.twitter.finatra.domain.WrappedValue
import com.twitter.finatra.request._
import com.twitter.finatra.validation.{NotEmpty, ValidationResult}
import com.twitter.inject.Logging
import org.joda.time.DateTime
import scala.annotation.meta.param
case class CaseClass(id: Long, name: String)
case class CaseClassWithLazyVal(id: Long) {
lazy val woo = "yeah"
}
case class CaseClassWithIgnoredField(id: Long) {
@JsonIgnore
val ignoreMe = "Foo"
}
@JsonIgnoreProperties(Array("ignore_me", "feh"))
case class CaseClassWithIgnoredFieldsMatchAfterToSnakeCase(id: Long) {
val ignoreMe = "Foo"
val feh = "blah"
}
@JsonIgnoreProperties(Array("ignore_me", "feh"))
case class CaseClassWithIgnoredFieldsExactMatch(id: Long) {
val ignore_me = "Foo"
val feh = "blah"
}
case class CaseClassWithTransientField(id: Long) {
@transient
val lol = "asdf"
}
case class CaseClassWithLazyField(id: Long) {
lazy val lol = "asdf"
}
case class CaseClassWithOverloadedField(id: Long) {
def id(prefix: String): String = prefix + id
}
case class CaseClassWithOption(value: Option[String] = None)
case class CaseClassWithJsonNode(value: JsonNode)
case class CaseClassWithAllTypes(map: Map[String, String],
set: Set[Int],
string: String,
list: List[Int],
seq: Seq[Int],
indexedSeq: IndexedSeq[Int],
vector: Vector[Int],
bigDecimal: BigDecimal,
bigInt: Int, //TODO: BigInt,
int: Int,
long: Long,
char: Char,
bool: Boolean,
short: Short,
byte: Byte,
float: Float,
double: Double,
any: Any,
anyRef: AnyRef,
intMap: Map[Int, Int] = Map(),
longMap: Map[Long, Long] = Map())
case class CaseClassWithException() {
throw new NullPointerException("Oops!!!")
}
object OuterObject {
case class NestedCaseClass(id: Long)
object InnerObject {
case class SuperNestedCaseClass(id: Long)
}
}
case class CaseClassWithTwoConstructors(id: Long, name: String) {
def this(id: Long) = this(id, "New User")
}
case class CaseClassWithSnakeCase(oneThing: String, twoThing: String)
case class CaseClassWithArrays(
one: String,
two: Array[String],
three: Array[Int],
four: Array[Long],
five: Array[Char])
case class CaseClassWithArrayLong(array: Array[Long])
case class CaseClassWithSeqLong(seq: Seq[Long])
case class Foo(name: String)
case class Car(
id: Long,
make: CarMake,
model: String,
passengers: Seq[Person]) {
def validateId = {
ValidationResult(
id > 0,
"id must be > 0")
}
}
case class Person(
id: Int,
name: String,
age: Option[Int],
age_with_default: Option[Int] = None,
nickname: String = "unknown")
case class PersonWithDottedName(
id: Int,
@JsonProperty("name.last") lastName: String)
case class SimplePerson(name: String)
case class CaseClassWithMap(map: Map[String, String])
case class CaseClassWithSetOfLongs(set: Set[Long])
case class CaseClassWithSeqOfLongs(seq: Seq[Long])
case class CaseClassWithNestedSeqLong(
seqClass: CaseClassWithSeqLong,
setClass: CaseClassWithSetOfLongs)
case class Blah(foo: String)
case class TestIdStringWrapper(id: String)
extends WrappedValue[String]
case class ObjWithTestId(id: TestIdStringWrapper)
object Obj {
case class NestedCaseClassInObject(id: String)
}
case class WrappedValueInt(value: Int)
extends WrappedValue[Int]
case class WrappedValueLong(value: Long)
extends WrappedValue[Long]
case class WrappedValueString(value: String)
extends WrappedValue[String]
case class WrappedValueIntInObj(
foo: WrappedValueInt)
case class WrappedValueStringInObj(
foo: WrappedValueString)
case class WrappedValueLongInObj(
foo: WrappedValueLong)
case class CaseClassWithVal(
name: String) {
val `type`: String = "person"
}
case class CaseClassWithEnum(
name: String,
make: CarMakeEnum)
case class CaseClassWithComplexEnums(
name: String,
make: CarMakeEnum,
makeOpt: Option[CarMakeEnum],
makeSeq: Seq[CarMakeEnum],
makeSet: Set[CarMakeEnum])
case class CaseClassWithSeqEnum(
enumSeq: Seq[CarMakeEnum])
case class CaseClassWithOptionEnum(
enumOpt: Option[CarMakeEnum])
case class CaseClassWithDateTime(
dateTime: DateTime)
case class CaseClassWithIntAndDateTime(
@NotEmpty name: String,
age: Int,
age2: Int,
age3: Int,
dateTime: DateTime,
dateTime2: DateTime,
dateTime3: DateTime,
dateTime4: DateTime,
@NotEmpty dateTime5: Option[DateTime])
case class ClassWithFooClassInject(
@RequestInject fooClass: FooClass)
case class ClassWithQueryParamDateTimeInject(
@QueryParam dateTime: DateTime)
case class CaseClassWithEscapedLong(
`1-5`: Long)
case class CaseClassWithEscapedString(
`1-5`: String)
case class CaseClassWithEscapedNormalString(
`a`: String)
case class UnicodeNameCaseClass(`winning-id`: Int, name: String)
case class TestEntityIdsResponse(
entityIds: Seq[Long],
previousCursor: String,
nextCursor: String)
object TestEntityIdsResponseWithCompanion {
val msg = "im the companion"
}
case class TestEntityIdsResponseWithCompanion(
entityIds: Seq[Long],
previousCursor: String,
nextCursor: String)
case class WrappedValueStringMapObject(
map: Map[WrappedValueString, String])
case class FooClass(id: String)
case class Group3(id: String)
extends Logging
case class CaseClassWithInvalidValidation(
@(InvalidValidationInternal@param) name: String,
make: CarMakeEnum)
case class NoConstructorArgs()
|
kaushik94/finatra
|
jackson/src/test/scala/com/twitter/finatra/tests/json/internal/ExampleCaseClasses.scala
|
Scala
|
apache-2.0
| 5,566 |
/*
* Copyright (C) 2016 Christopher Batey and Dogan Narinc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scassandra.codec.messages
import org.scassandra.codec.{CodecSpec, ProtocolVersion, Ready}
import scodec.Codec
class ReadySpec extends CodecSpec {
"Ready.codec" when {
withProtocolVersions { (protocolVersion: ProtocolVersion) =>
implicit val p = protocolVersion
implicit val codec = Codec[Ready.type]
"encode and decode Ready" in {
encodeAndDecode(Ready)
}
}
}
}
|
mikefero/cpp-driver
|
gtests/src/integration/scassandra/server/codec/src/test/scala/org/scassandra/codec/messages/ReadySpec.scala
|
Scala
|
apache-2.0
| 1,041 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
import org.scalatest._
import scala.collection.GenSeq
import scala.collection.GenMap
import scala.collection.GenSet
import scala.collection.GenIterable
import scala.collection.GenTraversable
import scala.collection.GenTraversableOnce
import scala.collection.{mutable,immutable}
class TypeCheckedSetEqualityConstraintsSpec extends FunSpec with NonImplicitAssertions with TypeCheckedTripleEquals with SetEqualityConstraints {
case class Super(size: Int)
class Sub(sz: Int) extends Super(sz)
val super1: Super = new Super(1)
val sub1: Sub = new Sub(1)
val super2: Super = new Super(2)
val sub2: Sub = new Sub(2)
val nullSuper: Super = null
case class Fruit(name: String)
class Apple extends Fruit("apple")
class Orange extends Fruit("orange")
describe("the SetEqualityConstraints trait") {
it("should allow any Set to be compared with any other Set, so long as the element types of the two Sets adhere to the equality constraint in force for those types") {
assert(mutable.HashSet(1, 2, 3) === immutable.HashSet(1, 2, 3))
// assert(mutable.HashSet(1, 2, 3) === immutable.HashSet(1L, 2L, 3L)) // does not compile last time I checked
// assert(mutable.HashSet(1L, 2L, 3L) === immutable.HashSet(1, 2, 3)) // does not compile last time I checked
// assert(immutable.HashSet(1, 2, 3) === mutable.HashSet(1L, 2L, 3L)) // does not compile last time I checked
// assert(immutable.HashSet(1L, 2L, 3L) === mutable.HashSet(1, 2, 3)) // does not compile last time I checked
assert(mutable.HashSet(new Apple, new Apple) === immutable.HashSet(new Fruit("apple"), new Fruit("apple")))
assert(immutable.HashSet(new Fruit("apple"), new Fruit("apple")) === mutable.HashSet(new Apple, new Apple))
// assert(mutable.HashSet(new Apple, new Apple) === immutable.HashSet(new Orange, new Orange)) // does not compile last time I checked
// assert(immutable.HashSet(new Apple, new Apple) === mutable.HashSet(new Orange, new Orange)) // does not compile last time I checked
// assert(immutable.HashSet(new Orange, new Orange) === mutable.HashSet(new Apple, new Apple)) // does not compile last time I checked
// assert(mutable.HashSet(new Orange, new Orange) === immutable.HashSet(new Apple, new Apple)) // does not compile last time I checked
}
}
}
|
dotty-staging/scalatest
|
scalactic-test/src/test/scala/org/scalactic/TypeCheckedSetEqualityConstraintsSpec.scala
|
Scala
|
apache-2.0
| 2,950 |
package mesosphere.mesos.simulation
import akka.actor.{ Actor, ActorRef, Cancellable, Props }
import akka.event.LoggingReceive
import mesosphere.mesos.simulation.DriverActor.{ KillTask, LaunchTasks }
import org.apache.mesos.Protos._
import org.apache.mesos.SchedulerDriver
import org.slf4j.LoggerFactory
import scala.collection.JavaConversions._
import scala.concurrent.duration._
object DriverActor {
case class DeclineOffer(offerId: OfferID)
/**
* Corresponds to the following method in [[org.apache.mesos.MesosSchedulerDriver]]:
*
* `override def launchTasks(offerIds: util.Collection[OfferID], tasks: util.Collection[TaskInfo]): Status`
*/
case class LaunchTasks(offerIds: Seq[OfferID], tasks: Seq[TaskInfo])
/**
* Corresponds to the following method in [[org.apache.mesos.MesosSchedulerDriver]]:
*
* `override def killTask(taskId: TaskID): Status`
*/
case class KillTask(taskId: TaskID)
/**
* Corresponds to the following method in [[org.apache.mesos.MesosSchedulerDriver]]:
*
* `override def reconcileTasks(statuses: util.Collection[TaskStatus]): Status`
*/
case class ReconcileTask(taskStatus: Seq[TaskStatus])
}
class DriverActor(schedulerProps: Props) extends Actor {
private val log = LoggerFactory.getLogger(getClass)
private[this] val numberOfOffersPerCycle: Int = 10
private[this] var periodicOffers: Option[Cancellable] = None
private[this] var scheduler: ActorRef = _
override def preStart(): Unit = {
super.preStart()
scheduler = context.actorOf(schedulerProps, "scheduler")
def resource(name: String, value: Double): Resource = {
Resource.newBuilder()
.setName(name)
.setType(Value.Type.SCALAR)
.setScalar(Value.Scalar.newBuilder().setValue(value))
.build()
}
val offer: Offer = Offer.newBuilder()
.setId(OfferID.newBuilder().setValue("thisisnotandid"))
.setFrameworkId(FrameworkID.newBuilder().setValue("notanidframework"))
.setSlaveId(SlaveID.newBuilder().setValue("notanidslave"))
.setHostname("hostname")
.addAllResources(Seq(
resource("cpus", 100),
resource("mem", 500000),
resource("disk", 1000000000),
Resource.newBuilder()
.setName("ports")
.setType(Value.Type.RANGES)
.setRanges(
Value.Ranges
.newBuilder()
.addRange(Value.Range.newBuilder().setBegin(10000).setEnd(20000)))
.build()
))
.build()
val offers = SchedulerActor.ResourceOffers((1 to numberOfOffersPerCycle).map(_ => offer))
import context.dispatcher
periodicOffers = Some(
context.system.scheduler.schedule(1.second, 1.seconds, scheduler, offers)
)
}
override def postStop(): Unit = {
periodicOffers.foreach(_.cancel())
periodicOffers = None
super.postStop()
}
override def receive: Receive = LoggingReceive {
case driver: SchedulerDriver =>
log.debug(s"pass on driver to scheduler $scheduler")
scheduler ! driver
case LaunchTasks(offers, tasks) =>
log.debug(s"launch tasks $offers, $tasks")
tasks.foreach { taskInfo: TaskInfo =>
scheduler ! TaskStatus.newBuilder()
.setSource(TaskStatus.Source.SOURCE_EXECUTOR)
.setTaskId(taskInfo.getTaskId)
.setState(TaskState.TASK_RUNNING)
.build()
}
case KillTask(taskId) =>
log.debug(s"kill tasks $taskId")
scheduler ! TaskStatus.newBuilder()
.setSource(TaskStatus.Source.SOURCE_EXECUTOR)
.setTaskId(taskId)
.setState(TaskState.TASK_KILLED)
.build()
case _ =>
}
}
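// Illustrative sketch (not part of the original source): create the simulated driver actor
// and send it one of the messages documented in the companion object. The actor system,
// scheduler Props and task id are placeholders supplied by the caller.
object DriverActorSketch {
  import akka.actor.ActorSystem
  def killExampleTask(system: ActorSystem, schedulerProps: Props): ActorRef = {
    val driver = system.actorOf(Props(new DriverActor(schedulerProps)), "simulated-driver")
    driver ! KillTask(TaskID.newBuilder().setValue("task_0").build())
    driver
  }
}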
|
quamilek/marathon
|
mesos-simulation/src/main/scala/mesosphere/mesos/simulation/DriverActor.scala
|
Scala
|
apache-2.0
| 3,674 |
package nodes.learning
import breeze.linalg._
import breeze.stats.distributions.Multinomial
import breeze.stats.mean
import org.apache.spark.rdd.RDD
import pipelines.{Estimator, Transformer}
import utils.MatrixUtils
/**
*
* @param means matrix of dimension numClusters by numFeatures
*/
case class KMeansModel(means: DenseMatrix[Double]) extends Transformer[DenseVector[Double], DenseVector[Double]] {
/**
* Returns the assignment of each vector to the nearest cluster.
*/
def apply(in: DenseVector[Double]): DenseVector[Double] = {
// TODO: Could maybe do more efficient single-item implementation
apply(in.asDenseMatrix).flatten()
}
def apply(in: DenseMatrix[Double]): DenseMatrix[Double] = {
val XSqNormHlf: DenseVector[Double] = sum(in :* in, Axis._1) / 2.0
/* compute the distance to all of the centers and assign each point to its nearest center. */
val sqDistToCenters = in * means.t
sqDistToCenters :*= -1.0
sqDistToCenters(::, *) += XSqNormHlf
sqDistToCenters(*, ::) += (sum(means :* means, Axis._1) :*= 0.5)
val nearestCenter = argmin(sqDistToCenters(*, ::))
// reuse the previous (potentially large) matrix to keep memory usage down
val centerAssign = sqDistToCenters
var row: Int = 0
while (row < in.rows) {
var col: Int = 0
while (col < means.rows) {
centerAssign(row, col) = 0.0
col += 1
}
centerAssign(row, nearestCenter(row)) = 1.0
row += 1
}
centerAssign
}
override def apply(in: RDD[DenseVector[Double]]): RDD[DenseVector[Double]] = {
in.mapPartitions { partition =>
val assignments = apply(MatrixUtils.rowsToMatrix(partition))
MatrixUtils.matrixToRowArray(assignments).iterator
}
}
}
/**
 * If you run for one round, this is the same as the k-means++
 * initialization. If you run for more rounds, you are running Lloyd's
 * algorithm with the k-means++ initialization scheme.
 *
 * @param numMeans Number of cluster centers to fit
 * @param maxIterations Maximum number of Lloyd's algorithm iterations to run
* @param stopTolerance Tolerance used to decide when to terminate Lloyd's algorithm
*/
case class KMeansPlusPlusEstimator(numMeans: Int, maxIterations: Int, stopTolerance: Double = 1e-3) extends Estimator[DenseVector[Double], DenseVector[Double]] {
def fit(data: RDD[DenseVector[Double]]): KMeansModel = {
val X = MatrixUtils.rowsToMatrix(data.collect())
fit(X)
}
def fit(X: DenseMatrix[Double]): KMeansModel = {
val numSamples = X.rows
val numFeatures = X.cols
val XSqNormHlf: DenseVector[Double] = sum(X :* X, Axis._1) / 2.0
val centers = Array.fill(numMeans)(0)
centers(0) = Multinomial(DenseVector.fill(numSamples, 1.0/numSamples)).draw()
var curSqDistanceToClusters: DenseVector[Double] = null
var k = 0
while (k < (numMeans - 1)) {
val curCenter = X(centers(k), ::)
val curCenterNorm = norm(curCenter, 2)
// slick vectorized code to compute the distance to the current center
val sqDistToNewCenter = (XSqNormHlf - (X * curCenter.t)) += (0.5 * curCenterNorm * curCenterNorm)
curSqDistanceToClusters = if (k > 0) {
min(sqDistToNewCenter, curSqDistanceToClusters)
} else {
sqDistToNewCenter
}
// add a new center by the k-means++ rule
centers(k + 1) = Multinomial(max(curSqDistanceToClusters, 0.0)).draw()
k += 1
}
var kMeans = X(centers.toSeq, ::).toDenseMatrix
val curCost = DenseVector.zeros[Double](maxIterations)
var iter = 0
var costImproving = true
while ((iter < maxIterations) && costImproving) {
/* compute the distance to all of the centers and assign each point to its
nearest center. (Again, mad slick and vectorized). */
val sqDistToCenters = X * kMeans.t
sqDistToCenters :*= -1.0
sqDistToCenters(::, *) += XSqNormHlf
sqDistToCenters(*, ::) += (sum(kMeans :* kMeans, Axis._1) :*= 0.5)
val bestDist = min(sqDistToCenters(*, ::))
curCost(iter) = mean(bestDist)
val nearestCenter = argmin(sqDistToCenters(*, ::))
// For memory efficiency reuse the big sqDistToCenters matrix
val centerAssign = sqDistToCenters
var row: Int = 0
while (row < numSamples) {
var col: Int = 0
while (col < numMeans) {
centerAssign(row, col) = 0.0
col += 1
}
centerAssign(row, nearestCenter(row)) = 1.0
row += 1
}
val assignMass = sum(centerAssign, Axis._0).toDenseVector
kMeans = centerAssign.t * X
kMeans(::, *) :/= assignMass
if (iter > 0) {
costImproving = (curCost(iter - 1) - curCost(iter)) >= stopTolerance * math.abs(curCost(iter - 1))
println("Iteration: " + iter + " current cost " + curCost(iter) + " imp " + costImproving)
}
iter += 1
}
KMeansModel(kMeans)
}
}
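// Illustrative sketch (not part of the original source): fit k-means++ on a small random
// matrix and read back the one-hot cluster assignment of each row. The sizes and
// hyper-parameters are arbitrary example values.
object KMeansPlusPlusExample {
  def run(): DenseMatrix[Double] = {
    val data = DenseMatrix.rand(100, 5)                       // 100 points, 5 features
    val model = KMeansPlusPlusEstimator(numMeans = 4, maxIterations = 20).fit(data)
    model(data)                                               // rows are one-hot assignments
  }
}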
|
shivaram/keystone
|
src/main/scala/nodes/learning/KMeansPlusPlus.scala
|
Scala
|
apache-2.0
| 4,838 |
/*
Copyright (c) 2013, Noel Raymond Cower
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.spifftastic.spastic.util.io
import java.io._
import scala.collection.mutable
import scala.annotation.tailrec
import java.nio.charset.Charset
import org.json.{JSONException, JSONObject}
import net.spifftastic.spastic.util.{log, LoggerTag}
object IOUtils {
implicit val TAG = LoggerTag("IOUtils")
implicit val DefaultEncoding = Charset.forName("UTF-8")
def closeQuietly(closeable: Closeable): Unit = try {
closeable.close()
} catch {
case ioex: IOException =>
log warning ("Error auto-closing stream, ignoring", ioex)
}
/**
* Convenience function to read all data until the end of a stream and return it as a string.
* The stream is not closed once done.
* @param stream the stream to read from.
* @return a string containing the contents of the stream until its end.
*/
def readUntilEndOfStream(stream: InputStream)(implicit encoding: Charset = DefaultEncoding): String = {
val builder = new mutable.StringBuilder(128)
val bytes = new Array[Byte](128)
@tailrec
def readBytes(): Unit = {
val bytesRead: Int = stream.read(bytes)
if (bytesRead > 0) builder.append(new String(bytes, 0, bytesRead, encoding))
if (bytesRead != -1) readBytes()
}
readBytes()
builder.mkString
}
/**
* Reads the remaining contents of a stream and closes it.
* @param stream the stream to read from.
* @return Some(String) containing the contents of the stream until its end if successful,
* otherwise None.
*/
def readStreamContentsAndClose(stream: InputStream)(implicit encoding: Charset = DefaultEncoding): Option[String] =
for (str <- stream) yield readUntilEndOfStream(str)(encoding)
/**
* Reads a file's contents and returns it as a string.
* @param file the file to read.
* @return Some(String) if successful.
*/
def loadFileAsString(file: File)(implicit encoding: Charset = DefaultEncoding): Option[String] =
for (reader <- new FileInputStream(file)) yield readUntilEndOfStream(reader)(encoding)
/**
* Handles copying data from input to output recursively. Stores data read and to-be-written in
   * the given buffer. Does not attempt to catch exceptions or close either stream.
* @param input The input stream to read data from.
* @param output The output stream to write the data read from the input to.
* @param buffer The buffer to store data read from the input stream in.
*/
@tailrec
private def copyStreamBuffered(
input: InputStream,
output: OutputStream,
buffer: Array[Byte]
): Unit =
input.read(buffer, 0, buffer.length) match {
case -1 => // Done reading
case bytesRead: Int =>
if (bytesRead > 0) output.write(buffer, 0, bytesRead)
copyStreamBuffered(input, output, buffer)
}
/**
* Returns true if successful, otherwise false. After calling this, both streams should be
* considered unusable or in an indeterminate state and closed.
*
* This method swallows any IOExceptions that may occur during reading from input or writing to
* output. If an IOException is raised, the method returns false.
*
* @param input The stream to read bytes from.
* @param output The stream to write bytes read from input to.
* @param rwBuffer A byte array to use as a buffer. Must have a length > 0.
* @return true if the copy succeeds, false otherwise. If the result is true,
* the input stream is at its end. If false, the stream's state is indeterminate and
* further use may result in an error.
*/
def tryCopyStreams(
input: InputStream,
output: OutputStream,
rwBuffer: Array[Byte] = new Array[Byte](512)
): Boolean = {
try {
copyStreamBuffered(input, output, rwBuffer)
true
} catch {
case ioex: IOException => // bail if the copy fails
log warning (s"Error copying streams", ioex)
false
}
}
}
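// Illustrative sketch (not part of the original source): copy one file into another with the
// buffered stream copy above, closing both streams regardless of the outcome. The File
// arguments are supplied by the caller.
object IOUtilsExample {
  import IOUtils._
  def copyFile(from: File, to: File): Boolean = {
    val in = new FileInputStream(from)
    val out = new FileOutputStream(to)
    try tryCopyStreams(in, out)
    finally {
      closeQuietly(in)
      closeQuietly(out)
    }
  }
}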
|
nilium/spastic
|
src/main/scala/util/io/IOUtils.scala
|
Scala
|
bsd-2-clause
| 5,288 |
package util
object Util {
def readFile(fileName: String, maxTries: Int = 3): String = {
def readFile0(count: Int): String = {
try { // if file is empty, try again, it should be there
val contents: String = scala.io.Source.fromFile(fileName).mkString
if (contents.isEmpty && (count < maxTries)) readFile0(count + 1)
else contents
} catch { // if file is not found, try again, it should be there
case e: Throwable =>
if (count < maxTries) readFile0(count + 1)
else sys.error("File not found: " + fileName)
}
}
readFile0(0)
}
def checkFileExist(fileName: String): Boolean =
try {
scala.io.Source.fromFile(fileName).close()
true
} catch {
case e: Throwable => false
}
}
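// Illustrative sketch (not part of the original source): read a file only if it exists,
// retrying up to five times on empty or missing content. The file name is supplied by the caller.
object UtilExample {
  def readIfPresent(fileName: String): Option[String] =
    if (Util.checkFileExist(fileName)) Some(Util.readFile(fileName, maxTries = 5)) else None
}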
|
julianpeeters/avrohugger
|
avrohugger-core/src/test/scala/util/Util.scala
|
Scala
|
apache-2.0
| 789 |
package b
class A {
def foo = 1
}
|
som-snytt/xsbt
|
sbt/src/sbt-test/source-dependencies/package-object-name/changes/A1.scala
|
Scala
|
bsd-3-clause
| 37 |
package com.bizo.hive.sparkplug.auth
import com.amazonaws.auth.AWSCredentials
import org.json.simple.JSONObject
import org.json.simple.parser.JSONParser
import org.json.simple.JSONValue
import java.io.File
import java.io.{FileInputStream, InputStreamReader, InputStream}
/**
 * Parses credentials.json-style credentials.
*/
class EmrJsonCredentials private (accessAndSecretKeys: (String, String)) extends AWSCredentials {
def this(jsonFile: File) = this(EmrJsonCredentials.parseKeys(jsonFile))
def this(jsonResourcePath: String) = this(EmrJsonCredentials.parseKeys(jsonResourcePath))
override def getAWSAccessKeyId() = accessAndSecretKeys._1
override def getAWSSecretKey() = accessAndSecretKeys._2
}
object EmrJsonCredentials {
def apply() = {
new EmrJsonCredentials(new File(System.getenv("AWS_EMR_HOME"), "credentials.json"))
}
private def parseKeys(jsonResourcePath: String): (String, String) = {
Option(this.getClass.getClassLoader.getResourceAsStream(jsonResourcePath)).map { stream =>
parseKeys(stream)
} getOrElse { sys.error("unable to open resource '%s'".format(jsonResourcePath)) }
}
private def parseKeys(jsonFile: File): (String, String) = {
parseKeys(new FileInputStream(jsonFile))
}
private[this] def parseKeys(in: InputStream): (String, String) = {
val r = new InputStreamReader(in, "UTF-8")
try {
val o = JSONValue.parse(r).asInstanceOf[JSONObject]
(o.get("access-id").toString, o.get("private-key").toString)
} finally {
r.close()
}
}
}
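// Illustrative sketch (not part of the original source): obtain credentials either from the
// default $AWS_EMR_HOME/credentials.json or from an explicit file path chosen by the caller.
object EmrJsonCredentialsExample {
  def fromDefaultLocation(): AWSCredentials = EmrJsonCredentials()
  def fromFile(path: String): AWSCredentials = new EmrJsonCredentials(new File(path))
}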
|
pkallos/spark-plug
|
src/main/scala/com/bizo/hive/sparkplug/auth/EmrJsonCredentials.scala
|
Scala
|
apache-2.0
| 1,534 |
package com.caibowen.prma.monitor
import java.util.{List => JList, Map => JMap}
import akka.AkkaException
import akka.actor._
import akka.actor.SupervisorStrategy._
import com.caibowen.prma.api.model.EventVO
import com.caibowen.prma.monitor.eval._
import com.caibowen.prma.monitor.notify.Notifier
import org.slf4j.LoggerFactory
import scala.util.{Failure, Success, Try}
/**
* @author BowenCai
* @since 30/11/2014.
*/
object Monitor {
def prop(evaluator: Evaluator, notifierMap: Map[String, Notifier])
= Props(new Monitor(evaluator, notifierMap))
final val defaultDecider: SupervisorStrategy.Decider = {
case _: ActorInitializationException => Stop
case _: ActorKilledException => Stop
case _: DeathPactException => Stop
case _: AkkaException => Restart
case _: Exception => Escalate
}
final val defaultStrategy = new OneForOneStrategy(defaultDecider)
}
@SerialVersionUID(7376528395328272879L)
class Monitor(val evaluator: Evaluator, val notifierMap: Map[String, Notifier]) extends Actor with ActorLogging{
override val supervisorStrategy = Monitor.defaultStrategy
private[this] val allNotifiers = notifierMap.values.toList
def receive = {
case vo: EventVO => {
Try(evaluator.eval(vo)) match {
case Success(response) => response match {
case n: NotifyOne =>
val notifier = notifierMap get n.name
if (notifier.isDefined)
notifier.get.take(n)
else log.warning(s"Could not find notifier named [${n.name}] on event [$vo]")
case n: Response => allNotifiers.foreach(_ take n)
}
case Failure(e) => log.error(s"Could not evaluate [$vo] with evaluator [$evaluator]", e)
}
}
case x => unhandled(x)
}
}
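// Illustrative sketch (not part of the original source): start a Monitor actor and feed it an
// event. The actor system, evaluator, notifier map and event are assumed to exist elsewhere.
object MonitorSketch {
  def start(system: ActorSystem, evaluator: Evaluator, notifiers: Map[String, Notifier]): ActorRef =
    system.actorOf(Monitor.prop(evaluator, notifiers), "prma-monitor")
  def report(monitor: ActorRef, event: EventVO): Unit =
    monitor ! event
}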
|
xkommando/PRMA
|
monitor/src/main/scala/com/caibowen/prma/monitor/Monitor.scala
|
Scala
|
lgpl-3.0
| 1,760 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification
import org.apache.spark.SparkException
import org.apache.spark.annotation.Since
import org.apache.spark.ml.{PredictionModel, Predictor, PredictorParams}
import org.apache.spark.ml.feature.{Instance, LabeledPoint}
import org.apache.spark.ml.linalg.{Vector, VectorUDT}
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.param.shared.HasRawPredictionCol
import org.apache.spark.ml.util.{MetadataUtils, SchemaUtils}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.{DataType, StructType}
/**
* (private[spark]) Params for classification.
*/
private[spark] trait ClassifierParams
extends PredictorParams with HasRawPredictionCol {
override protected def validateAndTransformSchema(
schema: StructType,
fitting: Boolean,
featuresDataType: DataType): StructType = {
val parentSchema = super.validateAndTransformSchema(schema, fitting, featuresDataType)
SchemaUtils.appendColumn(parentSchema, $(rawPredictionCol), new VectorUDT)
}
/**
* Extract [[labelCol]], weightCol(if any) and [[featuresCol]] from the given dataset,
* and put it in an RDD with strong types.
* Validates the label on the classifier is a valid integer in the range [0, numClasses).
*/
protected def extractInstances(
dataset: Dataset[_],
numClasses: Int): RDD[Instance] = {
val validateInstance = (instance: Instance) => {
val label = instance.label
require(label.toLong == label && label >= 0 && label < numClasses, s"Classifier was given" +
s" dataset with invalid label $label. Labels must be integers in range" +
s" [0, $numClasses).")
}
extractInstances(dataset, validateInstance)
}
}
/**
* Single-label binary or multiclass classification.
* Classes are indexed {0, 1, ..., numClasses - 1}.
*
* @tparam FeaturesType Type of input features. E.g., `Vector`
* @tparam E Concrete Estimator type
* @tparam M Concrete Model type
*/
abstract class Classifier[
FeaturesType,
E <: Classifier[FeaturesType, E, M],
M <: ClassificationModel[FeaturesType, M]]
extends Predictor[FeaturesType, E, M] with ClassifierParams {
/** @group setParam */
def setRawPredictionCol(value: String): E = set(rawPredictionCol, value).asInstanceOf[E]
// TODO: defaultEvaluator (follow-up PR)
/**
* Extract [[labelCol]] and [[featuresCol]] from the given dataset,
* and put it in an RDD with strong types.
*
* @param dataset DataFrame with columns for labels ([[org.apache.spark.sql.types.NumericType]])
* and features (`Vector`).
* @param numClasses Number of classes label can take. Labels must be integers in the range
* [0, numClasses).
* @note Throws `SparkException` if any label is a non-integer or is negative
*/
protected def extractLabeledPoints(dataset: Dataset[_], numClasses: Int): RDD[LabeledPoint] = {
validateNumClasses(numClasses)
dataset.select(col($(labelCol)), col($(featuresCol))).rdd.map {
case Row(label: Double, features: Vector) =>
validateLabel(label, numClasses)
LabeledPoint(label, features)
}
}
/**
* Validates that number of classes is greater than zero.
*
* @param numClasses Number of classes label can take.
*/
protected def validateNumClasses(numClasses: Int): Unit = {
require(numClasses > 0, s"Classifier (in extractLabeledPoints) found numClasses =" +
s" $numClasses, but requires numClasses > 0.")
}
/**
* Validates the label on the classifier is a valid integer in the range [0, numClasses).
*
* @param label The label to validate.
* @param numClasses Number of classes label can take. Labels must be integers in the range
* [0, numClasses).
*/
protected def validateLabel(label: Double, numClasses: Int): Unit = {
require(label.toLong == label && label >= 0 && label < numClasses, s"Classifier was given" +
s" dataset with invalid label $label. Labels must be integers in range" +
s" [0, $numClasses).")
}
/**
* Get the number of classes. This looks in column metadata first, and if that is missing,
* then this assumes classes are indexed 0,1,...,numClasses-1 and computes numClasses
* by finding the maximum label value.
*
* Label validation (ensuring all labels are integers >= 0) needs to be handled elsewhere,
* such as in `extractLabeledPoints()`.
*
* @param dataset Dataset which contains a column [[labelCol]]
* @param maxNumClasses Maximum number of classes allowed when inferred from data. If numClasses
* is specified in the metadata, then maxNumClasses is ignored.
* @return number of classes
* @throws IllegalArgumentException if metadata does not specify numClasses, and the
* actual numClasses exceeds maxNumClasses
*/
protected def getNumClasses(dataset: Dataset[_], maxNumClasses: Int = 100): Int = {
MetadataUtils.getNumClasses(dataset.schema($(labelCol))) match {
case Some(n: Int) => n
case None =>
// Get number of classes from dataset itself.
val maxLabelRow: Array[Row] = dataset.select(max($(labelCol))).take(1)
if (maxLabelRow.isEmpty || maxLabelRow(0).get(0) == null) {
throw new SparkException("ML algorithm was given empty dataset.")
}
val maxDoubleLabel: Double = maxLabelRow.head.getDouble(0)
require((maxDoubleLabel + 1).isValidInt, s"Classifier found max label value =" +
s" $maxDoubleLabel but requires integers in range [0, ... ${Int.MaxValue})")
val numClasses = maxDoubleLabel.toInt + 1
require(numClasses <= maxNumClasses, s"Classifier inferred $numClasses from label values" +
s" in column $labelCol, but this exceeded the max numClasses ($maxNumClasses) allowed" +
s" to be inferred from values. To avoid this error for labels with > $maxNumClasses" +
s" classes, specify numClasses explicitly in the metadata; this can be done by applying" +
s" StringIndexer to the label column.")
logInfo(this.getClass.getCanonicalName + s" inferred $numClasses classes for" +
s" labelCol=$labelCol since numClasses was not specified in the column metadata.")
numClasses
}
}
}
/**
* Model produced by a [[Classifier]].
* Classes are indexed {0, 1, ..., numClasses - 1}.
*
* @tparam FeaturesType Type of input features. E.g., `Vector`
* @tparam M Concrete Model type
*/
abstract class ClassificationModel[FeaturesType, M <: ClassificationModel[FeaturesType, M]]
extends PredictionModel[FeaturesType, M] with ClassifierParams {
/** @group setParam */
def setRawPredictionCol(value: String): M = set(rawPredictionCol, value).asInstanceOf[M]
/** Number of classes (values which the label can take). */
def numClasses: Int
override def transformSchema(schema: StructType): StructType = {
var outputSchema = super.transformSchema(schema)
if ($(predictionCol).nonEmpty) {
outputSchema = SchemaUtils.updateNumValues(schema,
$(predictionCol), numClasses)
}
if ($(rawPredictionCol).nonEmpty) {
outputSchema = SchemaUtils.updateAttributeGroupSize(outputSchema,
$(rawPredictionCol), numClasses)
}
outputSchema
}
/**
* Transforms dataset by reading from [[featuresCol]], and appending new columns as specified by
* parameters:
* - predicted labels as [[predictionCol]] of type `Double`
* - raw predictions (confidences) as [[rawPredictionCol]] of type `Vector`.
*
* @param dataset input dataset
* @return transformed dataset
*/
override def transform(dataset: Dataset[_]): DataFrame = {
val outputSchema = transformSchema(dataset.schema, logging = true)
// Output selected columns only.
// This is a bit complicated since it tries to avoid repeated computation.
var outputData = dataset
var numColsOutput = 0
if (getRawPredictionCol != "") {
val predictRawUDF = udf { features: Any =>
predictRaw(features.asInstanceOf[FeaturesType])
}
outputData = outputData.withColumn(getRawPredictionCol, predictRawUDF(col(getFeaturesCol)),
outputSchema($(rawPredictionCol)).metadata)
numColsOutput += 1
}
if (getPredictionCol != "") {
val predCol = if (getRawPredictionCol != "") {
udf(raw2prediction _).apply(col(getRawPredictionCol))
} else {
val predictUDF = udf { features: Any =>
predict(features.asInstanceOf[FeaturesType])
}
predictUDF(col(getFeaturesCol))
}
outputData = outputData.withColumn(getPredictionCol, predCol,
outputSchema($(predictionCol)).metadata)
numColsOutput += 1
}
if (numColsOutput == 0) {
logWarning(s"$uid: ClassificationModel.transform() does nothing" +
" because no output columns were set.")
}
outputData.toDF
}
final override def transformImpl(dataset: Dataset[_]): DataFrame =
throw new UnsupportedOperationException(s"transformImpl is not supported in $getClass")
/**
* Predict label for the given features.
* This method is used to implement `transform()` and output [[predictionCol]].
*
* This default implementation for classification predicts the index of the maximum value
* from `predictRaw()`.
*/
override def predict(features: FeaturesType): Double = {
raw2prediction(predictRaw(features))
}
/**
* Raw prediction for each possible label.
* The meaning of a "raw" prediction may vary between algorithms, but it intuitively gives
* a measure of confidence in each possible label (where larger = more confident).
* This internal method is used to implement `transform()` and output [[rawPredictionCol]].
*
* @return vector where element i is the raw prediction for label i.
* This raw prediction may be any real number, where a larger value indicates greater
* confidence for that label.
*/
@Since("3.0.0")
def predictRaw(features: FeaturesType): Vector
/**
* Given a vector of raw predictions, select the predicted label.
* This may be overridden to support thresholds which favor particular labels.
* @return predicted label
*/
protected def raw2prediction(rawPrediction: Vector): Double = rawPrediction.argmax
/**
* If the rawPrediction and prediction columns are set, this method returns the current model,
* otherwise it generates new columns for them and sets them as columns on a new copy of
* the current model
*/
private[classification] def findSummaryModel():
(ClassificationModel[FeaturesType, M], String, String) = {
val model = if ($(rawPredictionCol).isEmpty && $(predictionCol).isEmpty) {
copy(ParamMap.empty)
.setRawPredictionCol("rawPrediction_" + java.util.UUID.randomUUID.toString)
.setPredictionCol("prediction_" + java.util.UUID.randomUUID.toString)
} else if ($(rawPredictionCol).isEmpty) {
copy(ParamMap.empty).setRawPredictionCol("rawPrediction_" +
java.util.UUID.randomUUID.toString)
} else if ($(predictionCol).isEmpty) {
copy(ParamMap.empty).setPredictionCol("prediction_" + java.util.UUID.randomUUID.toString)
} else {
this
}
(model, model.getRawPredictionCol, model.getPredictionCol)
}
}
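// Illustrative sketch (not part of the original source): the smallest concrete
// ClassificationModel, showing which members a subclass has to supply (uid, numClasses,
// predictRaw and copy). It scores every class equally, so raw2prediction always picks label 0.
private[classification] class ConstantScoreClassificationModel(
    override val uid: String,
    override val numClasses: Int)
  extends ClassificationModel[Vector, ConstantScoreClassificationModel] {
  // Same confidence for every class; the argmax of a constant vector is index 0.
  override def predictRaw(features: Vector): Vector =
    org.apache.spark.ml.linalg.Vectors.dense(Array.fill(numClasses)(1.0))
  override def copy(extra: ParamMap): ConstantScoreClassificationModel =
    copyValues(new ConstantScoreClassificationModel(uid, numClasses), extra)
}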
|
maropu/spark
|
mllib/src/main/scala/org/apache/spark/ml/classification/Classifier.scala
|
Scala
|
apache-2.0
| 12,321 |
package djinni
import djinni.ast._
import djinni.generatorTools._
import djinni.meta._
class CppMarshal(spec: Spec) extends Marshal(spec) {
// The scopeSymbols parameter accepted by many of these functions describes a Seq of
// symbols/names that are declared in the current scope. The TypeRef or MExpr expression
// will be fully qualified if it clashes with any of these symbols, even if full qualification
// has not been requested.
override def typename(tm: MExpr): String = toCppType(tm, None, Seq())
def typename(tm: MExpr, scopeSymbols: Seq[String]): String = toCppType(tm, None, scopeSymbols)
def typename(ty: TypeRef, scopeSymbols: Seq[String]): String = typename(ty.resolved, scopeSymbols)
def typename(name: String, ty: TypeDef): String = ty match {
case e: Enum => idCpp.enumType(name)
case i: Interface => idCpp.ty(name)
case r: Record => idCpp.ty(name)
}
override def fqTypename(tm: MExpr): String = toCppType(tm, Some(spec.cppNamespace), Seq())
def fqTypename(name: String, ty: TypeDef): String = ty match {
case e: Enum => withNs(Some(spec.cppNamespace), idCpp.enumType(name))
case i: Interface => withNs(Some(spec.cppNamespace), idCpp.ty(name))
case r: Record => withNs(Some(spec.cppNamespace), idCpp.ty(name))
}
def paramType(tm: MExpr, scopeSymbols: Seq[String]): String = toCppParamType(tm, None, scopeSymbols)
def paramType(ty: TypeRef, scopeSymbols: Seq[String]): String = paramType(ty.resolved, scopeSymbols)
override def paramType(tm: MExpr): String = toCppParamType(tm)
override def fqParamType(tm: MExpr): String = toCppParamType(tm, Some(spec.cppNamespace))
def returnType(ret: Option[TypeRef], scopeSymbols: Seq[String]): String = {
ret.fold("void")(toCppType(_, None, scopeSymbols))
}
override def returnType(ret: Option[TypeRef]): String = ret.fold("void")(toCppType(_, None))
override def fqReturnType(ret: Option[TypeRef]): String = {
ret.fold("void")(toCppType(_, Some(spec.cppNamespace)))
}
def fieldType(tm: MExpr, scopeSymbols: Seq[String]): String = typename(tm, scopeSymbols)
def fieldType(ty: TypeRef, scopeSymbols: Seq[String]): String = fieldType(ty.resolved, scopeSymbols)
override def fieldType(tm: MExpr): String = typename(tm)
override def fqFieldType(tm: MExpr): String = fqTypename(tm)
override def toCpp(tm: MExpr, expr: String): String = throw new AssertionError("cpp to cpp conversion")
override def fromCpp(tm: MExpr, expr: String): String = throw new AssertionError("cpp to cpp conversion")
def hppReferences(m: Meta, exclude: String, forwardDeclareOnly: Boolean): Seq[SymbolReference] = m match {
case p: MPrimitive => p.idlName match {
case "i8" | "i16" | "i32" | "i64" => List(ImportRef("<cstdint>"))
case _ => List()
}
case MString => List(ImportRef("<string>"))
case MDate => List(ImportRef("<chrono>"))
case MBinary => List(ImportRef("<vector>"), ImportRef("<cstdint>"))
case MOptional => List(ImportRef(spec.cppOptionalHeader))
case MList => List(ImportRef("<vector>"))
case MSet => List(ImportRef("<unordered_set>"))
case MMap => List(ImportRef("<unordered_map>"))
case d: MDef => d.body match {
case r: Record =>
if (d.name != exclude) {
if (forwardDeclareOnly) {
List(DeclRef(s"struct ${typename(d.name, d.body)};", Some(spec.cppNamespace)))
} else {
List(ImportRef(include(d.name, r.ext.cpp)))
}
} else {
List()
}
case e: Enum =>
if (d.name != exclude) {
if (forwardDeclareOnly) {
val underlyingType = if(e.flags) " : unsigned" else ""
List(DeclRef(s"enum class ${typename(d.name, d.body)}${underlyingType};", Some(spec.cppNamespace)))
} else {
List(ImportRef(include(d.name)))
}
} else {
List()
}
case i: Interface =>
val base = if (d.name != exclude) {
List(ImportRef("<memory>"), DeclRef(s"class ${typename(d.name, d.body)};", Some(spec.cppNamespace)))
} else {
List(ImportRef("<memory>"))
}
spec.cppNnHeader match {
case Some(nnHdr) => ImportRef(nnHdr) :: base
case _ => base
}
}
case e: MExtern => e.defType match {
// Do not forward declare extern types, they might be in arbitrary namespaces.
// This isn't a problem as extern types cannot cause dependency cycles with types being generated here
case DInterface => List(ImportRef("<memory>"), ImportRef(e.cpp.header))
case _ => List(ImportRef(e.cpp.header))
}
case p: MParam => List()
}
def cppReferences(m: Meta, exclude: String, forwardDeclareOnly: Boolean): Seq[SymbolReference] = {
// Only need to provide full definitions for cpp if forward decls were used in header
if (!forwardDeclareOnly) {
List()
} else {
m match {
case d: MDef => d.body match {
case r: Record =>
if (d.name != exclude) {
List(ImportRef(include(d.name, r.ext.cpp)))
} else {
List()
}
case e: Enum =>
if (d.name != exclude) {
List(ImportRef(include(d.name)))
} else {
List()
}
case _ => List()
}
case _ => List()
}
}
}
def include(ident: String, isExtendedRecord: Boolean = false): String = {
val prefix = if (isExtendedRecord) spec.cppExtendedRecordIncludePrefix else spec.cppIncludePrefix
    q(prefix + spec.cppFileIdentStyle(ident) + "." + spec.cppHeaderExt)
}
private def toCppType(ty: TypeRef, namespace: Option[String] = None, scopeSymbols: Seq[String] = Seq()): String =
toCppType(ty.resolved, namespace, scopeSymbols)
private def toCppType(tm: MExpr, namespace: Option[String], scopeSymbols: Seq[String]): String = {
def withNamespace(name: String): String = {
// If an unqualified symbol needs to have its namespace added, this code assumes that the
// namespace is the one that's defined for generated types (spec.cppNamespace).
// This seems like a safe assumption for the C++ generator as it doesn't make much use of
// other global names, but might need to be refined to cover other types in the future.
val ns = namespace match {
case Some(ns) => Some(ns)
case None => if (scopeSymbols.contains(name)) Some(spec.cppNamespace) else None
}
withNs(ns, name)
}
def base(m: Meta): String = m match {
case p: MPrimitive => p.cName
case MString => if (spec.cppUseWideStrings) "std::wstring" else "std::string"
case MDate => "std::chrono::system_clock::time_point"
case MBinary => "std::vector<uint8_t>"
case MOptional => spec.cppOptionalTemplate
case MList => "std::vector"
case MSet => "std::unordered_set"
case MMap => "std::unordered_map"
case d: MDef =>
d.defType match {
case DEnum => withNamespace(idCpp.enumType(d.name))
case DRecord => withNamespace(idCpp.ty(d.name))
case DInterface => s"std::shared_ptr<${withNamespace(idCpp.ty(d.name))}>"
}
case e: MExtern => e.defType match {
case DInterface => s"std::shared_ptr<${e.cpp.typename}>"
case _ => e.cpp.typename
}
case p: MParam => idCpp.typeParam(p.name)
}
def expr(tm: MExpr): String = {
spec.cppNnType match {
case Some(nnType) => {
// if we're using non-nullable pointers for interfaces, then special-case
// both optional and non-optional interface types
val args = if (tm.args.isEmpty) "" else tm.args.map(expr).mkString("<", ", ", ">")
tm.base match {
case d: MDef =>
d.defType match {
case DInterface => s"${nnType}<${withNamespace(idCpp.ty(d.name))}>"
case _ => base(tm.base) + args
}
case MOptional =>
tm.args.head.base match {
case d: MDef =>
d.defType match {
case DInterface => s"std::shared_ptr<${withNamespace(idCpp.ty(d.name))}>"
case _ => base(tm.base) + args
}
case _ => base(tm.base) + args
}
case _ => base(tm.base) + args
}
}
case None =>
if (isOptionalInterface(tm)) {
// otherwise, interfaces are always plain old shared_ptr
expr(tm.args.head)
} else {
val args = if (tm.args.isEmpty) "" else tm.args.map(expr).mkString("<", ", ", ">")
base(tm.base) + args
}
}
}
expr(tm)
}
def byValue(tm: MExpr): Boolean = tm.base match {
case p: MPrimitive => true
case d: MDef => d.defType match {
case DEnum => true
case _ => false
}
case e: MExtern => e.defType match {
case DInterface => false
case DEnum => true
case DRecord => e.cpp.byValue
}
case MOptional => byValue(tm.args.head)
case _ => false
}
def byValue(td: TypeDecl): Boolean = td.body match {
case i: Interface => false
case r: Record => false
case e: Enum => true
}
  // This can be used in C++ generation to decide whether a const& should be applied to the parameter or not
private def toCppParamType(tm: MExpr, namespace: Option[String] = None, scopeSymbols: Seq[String] = Seq()): String = {
val cppType = toCppType(tm, namespace, scopeSymbols)
val refType = "const " + cppType + " &"
val valueType = cppType
if(byValue(tm)) valueType else refType
}
}
|
dropbox/djinni
|
src/source/CppMarshal.scala
|
Scala
|
apache-2.0
| 9,714 |
package scuff.web
import org.junit._
import org.junit.Assert._
import scuff.MediaType
class AcceptHeaderTest {
@Test
def basic(): Unit = {
val acceptTypes = Set(MediaType("text/html"))
assertTrue(AcceptHeader("text/html").get.accepts("text/html"))
assertTrue(AcceptHeader("*/*").get.accepts("text/html"))
assertFalse(AcceptHeader("*/*").get.hasExactly("text/html"))
assertTrue(AcceptHeader("text/*").get.accepts("text/html"))
assertFalse(AcceptHeader("text/*").get.hasExactly("text/html"))
assertFalse(AcceptHeader("image/*").get.accepts("text/html"))
assertFalse(AcceptHeader("text/*").get.accepts("image/jpeg"))
assertTrue(AcceptHeader("*/*").get.accepts("image/jpeg"))
assertTrue(AcceptHeader("*/*").forall(_.acceptsAnyOf(acceptTypes)))
assertTrue(AcceptHeader("text/*").forall(_.acceptsAnyOf(acceptTypes)))
assertFalse(AcceptHeader("image/*").forall(_.acceptsAnyOf(acceptTypes)))
assertTrue(AcceptHeader("").forall(_.acceptsAnyOf(acceptTypes)))
}
@Test
def complex(): Unit = {
val ah = AcceptHeader("text/html; q=1.0, text/*; q=0.8, image/gif; q=0.6, image/jpeg; q=0.6, image/*; q=0.5").get
assertTrue(ah.accepts("image/png"))
assertTrue(ah.accepts("text/plain"))
assertFalse(ah.accepts("application/json"))
}
@Test
def preference(): Unit = {
val ah = AcceptHeader("text/html; q=1.0, text/*; q=0.8, image/gif; q=0.6, image/jpeg; q=0.6, image/*; q=0.5, */*; q=0.1").get
assertTrue(ah.preference.matches("text/html"))
val ah2 = AcceptHeader("audio/*; q=0.2, audio/basic").get
assertEquals("audio/basic", ah2.preference.baseType)
}
@Test
def rfc2616_1(): Unit = {
val byPreference = AcceptHeader("text/plain; q=0.5, text/html, text/x-dvi; q=0.8, text/x-c").get.preferenceOrdered
assertEquals(4, byPreference.size)
assertTrue(byPreference(0).matches("text/html"))
assertTrue(byPreference(1).matches("text/x-c"))
assertTrue(byPreference(2).matches("text/x-dvi"))
assertTrue(byPreference(3).matches("text/plain"))
}
@Test
def rfc2616_2(): Unit = {
val byPreference = AcceptHeader("text/*, text/html, text/html;level=1, */*").get.preferenceOrdered
assertEquals(4, byPreference.size)
assertEquals("text/html; level=1", byPreference(0).toString)
assertEquals("text/html", byPreference(1).toString)
assertEquals("text/*", byPreference(2).toString)
assertEquals("*/*", byPreference(3).toString)
}
@Test
def rfc2616_3(): Unit = {
val ah = AcceptHeader("audio/*; q=0.2, audio/basic").get
assertEquals("audio/basic", ah.preference.toString)
}
@Test
def rfc2616_4(): Unit = {
val ah = AcceptHeader("text/*;q=0.3, text/html;q=0.7, text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5").get
val byPreference = ah.preferenceOrdered
assertEquals(5, byPreference.size)
assertEquals("text/html; level=1", byPreference(0).toString)
assertTrue(byPreference(1).matches("text/html;level=3"))
assertTrue(byPreference(1).matches("text/html"))
assertEquals("*/*; q=0.5", byPreference(2).toString)
assertTrue(ah.accepts("image/jpeg"))
assertEquals(MediaType("text/html; level=2; q=0.4").toString, byPreference(3).toString)
}
@Test
def versioned(): Unit = {
val request = AcceptHeader("application/vnd.scuff+json;v=41, application/vnd.scuff+json;v=42").get
val expected = MediaType("application/vnd.scuff+json")
assertTrue(request.accepts(expected))
assertFalse(request.accepts("application/json"))
request.withParm(expected, "v", _.toInt).toList.sortBy(_._2).reverse.headOption match {
case None => fail("Should match")
case Some((_, version)) =>
assertEquals(42, version)
}
}
@Test
def `vendor match`(): Unit = {
val plainJson = AcceptHeader("application/json").get
val responseType = MediaType("application/vnd.scuff+json")
assertTrue(plainJson.accepts(responseType))
}
}
|
nilskp/scuff
|
src/test/scala/scuff/web/AcceptHeaderTest.scala
|
Scala
|
mit
| 3,940 |
/*
* Copyright 2014 Christos KK Loverdos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ckkloverdos.topsort
/**
* Provides API related to the node structure of a graph.
* This is totally external to concrete graph representations.
 * It was introduced to avoid imposing specific nominal
 * sub-typing on Graph implementations.
*
* `G` is the graph data type and `N` is the node data type.
*/
trait GraphStructure[G, N] {
/**
* Given a `graph`, returns an iterator of its nodes.
*
* @param graph The given graph.
* @return An iterator of the `graph` nodes.
*/
def nodes(graph: G): Iterator[N]
/**
* Given a `node` belonging to `graph`, returns an iterator with the `node` dependencies.
*/
def nodeDependencies(graph: G, node: N): Iterator[N]
}
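/**
 * Hedged example (not part of the original file): a minimal GraphStructure
 * instance for a plain adjacency map, assuming dependencies are stored as
 * Map[node -> nodes it depends on]. The names below are illustrative only.
 */
object MapGraphStructure extends GraphStructure[Map[String, Set[String]], String] {
  override def nodes(graph: Map[String, Set[String]]): Iterator[String] =
    graph.keysIterator
  override def nodeDependencies(graph: Map[String, Set[String]], node: String): Iterator[String] =
    graph.getOrElse(node, Set.empty[String]).iterator
}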
|
loverdos/topsort
|
src/main/scala/com/ckkloverdos/topsort/GraphStructure.scala
|
Scala
|
apache-2.0
| 1,319 |
/*
* Copyright 2011, Patrick Boe
* ===========================
* This program is distributed under the terms of the GNU General Public License.
*
* This file is part of Thimblus.
*
* Thimblus is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Thimblus is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Thimblus. If not, see <http://www.gnu.org/licenses/>.
*/
package org.thimblus.model
import org.thimblus.data._
import scala.swing._
import scala.swing.event._
import akka.actor._
import akka.dispatch._
import akka.event.EventHandler
import java.util.Date
import org.thimblus.repo._
import java.io.Closeable
class HomeModelA(
service: IPlanDispatch,
time: ()=>Date
) extends Publisher with Closeable {
def plan: Plan = _plan
def plan_=(p: Plan) {
_plan = p
publish(PlanUpdate(p))
}
def post(msg: String) {
plan += Message(msg, time())
service.getRepo() ! (_metadata, plan)
}
def close() = service.close()
private var _plan: Plan = null
private var _metadata: String = null
private def init() {
val r = service.getRepo()
(r !!! PlanRequest) onComplete { f =>
f.result.get.asInstanceOf[Tuple2[String,Plan]] match {
case (m,p) => {
plan=p
_metadata=m
}
}
}
}
try {
init()
} catch {
case e: Exception => {
close()
throw e
}
}
}
class HomeModel(
poster: (String,Plan,String)=>Unit,
loadPlan: ()=>(String,Plan)
) extends HomeSource {
loadPlan() match { case (x,y) => { metadata=x; plan=y; } }
def post(s: String) = {
poster(metadata,plan,s)
loadPlan() match { case (x,y) => { plan=y } }
}
}
// vim: sw=2:softtabstop=2:et:
|
patrickboe/thimblus
|
src/main/scala/org/thimblus/model/HomeModel.scala
|
Scala
|
gpl-3.0
| 2,160 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.feats
import io.truthencode.ddo.model.race.Race
import io.truthencode.ddo.support.requisite.{FeatRequisiteImpl, GrantsToRace}
/**
* Created by adarr on 2/19/2017.
*/
trait Elf extends FeatRequisiteImpl with Passive with GrantsToRace { self: RacialFeat =>
override def grantsToRace: Seq[(Race, Int)] = List((Race.Elf, 1), (Race.Morninglord, 1))
}
|
adarro/ddo-calc
|
subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/feats/Elf.scala
|
Scala
|
apache-2.0
| 1,057 |
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.utils
import joptsimple.{OptionSpec, OptionParser}
import scala.collection.JavaConverters._
import scala.language.implicitConversions
import java.io.{PrintStream, OutputStream}
trait ArgumentParsingSupport {
protected lazy val parser = new OptionParser()
private var options: joptsimple.OptionSet = _
parser.allowsUnrecognizedOptions()
/**
* Parses the arguments provided as a string, updating all internal
* references to specific arguments.
*
* @param args The arguments as a string
* @param delimiter An optional delimiter for separating arguments
*/
def parseArgs(args: String, delimiter: String = " ") = {
options = parser.parse(args.split(delimiter): _*)
options.nonOptionArguments().asScala.map(_.toString)
}
def printHelp(outputStream: OutputStream, usage: String) = {
val printStream = new PrintStream(outputStream)
printStream.println(s"Usage: $usage\\n")
parser.printHelpOn(outputStream)
}
implicit def has[T](spec: OptionSpec[T]): Boolean = {
require(options != null, "Arguments not parsed yet!")
options.has(spec)
}
implicit def get[T](spec: OptionSpec[T]): Option[T] = {
require(options != null, "Arguments not parsed yet!")
Some(options.valueOf(spec)).filter(_ != null)
}
}
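/**
 * Hedged usage sketch (not part of the original file): declares one option on the
 * shared parser and reads it back after parseArgs. The "profile" option and the
 * sample argument string are illustrative only.
 */
object ArgumentParsingExample extends ArgumentParsingSupport {
  private val profileSpec =
    parser.accepts("profile").withRequiredArg().ofType(classOf[String])
  def main(args: Array[String]): Unit = {
    val nonOptionArgs = parseArgs("--profile dev extra-arg")
    // has() and get() are the trait's helpers; they require parseArgs to have run first.
    if (has(profileSpec)) println(s"profile = ${get(profileSpec).getOrElse("<none>")}")
    println(s"non-option arguments: ${nonOptionArgs.mkString(", ")}")
  }
}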
|
bpburns/spark-kernel
|
kernel-api/src/main/scala/com/ibm/spark/utils/ArgumentParsingSupport.scala
|
Scala
|
apache-2.0
| 1,890 |
/*
* Copyright (c) 2010-2011 Belmont Technology Pty Ltd. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sodatest.runtime.data
import blocks.Block
class SodaTest(val testName: String, val testPath: String, val blocks: List[Block])
|
GrahamLea/SodaTest
|
sodatest-runtime/src/main/scala/org/sodatest/runtime/data/SodaTest.scala
|
Scala
|
apache-2.0
| 781 |
package models.daos.slick
import models.User
import play.api.db.slick._
import play.api.db.slick.Config.driver.simple._
import models.daos.slick.DBTableDefinitions._
import com.mohiva.play.silhouette.core.LoginInfo
import scala.concurrent.Future
import java.util.UUID
import play.Logger
import models.daos.UserDAO
/**
* Give access to the user object using Slick
*/
class UserDAOSlick extends UserDAO {
import play.api.Play.current
/**
* Finds a user by its login info.
*
* @param loginInfo The login info of the user to find.
* @return The found user or None if no user for the given login info could be found.
*/
def find(loginInfo: LoginInfo) = {
DB withSession { implicit session =>
Future.successful {
slickLoginInfos.filter(
x => x.providerID === loginInfo.providerID && x.providerKey === loginInfo.providerKey
).firstOption match {
case Some(info) =>
slickUserLoginInfos.filter(_.loginInfoId === info.id).firstOption match {
case Some(userLoginInfo) =>
slickUsers.filter(_.id === userLoginInfo.userID).firstOption match {
case Some(user) =>
Some(User(UUID.fromString(user.userID), loginInfo, user.firstName, user.lastName, user.fullName, user.email, user.avatarURL))
case None => None
}
case None => None
}
case None => None
}
}
}
}
/**
* Finds a user by its user ID.
*
* @param userID The ID of the user to find.
* @return The found user or None if no user for the given ID could be found.
*/
def find(userID: UUID) = {
DB withSession { implicit session =>
Future.successful {
slickUsers.filter(
_.id === userID.toString
).firstOption match {
case Some(user) =>
slickUserLoginInfos.filter(_.userID === user.userID).firstOption match {
case Some(info) =>
slickLoginInfos.filter(_.id === info.loginInfoId).firstOption match {
case Some(loginInfo) =>
Some(User(UUID.fromString(user.userID), LoginInfo(loginInfo.providerID, loginInfo.providerKey), user.firstName, user.lastName, user.fullName, user.email, user.avatarURL))
case None => None
}
case None => None
}
case None => None
}
}
}
}
/**
* Saves a user.
*
* @param user The user to save.
* @return The saved user.
*/
def save(user: User) = {
DB withSession { implicit session =>
Future.successful {
val dbUser = DBUser(user.userID.toString, user.firstName, user.lastName, user.fullName, user.email, user.avatarURL)
slickUsers.filter(_.id === dbUser.userID).firstOption match {
case Some(userFound) => slickUsers.filter(_.id === dbUser.userID).update(dbUser)
case None => slickUsers.insert(dbUser)
}
var dbLoginInfo = DBLoginInfo(None, user.loginInfo.providerID, user.loginInfo.providerKey)
// Insert if it does not exist yet
slickLoginInfos.filter(info => info.providerID === dbLoginInfo.providerID && info.providerKey === dbLoginInfo.providerKey).firstOption match {
case None => slickLoginInfos.insert(dbLoginInfo)
case Some(info) => Logger.debug("Nothing to insert since info already exists: " + info)
}
dbLoginInfo = slickLoginInfos.filter(info => info.providerID === dbLoginInfo.providerID && info.providerKey === dbLoginInfo.providerKey).first
// Now make sure they are connected
slickUserLoginInfos.filter(info => info.userID === dbUser.userID && info.loginInfoId === dbLoginInfo.id).firstOption match {
case Some(info) =>
// They are connected already, we could as well omit this case ;)
case None =>
slickUserLoginInfos += DBUserLoginInfo(dbUser.userID, dbLoginInfo.id.get)
}
user // We do not change the user => return it
}
}
}
}
|
sne11ius/play-silhouette-slick-seed
|
app/models/daos/slick/UserDAOSlick.scala
|
Scala
|
apache-2.0
| 4,100 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.producer
import org.apache.kafka.common.protocol.Errors
import collection.mutable.HashMap
import kafka.api.TopicMetadata
import kafka.common.KafkaException
import kafka.utils.Logging
import kafka.client.ClientUtils
@deprecated("This class has been deprecated and will be removed in a future release.", "0.10.0.0")
class BrokerPartitionInfo(producerConfig: ProducerConfig,
producerPool: ProducerPool,
topicPartitionInfo: HashMap[String, TopicMetadata])
extends Logging {
val brokerList = producerConfig.brokerList
val brokers = ClientUtils.parseBrokerList(brokerList)
/**
   * Return a sequence of PartitionAndLeader entries (topic, partition id and, if known,
   * the id of the leader broker) for the given topic, ordered by partition id.
   * @param topic the topic for which this information is to be returned
   * @return a sequence of PartitionAndLeader for the topic's partitions
*/
def getBrokerPartitionInfo(topic: String, correlationId: Int): Seq[PartitionAndLeader] = {
debug("Getting broker partition info for topic %s".format(topic))
// check if the cache has metadata for this topic
val topicMetadata = topicPartitionInfo.get(topic)
val metadata: TopicMetadata =
topicMetadata match {
case Some(m) => m
case None =>
// refresh the topic metadata cache
updateInfo(Set(topic), correlationId)
val topicMetadata = topicPartitionInfo.get(topic)
topicMetadata match {
case Some(m) => m
case None => throw new KafkaException("Failed to fetch topic metadata for topic: " + topic)
}
}
val partitionMetadata = metadata.partitionsMetadata
if(partitionMetadata.isEmpty) {
if(metadata.errorCode != Errors.NONE.code) {
throw new KafkaException(Errors.forCode(metadata.errorCode).exception)
} else {
throw new KafkaException("Topic metadata %s has empty partition metadata and no error code".format(metadata))
}
}
partitionMetadata.map { m =>
m.leader match {
case Some(leader) =>
debug("Partition [%s,%d] has leader %d".format(topic, m.partitionId, leader.id))
new PartitionAndLeader(topic, m.partitionId, Some(leader.id))
case None =>
debug("Partition [%s,%d] does not have a leader yet".format(topic, m.partitionId))
new PartitionAndLeader(topic, m.partitionId, None)
}
}.sortWith((s, t) => s.partitionId < t.partitionId)
}
/**
   * Updates the cache by issuing a topic-metadata request to a random broker.
* @param topics the topics for which the metadata is to be fetched
*/
def updateInfo(topics: Set[String], correlationId: Int) {
var topicsMetadata: Seq[TopicMetadata] = Nil
val topicMetadataResponse = ClientUtils.fetchTopicMetadata(topics, brokers, producerConfig, correlationId)
topicsMetadata = topicMetadataResponse.topicsMetadata
// throw partition specific exception
topicsMetadata.foreach(tmd =>{
trace("Metadata for topic %s is %s".format(tmd.topic, tmd))
if(tmd.errorCode == Errors.NONE.code) {
topicPartitionInfo.put(tmd.topic, tmd)
} else
warn("Error while fetching metadata [%s] for topic [%s]: %s ".format(tmd, tmd.topic, Errors.forCode(tmd.errorCode).exception.getClass))
tmd.partitionsMetadata.foreach(pmd =>{
if (pmd.errorCode != Errors.NONE.code && pmd.errorCode == Errors.LEADER_NOT_AVAILABLE.code) {
warn("Error while fetching metadata %s for topic partition [%s,%d]: [%s]".format(pmd, tmd.topic, pmd.partitionId,
Errors.forCode(pmd.errorCode).exception.getClass))
} // any other error code (e.g. ReplicaNotAvailable) can be ignored since the producer does not need to access the replica and isr metadata
})
})
producerPool.updateProducer(topicsMetadata)
}
}
@deprecated("This class has been deprecated and will be removed in a future release.", "0.10.0.0")
case class PartitionAndLeader(topic: String, partitionId: Int, leaderBrokerIdOpt: Option[Int])
|
flange/drift-dev
|
kafka/00-kafka_2.11-0.10.1.0/libs/tmp/kafka/producer/BrokerPartitionInfo.scala
|
Scala
|
apache-2.0
| 4,875 |
package models.join
import models.db._
/**
*
* @author ponkotuy
* Date: 14/12/16.
*/
case class ShipWithSpecs(ship: Ship, master: MasterShipBase, stype: MasterStype, spec: MasterShipSpecs) extends ShipParameter
|
nekoworkshop/MyFleetGirls
|
server/app/models/join/ShipWithSpecs.scala
|
Scala
|
mit
| 217 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala.util
import org.apache.flink.api.java.Utils.ChecksumHashCode
import org.apache.flink.api.scala._
import org.apache.flink.api.scala.utils._
import org.apache.flink.test.util.MultipleProgramsTestBase
import org.junit._
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
@RunWith(classOf[Parameterized])
class DataSetUtilsITCase (
mode: MultipleProgramsTestBase.TestExecutionMode)
extends MultipleProgramsTestBase(mode) {
@Test
@throws(classOf[Exception])
def testZipWithIndex(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val expectedSize = 100L
val numbers = env.generateSequence(0, expectedSize - 1)
val result = numbers.zipWithIndex.collect()
Assert.assertEquals(expectedSize, result.size)
for( ((index, _), expected) <- result.sortBy(_._1).zipWithIndex) {
Assert.assertEquals(expected, index)
}
}
@Test
@throws(classOf[Exception])
def testZipWithUniqueId(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val expectedSize = 100L
val numbers = env.generateSequence(1L, expectedSize)
val result = numbers.zipWithUniqueId.collect().map(_._1).toSet
Assert.assertEquals(expectedSize, result.size)
}
@Test
def testIntegerDataSetChecksumHashCode(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val ds = CollectionDataSets.getIntDataSet(env)
val checksum: ChecksumHashCode = ds.checksumHashCode()
Assert.assertEquals(checksum.getCount, 15)
Assert.assertEquals(checksum.getChecksum, 55)
}
@Test
@throws(classOf[Exception])
def testCountElementsPerPartition(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val expectedSize = 100L
val numbers = env.generateSequence(0, expectedSize - 1)
val ds = numbers.countElementsPerPartition
Assert.assertEquals(env.getParallelism, ds.collect().size)
Assert.assertEquals(expectedSize, ds.sum(1).collect().head._2)
}
}
|
yew1eb/flink
|
flink-tests/src/test/scala/org/apache/flink/api/scala/util/DataSetUtilsITCase.scala
|
Scala
|
apache-2.0
| 2,840 |
package org.jetbrains.plugins.scala
package lang.resolve.processor
import java.util
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.psi.{PsiClass, PsiElement, PsiPackage}
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScTypeAliasDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.ScImportExpr
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.usages.{ImportExprUsed, ImportSelectorUsed, ImportWildcardSelectorUsed}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.packaging.ScPackaging
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScObject
import org.jetbrains.plugins.scala.lang.resolve.{ResolveUtils, ScalaResolveResult}
import org.jetbrains.plugins.scala.util.ScEquivalenceUtil
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
/**
* User: Alexander Podkhalyuzin
* Date: 01.12.11
*/
//todo: logic is too complicated, too many connections between classes. Rewrite?
trait PrecedenceHelper[T] {
this: BaseProcessor =>
protected def getPlace: PsiElement
protected lazy val placePackageName: String = ResolveUtils.getPlacePackage(getPlace)
protected val levelSet: util.HashSet[ScalaResolveResult] = new util.HashSet
protected val qualifiedNamesSet: util.HashSet[T] = new util.HashSet[T]
protected val levelQualifiedNamesSet: util.HashSet[T] = new util.HashSet[T]
protected val ignoredSet: util.HashSet[ScalaResolveResult] = new util.HashSet[ScalaResolveResult]
protected sealed trait HistoryEvent
protected case object ChangedLevel extends HistoryEvent
protected case class AddResult(results: Seq[ScalaResolveResult]) extends HistoryEvent
protected val history: ArrayBuffer[HistoryEvent] = new ArrayBuffer
private var fromHistory: Boolean = false
protected def compareWithIgnoredSet(set: mutable.HashSet[ScalaResolveResult]): Boolean = {
import scala.collection.JavaConversions._
if (ignoredSet.nonEmpty && set.isEmpty) return false
ignoredSet.forall { result =>
set.forall { otherResult =>
if (!ScEquivalenceUtil.smartEquivalence(result.getActualElement, otherResult.getActualElement)) {
(result.getActualElement, otherResult.getActualElement) match {
case (ta: ScTypeAliasDefinition, cls: PsiClass) => ta.isExactAliasFor(cls)
case (cls: PsiClass, ta: ScTypeAliasDefinition) => ta.isExactAliasFor(cls)
case _ => false
}
} else true
}
}
}
protected def restartFromHistory(): Unit = {
candidatesSet.clear()
ignoredSet.clear()
levelQualifiedNamesSet.clear()
qualifiedNamesSet.clear()
levelSet.clear()
fromHistory = true
try {
history.foreach {
case ChangedLevel => changedLevel
case AddResult(results) => addResults(results)
}
}
finally fromHistory = false
}
def isUpdateHistory: Boolean = false
protected def addChangedLevelToHistory(): Unit = {
if (isUpdateHistory && !fromHistory && history.lastOption != Some(ChangedLevel)) history += ChangedLevel
}
protected def getQualifiedName(result: ScalaResolveResult): T
private lazy val suspiciousPackages: Set[String] = {
def collectPackages(elem: PsiElement, res: Set[String] = Set.empty): Set[String] = {
PsiTreeUtil.getContextOfType(elem, true, classOf[ScPackaging]) match {
case null => res
case p: ScPackaging => collectPackages(p, res + p.fullPackageName)
}
}
Set("scala", "java.lang", "scala", "scala.Predef") ++ collectPackages(getPlace)
}
protected def isSpecialResult(result: ScalaResolveResult): Boolean = {
val importsUsed = result.importsUsed.toSeq
if (importsUsed.length == 1) {
val importExpr = importsUsed.head match {
case ImportExprUsed(expr) => expr
case ImportSelectorUsed(selector) => PsiTreeUtil.getContextOfType(selector, true, classOf[ScImportExpr])
case ImportWildcardSelectorUsed(expr) => expr
}
importExpr.qualifier.bind() match {
case Some(ScalaResolveResult(p: PsiPackage, _)) => suspiciousPackages.contains(p.getQualifiedName)
case Some(ScalaResolveResult(o: ScObject, _)) => suspiciousPackages.contains(o.qualifiedName)
case _ => false
}
} else false
}
/**
* Returns highest precedence of all resolve results.
* 1 - import a._
* 2 - import a.x
* 3 - definition or declaration
*/
protected def getTopPrecedence(result: ScalaResolveResult): Int
protected def setTopPrecedence(result: ScalaResolveResult, i: Int)
protected def filterNot(p: ScalaResolveResult, n: ScalaResolveResult): Boolean = {
getPrecedence(p) < getTopPrecedence(n)
}
protected def isCheckForEqualPrecedence = true
protected def clearLevelQualifiedSet(result: ScalaResolveResult) {
levelQualifiedNamesSet.clear()
}
protected def getLevelSet(result: ScalaResolveResult): util.HashSet[ScalaResolveResult] = levelSet
/**
* Do not add ResolveResults through candidatesSet. It may break precedence. Use this method instead.
*/
protected def addResult(result: ScalaResolveResult): Boolean = addResults(Seq(result))
protected def addResults(results: Seq[ScalaResolveResult]): Boolean = {
if (isUpdateHistory && !fromHistory) history += AddResult(results)
if (results.isEmpty) return true
val result: ScalaResolveResult = results.head
lazy val qualifiedName: T = getQualifiedName(result)
lazy val levelSet = getLevelSet(result)
def addResults() {
if (qualifiedName != null) levelQualifiedNamesSet.add(qualifiedName)
val iterator = results.iterator
while (iterator.hasNext) {
levelSet.add(iterator.next())
}
}
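    // Precedence filtering: results below the current top precedence are dropped;
    // equal-precedence results are added only if the level set is non-empty and their
    // qualified name has not been seen yet (special import results are diverted to the
    // ignored set); higher-precedence results become the new top and evict weaker
    // entries from the level set.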
val currentPrecedence = getPrecedence(result)
val topPrecedence = getTopPrecedence(result)
if (currentPrecedence < topPrecedence) return false
else if (currentPrecedence == topPrecedence && levelSet.isEmpty) return false
else if (currentPrecedence == topPrecedence) {
if (isCheckForEqualPrecedence && qualifiedName != null &&
(levelQualifiedNamesSet.contains(qualifiedName) ||
qualifiedNamesSet.contains(qualifiedName))) {
return false
} else if (qualifiedName != null && qualifiedNamesSet.contains(qualifiedName)) return false
if (!fromHistory && isUpdateHistory && isSpecialResult(result)) {
results.foreach(ignoredSet.add)
} else addResults()
} else {
if (qualifiedName != null && qualifiedNamesSet.contains(qualifiedName)) {
return false
} else {
if (!fromHistory && isUpdateHistory && isSpecialResult(result)) {
results.foreach(ignoredSet.add)
} else {
setTopPrecedence(result, currentPrecedence)
val levelSetIterator = levelSet.iterator()
while (levelSetIterator.hasNext) {
val next = levelSetIterator.next()
if (filterNot(next, result)) {
levelSetIterator.remove()
}
}
clearLevelQualifiedSet(result)
addResults()
}
}
}
true
}
protected def getPrecedence(result: ScalaResolveResult): Int = {
specialPriority match {
case Some(priority) => priority
case None => result.getPrecedence(getPlace, placePackageName)
}
}
}
object PrecedenceHelper {
object PrecedenceTypes {
val JAVA_LANG = 1
val SCALA = 2
val SCALA_PREDEF = 3
val PACKAGE_LOCAL_PACKAGE = 4
val WILDCARD_IMPORT_PACKAGE = 5
val IMPORT_PACKAGE = 6
val PACKAGE_LOCAL = 7
val WILDCARD_IMPORT = 8
val IMPORT = 9
val OTHER_MEMBERS = 10
}
}
|
LPTK/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/resolve/processor/PrecedenceHelper.scala
|
Scala
|
apache-2.0
| 7,661 |
package sigmastate.serialization.generators
import org.scalacheck.{Arbitrary, Gen}
import sigmastate.Values.{FalseLeaf, TrueLeaf}
import sigmastate.{If, SInt, TreeLookup}
trait RelationGenerators {
this: ObjectGenerators with ConcreteCollectionGenerators =>
val treeLookupGen: Gen[TreeLookup] = for {
t <- arbTaggedAvlTree.arbitrary
b1 <- arbByteArrayConstant.arbitrary
b2 <- arbByteArrayConstant.arbitrary
} yield TreeLookup(t, b1, b2)
val ifGen: Gen[If[SInt.type]] = for {
c <- Gen.oneOf(TrueLeaf, FalseLeaf)
tb <- arbIntConstants.arbitrary
fb <- arbIntConstants.arbitrary
} yield If(c, tb, fb)
implicit val arbTreeLookup: Arbitrary[TreeLookup] = Arbitrary(treeLookupGen)
implicit val arbIf: Arbitrary[If[SInt.type]] = Arbitrary(ifGen)
}
|
ScorexFoundation/sigmastate-interpreter
|
sigmastate/src/test/scala/sigmastate/serialization/generators/RelationGenerators.scala
|
Scala
|
mit
| 784 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming
import java.io.{File, IOException}
import java.nio.charset.StandardCharsets
import java.util.UUID
import java.util.concurrent.TimeUnit
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
import scala.util.Random
import com.google.common.io.Files
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.internal.Logging
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.util.Utils
private[streaming]
object MasterFailureTest extends Logging {
@volatile var killed = false
@volatile var killCount = 0
@volatile var setupCalled = false
def main(args: Array[String]): Unit = {
// scalastyle:off println
if (args.size < 2) {
println(
"Usage: MasterFailureTest <local/HDFS directory> <# batches> " +
"[<batch size in milliseconds>]")
System.exit(1)
}
val directory = args(0)
val numBatches = args(1).toInt
val batchDuration = if (args.size > 2) Milliseconds(args(2).toInt) else Seconds(1)
println("\n\n========================= MAP TEST =========================\n\n")
testMap(directory, numBatches, batchDuration)
println("\n\n================= UPDATE-STATE-BY-KEY TEST =================\n\n")
testUpdateStateByKey(directory, numBatches, batchDuration)
println("\n\nSUCCESS\n\n")
// scalastyle:on println
}
def testMap(directory: String, numBatches: Int, batchDuration: Duration): Unit = {
// Input: time=1 ==> [ 1 ] , time=2 ==> [ 2 ] , time=3 ==> [ 3 ] , ...
val input = (1 to numBatches).map(_.toString).toSeq
// Expected output: time=1 ==> [ 1 ] , time=2 ==> [ 2 ] , time=3 ==> [ 3 ] , ...
val expectedOutput = (1 to numBatches)
val operation = (st: DStream[String]) => st.map(_.toInt)
// Run streaming operation with multiple master failures
val output = testOperation(directory, batchDuration, input, operation, expectedOutput)
logInfo("Expected output, size = " + expectedOutput.size)
logInfo(expectedOutput.mkString("[", ",", "]"))
logInfo("Output, size = " + output.size)
logInfo(output.mkString("[", ",", "]"))
// Verify whether all the values of the expected output is present
// in the output
assert(output.distinct.toSet == expectedOutput.toSet)
}
def testUpdateStateByKey(directory: String, numBatches: Int, batchDuration: Duration): Unit = {
// Input: time=1 ==> [ a ] , time=2 ==> [ a, a ] , time=3 ==> [ a, a, a ] , ...
val input = (1 to numBatches).map(i => (1 to i).map(_ => "a").mkString(" ")).toSeq
// Expected output: time=1 ==> [ (a, 1) ] , time=2 ==> [ (a, 3) ] , time=3 ==> [ (a,6) ] , ...
val expectedOutput = (1L to numBatches).map(i => (1L to i).sum).map(j => ("a", j))
val operation = (st: DStream[String]) => {
val updateFunc = (values: Seq[Long], state: Option[Long]) => {
Some(values.foldLeft(0L)(_ + _) + state.getOrElse(0L))
}
st.flatMap(_.split(" "))
.map(x => (x, 1L))
.updateStateByKey[Long](updateFunc)
.checkpoint(batchDuration * 5)
}
// Run streaming operation with multiple master failures
val output = testOperation(directory, batchDuration, input, operation, expectedOutput)
logInfo("Expected output, size = " + expectedOutput.size + "\n" + expectedOutput)
logInfo("Output, size = " + output.size + "\n" + output)
// Verify whether all the values in the output are among the expected output values
output.foreach(o =>
assert(expectedOutput.contains(o), "Expected value " + o + " not found")
)
    // Verify whether the last expected output value has been generated, thereby
    // confirming that none of the inputs have been missed
assert(output.last == expectedOutput.last)
}
/**
* Tests stream operation with multiple master failures, and verifies whether the
* final set of output values is as expected or not.
*/
def testOperation[T: ClassTag](
directory: String,
batchDuration: Duration,
input: Seq[String],
operation: DStream[String] => DStream[T],
expectedOutput: Seq[T]
): Seq[T] = {
// Just making sure that the expected output does not have duplicates
assert(expectedOutput.distinct.toSet == expectedOutput.toSet)
// Reset all state
reset()
// Create the directories for this test
val uuid = UUID.randomUUID().toString
val rootDir = new Path(directory, uuid)
val fs = rootDir.getFileSystem(new Configuration())
val checkpointDir = new Path(rootDir, "checkpoint")
val testDir = new Path(rootDir, "test")
fs.mkdirs(checkpointDir)
fs.mkdirs(testDir)
// Setup the stream computation with the given operation
val ssc = StreamingContext.getOrCreate(checkpointDir.toString, () => {
setupStreams(batchDuration, operation, checkpointDir, testDir)
})
// Check if setupStream was called to create StreamingContext
// (and not created from checkpoint file)
assert(setupCalled, "Setup was not called in the first call to StreamingContext.getOrCreate")
    // Start generating files in a different thread
val fileGeneratingThread = new FileGeneratingThread(input, testDir, batchDuration.milliseconds)
fileGeneratingThread.start()
// Run the streams and repeatedly kill it until the last expected output
// has been generated, or until it has run for twice the expected time
val lastExpectedOutput = expectedOutput.last
val maxTimeToRun = expectedOutput.size * batchDuration.milliseconds * 2
val mergedOutput = runStreams(ssc, lastExpectedOutput, maxTimeToRun)
fileGeneratingThread.join()
ssc.stop()
fs.delete(checkpointDir, true)
fs.delete(testDir, true)
logInfo("Finished test after " + killCount + " failures")
mergedOutput
}
/**
* Sets up the stream computation with the given operation, directory (local or HDFS),
* and batch duration. Returns the streaming context and the directory to which
* files should be written for testing.
*/
private def setupStreams[T: ClassTag](
batchDuration: Duration,
operation: DStream[String] => DStream[T],
checkpointDir: Path,
testDir: Path
): StreamingContext = {
// Mark that setup was called
setupCalled = true
// Setup the streaming computation with the given operation
val ssc = new StreamingContext("local[4]", "MasterFailureTest", batchDuration, null, Nil,
Map())
ssc.checkpoint(checkpointDir.toString)
val inputStream = ssc.textFileStream(testDir.toString)
val operatedStream = operation(inputStream)
val outputStream = new TestOutputStream(operatedStream)
outputStream.register()
ssc
}
/**
   * Repeatedly starts and kills the streaming context until timed out or
   * the last expected output is generated. Finally, returns the merged output of all runs.
*/
private def runStreams[T: ClassTag](
_ssc: StreamingContext,
lastExpectedOutput: T,
maxTimeToRun: Long
): Seq[T] = {
var ssc = _ssc
var totalTimeRan = 0L
var isLastOutputGenerated = false
var isTimedOut = false
val mergedOutput = new ArrayBuffer[T]()
val checkpointDir = ssc.checkpointDir
val batchDuration = ssc.graph.batchDuration
while(!isLastOutputGenerated && !isTimedOut) {
// Get the output buffer
val outputQueue = ssc.graph.getOutputStreams().head.asInstanceOf[TestOutputStream[T]].output
def output = outputQueue.asScala.flatten
// Start the thread to kill the streaming after some time
killed = false
val killingThread = new KillingThread(ssc, batchDuration.milliseconds * 10)
killingThread.start()
var timeRan = 0L
try {
// Start the streaming computation and let it run while ...
// (i) StreamingContext has not been shut down yet
// (ii) The last expected output has not been generated yet
// (iii) Its not timed out yet
System.clearProperty("spark.streaming.clock")
System.clearProperty("spark.driver.port")
ssc.start()
val startTimeNs = System.nanoTime()
while (!killed && !isLastOutputGenerated && !isTimedOut) {
Thread.sleep(100)
timeRan = System.nanoTime() - startTimeNs
isLastOutputGenerated = (output.nonEmpty && output.last == lastExpectedOutput)
isTimedOut = (timeRan + totalTimeRan > TimeUnit.MILLISECONDS.toNanos(maxTimeToRun))
}
} catch {
case e: Exception => logError("Error running streaming context", e)
} finally {
ssc.stop()
}
if (killingThread.isAlive) {
killingThread.interrupt()
// SparkContext.stop will set SparkEnv.env to null. We need to make sure SparkContext is
// stopped before running the next test. Otherwise, it's possible that we set SparkEnv.env
// to null after the next test creates the new SparkContext and fail the test.
killingThread.join()
}
logInfo("Has been killed = " + killed)
logInfo("Is last output generated = " + isLastOutputGenerated)
logInfo("Is timed out = " + isTimedOut)
// Verify whether the output of each batch has only one element or no element
// and then merge the new output with all the earlier output
mergedOutput ++= output.toSeq
totalTimeRan += timeRan
logInfo("New output = " + output.toSeq)
logInfo("Merged output = " + mergedOutput)
logInfo("Time ran = " + timeRan)
logInfo("Total time ran = " + TimeUnit.NANOSECONDS.toMillis(totalTimeRan))
if (!isLastOutputGenerated && !isTimedOut) {
val sleepTime = Random.nextInt(batchDuration.milliseconds.toInt * 10)
logInfo(
"\n-------------------------------------------\n" +
" Restarting stream computation in " + sleepTime + " ms " +
"\n-------------------------------------------\n"
)
Thread.sleep(sleepTime)
// Recreate the streaming context from checkpoint
ssc = StreamingContext.getOrCreate(checkpointDir, () => {
throw new Exception("Trying to create new context when it " +
"should be reading from checkpoint file")
})
}
}
mergedOutput
}
/**
   * Verifies that the output values are the same as expected. Since failures can lead to
   * a batch being processed twice, a batch's output may appear more than once
   * consecutively. To avoid getting confused by those, we eliminate consecutive
   * duplicate batch outputs from `output`. As a result, the
   * expected output should not have consecutive batches with the same values as output.
*/
private def verifyOutput[T: ClassTag](output: Seq[T], expectedOutput: Seq[T]): Unit = {
    // Verify that the expected outputs do not contain consecutive batches with the same output
for (i <- 0 until expectedOutput.size - 1) {
assert(expectedOutput(i) != expectedOutput(i + 1),
"Expected output has consecutive duplicate sequence of values")
}
// Log the output
// scalastyle:off println
println("Expected output, size = " + expectedOutput.size)
println(expectedOutput.mkString("[", ",", "]"))
println("Output, size = " + output.size)
println(output.mkString("[", ",", "]"))
// scalastyle:on println
// Match the output with the expected output
output.foreach(o =>
assert(expectedOutput.contains(o), "Expected value " + o + " not found")
)
}
/** Resets counter to prepare for the test */
private def reset(): Unit = {
killed = false
killCount = 0
setupCalled = false
}
}
/**
* Thread to kill streaming context after a random period of time.
*/
private[streaming]
class KillingThread(ssc: StreamingContext, maxKillWaitTime: Long) extends Thread with Logging {
override def run(): Unit = {
try {
// If it is the first killing, then allow the first checkpoint to be created
      val minKillWaitTime = if (MasterFailureTest.killCount == 0) 5000 else 2000
val killWaitTime = minKillWaitTime + math.abs(Random.nextLong % maxKillWaitTime)
logInfo("Kill wait time = " + killWaitTime)
Thread.sleep(killWaitTime)
logInfo(
"\n---------------------------------------\n" +
"Killing streaming context after " + killWaitTime + " ms" +
"\n---------------------------------------\n"
)
if (ssc != null) {
ssc.stop()
MasterFailureTest.killed = true
MasterFailureTest.killCount += 1
}
logInfo("Killing thread finished normally")
} catch {
case ie: InterruptedException => logInfo("Killing thread interrupted")
case e: Exception => logWarning("Exception in killing thread", e)
}
}
}
/**
* Thread to generate input files periodically with the desired text.
*/
private[streaming]
class FileGeneratingThread(input: Seq[String], testDir: Path, interval: Long)
extends Thread with Logging {
override def run(): Unit = {
val localTestDir = Utils.createTempDir()
var fs = testDir.getFileSystem(new Configuration())
val maxTries = 3
try {
Thread.sleep(5000) // To make sure that all the streaming context has been set up
for (i <- 0 until input.size) {
// Write the data to a local file and then move it to the target test directory
val localFile = new File(localTestDir, (i + 1).toString)
val hadoopFile = new Path(testDir, (i + 1).toString)
val tempHadoopFile = new Path(testDir, ".tmp_" + (i + 1).toString)
Files.write(input(i) + "\n", localFile, StandardCharsets.UTF_8)
var tries = 0
var done = false
while (!done && tries < maxTries) {
tries += 1
try {
// fs.copyFromLocalFile(new Path(localFile.toString), hadoopFile)
fs.copyFromLocalFile(new Path(localFile.toString), tempHadoopFile)
fs.rename(tempHadoopFile, hadoopFile)
done = true
} catch {
case ioe: IOException =>
fs = testDir.getFileSystem(new Configuration())
logWarning("Attempt " + tries + " at generating file " + hadoopFile + " failed.",
ioe)
}
}
if (!done) {
logError("Could not generate file " + hadoopFile)
} else {
logInfo("Generated file " + hadoopFile + " at " + System.currentTimeMillis)
}
Thread.sleep(interval)
localFile.delete()
}
logInfo("File generating thread finished normally")
} catch {
case ie: InterruptedException => logInfo("File generating thread interrupted")
case e: Exception => logWarning("File generating in killing thread", e)
} finally {
fs.close()
Utils.deleteRecursively(localTestDir)
}
}
}
|
rezasafi/spark
|
streaming/src/test/scala/org/apache/spark/streaming/MasterFailureTest.scala
|
Scala
|
apache-2.0
| 15,744 |
package domala.jdbc.query
import domala.internal.expr.ExpressionEvaluator
import domala.internal.jdbc.sql.NodePreparedSqlBuilder
import org.seasar.doma.jdbc._
class SqlFileModifyQuery(kind: SqlKind, sqlFilePath: String) extends AbstractSqlModifyQuery(kind) {
protected var sqlFile: SqlFile = _
protected def prepareSql(): Unit = {
sqlFile = config.getSqlFileRepository.getSqlFile(method, sqlFilePath, config.getDialect)
val evaluator = new ExpressionEvaluator(this.parameters, this.config.getDialect.getExpressionFunctions, this.config.getClassHelper)
val sqlBuilder = new NodePreparedSqlBuilder(this.config, this.kind, evaluator, this.sqlLogType, this.expandColumns _, this.populateValues _, sqlFilePath)
this.sql = sqlBuilder.build(this.sqlFile.getSqlNode, this.comment _)
}
}
|
bakenezumi/domala
|
core/src/main/scala/domala/jdbc/query/SqlFileModifyQuery.scala
|
Scala
|
apache-2.0
| 807 |
package sampler.r
import java.nio.file.{Paths, Files}
import collection.JavaConversions.asScalaBuffer
import java.nio.file.Path
import org.scalatest.BeforeAndAfter
import org.scalatest.FreeSpec
class ScriptRunnerTest extends FreeSpec with BeforeAndAfter {
val workingDir = Paths.get(getClass.getClassLoader.getResource("data").toURI())
val scriptPath = workingDir.resolve("deleteMe.r")
val rOutPath = workingDir.resolve("deleteMe.r.Rout")
val jsonPath = workingDir.resolve("deleteMe.json")
val noExtension = workingDir.resolve("noExtension")
"Nothing prints out from simple script" in {
val script =
"""
a <- c(2,4,6)
a
"""
}
"Runs sleep command" in {
val startTime = System.nanoTime
ScriptRunner("Sys.sleep(1)", scriptPath)
val runTime = (System.nanoTime() - startTime) / 1e9
assert(runTime > 1.0)
}
"ScriptRunnerException when code fails" in {
intercept[ScriptRunnerException] {
ScriptRunner("results <- toJSON(object)", scriptPath)
}
intercept[ScriptRunnerException] {
ScriptRunner("library(\\"rjson\\")\\nresults <- toJSON(object)", scriptPath)
}
}
"Error when file name doesn't end with an R file extension" in {
intercept[AssertionError] {
ScriptRunner("a <- 1", noExtension)
}
}
after {
List(scriptPath, rOutPath, jsonPath, noExtension).foreach(Files.deleteIfExists)
}
}
|
tsaratoon/Sampler
|
sampler-core/src/test/scala/sampler/r/ScriptRunnerTest.scala
|
Scala
|
apache-2.0
| 1,362 |
import scala.reflect.runtime.universe._
import scala.reflect.runtime.{currentMirror => m}
import scala.reflect.runtime.{universe => u}
object Test extends App {
locally {
try {
case class Test(a:String,b:List[Int])
val lookAtMe = m.reflect(Test("a",List(5)))
val value = u.weakTypeOf[Test]
val members = value.members
val member = value.members.filter(_.name.encodedName == TermName("a"))
val aAccessor = lookAtMe.reflectMethod(member.head.asMethod)
val thisShouldBeA = aAccessor.apply()
println(thisShouldBeA)
} catch {
case ScalaReflectionException(msg) => println(msg)
}
}
}
|
felixmulder/scala
|
test/files/run/t6323b.scala
|
Scala
|
bsd-3-clause
| 649 |
package demo.components
import chandu0101.macros.tojs.GhPagesMacros
import chandu0101.scalajs.react.components.{IconName, ReactGeomIcon}
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.html_<^._
object ReactGeomIconDemo {
val code = GhPagesMacros.exampleSource
// EXAMPLE:START
case class State(value: String = "", multiValue: String = "")
class Backend(t: BackendScope[_, State]) {
def render(S: State) = {
<.div(
CodeExample(code, "Demo")(
<.div(
ReactGeomIcon(name = IconName.CAMERA, width = "2em", height = "2em", fill = "red")(),
ReactGeomIcon(name = IconName.CALENDAR, width = "2em", height = "2em", fill = "blue")(),
ReactGeomIcon(name = IconName.CHAT, width = "2em", height = "2em", fill = "black")(),
ReactGeomIcon(name = IconName.CHEVRONDOWN,
width = "2em",
height = "2em",
fill = "orange")()
)
)
)
}
}
val component = ScalaComponent
.builder[Unit]("ReactGeomIconDemo")
.initialState(State())
.renderBackend[Backend]
.build
// EXAMPLE:END
def apply() = component()
}
|
rleibman/scalajs-react-components
|
demo/src/main/scala/demo/components/ReactGeomIconDemo.scala
|
Scala
|
apache-2.0
| 1,220 |
package org.jetbrains.plugins.scala.lang.parser.parsing.xml.pattern
import com.intellij.psi.xml.XmlTokenType
import org.jetbrains.plugins.scala.lang.lexer.ScalaXmlTokenTypes
import org.jetbrains.plugins.scala.lang.parser.{ErrMsg, ScalaElementTypes}
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
/**
* @author Alexander Podkhalyuzin
* Date: 18.04.2008
*/
/*
* EmptyElemTagP ::= '<' Name [S]'/>'
*/
object EmptyElemTagP {
def parse(builder: ScalaPsiBuilder): Boolean = {
val tagMarker = builder.mark()
builder.getTokenType match {
case ScalaXmlTokenTypes.XML_START_TAG_START =>
builder.advanceLexer()
case _ =>
tagMarker.drop()
return false
}
builder.getTokenType match {
case ScalaXmlTokenTypes.XML_NAME =>
builder.advanceLexer()
case _ => builder error ErrMsg("xml.name.expected")
}
builder.getTokenType match {
case XmlTokenType.XML_WHITE_SPACE => builder.advanceLexer()
case _ =>
}
builder.getTokenType match {
case ScalaXmlTokenTypes.XML_EMPTY_ELEMENT_END =>
builder.advanceLexer()
tagMarker.done(ScalaElementTypes.XML_EMPTY_TAG)
true
case _ =>
tagMarker.rollbackTo()
false
}
}
}
|
gtache/intellij-lsp
|
intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/parser/parsing/xml/pattern/EmptyElemTagP.scala
|
Scala
|
apache-2.0
| 1,281 |
/*
* Copyright 2020 Spotify AB
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.spotify.scio.transforms.syntax
import com.spotify.scio.values.SCollection
import com.spotify.scio.transforms.DoFnWithResource.ResourceType
import com.spotify.scio.coders.Coder
import com.spotify.scio.transforms.{
CollectFnWithResource,
FilterFnWithResource,
FlatMapFnWithResource,
MapFnWithResource
}
trait SCollectionWithResourceSyntax {
implicit class SCollectionWithResourceFunctions[T](private val self: SCollection[T]) {
/**
* Return a new [[SCollection]] by applying a function that also takes in a resource and
* `ResourceType` to all elements of this SCollection.
*/
def mapWithResource[R, U: Coder](resource: => R, resourceType: ResourceType)(
fn: (R, T) => U
): SCollection[U] =
self.parDo(new MapFnWithResource(resource, resourceType, fn))
/**
* Filter the elements for which the given `PartialFunction` that also takes in a resource and
* `ResourceType` is defined, and then map.
*/
def collectWithResource[R, U: Coder](resource: => R, resourceType: ResourceType)(
pfn: PartialFunction[(R, T), U]
): SCollection[U] =
self.parDo(new CollectFnWithResource(resource, resourceType, pfn))
/**
* Return a new [[SCollection]] by first applying a function that also takes in a resource and
* `ResourceType` to all elements of this SCollection, and then flattening the results.
*/
def flatMapWithResource[R, U: Coder](resource: => R, resourceType: ResourceType)(
fn: (R, T) => TraversableOnce[U]
): SCollection[U] =
self.parDo(new FlatMapFnWithResource(resource, resourceType, fn))
/**
* Return a new [[SCollection]] containing only the elements that satisfy a predicate that takes
* in a resource and `ResourceType`
*/
def filterWithResource[R](resource: => R, resourceType: ResourceType)(
fn: (R, T) => Boolean
): SCollection[T] =
self.parDo(new FilterFnWithResource(resource, resourceType, fn))(self.coder)
}
}
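/** Hedged usage sketch (not part of the upstream file): shares one instance of a
  * hypothetical, expensive-to-create Formatter per DoFn instance instead of building
  * it for every element. ResourceType.PER_INSTANCE is assumed to be one of the values
  * of DoFnWithResource.ResourceType; the Coder for the output is taken as an implicit.
  */
object SCollectionWithResourceExample extends SCollectionWithResourceSyntax {
  final class Formatter {
    def format(i: Int): String = s"value-$i"
  }
  def formatAll(input: SCollection[Int])(implicit c: Coder[String]): SCollection[String] =
    input.mapWithResource(new Formatter, ResourceType.PER_INSTANCE) { (formatter, element) =>
      formatter.format(element)
    }
}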
|
spotify/scio
|
scio-core/src/main/scala/com/spotify/scio/transforms/syntax/SCollectionWithResourceSyntax.scala
|
Scala
|
apache-2.0
| 2,600 |
import DistributedSignal.PVarFactory
import rescala.default._
import rescala.extra.lattices.sequences.RGA.RGA
import rescala.extra.lattices.sequences.{RGA, Vertex}
/** DistributedVertexLists are LinkedLists operating on so called Vertices. Vertices store a value of type `A`.
*
* @param initial The initial value of this variable.
*/
case class PVertexList[A](initial: RGA[A] = RGA.empty[A])
extends DistributedSignal[List[A], RGA[A]](initial, _.toList) {
def contains(v: Vertex): Boolean = crdtSignal.readValueOnce.contains(v)
/** @return True if the list contains both u and v and u is ordered before v. */
def before(u: Vertex, v: Vertex): Boolean = crdtSignal.readValueOnce.before(u, v)
def append(value: A): Unit = crdtSignal.transform(_.append(value))
def successor(v: Vertex): Vertex = crdtSignal.readValueOnce.successor(v)
def valueIterator: Iterator[A] = crdtSignal.readValueOnce.iterator
def iterator: Iterator[Vertex] = crdtSignal.readValueOnce.vertexIterator
}
object PVertexList {
/** Allows creation of DistributedVertexLists by passing a list of initial values. */
def apply[A](values: List[A]): PVertexList[A] = {
val init: RGA[A] = RGA[A](values)
new PVertexList[A](init)
}
// noinspection ConvertExpressionToSAM
implicit def PVertexListFactory[A]: PVarFactory[PVertexList[A]] =
new PVarFactory[PVertexList[A]] {
override def apply(): PVertexList[A] = PVertexList[A]()
}
}
|
guidosalva/REScala
|
Historical/dividiParoli/src/main/scala/PVertexList.scala
|
Scala
|
apache-2.0
| 1,455 |
package authentication.entities
sealed trait AuthState
case object Offline extends AuthState
case object Online extends AuthState
case object SigningIn extends AuthState
case object SigningOut extends AuthState
case object Updating extends AuthState
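// Because AuthState is sealed, call sites can match exhaustively; a sketch:
//
//   def isBusy(state: AuthState): Boolean = state match {
//     case SigningIn | SigningOut | Updating => true
//     case Online | Offline                  => false
//   }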
|
lymr/fun-chat
|
fun-chat-client/src/main/scala/authentication/entities/AuthState.scala
|
Scala
|
mit
| 256 |
/**
* The MIT License (MIT)
*
* Copyright (c) 2018 Israel Freitas([email protected])
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
package ifreitas.scalaaiml.elements
case class ConditionItem(expression: TemplateExpression*) extends TemplateExpression {
def toXml = <li>{ expression.toXml }</li>
}
|
ifreitas/AimlToXml
|
src/main/scala/ifreitas/scalaaiml/elements/ConditionItem.scala
|
Scala
|
mit
| 1,353 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.test.util.QueryTest
import org.junit.Assert
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.spark.exception.ProcessMetaDataException
class CarbonTableSchemaCommonSuite extends QueryTest with BeforeAndAfterAll {
test("Creating table: Duplicate dimensions found with name, it should throw AnalysisException") {
sql("DROP TABLE IF EXISTS carbon_table")
try {
sql(
s"""
| CREATE TABLE carbon_table(
| BB INT, bb char(10)
| )
| STORED BY 'carbondata'
""".stripMargin)
Assert.assertTrue(false)
} catch {
case _: AnalysisException => Assert.assertTrue(true)
case _: Exception => Assert.assertTrue(false)
} finally {
sql("DROP TABLE IF EXISTS carbon_table")
}
}
test("Altering table: Duplicate column found with name, it should throw RuntimeException") {
sql("DROP TABLE IF EXISTS carbon_table")
sql(
s"""
| CREATE TABLE if not exists carbon_table(
| BB INT, cc char(10)
| )
| STORED BY 'carbondata'
""".stripMargin)
val ex = intercept[ProcessMetaDataException] {
sql(
s"""
| alter TABLE carbon_table add columns(
| bb char(10)
           | )
""".stripMargin)
}
sql("DROP TABLE IF EXISTS carbon_table")
}
}
|
ravipesala/incubator-carbondata
|
integration/spark-common-test/src/test/scala/org/apache/spark/sql/execution/command/CarbonTableSchemaCommonSuite.scala
|
Scala
|
apache-2.0
| 2,277 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.it.test
import java.security.KeyStore
import javax.net.ssl._
import play.api.test.PlayRunners
import play.api.{ Application, Configuration, Mode }
import play.core.ApplicationProvider
import play.core.server.ssl.FakeKeyStore
import play.core.server.{ ServerConfig, ServerProvider }
import play.server.api.SSLEngineProvider
trait ServerEndpoints {
/**
* Contains information about the port and protocol used to connect to the server.
* This class is used to abstract out the details of connecting to different backends
* and protocols. Most tests will operate the same no matter which endpoint they
* are connected to.
*/
sealed trait ServerEndpoint {
def description: String
def scheme: String
def port: Int
def expectedHttpVersions: Set[String]
def expectedServerAttr: Option[String]
final override def toString = description
}
/** Represents an HTTP connection to a server. */
trait HttpEndpoint extends ServerEndpoint {
override final val scheme: String = "http"
}
/** Represents an HTTPS connection to a server. */
trait HttpsEndpoint extends ServerEndpoint {
override final val scheme: String = "https"
/** Information about the server's SSL setup. */
def serverSsl: ServerSSL
}
/** Contains information how SSL is configured for an [[HttpsEndpoint]]. */
case class ServerSSL(sslContext: SSLContext, trustManager: X509TrustManager)
/**
* A recipe for making a [[ServerEndpoint]]. Recipes are often used
* when describing which tests to run. The recipe can be used to start
* servers with the correct [[ServerEndpoint]]s.
*
* @see [[ServerEndpoints.withEndpoint()]]
*/
trait ServerEndpointRecipe {
type EndpointType <: ServerEndpoint
/** A human-readable description of this endpoint. */
val description: String
/** The HTTP port to use when configuring the server. */
val configuredHttpPort: Option[Int]
/** The HTTPS port to use when configuring the server. */
val configuredHttpsPort: Option[Int]
/**
* Any extra configuration to use when configuring the server. This
* configuration will be applied last so it will override any existing
* configuration.
*/
def serverConfiguration: Configuration
/** The provider used to create the server instance. */
def serverProvider: ServerProvider
/**
* Once a server has been started using this recipe, the running instance
* can be queried to create an endpoint. Usually this just involves asking
* the server what port it is using.
*/
def createEndpointFromServer(runningTestServer: play.api.test.TestServer): EndpointType
}
/** Provides a recipe for making an [[HttpEndpoint]]. */
protected class HttpServerEndpointRecipe(
override val description: String,
override val serverProvider: ServerProvider,
extraServerConfiguration: Configuration = Configuration.empty,
expectedHttpVersions: Set[String],
expectedServerAttr: Option[String]
) extends ServerEndpointRecipe {
recipe =>
override type EndpointType = HttpEndpoint
override val configuredHttpPort: Option[Int] = Some(0)
override val configuredHttpsPort: Option[Int] = None
override val serverConfiguration: Configuration = extraServerConfiguration
override def createEndpointFromServer(runningServer: play.api.test.TestServer): HttpEndpoint = {
new HttpEndpoint {
override def description: String = recipe.description
override def port: Int = runningServer.runningHttpPort.get
override def expectedHttpVersions: Set[String] = recipe.expectedHttpVersions
override def expectedServerAttr: Option[String] = recipe.expectedServerAttr
}
}
override def toString: String = s"HttpServerEndpointRecipe($description)"
}
/** Provides a recipe for making an [[HttpsEndpoint]]. */
protected class HttpsServerEndpointRecipe(
override val description: String,
override val serverProvider: ServerProvider,
extraServerConfiguration: Configuration = Configuration.empty,
expectedHttpVersions: Set[String],
expectedServerAttr: Option[String]
) extends ServerEndpointRecipe {
recipe =>
override type EndpointType = HttpsEndpoint
override val configuredHttpPort: Option[Int] = None
override val configuredHttpsPort: Option[Int] = Some(0)
override def serverConfiguration: Configuration = Configuration(
"play.server.https.engineProvider" -> classOf[ServerEndpoints.SelfSignedSSLEngineProvider].getName
) ++ extraServerConfiguration
override def createEndpointFromServer(runningServer: play.api.test.TestServer): HttpsEndpoint = {
new HttpsEndpoint {
override def description: String = recipe.description
override def port: Int = runningServer.runningHttpsPort.get
override def expectedHttpVersions: Set[String] = recipe.expectedHttpVersions
override def expectedServerAttr: Option[String] = recipe.expectedServerAttr
override val serverSsl: ServerSSL = ServerSSL(
ServerEndpoints.SelfSigned.sslContext,
ServerEndpoints.SelfSigned.trustManager
)
}
}
override def toString: String = s"HttpsServerEndpointRecipe($description)"
}
/**
* Starts a server by following a [[ServerEndpointRecipe]] and using the
* application provided by an [[ApplicationFactory]]. The server's endpoint
* is passed to the given `block` of code.
*/
def withEndpoint[A](endpointRecipe: ServerEndpointRecipe, appFactory: ApplicationFactory)(block: ServerEndpoint => A): A = {
val application: Application = appFactory.create()
// Create a ServerConfig with dynamic ports and using a self-signed certificate
val serverConfig = {
val sc: ServerConfig = ServerConfig(
port = endpointRecipe.configuredHttpPort,
sslPort = endpointRecipe.configuredHttpsPort,
mode = Mode.Test,
rootDir = application.path
)
val patch = endpointRecipe.serverConfiguration
sc.copy(configuration = sc.configuration ++ patch)
}
// Initialize and start the TestServer
val testServer: play.api.test.TestServer = new play.api.test.TestServer(
serverConfig, application, Some(endpointRecipe.serverProvider)
)
val runners = new PlayRunners {} // We can't mix in PlayRunners because it pollutes the namespace
runners.running(testServer) {
val endpoint: ServerEndpoint = endpointRecipe.createEndpointFromServer(testServer)
block(endpoint)
}
}
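  // A usage sketch (the recipe and factory names are illustrative): run a block of
  // assertions against whatever port the started server actually bound.
  //
  //   withEndpoint(someHttpRecipe, someAppFactory) { endpoint =>
  //     val url = s"${endpoint.scheme}://localhost:${endpoint.port}/"
  //     // issue requests against `url` and assert on the responses
  //   }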
}
object ServerEndpoints {
/**
* An SSLEngineProvider which simply references the values in the
* SelfSigned object.
*/
private[test] class SelfSignedSSLEngineProvider(serverConfig: ServerConfig, appProvider: ApplicationProvider) extends SSLEngineProvider {
override lazy val createSSLEngine: SSLEngine = SelfSigned.sslContext.createSSLEngine()
}
/**
* Contains a statically initialized self-signed certificate.
*/
private[test] object SelfSigned {
/**
* The SSLContext and TrustManager associated with the self-signed certificate.
*/
lazy val (sslContext, trustManager): (SSLContext, X509TrustManager) = {
val keyStore: KeyStore = FakeKeyStore.generateKeyStore
val kmf: KeyManagerFactory = KeyManagerFactory.getInstance("SunX509")
kmf.init(keyStore, "".toCharArray)
val kms: Array[KeyManager] = kmf.getKeyManagers
val tmf: TrustManagerFactory = TrustManagerFactory
.getInstance(TrustManagerFactory.getDefaultAlgorithm())
tmf.init(keyStore)
val tms: Array[TrustManager] = tmf.getTrustManagers
val x509TrustManager: X509TrustManager = tms(0).asInstanceOf[X509TrustManager]
val sslContext: SSLContext = SSLContext.getInstance("TLS")
sslContext.init(kms, tms, null)
(sslContext, x509TrustManager)
}
}
}
|
Shruti9520/playframework
|
framework/src/play-integration-test/src/test/scala/play/it/test/ServerEndpoints.scala
|
Scala
|
apache-2.0
| 8,001 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2003-2010, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala.testing
import compat.Platform
/** <p>
* <code>Benchmark</code> can be used to quickly turn an existing
* class into a benchmark. Here is a short example:
* </p><pre>
* <b>object</b> sort1 <b>extends</b> Sorter <b>with</b> Benchmark {
* <b>def</b> run = sort(List.range(1, 1000))
* }
* </pre>
* <p>
* The <code>run</code> method has to be defined by the user, who
* will perform the timed operation there.
* Run the benchmark as follows:
* </p>
* <pre>
* > scala sort1 5 times.log
* </pre>
* <p>
* This will run the benchmark 5 times and log the execution times in
* a file called <code>times.log</code>
* </p>
*
* @author Iulian Dragos, Burak Emir
*/
trait Benchmark {
/** this method should be implemented by the concrete benchmark */
def run()
var multiplier = 1
  /** Run the benchmark the specified number of times
   * and return a list with the execution times in milliseconds,
   * in the order in which the runs were executed.
   *
   * @param noTimes the number of times the benchmark is run
   * @return        the list of execution times in milliseconds
   */
def runBenchmark(noTimes: Int): List[Long] =
for (i <- List.range(1, noTimes + 1)) yield {
val startTime = Platform.currentTime
var i = 0; while (i < multiplier) {
run()
i += 1
}
val stopTime = Platform.currentTime
Platform.collectGarbage
stopTime - startTime
}
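  // For example, runBenchmark(3) with multiplier = 10 executes run() thirty times
  // in total and returns three timings, one per timed run.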
/** a string that is written at the beginning of the output line
* that contains the timings. By default, this is the class name.
*/
def prefix: String = getClass().getName()
/**
   * The entry point. It takes one required argument (n)
   * and an optional argument, the multiplier (mult).
   * (n) is the number of consecutive runs;
   * if (mult) is present, the n runs are repeated (mult)
   * times.
*/
def main(args: Array[String]) {
if (args.length > 0) {
val logFile = new java.io.OutputStreamWriter(System.out)
if (args.length > 1) multiplier = args(1).toInt
logFile.write(prefix)
for (t <- runBenchmark(args(0).toInt))
logFile.write("\\t\\t" + t)
logFile.write(Platform.EOL)
logFile.flush()
} else {
println("Usage: scala benchmarks.program <runs> ")
println(" or: scala benchmarks.program <runs> <multiplier>")
println("""
The benchmark is run <runs> times, forcing a garbage collection between runs. The optional
<multiplier> causes the benchmark to be repeated <multiplier> times, each time for <runs>
executions.
""")
}
}
}
|
cran/rkafkajars
|
java/scala/testing/Benchmark.scala
|
Scala
|
apache-2.0
| 3,072 |
package com.twitter.finagle.buoyant
import com.twitter.finagle._
import com.twitter.finagle.Stack.Params
import com.twitter.finagle.client.{StackClient, StdStackClient, Transporter}
import com.twitter.finagle.dispatch.{SerialClientDispatcher, SerialServerDispatcher}
import com.twitter.finagle.netty3.{Netty3Listener, Netty3Transporter}
import com.twitter.finagle.server.{StackServer, StdStackServer}
import com.twitter.finagle.transport.Transport
import com.twitter.io.Charsets
import com.twitter.util.Future
import java.net.SocketAddress
import java.nio.charset.StandardCharsets.UTF_8
import org.jboss.netty.channel._
import org.jboss.netty.handler.codec.frame.{DelimiterBasedFrameDecoder, Delimiters}
import org.jboss.netty.handler.codec.string.{StringDecoder, StringEncoder}
/**
* Lovingly stolen from finagle-core's tests
*
* Copyright 2015 Twitter Inc and all that jazz.
*/
object Echo extends Client[String, String] with Server[String, String] {
def newClient(dest: Name, label: String) =
client.newClient(dest, label)
def newService(dest: Name, label: String) =
client.newService(dest, label)
def serve(addr: SocketAddress, service: ServiceFactory[String, String]) =
server.serve(addr, service)
/*
* Finagle Client
*/
private class DelimEncoder(delim: Char) extends SimpleChannelHandler {
override def writeRequested(ctx: ChannelHandlerContext, evt: MessageEvent) = {
val newMessage = evt.getMessage match {
case m: String => m + delim
case m => m
}
Channels.write(ctx, evt.getFuture, newMessage, evt.getRemoteAddress)
}
}
private object StringClientPipeline extends ChannelPipelineFactory {
def getPipeline = {
val pipeline = Channels.pipeline()
pipeline.addLast("stringEncode", new StringEncoder(UTF_8))
pipeline.addLast("stringDecode", new StringDecoder(UTF_8))
pipeline.addLast("line", new DelimEncoder('\\n'))
pipeline
}
}
case class RichClient(underlying: Service[String, String]) {
def ping(): Future[String] = underlying("ping")
}
trait StringRichClient { self: com.twitter.finagle.Client[String, String] =>
def newRichClient(dest: Name, label: String): RichClient =
RichClient(newService(dest, label))
}
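  // A usage sketch: given some `dest: Name`,
  //   Echo.client.newRichClient(dest, "echo").ping()
  // writes "ping" plus the '\n' delimiter to the server and resolves with the
  // first echoed line.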
case class Client(
stack: Stack[ServiceFactory[String, String]] = StackClient.newStack,
params: Stack.Params = Stack.Params.empty
)
extends StdStackClient[String, String, Client]
with StringRichClient {
protected def copy1(
stack: Stack[ServiceFactory[String, String]] = this.stack,
params: Stack.Params = this.params
): Client = copy(stack, params)
protected type In = String
protected type Out = String
protected def newTransporter(addr: SocketAddress): Transporter[String, String] =
Netty3Transporter(StringClientPipeline, addr, params)
protected def newDispatcher(transport: Transport[In, Out]) =
new SerialClientDispatcher(transport)
}
val client = Client()
/*
* Finagle Server
*/
object StringServerPipeline extends ChannelPipelineFactory {
def getPipeline = {
val pipeline = Channels.pipeline()
pipeline.addLast("line", new DelimiterBasedFrameDecoder(100, Delimiters.lineDelimiter: _*))
pipeline.addLast("stringDecoder", new StringDecoder(UTF_8))
pipeline.addLast("stringEncoder", new StringEncoder(UTF_8))
pipeline
}
}
case class Server(
stack: Stack[ServiceFactory[String, String]] = StackServer.newStack,
params: Stack.Params = StackServer.defaultParams
) extends StdStackServer[String, String, Server] {
protected def copy1(
stack: Stack[ServiceFactory[String, String]] = this.stack,
params: Stack.Params = this.params
) = copy(stack, params)
protected type In = String
protected type Out = String
protected def newListener() = Netty3Listener(StringServerPipeline, params)
protected def newDispatcher(transport: Transport[In, Out], service: Service[String, String]) =
new SerialServerDispatcher(transport, service)
}
val server = Server()
}
|
denverwilliams/linkerd
|
router/core/src/e2e/scala/com/twitter/finagle/buoyant/Echo.scala
|
Scala
|
apache-2.0
| 4,115 |
/*
* Copyright (C) 2014 - 2017 Contributors as noted in the AUTHORS.md file
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.wegtam.tensei.agent
import akka.actor.{ActorRef, ActorSystem}
import org.openjdk.jmh.annotations._
import scala.concurrent.Await
import scala.concurrent.duration._
@State(Scope.Benchmark)
@Fork(3)
@Warmup(iterations = 4)
@Measurement(iterations = 10)
@BenchmarkMode(Array(Mode.SampleTime))
class DataTreeNodeCreationBenchmark {
implicit val system: ActorSystem = ActorSystem("DataTreeNodeCreationBenchmark")
@TearDown(Level.Trial)
def shutdown(): Unit = {
val _ = Await.ready(system.terminate(), 15.seconds)
}
@Benchmark
def testCreateDataTreeNode: ActorRef = {
system.actorOf(DataTreeNode.props(Option("An agent run identifier is somewhat longer...")))
}
}
|
Tensei-Data/tensei-agent
|
benchmarks/src/jmh/scala/com/wegtam/tensei/agent/DataTreeNodeCreationBenchmark.scala
|
Scala
|
agpl-3.0
| 1,451 |
package com.twitter.finagle.http
/**
* This puts it all together: The HTTP codec itself.
*/
import com.twitter.conversions.storage._
import com.twitter.finagle._
import com.twitter.finagle.transport.Transport
import com.twitter.finagle.http.codec._
import com.twitter.finagle.http.filter.{HttpNackFilter, DtabFilter}
import com.twitter.finagle.stats.{StatsReceiver, NullStatsReceiver}
import com.twitter.finagle.tracing._
import com.twitter.util.{Try, StorageUnit, Future, Closable}
import java.net.InetSocketAddress
import org.jboss.netty.channel.{
ChannelPipelineFactory, UpstreamMessageEvent, Channel, Channels,
ChannelEvent, ChannelHandlerContext, SimpleChannelDownstreamHandler, MessageEvent}
import org.jboss.netty.handler.codec.http._
case class BadHttpRequest(httpVersion: HttpVersion, method: HttpMethod, uri: String, exception: Exception)
extends DefaultHttpRequest(httpVersion, method, uri)
object BadHttpRequest {
def apply(exception: Exception) =
new BadHttpRequest(HttpVersion.HTTP_1_0, HttpMethod.GET, "/bad-http-request", exception)
}
/** Convert exceptions to BadHttpRequests */
class SafeHttpServerCodec(
maxInitialLineLength: Int,
maxHeaderSize: Int,
maxChunkSize: Int)
extends HttpServerCodec(maxInitialLineLength, maxHeaderSize, maxChunkSize)
{
override def handleUpstream(ctx: ChannelHandlerContext, e: ChannelEvent) {
// this only catches Codec exceptions -- when a handler calls sendUpStream(), it
// rescues exceptions from the upstream handlers and calls notifyHandlerException(),
// which doesn't throw exceptions.
try {
super.handleUpstream(ctx, e)
} catch {
case ex: Exception =>
val channel = ctx.getChannel()
ctx.sendUpstream(new UpstreamMessageEvent(
channel, BadHttpRequest(ex), channel.getRemoteAddress()))
}
}
}
/**
* @param _compressionLevel The compression level to use. If passed the default value (-1) then use
* [[com.twitter.finagle.http.codec.TextualContentCompressor TextualContentCompressor]] which will
* compress text-like content-types with the default compression level (6). Otherwise, use
* [[org.jboss.netty.handler.codec.http.HttpContentCompressor HttpContentCompressor]] for all
* content-types with specified compression level.
*/
case class Http(
_compressionLevel: Int = -1,
_maxRequestSize: StorageUnit = 5.megabytes,
_maxResponseSize: StorageUnit = 5.megabytes,
_decompressionEnabled: Boolean = true,
_channelBufferUsageTracker: Option[ChannelBufferUsageTracker] = None,
_annotateCipherHeader: Option[String] = None,
_enableTracing: Boolean = false,
_maxInitialLineLength: StorageUnit = 4096.bytes,
_maxHeaderSize: StorageUnit = 8192.bytes)
extends CodecFactory[HttpRequest, HttpResponse]
{
def compressionLevel(level: Int) = copy(_compressionLevel = level)
def maxRequestSize(bufferSize: StorageUnit) = copy(_maxRequestSize = bufferSize)
def maxResponseSize(bufferSize: StorageUnit) = copy(_maxResponseSize = bufferSize)
def decompressionEnabled(yesno: Boolean) = copy(_decompressionEnabled = yesno)
def channelBufferUsageTracker(usageTracker: ChannelBufferUsageTracker) =
copy(_channelBufferUsageTracker = Some(usageTracker))
def annotateCipherHeader(headerName: String) = copy(_annotateCipherHeader = Option(headerName))
def enableTracing(enable: Boolean) = copy(_enableTracing = enable)
def maxInitialLineLength(length: StorageUnit) = copy(_maxInitialLineLength = length)
def maxHeaderSize(size: StorageUnit) = copy(_maxHeaderSize = size)
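  // Configuration sketch: the copy-based setters chain, e.g.
  //   Http().maxRequestSize(10.megabytes).compressionLevel(6).enableTracing(true)
  // (the particular values here are illustrative only).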
def client = { config =>
new Codec[HttpRequest, HttpResponse] {
def pipelineFactory = new ChannelPipelineFactory {
def getPipeline() = {
val pipeline = Channels.pipeline()
val maxInitialLineLengthInBytes = _maxInitialLineLength.inBytes.toInt
val maxHeaderSizeInBytes = _maxHeaderSize.inBytes.toInt
val maxChunkSize = 8192
pipeline.addLast(
"httpCodec", new HttpClientCodec(
maxInitialLineLengthInBytes, maxHeaderSizeInBytes, maxChunkSize))
pipeline.addLast(
"httpDechunker",
new HttpChunkAggregator(_maxResponseSize.inBytes.toInt))
if (_decompressionEnabled)
pipeline.addLast("httpDecompressor", new HttpContentDecompressor)
pipeline
}
}
override def newClientTransport(ch: Channel, statsReceiver: StatsReceiver): Transport[Any,Any] =
new HttpTransport(super.newClientTransport(ch, statsReceiver))
override def newClientDispatcher(transport: Transport[Any, Any]) =
new DtabHttpDispatcher(transport)
override def newTraceInitializer =
if (_enableTracing) new HttpClientTraceInitializer[HttpRequest, HttpResponse]
else TraceInitializerFilter.empty[HttpRequest, HttpResponse]
}
}
def server = { config =>
new Codec[HttpRequest, HttpResponse] {
def pipelineFactory = new ChannelPipelineFactory {
def getPipeline() = {
val pipeline = Channels.pipeline()
if (_channelBufferUsageTracker.isDefined) {
pipeline.addLast(
"channelBufferManager", new ChannelBufferManager(_channelBufferUsageTracker.get))
}
val maxRequestSizeInBytes = _maxRequestSize.inBytes.toInt
val maxInitialLineLengthInBytes = _maxInitialLineLength.inBytes.toInt
val maxHeaderSizeInBytes = _maxHeaderSize.inBytes.toInt
pipeline.addLast("httpCodec", new SafeHttpServerCodec(maxInitialLineLengthInBytes, maxHeaderSizeInBytes, maxRequestSizeInBytes))
if (_compressionLevel > 0) {
pipeline.addLast("httpCompressor", new HttpContentCompressor(_compressionLevel))
} else if (_compressionLevel == -1) {
pipeline.addLast("httpCompressor", new TextualContentCompressor)
}
// Response to ``Expect: Continue'' requests.
pipeline.addLast("respondToExpectContinue", new RespondToExpectContinue)
pipeline.addLast(
"httpDechunker",
new HttpChunkAggregator(maxRequestSizeInBytes))
_annotateCipherHeader foreach { headerName: String =>
pipeline.addLast("annotateCipher", new AnnotateCipher(headerName))
}
pipeline
}
}
override def newServerDispatcher(
transport: Transport[Any, Any],
service: Service[HttpRequest, HttpResponse]
): Closable =
new HttpServerDispatcher(new HttpTransport(transport), service)
override def prepareConnFactory(
underlying: ServiceFactory[HttpRequest, HttpResponse]
): ServiceFactory[HttpRequest, HttpResponse] =
(new HttpNackFilter).andThen(DtabFilter.Netty).andThen(underlying)
override def newTraceInitializer =
if (_enableTracing) new HttpServerTraceInitializer[HttpRequest, HttpResponse]
else TraceInitializerFilter.empty[HttpRequest, HttpResponse]
}
}
override val protocolLibraryName: String = "http"
}
object Http {
def get() = new Http()
}
object HttpTracing {
object Header {
val TraceId = "X-B3-TraceId"
val SpanId = "X-B3-SpanId"
val ParentSpanId = "X-B3-ParentSpanId"
val Sampled = "X-B3-Sampled"
val Flags = "X-B3-Flags"
val All = Seq(TraceId, SpanId, ParentSpanId, Sampled, Flags)
val Required = Seq(TraceId, SpanId)
}
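  // On the wire these appear as ordinary request headers, e.g.
  //   X-B3-TraceId: 463ac35c9f6413ad
  //   X-B3-SpanId: 72485a3953bb6124
  //   X-B3-Sampled: true
  // (the identifier values above are illustrative).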
/**
* Remove any parameters from url.
*/
private[http] def stripParameters(uri: String): String = {
uri.indexOf('?') match {
case -1 => uri
case n => uri.substring(0, n)
}
}
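  // e.g. stripParameters("/users/show?id=42") == "/users/show"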
}
private object TraceInfo {
import HttpTracing._
def letTraceIdFromRequestHeaders[R](request: HttpRequest)(f: => R): R = {
val id = if (Header.Required.forall { request.headers.contains(_) }) {
val spanId = SpanId.fromString(request.headers.get(Header.SpanId))
spanId map { sid =>
val traceId = SpanId.fromString(request.headers.get(Header.TraceId))
val parentSpanId = SpanId.fromString(request.headers.get(Header.ParentSpanId))
val sampled = Option(request.headers.get(Header.Sampled)) flatMap { sampled =>
Try(sampled.toBoolean).toOption
}
val flags = getFlags(request)
TraceId(traceId, parentSpanId, sid, sampled, flags)
}
} else if (request.headers.contains(Header.Flags)) {
// even if there are no id headers we want to get the debug flag
// this is to allow developers to just set the debug flag to ensure their
// trace is collected
Some(Trace.nextId.copy(flags = getFlags(request)))
} else {
Some(Trace.nextId)
}
// remove so the header is not visible to users
Header.All foreach { request.headers.remove(_) }
id match {
case Some(id) =>
Trace.letId(id) {
traceRpc(request)
f
}
case None =>
traceRpc(request)
f
}
}
def setClientRequestHeaders(request: HttpRequest): Unit = {
Header.All.foreach { request.headers.remove(_) }
val traceId = Trace.id
request.headers.add(Header.TraceId, traceId.traceId.toString)
request.headers.add(Header.SpanId, traceId.spanId.toString)
// no parent id set means this is the root span
traceId._parentId.foreach { id =>
request.headers.add(Header.ParentSpanId, id.toString)
}
// three states of sampled, yes, no or none (let the server decide)
traceId.sampled.foreach { sampled =>
request.headers.add(Header.Sampled, sampled.toString)
}
request.headers.add(Header.Flags, traceId.flags.toLong)
traceRpc(request)
}
def traceRpc(request: HttpRequest): Unit = {
if (Trace.isActivelyTracing) {
Trace.recordRpc(request.getMethod.getName)
Trace.recordBinary("http.uri", stripParameters(request.getUri))
}
}
/**
* Safely extract the flags from the header, if they exist. Otherwise return empty flag.
*/
def getFlags(request: HttpRequest): Flags = {
try {
Flags(Option(request.headers.get(Header.Flags)).map(_.toLong).getOrElse(0L))
} catch {
case _: Throwable => Flags()
}
}
}
private[finagle] class HttpServerTraceInitializer[Req <: HttpRequest, Rep]
extends Stack.Module1[param.Tracer, ServiceFactory[Req, Rep]] {
val role = TraceInitializerFilter.role
val description = "Initialize the tracing system with trace info from the incoming request"
def make(_tracer: param.Tracer, next: ServiceFactory[Req, Rep]) = {
val param.Tracer(tracer) = _tracer
val traceInitializer = Filter.mk[Req, Rep, Req, Rep] { (req, svc) =>
Trace.letTracer(tracer) {
TraceInfo.letTraceIdFromRequestHeaders(req) { svc(req) }
}
}
traceInitializer andThen next
}
}
private[finagle] class HttpClientTraceInitializer[Req <: HttpRequest, Rep]
extends Stack.Module1[param.Tracer, ServiceFactory[Req, Rep]] {
val role = TraceInitializerFilter.role
val description = "Sets the next TraceId and attaches trace information to the outgoing request"
def make(_tracer: param.Tracer, next: ServiceFactory[Req, Rep]) = {
val param.Tracer(tracer) = _tracer
val traceInitializer = Filter.mk[Req, Rep, Req, Rep] { (req, svc) =>
Trace.letTracerAndNextId(tracer) {
TraceInfo.setClientRequestHeaders(req)
svc(req)
}
}
traceInitializer andThen next
}
}
/**
* Pass along headers with the required tracing information.
*/
private[finagle] class HttpClientTracingFilter[Req <: HttpRequest, Res](serviceName: String)
extends SimpleFilter[Req, Res]
{
import HttpTracing._
def apply(request: Req, service: Service[Req, Res]) = {
TraceInfo.setClientRequestHeaders(request)
service(request)
}
}
/**
* Adds tracing annotations for each http request we receive.
* Including uri, when request was sent and when it was received.
*/
private[finagle] class HttpServerTracingFilter[Req <: HttpRequest, Res](serviceName: String)
extends SimpleFilter[Req, Res]
{
def apply(request: Req, service: Service[Req, Res]) =
TraceInfo.letTraceIdFromRequestHeaders(request) {
service(request)
}
}
/**
* Ed. note: I'm not sure how parameterizing on REQUEST <: Request
* works safely, ever. This is a big typesafety bug: the codec is
* blindly casting Requests to REQUEST.
*
* Setting aggregateChunks to false disables the aggregator, and consequently
* lifts the restriction on request size.
*
* @param httpFactory the underlying HTTP CodecFactory
* @param aggregateChunks if true, the client pipeline collects HttpChunks into the body of each HttpResponse
*/
case class RichHttp[REQUEST <: Request](
httpFactory: Http,
aggregateChunks: Boolean = true
) extends CodecFactory[REQUEST, Response] {
def client = { config =>
new Codec[REQUEST, Response] {
def pipelineFactory = new ChannelPipelineFactory {
def getPipeline() = {
val pipeline = httpFactory.client(config).pipelineFactory.getPipeline
if (!aggregateChunks) pipeline.remove("httpDechunker")
pipeline
}
}
override def prepareServiceFactory(
underlying: ServiceFactory[REQUEST, Response]
): ServiceFactory[REQUEST, Response] =
underlying map(new DelayedReleaseService(_))
override def prepareConnFactory(
underlying: ServiceFactory[REQUEST, Response]
): ServiceFactory[REQUEST, Response] = {
// Note: This is a horrible hack to ensure that close() calls from
// ExpiringService do not propagate until all chunks have been read
// Waiting on CSL-915 for a proper fix.
underlying map(new DelayedReleaseService(_))
}
override def newClientTransport(ch: Channel, statsReceiver: StatsReceiver): Transport[Any,Any] =
new HttpTransport(super.newClientTransport(ch, statsReceiver))
override def newClientDispatcher(transport: Transport[Any, Any]) =
new HttpClientDispatcher(transport)
override def newTraceInitializer =
if (httpFactory._enableTracing) new HttpClientTraceInitializer[REQUEST, Response]
else TraceInitializerFilter.empty[REQUEST, Response]
}
}
def server = { config =>
new Codec[REQUEST, Response] {
def pipelineFactory = new ChannelPipelineFactory {
def getPipeline() = {
val pipeline = httpFactory.server(config).pipelineFactory.getPipeline
if (!aggregateChunks) pipeline.remove("httpDechunker")
pipeline
}
}
override def newServerDispatcher(
transport: Transport[Any, Any],
service: Service[REQUEST, Response]): Closable =
new HttpServerDispatcher(new HttpTransport(transport), service)
override def prepareConnFactory(
underlying: ServiceFactory[REQUEST, Response]
): ServiceFactory[REQUEST, Response] =
new DtabFilter.Finagle[REQUEST] andThen underlying
override def newTraceInitializer =
if (httpFactory._enableTracing) new HttpServerTraceInitializer[REQUEST, Response]
else TraceInitializerFilter.empty[REQUEST, Response]
}
}
override val protocolLibraryName: String = "http"
}
|
suls/finagle
|
finagle-http/src/main/scala/com/twitter/finagle/http/Codec.scala
|
Scala
|
apache-2.0
| 15,211 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.codec
import java.time.Instant
import java.util.Base64
import wvlet.airframe.json.JSON.JSONValue
import wvlet.airframe.json.Json
import wvlet.airframe.msgpack.spi.Value.ExtensionValue
import wvlet.airframe.msgpack.spi._
import wvlet.airframe.surface.{Primitive, Surface}
import wvlet.airframe.ulid.ULID
import scala.util.Try
/**
*/
object PrimitiveCodec {
val primitiveCodec: Map[Surface, MessageCodec[_]] = Map(
Primitive.Int -> IntCodec,
Primitive.Long -> LongCodec,
Primitive.Float -> FloatCodec,
Primitive.Double -> DoubleCodec,
Primitive.Boolean -> BooleanCodec,
Primitive.String -> StringCodec,
Primitive.Byte -> ByteCodec,
Primitive.Short -> ShortCodec,
Primitive.Char -> CharCodec,
Primitive.Unit -> UnitCodec,
Primitive.BigInt -> BigIntCodec,
Primitive.BigInteger -> BigIntegerCodec,
// MessagePack types
Surface.of[Value] -> ValueCodec,
Surface.of[MsgPack] -> RawMsgPackCodec,
// JSON types
Surface.of[JSONValue] -> JSONValueCodec,
Surface.of[Json] -> RawJsonCodec,
Surface.of[Any] -> AnyCodec
)
val primitiveArrayCodec = Map(
Surface.of[Array[Int]] -> IntArrayCodec,
Surface.of[Array[Long]] -> LongArrayCodec,
Surface.of[Array[Float]] -> FloatArrayCodec,
Surface.of[Array[Double]] -> DoubleArrayCodec,
Surface.of[Array[Boolean]] -> BooleanArrayCodec,
Surface.of[Array[String]] -> StringArrayCodec,
Surface.of[Array[Byte]] -> ByteArrayCodec,
Surface.of[Array[Short]] -> ShortArrayCodec,
Surface.of[Array[Char]] -> CharArrayCodec,
Surface.of[Array[Any]] -> AnyArrayCodec
)
private implicit class RichBoolean(b: Boolean) {
def toInt: Int = if (b) 1 else 0
def toChar: Char = if (b) 1 else 0
def toByte: Byte = if (b) 1 else 0
def toShort: Short = if (b) 1 else 0
}
trait PrimitiveCodec[A] extends MessageCodec[A] {
def surface: Surface
}
object UnitCodec extends PrimitiveCodec[Unit] {
override def surface: Surface = Primitive.Unit
override def pack(p: Packer, v: Unit): Unit = {
// do not pack anything
}
override def unpack(
u: Unpacker,
v: MessageContext
): Unit = {
// Do not read anything
v.setNull
}
}
object ByteCodec extends PrimitiveCodec[Byte] {
def surface = Primitive.Byte
override def pack(p: Packer, v: Byte): Unit = {
p.packByte(v)
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
def read(body: => Byte): Unit = {
try {
v.setByte(body)
} catch {
case e: IntegerOverflowException =>
v.setIncompatibleFormatException(this, s"${e.getBigInteger} is too large for a Byte value")
case e: NumberFormatException =>
v.setIncompatibleFormatException(this, e.getMessage)
}
}
val f = u.getNextFormat
val vt = f.getValueType
vt match {
case ValueType.NIL =>
u.unpackNil
v.setNull
case ValueType.INTEGER =>
read(u.unpackByte)
case ValueType.FLOAT =>
read(u.unpackDouble.toByte)
case ValueType.STRING =>
read {
val s = u.unpackString
Try(s.toByte).getOrElse(s.toDouble.toByte)
}
case ValueType.BOOLEAN =>
read(u.unpackBoolean.toByte)
case _ =>
u.skipValue
v.setNull
}
}
}
object CharCodec extends PrimitiveCodec[Char] {
def surface = Primitive.Char
override def pack(p: Packer, v: Char): Unit = {
p.packInt(v)
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
def read(body: => Char): Unit = {
try {
v.setChar(body)
} catch {
case e: IntegerOverflowException =>
v.setIncompatibleFormatException(this, s"${e.getBigInteger} is too large for a Char value")
case e: NumberFormatException =>
v.setIncompatibleFormatException(this, e.getMessage)
}
}
val f = u.getNextFormat
val vt = f.getValueType
vt match {
case ValueType.NIL =>
u.unpackNil
v.setNull
case ValueType.INTEGER =>
read(u.unpackInt.toChar)
case ValueType.STRING =>
read {
val s = u.unpackString
if (s.length == 1) {
s.charAt(0)
} else {
s.toDouble.toChar
}
}
case ValueType.BOOLEAN =>
read(u.unpackBoolean.toChar)
case ValueType.FLOAT =>
read(u.unpackDouble.toChar)
case _ =>
u.skipValue
v.setNull
}
}
}
object ShortCodec extends PrimitiveCodec[Short] {
def surface = Primitive.Short
override def pack(p: Packer, v: Short): Unit = {
p.packShort(v)
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
def read(body: => Short): Unit = {
try {
v.setShort(body)
} catch {
case e: IntegerOverflowException =>
v.setIncompatibleFormatException(this, s"${e.getBigInteger} is too large for a Short value")
case e: NumberFormatException =>
v.setIncompatibleFormatException(this, e.getMessage)
}
}
val f = u.getNextFormat
val vt = f.getValueType
vt match {
case ValueType.NIL =>
u.unpackNil
v.setNull
case ValueType.INTEGER =>
read(u.unpackShort)
case ValueType.STRING =>
read {
val s = u.unpackString
Try(s.toShort).getOrElse(s.toDouble.toShort)
}
case ValueType.BOOLEAN =>
read(u.unpackBoolean.toShort)
case ValueType.FLOAT =>
read(u.unpackDouble.toShort)
case _ =>
u.skipValue
v.setNull
}
}
}
object IntCodec extends PrimitiveCodec[Int] {
def surface = Primitive.Int
override def pack(p: Packer, v: Int): Unit = {
p.packInt(v)
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
def read(body: => Int): Unit = {
try {
v.setInt(body)
} catch {
case e: IntegerOverflowException =>
v.setIncompatibleFormatException(this, s"${e.getBigInteger} is too large for an Int value")
case e: NumberFormatException =>
v.setIncompatibleFormatException(this, e.getMessage)
}
}
val f = u.getNextFormat
val vt = f.getValueType
vt match {
case ValueType.NIL =>
u.unpackNil
v.setNull
case ValueType.INTEGER =>
read(u.unpackInt)
case ValueType.STRING =>
read {
val s = u.unpackString
Try(s.toInt).getOrElse(s.toDouble.toInt)
}
case ValueType.BOOLEAN =>
read(u.unpackBoolean.toInt)
case ValueType.FLOAT =>
read(u.unpackDouble.toInt)
case _ =>
u.skipValue
v.setNull
}
}
}
object LongCodec extends PrimitiveCodec[Long] {
def surface = Primitive.Long
override def pack(p: Packer, v: Long): Unit = {
p.packLong(v)
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
def read(body: => Long): Unit = {
try {
v.setLong(body)
} catch {
case e: IntegerOverflowException =>
v.setIncompatibleFormatException(this, s"${e.getBigInteger} is too large for a Long value")
case e: NumberFormatException =>
v.setIncompatibleFormatException(this, e.getMessage)
}
}
val f = u.getNextFormat
val vt = f.getValueType
vt match {
case ValueType.NIL =>
u.unpackNil
v.setNull
case ValueType.INTEGER =>
read(u.unpackLong)
case ValueType.STRING =>
read {
val s = u.unpackString
Try(s.toLong).getOrElse(s.toDouble.toLong)
}
case ValueType.BOOLEAN =>
read(u.unpackBoolean.toInt)
case ValueType.FLOAT =>
read(u.unpackDouble.toLong)
case _ =>
u.skipValue
v.setNull
}
}
}
object BigIntCodec extends PrimitiveCodec[BigInt] {
def surface = Primitive.BigInt
override def pack(p: Packer, v: BigInt): Unit = {
if (v.compareTo(BigInt(Long.MaxValue)) <= 0) {
p.packLong(v.longValue)
} else {
p.packString(v.toString(10))
}
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
def read(body: => BigInt): Unit = {
try {
v.setObject(body)
} catch {
case e: IntegerOverflowException =>
v.setIncompatibleFormatException(this, s"${e.getBigInteger} is too large for a Long value")
case e: NumberFormatException =>
v.setIncompatibleFormatException(this, e.getMessage)
}
}
val f = u.getNextFormat
val vt = f.getValueType
vt match {
case ValueType.NIL =>
u.unpackNil
v.setNull
case ValueType.INTEGER =>
read(BigInt(u.unpackLong))
case ValueType.STRING =>
read {
val s = u.unpackString
Try(BigInt(s, 10)).getOrElse(BigInt(s.toDouble.toLong))
}
case ValueType.BOOLEAN =>
read(BigInt(u.unpackBoolean.toInt))
case ValueType.FLOAT =>
read(BigInt(u.unpackDouble.toLong))
case _ =>
u.skipValue
v.setNull
}
}
}
object BigIntegerCodec extends PrimitiveCodec[java.math.BigInteger] {
def surface = Primitive.BigInteger
override def pack(p: Packer, v: java.math.BigInteger): Unit = {
if (v.compareTo(java.math.BigInteger.valueOf(Long.MaxValue)) <= 0) {
p.packLong(v.longValue())
} else {
p.packString(v.toString(10))
}
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
def read(body: => java.math.BigInteger): Unit = {
try {
v.setObject(body)
} catch {
case e: IntegerOverflowException =>
v.setIncompatibleFormatException(this, s"${e.getBigInteger} is too large for a Long value")
case e: NumberFormatException =>
v.setIncompatibleFormatException(this, e.getMessage)
}
}
val f = u.getNextFormat
val vt = f.getValueType
vt match {
case ValueType.NIL =>
u.unpackNil
v.setNull
case ValueType.INTEGER =>
read(java.math.BigInteger.valueOf(u.unpackLong))
case ValueType.STRING =>
read {
val s = u.unpackString
Try(new java.math.BigInteger(s, 10)).getOrElse(java.math.BigInteger.valueOf(s.toDouble.toLong))
}
case ValueType.BOOLEAN =>
read(java.math.BigInteger.valueOf(u.unpackBoolean.toInt))
case ValueType.FLOAT =>
read(java.math.BigInteger.valueOf(u.unpackDouble.toLong))
case _ =>
u.skipValue
v.setNull
}
}
}
object StringCodec extends PrimitiveCodec[String] {
def surface = Primitive.String
override def pack(p: Packer, v: String): Unit = {
if (v == null) {
p.packNil
} else {
p.packString(v)
}
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
def read(body: => String): Unit = {
try {
val s = body
v.setString(s)
} catch {
case e: IntegerOverflowException =>
read(e.getBigInteger.toString())
case e: NumberFormatException =>
v.setIncompatibleFormatException(this, e.getMessage)
}
}
u.getNextFormat.getValueType match {
case ValueType.NIL =>
u.unpackNil
v.setNull
case ValueType.STRING =>
read(u.unpackString)
case ValueType.INTEGER =>
read(u.unpackLong.toString)
case ValueType.BOOLEAN =>
read(u.unpackBoolean.toString)
case ValueType.FLOAT =>
read(u.unpackDouble.toString)
case ValueType.MAP =>
read(u.unpackValue.toJson)
case ValueType.ARRAY =>
read(u.unpackValue.toJson)
case ValueType.BINARY =>
read {
val len = u.unpackBinaryHeader
Base64.getEncoder.encodeToString(u.readPayload(len))
}
case _ =>
          // Use JSON format for unknown types so that we can read arbitrary types as a String value
read(u.unpackValue.toJson)
}
}
}
object BooleanCodec extends PrimitiveCodec[Boolean] {
def surface = Primitive.Boolean
override def pack(p: Packer, v: Boolean): Unit = {
p.packBoolean(v)
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
def read(body: => Boolean): Unit = {
try {
val b = body
v.setBoolean(b)
} catch {
case e: IntegerOverflowException =>
v.setBoolean(e.getBigInteger.doubleValue() != 0.0)
case e: IllegalArgumentException =>
v.setIncompatibleFormatException(this, e.getMessage)
}
}
u.getNextFormat.getValueType match {
case ValueType.NIL =>
u.unpackNil
v.setNull
case ValueType.BOOLEAN =>
read(u.unpackBoolean)
case ValueType.STRING =>
read {
val s = u.unpackString
Try(s.toBoolean).getOrElse(s.toDouble != 0.0)
}
case ValueType.INTEGER =>
read(u.unpackLong != 0L)
case ValueType.FLOAT =>
read(u.unpackDouble != 0.0)
case _ =>
u.skipValue
v.setNull
}
}
}
object FloatCodec extends PrimitiveCodec[Float] {
def surface = Primitive.Float
override def pack(p: Packer, v: Float): Unit = {
p.packFloat(v)
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
def read(body: => Float): Unit = {
try {
v.setFloat(body)
} catch {
case e: IntegerOverflowException =>
v.setFloat(e.getBigInteger.floatValue())
case e: IllegalArgumentException =>
v.setIncompatibleFormatException(this, e.getMessage)
}
}
u.getNextFormat.getValueType match {
case ValueType.NIL =>
u.unpackNil
v.setNull
case ValueType.FLOAT =>
read(u.unpackFloat.toFloat)
case ValueType.INTEGER =>
read(u.unpackLong.toFloat)
case ValueType.BOOLEAN =>
read(u.unpackBoolean.toInt.toFloat)
case ValueType.STRING =>
read(u.unpackString.toFloat)
case _ =>
u.skipValue
v.setNull
}
}
}
object DoubleCodec extends PrimitiveCodec[Double] {
def surface = Primitive.Double
override def pack(p: Packer, v: Double): Unit = {
p.packDouble(v)
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
def read(body: => Double): Unit = {
try {
v.setDouble(body)
} catch {
case e: IntegerOverflowException =>
v.setDouble(e.getBigInteger.doubleValue())
case e: IllegalArgumentException =>
v.setIncompatibleFormatException(this, e.getMessage)
}
}
u.getNextFormat.getValueType match {
case ValueType.NIL =>
u.unpackNil
v.setNull
case ValueType.FLOAT =>
read(u.unpackDouble)
case ValueType.INTEGER =>
read(u.unpackLong.toDouble)
case ValueType.BOOLEAN =>
read(u.unpackBoolean.toInt.toDouble)
case ValueType.STRING =>
read(u.unpackString.toDouble)
case _ =>
u.skipValue
v.setNull
}
}
}
trait PrimitiveArrayCodec { self: MessageCodec[_] =>
    /**
     * Unpack the input either as a JSON array (ValueType.STRING) or as a msgpack ARRAY (ValueType.ARRAY).
     * @param u              the unpacker positioned at the next input value
     * @param v              the message context that receives the decoded result
     * @param unpackRawArray the body that reads a raw msgpack ARRAY value
     */
protected def unpackArray(u: Unpacker, v: MessageContext)(unpackRawArray: => Unit): Unit = {
u.getNextFormat.getValueType match {
case ValueType.STRING =>
// Assume it's JSON input
val jsonArray = u.unpackString
val msgpack = JSONCodec.toMsgPack(jsonArray)
val unpacker = MessagePack.newUnpacker(msgpack)
// Parse again
unpack(unpacker, v)
case ValueType.ARRAY =>
unpackRawArray
case other =>
          v.setIncompatibleFormatException(this, s"STRING or ARRAY type is expected, but ${other} is found")
u.skipValue
}
}
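    // As a consequence, IntArrayCodec decodes both the msgpack ARRAY [1, 2, 3] and
    // the msgpack STRING "[1,2,3]" (treated as JSON) into Array(1, 2, 3).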
}
object IntArrayCodec extends MessageCodec[Array[Int]] with PrimitiveArrayCodec {
override def pack(p: Packer, v: Array[Int]): Unit = {
p.packArrayHeader(v.length)
v.foreach { x =>
IntCodec.pack(p, x)
}
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
unpackArray(u, v) {
val len = u.unpackArrayHeader
val b = Array.newBuilder[Int]
b.sizeHint(len)
(0 until len).foreach { i =>
IntCodec.unpack(u, v)
if (v.isNull) {
// TODO report error?
b += 0
} else {
val l = v.getInt
b += l.toInt
}
}
v.setObject(b.result())
}
}
}
object ShortArrayCodec extends MessageCodec[Array[Short]] with PrimitiveArrayCodec {
override def pack(p: Packer, v: Array[Short]): Unit = {
p.packArrayHeader(v.length)
v.foreach { x =>
ShortCodec.pack(p, x)
}
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
unpackArray(u, v) {
val len = u.unpackArrayHeader
val b = Array.newBuilder[Short]
b.sizeHint(len)
(0 until len).foreach { i =>
IntCodec.unpack(u, v)
if (v.isNull) {
// TODO report error?
b += 0
} else {
            val l = v.getInt
            if (l.isValidShort) {
b += l.toShort
} else {
// report error?
b += 0
}
}
}
v.setObject(b.result())
}
}
}
object CharArrayCodec extends MessageCodec[Array[Char]] with PrimitiveArrayCodec {
override def pack(p: Packer, v: Array[Char]): Unit = {
p.packArrayHeader(v.length)
v.foreach { x =>
CharCodec.pack(p, x)
}
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
unpackArray(u, v) {
val len = u.unpackArrayHeader
val b = Array.newBuilder[Char]
b.sizeHint(len)
(0 until len).foreach { i =>
CharCodec.unpack(u, v)
if (v.isNull) {
// TODO report error?
b += 0
} else {
val l = v.getLong
if (l.isValidChar) {
b += l.toChar
} else {
// report error?
b += 0
}
}
}
v.setObject(b.result())
}
}
}
object LongArrayCodec extends MessageCodec[Array[Long]] with PrimitiveArrayCodec {
override def pack(p: Packer, v: Array[Long]): Unit = {
p.packArrayHeader(v.length)
v.foreach { x =>
LongCodec.pack(p, x)
}
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
unpackArray(u, v) {
val len = u.unpackArrayHeader
val b = Array.newBuilder[Long]
b.sizeHint(len)
(0 until len).foreach { i =>
LongCodec.unpack(u, v)
if (v.isNull) {
// TODO report error?
b += 0L
} else {
val l = v.getLong
b += l
}
}
v.setObject(b.result())
}
}
}
object FloatArrayCodec extends MessageCodec[Array[Float]] with PrimitiveArrayCodec {
override def pack(p: Packer, v: Array[Float]): Unit = {
p.packArrayHeader(v.length)
v.foreach { x =>
FloatCodec.pack(p, x)
}
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
unpackArray(u, v) {
val len = u.unpackArrayHeader
val b = Array.newBuilder[Float]
b.sizeHint(len)
(0 until len).foreach { i =>
          FloatCodec.unpack(u, v)
if (v.isNull) {
// report error?
b += 0
} else {
// TODO check precision
b += v.getDouble.toFloat
}
}
v.setObject(b.result())
}
}
}
object DoubleArrayCodec extends MessageCodec[Array[Double]] with PrimitiveArrayCodec {
override def pack(p: Packer, v: Array[Double]): Unit = {
p.packArrayHeader(v.length)
v.foreach { x =>
DoubleCodec.pack(p, x)
}
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
unpackArray(u, v) {
val len = u.unpackArrayHeader
val b = Array.newBuilder[Double]
b.sizeHint(len)
(0 until len).foreach { i =>
          DoubleCodec.unpack(u, v)
if (v.isNull) {
// report error?
b += 0
} else {
b += v.getDouble
}
}
v.setObject(b.result())
}
}
}
object BooleanArrayCodec extends MessageCodec[Array[Boolean]] with PrimitiveArrayCodec {
override def pack(p: Packer, v: Array[Boolean]): Unit = {
p.packArrayHeader(v.length)
v.foreach { x =>
BooleanCodec.pack(p, x)
}
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
unpackArray(u, v) {
val len = u.unpackArrayHeader
val b = Array.newBuilder[Boolean]
b.sizeHint(len)
(0 until len).foreach { i =>
BooleanCodec.unpack(u, v)
if (v.isNull) {
// report error?
b += false
} else {
b += v.getBoolean
}
}
v.setObject(b.result())
}
}
}
object ByteArrayCodec extends MessageCodec[Array[Byte]] {
override def pack(p: Packer, v: Array[Byte]): Unit = {
p.packBinaryHeader(v.length)
p.addPayload(v)
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
u.getNextValueType match {
case ValueType.BINARY =>
val len = u.unpackBinaryHeader
val b = u.readPayload(len)
v.setObject(b)
case ValueType.STRING =>
val strByteLen = u.unpackRawStringHeader
val strBinary = u.readPayload(strByteLen)
val arr: Array[Byte] =
try {
// Try decoding as base64
Base64.getDecoder.decode(strBinary)
} catch {
case e: IllegalArgumentException =>
// Raw string
strBinary
}
v.setObject(arr)
case _ =>
// Set MessagePack binary
val value = u.unpackValue
v.setObject(value.toMsgpack)
}
}
}
object RawMsgPackCodec extends MessageCodec[MsgPack] {
override def pack(p: Packer, v: Array[Byte]): Unit = {
p.packBinaryHeader(v.length)
p.addPayload(v)
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
u.getNextValueType match {
case ValueType.BINARY =>
val len = u.unpackBinaryHeader
val b = u.readPayload(len)
v.setObject(b)
case _ =>
// Set MessagePack binary
val value = u.unpackValue
v.setObject(value.toMsgpack)
}
}
}
object StringArrayCodec extends MessageCodec[Array[String]] with PrimitiveArrayCodec {
override def pack(p: Packer, v: Array[String]): Unit = {
p.packArrayHeader(v.length)
v.foreach { x =>
StringCodec.pack(p, x)
}
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
unpackArray(u, v) {
val len = u.unpackArrayHeader
val b = Array.newBuilder[String]
b.sizeHint(len)
(0 until len).foreach { i =>
StringCodec.unpack(u, v)
if (v.isNull) {
b += "" // or report error?
} else {
b += v.getString
}
}
v.setObject(b.result())
}
}
}
object AnyArrayCodec extends MessageCodec[Array[Any]] with PrimitiveArrayCodec {
override def pack(p: Packer, v: Array[Any]): Unit = {
p.packArrayHeader(v.length)
v.foreach { x =>
AnyCodec.pack(p, x)
}
}
override def unpack(
u: Unpacker,
v: MessageContext
): Unit = {
unpackArray(u, v) {
val len = u.unpackArrayHeader
val b = Array.newBuilder[Any]
b.sizeHint(len)
(0 until len).foreach { i =>
AnyCodec.unpack(u, v)
if (v.isNull) {
b += null // or report error?
} else {
b += v.getLastValue
}
}
v.setObject(b.result())
}
}
}
/**
* MessagePack value codec
*/
object ValueCodec extends MessageCodec[Value] {
override def pack(p: Packer, v: Value): Unit = {
p.packValue(v)
}
override def unpack(u: Unpacker, v: MessageContext): Unit = {
v.setObject(u.unpackValue)
}
}
/**
* Codec for Any values. This only supports very basic types to enable packing/unpacking collections like Seq[Any],
   * Map[Any, Any] with ease.
*
* Another option to implement AnyCodec is packing pairs of (type, value), but we will not take this approach as this
* will require many bytes to fully encode type names.
*/
object AnyCodec extends MessageCodec[Any] {
override def pack(p: Packer, v: Any): Unit = {
v match {
case null => p.packNil
// Primitive types
case v: String => StringCodec.pack(p, v)
case v: Boolean => BooleanCodec.pack(p, v)
case v: Int => IntCodec.pack(p, v)
case v: Long => LongCodec.pack(p, v)
case v: Float => FloatCodec.pack(p, v)
case v: Double => DoubleCodec.pack(p, v)
case v: Byte => ByteCodec.pack(p, v)
case v: Short => ShortCodec.pack(p, v)
case v: Char => CharCodec.pack(p, v)
case v: JSONValue => JSONValueCodec.pack(p, v)
case v: Value => ValueCodec.pack(p, v)
case v: Instant => p.packTimestamp(v)
case v: ULID => ULIDCodec.pack(p, v)
// Arrays
case v: Array[String] => StringArrayCodec.pack(p, v)
case v: Array[Boolean] => BooleanArrayCodec.pack(p, v)
case v: Array[Int] => IntArrayCodec.pack(p, v)
case v: Array[Long] => LongArrayCodec.pack(p, v)
case v: Array[Float] => FloatArrayCodec.pack(p, v)
case v: Array[Double] => DoubleArrayCodec.pack(p, v)
case v: Array[Byte] => ByteArrayCodec.pack(p, v)
case v: Array[Short] => ShortArrayCodec.pack(p, v)
case v: Array[Char] => CharArrayCodec.pack(p, v)
case v: Array[_] =>
p.packArrayHeader(v.length)
for (x <- v) {
pack(p, x)
}
// Collections
case v: Option[_] =>
if (v.isEmpty) {
p.packNil
} else {
pack(p, v.get)
}
case v: Seq[_] =>
p.packArrayHeader(v.length)
for (x <- v) {
pack(p, x)
}
case m: Map[_, _] =>
p.packMapHeader(m.size)
for ((k, v) <- m) {
pack(p, k)
pack(p, v)
}
case e: Either[_, _] =>
p.packArrayHeader(2)
e match {
case Left(l) =>
pack(p, l)
p.packNil
case Right(r) =>
p.packNil
pack(p, r)
}
case v: Throwable =>
ThrowableCodec.pack(p, v)
case _ =>
val cl = v.getClass
wvlet.airframe.codec.Compat.codecOfClass(cl) match {
case Some(codec) =>
codec.asInstanceOf[MessageCodec[Any]].pack(p, v)
case None =>
// Pack as a string for unknown types
StringCodec.pack(p, v.toString)
}
}
}
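    // Round-trip sketch: packing Seq("a", 1, true) yields a 3-element msgpack array,
    // and unpacking it gives Seq("a", 1L, true) -- integers come back as Long and
    // floating point values as Double, since the original static type is not encoded.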
override def unpack(u: Unpacker, v: MessageContext): Unit = {
u.getNextValueType match {
case ValueType.NIL =>
u.unpackNil
v.setNull
case ValueType.BOOLEAN =>
v.setBoolean(u.unpackBoolean)
case ValueType.INTEGER =>
v.setLong(u.unpackLong)
case ValueType.FLOAT =>
v.setDouble(u.unpackDouble)
case ValueType.STRING =>
v.setString(u.unpackString)
case ValueType.BINARY =>
val len = u.unpackBinaryHeader
v.setObject(u.readPayload(len))
case ValueType.ARRAY =>
val len = u.unpackArrayHeader
val b = Seq.newBuilder[Any]
b.sizeHint(len)
(0 until len).foreach { i =>
unpack(u, v)
if (v.isNull) {
b += null // or report error?
} else {
b += v.getLastValue
}
}
v.setObject(b.result())
case ValueType.MAP =>
val len = u.unpackMapHeader
val b = Map.newBuilder[Any, Any]
b.sizeHint(len)
for (i <- 0 until len) {
unpack(u, v)
val key = v.getLastValue
unpack(u, v)
val value = v.getLastValue
b += (key -> value)
}
v.setObject(b.result())
case ValueType.EXTENSION =>
val ext = u.unpackExtTypeHeader
if (ext.extType == -1) {
v.setObject(u.unpackTimestamp(ext))
} else {
val extBody = u.readPayload(ext.byteLength)
v.setObject(ExtensionValue(ext.extType, extBody))
}
}
}
}
}
|
wvlet/airframe
|
airframe-codec/src/main/scala/wvlet/airframe/codec/PrimitiveCodec.scala
|
Scala
|
apache-2.0
| 30,837 |
/*
Copyright 2011 Andrew Fowler <[email protected]>
This file is part of Terminology2ODMConverter.
Terminology2ODMConverter is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Terminology2ODMConverter is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Terminology2ODMConverter. If not, see <http://www.gnu.org/licenses/>.
*/
package model
class IGroupDef(val name: String, val repeating: String) extends Sourced with OIDEntity with References {
def this(oid: String, name : String, repeating: String) = {
this(name, repeating)
this.oid = oid
}
override def toString(): String = "IGroupDef[" + oid + ", name=" + name + ", src=" + source + "]"
}
|
rwynne/ops-data-conversion
|
ODM_Converter/src/model/IGroupDef.scala
|
Scala
|
bsd-3-clause
| 1,132 |
package fpinscala.errorhandling
import scala.{Option => _, Some => _, Either => _, _} // hide std library `Option`, `Some` and `Either`, since we are writing our own in this chapter
sealed trait Option[+A] {
def map[B](f: A => B): Option[B] = this match {
case None => None
case Some(a) => Some(f(a))
}
def getOrElse[B>:A](default: => B): B = this match {
case None => default
case Some(a) => a
}
def flatMap[B](f: A => Option[B]): Option[B] = this match {
case None => None
case Some(a) => f(a)
}
def orElse[B>:A](ob: => Option[B]): Option[B] = this match {
case None => ob
case a => a
}
def filter(f: A => Boolean): Option[A] = this match {
case i@Some(a) if f(a) => i
case _ => None
}
}
case class Some[+A](get: A) extends Option[A]
case object None extends Option[Nothing]
object Option {
def failingFn(i: Int): Int = {
val y: Int = throw new Exception("fail!") // `val y: Int = ...` declares `y` as having type `Int`, and sets it equal to the right hand side of the `=`.
try {
val x = 42 + 5
x + y
}
catch { case e: Exception => 43 } // A `catch` block is just a pattern matching block like the ones we've seen. `case e: Exception` is a pattern that matches any `Exception`, and it binds this value to the identifier `e`. The match returns the value 43.
}
def failingFn2(i: Int): Int = {
try {
val x = 42 + 5
x + ((throw new Exception("fail!")): Int) // A thrown Exception can be given any type; here we're annotating it with the type `Int`
}
catch { case e: Exception => 43 }
}
def mean(xs: Seq[Double]): Option[Double] =
if (xs.isEmpty) None
else Some(xs.sum / xs.length)
def variance(xs: Seq[Double]): Option[Double] =
mean(xs).flatMap((m) => mean(xs.map((x) => math.pow(x - m, 2))))
def map2[A,B,C](a: Option[A], b: Option[B])(f: (A, B) => C): Option[C] = (a,b) match {
case (None, _) => None
case (_, None) => None
case (Some(x), Some(y)) => Some(f(x,y))
}
def sequence[A](a: List[Option[A]]): Option[List[A]] =
a.foldRight(Some(Nil):Option[List[A]]){(e, i) => e match {
case Some(s) => i.map(s :: _)
case None => None
}}
def traverse[A, B](a: List[A])(f: A => Option[B]): Option[List[B]] =
a.foldRight(Some(Nil):Option[List[B]]){(e, i) => i.flatMap{ii => f(e) match {
case Some(s) => Some(s :: ii)
case None => None
}}}
def sequenceTrav[A](a: List[Option[A]]): Option[List[A]] = traverse(a)((e) => e)
}
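// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original fpinscala exercises
// file). It only exercises the combinators defined above; the expected
// results are noted in the trailing comments.
object OptionUsageExample {
  def main(args: Array[String]): Unit = {
    val xs = List(1.0, 2.0, 3.0, 4.0)
    println(Option.mean(xs))                                // Some(2.5)
    println(Option.variance(xs))                            // Some(1.25)
    println(Option.map2(Some(1), Some(2))(_ + _))           // Some(3)
    println(Option.sequence(List(Some(1), Some(2), None)))  // None
    println(Option.traverse(List(1, 2, 3))(i => if (i > 0) Some(i * 2) else None)) // Some(List(2, 4, 6))
  }
}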
|
Errorific/fpinscala
|
exercises/src/main/scala/fpinscala/errorhandling/Option.scala
|
Scala
|
mit
| 2,526 |
package org.wartremover
package contrib.test
import org.scalatest.Assertions
import org.wartremover.test.WartTestTraverser
trait ResultAssertions extends Assertions {
def assertEmpty(result: WartTestTraverser.Result) = {
assertResult(List.empty, "result.errors")(result.errors)
assertResult(List.empty, "result.warnings")(result.warnings)
}
def assertError(result: WartTestTraverser.Result)(message: String) = assertErrors(result)(message, 1)
def assertErrors(result: WartTestTraverser.Result)(message: String, times: Int) = {
assertResult(List.fill(times)(message), "result.errors")(result.errors.map(skipTraverserPrefix))
assertResult(List.empty, "result.warnings")(result.warnings.map(skipTraverserPrefix))
}
def assertWarnings(result: WartTestTraverser.Result)(message: String, times: Int) = {
assertResult(List.empty, "result.errors")(result.errors.map(skipTraverserPrefix))
assertResult(List.fill(times)(message), "result.warnings")(result.warnings.map(skipTraverserPrefix))
}
private val messageFormat = """\[wartremover:\S+\] ([\s\S]+)""".r
private def skipTraverserPrefix(msg: String) = msg match {
case messageFormat(rest) => rest
case s => s
}
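  // Illustrative note (the wart name below is hypothetical, not taken from this
  // file): a raw message such as
  //   "[wartremover:contrib.warts.SomeWart] must not be used"
  // is reduced by skipTraverserPrefix to "must not be used"; strings without
  // the "[wartremover:...]" prefix are returned unchanged.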
}
|
wartremover/wartremover-contrib
|
core/src/test/scala/wartremover/contrib/ResultAssertions.scala
|
Scala
|
apache-2.0
| 1,218 |
package org.apache.spark.storage.pmof
import org.apache.spark.storage._
import org.apache.spark.serializer._
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.internal.Logging
import org.apache.spark.{SparkConf, SparkEnv}
import org.apache.spark.util.Utils
import java.io.{File, OutputStream}
import org.apache.spark.util.configuration.pmof.PmofConf
import scala.collection.mutable.ArrayBuffer
class PmemBlockId (stageId: Int, tmpId: Int) extends ShuffleBlockId(stageId, 0, tmpId) {
override def name: String = "reduce_spill_" + stageId + "_" + tmpId
override def isShuffle: Boolean = false
}
object PmemBlockId {
private var tempId: Int = 0
def getTempBlockId(stageId: Int): PmemBlockId = synchronized {
val cur_tempId = tempId
tempId += 1
new PmemBlockId (stageId, cur_tempId)
}
}
private[spark] class PmemBlockOutputStream(
taskMetrics: TaskMetrics,
blockId: BlockId,
serializerManager: SerializerManager,
serializer: Serializer,
conf: SparkConf,
pmofConf: PmofConf,
numMaps: Int = 0,
numPartitions: Int = 1
) extends DiskBlockObjectWriter(new File(Utils.getConfiguredLocalDirs(conf).toList(0) + "/null"), null, null, 0, true, null, null) with Logging {
var size: Int = 0
var records: Int = 0
var recordsPerBlock: Int = 0
val recordsArray: ArrayBuffer[Int] = ArrayBuffer()
var spilled: Boolean = false
var partitionMeta: Array[(Long, Int, Int)] = _
val root_dir = Utils.getConfiguredLocalDirs(conf).toList.sortWith(_ < _)(0)
val persistentMemoryWriter: PersistentMemoryHandler = PersistentMemoryHandler.getPersistentMemoryHandler(pmofConf,
root_dir, pmofConf.path_list, blockId.name, pmofConf.maxPoolSize)
//disable metadata updating by default
//persistentMemoryWriter.updateShuffleMeta(blockId.name)
val pmemOutputStream: PmemOutputStream = new PmemOutputStream(
persistentMemoryWriter, numPartitions, blockId.name, numMaps, (pmofConf.spill_throttle.toInt + 1024))
val serInstance = serializer.newInstance()
val bs = serializerManager.wrapStream(blockId, pmemOutputStream)
var objStream: SerializationStream = serInstance.serializeStream(bs)
override def write(key: Any, value: Any): Unit = {
objStream.writeKey(key)
objStream.writeValue(value)
records += 1
recordsPerBlock += 1
if (blockId.isShuffle == true) {
taskMetrics.shuffleWriteMetrics.incRecordsWritten(1)
}
maybeSpill()
}
override def close() {
if (objStream != null) {
objStream.close()
objStream = null
}
pmemOutputStream.close()
}
override def flush() {
objStream.flush()
bs.flush()
}
def maybeSpill(force: Boolean = false): Unit = {
if ((pmofConf.spill_throttle != -1 && pmemOutputStream.remainingSize >= pmofConf.spill_throttle) || force == true) {
val start = System.nanoTime()
flush()
pmemOutputStream.doFlush()
val bufSize = pmemOutputStream.flushedSize
if (bufSize > 0) {
recordsArray += recordsPerBlock
recordsPerBlock = 0
size += bufSize
if (blockId.isShuffle == true) {
val writeMetrics = taskMetrics.shuffleWriteMetrics
writeMetrics.incWriteTime(System.nanoTime() - start)
writeMetrics.incBytesWritten(bufSize)
} else {
taskMetrics.incDiskBytesSpilled(bufSize)
}
pmemOutputStream.reset()
spilled = true
}
}
}
def ifSpilled(): Boolean = {
spilled
}
def getPartitionMeta(): Array[(Long, Int, Int)] = {
if (partitionMeta == null) {
var i = -1
partitionMeta = persistentMemoryWriter.getPartitionBlockInfo(blockId.name).map{ x=> i+=1; (x._1, x._2, recordsArray(i))}
}
partitionMeta
}
def getBlockId(): BlockId = {
blockId
}
def getRkey(): Long = {
persistentMemoryWriter.rkey
}
def getTotalRecords(): Long = {
records
}
def getSize(): Long = {
size
}
def getPersistentMemoryHandler: PersistentMemoryHandler = {
persistentMemoryWriter
}
}
|
Intel-bigdata/OAP
|
oap-shuffle/RPMem-shuffle/core/src/main/scala/org/apache/spark/storage/pmof/PmemBlockOutputStream.scala
|
Scala
|
apache-2.0
| 4,050 |
package demo
package components
package reacttable
import chandu0101.macros.tojs.GhPagesMacros
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.html_<^._
object ReactTableInfo {
val code = GhPagesMacros.exampleSource
// EXAMPLE:START
case class Backend($ : BackendScope[Unit, Unit]) {
def render =
InfoTemplate(componentFilePath = "/tables/ReactTable.scala", scalacss = true)(
<.div(^.cls := "full-width-section")(
<.h3("React Table :"),
<.p("Responsive HTML(flexbox) table with the following features"),
<.ul(^.paddingLeft := "25px")(
<.li("Search"),
<.li("Pagination"),
<.li("Sorting"),
<.li("Custom Styles"),
          <.li("Custom Column Sizes"),
<.li("Custom Cell Factory")
)
)
)
}
val component = ScalaComponent
.builder[Unit]("ReactTableInfo")
.renderBackend[Backend]
.build
// EXAMPLE:END
def apply() = component()
}
|
chandu0101/scalajs-react-components
|
demo/src/main/scala/demo/components/reacttable/ReactTableInfo.scala
|
Scala
|
apache-2.0
| 1,017 |
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalastyle.scalariform
import org.scalastyle.PositionError
import org.scalastyle.ScalariformChecker
import org.scalastyle.ScalastyleError
import _root_.scalariform.lexer.Token
import _root_.scalariform.lexer.Tokens.ARROW
import _root_.scalariform.lexer.Tokens.USCORE
import _root_.scalariform.parser.AstNode
import _root_.scalariform.parser.BlockImportExpr
import _root_.scalariform.parser.CompilationUnit
import _root_.scalariform.parser.Expr
import _root_.scalariform.parser.GeneralTokens
import _root_.scalariform.parser.ImportClause
import _root_.scalariform.parser.ImportSelectors
class BlockImportChecker extends ScalariformChecker {
val errorKey = "block.import"
def verify(ast: CompilationUnit): List[ScalastyleError] =
findBlockImports(ast)
private def findBlockImports(in: AstNode): List[PositionError] = in match {
// comma separated import
case ImportClause(_, firstImport, otherImports, _) if otherImports.nonEmpty =>
List(PositionError(firstImport.firstToken.offset))
// rename or hide import
case BlockImportExpr(prefix, ImportSelectors(_, Expr(
List(_, GeneralTokens(List(Token(ARROW, "=>", _, _))), _)
), otherImports, _)) =>
val blockImportFound = otherImports exists {
case (_, Expr(List(GeneralTokens(List(Token(tokenType, _, _, _)))))) =>
tokenType != USCORE
case _ =>
false
}
if (blockImportFound) List(PositionError(prefix.firstToken.offset)) else Nil
// other block imports
case b: BlockImportExpr => List(PositionError(b.firstToken.offset))
// remaining nodes
case a: AstNode => a.immediateChildren flatMap findBlockImports
}
  }
}
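// Illustrative examples (added for clarity, not part of the original checker):
// based on the match cases above, the rule behaves roughly as follows:
//
//   import scala.io.Source, scala.util.Try          // comma-separated clause -> flagged
//   import scala.collection.mutable.{Map, Set}      // plain block import     -> flagged
//   import java.util.{List => JList, Map}           // rename plus extra name -> flagged
//   import java.util.{List => JList}                // rename only            -> allowed
//   import java.util.{List => _, _}                 // hide plus wildcard     -> allowed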
|
scalastyle/scalastyle
|
src/main/scala/org/scalastyle/scalariform/BlockImportChecker.scala
|
Scala
|
apache-2.0
| 2,430 |
/**
* Created by Variant on 16/3/22.
*/
object List_Fold_Sort {
def main(args: Array[String]) {
    // 0 + 1 + 2 + ... + 100: left fold accumulating from 1 up to 100
println((1 to 100).foldLeft(0)(_+_))
println((1 /: (1 to 100))(_ + _))
    // 1 - (2 - (3 - (4 - (5 - 100)))): right fold accumulating from 5 down to 1
println((1 to 5).foldRight(100)(_ - _))
println(((1 to 5) :\\ 100)(_ - _))
println(List(1, -3, 4, 2, 6) sortWith (_ < _))
println(List(2,3,56,7,12) sortWith(_ > _))
println(List.apply(1,2,3))
println(List.fill(3, 3)(5))
println(List.range(1, 5))
    // with a step size (here -3)
println(List.range(9, 1, -3))
val zipped = "abcde".toList zip List(1, 2, 3, 5, 6)
println(zipped)
println(zipped.unzip)
    // merge several lists into a single List
println(List(List('a', 'b'), List('c'), List('d', 'e')).flatten)
println(List.concat(List(),List('b'), List('c')))
println(List.map2(List(10,20),List(10,10,20))(_ * _))
println((List(20,30,10),List(10,20)).zipped.map(_ * _))
}
}
|
sparkLiwei/ProgrammingNote
|
scalaLearning/scalaDataStructure/List_Fold_Sort.scala
|
Scala
|
cc0-1.0
| 965 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.runtime
import java.lang.invoke._
import java.lang.ref.SoftReference
import java.lang.reflect.Method
final class StructuralCallSite private (callType: MethodType) {
private var cache: SoftReference[MethodCache] = new SoftReference(new EmptyMethodCache)
val parameterTypes: Array[Class[_]] = callType.parameterArray
def get: MethodCache = {
var cache = this.cache.get
if (cache == null) {
cache = new EmptyMethodCache
this.cache = new SoftReference(cache)
}
cache
}
def find(receiver: Class[_]): Method = get.find(receiver)
def add(receiver: Class[_], m: Method): Method = {
cache = new SoftReference(get.add(receiver, m))
m
}
}
object StructuralCallSite {
def bootstrap(lookup: MethodHandles.Lookup, invokedName: String, invokedType: MethodType, reflectiveCallType: MethodType): CallSite = {
val structuralCallSite = new StructuralCallSite(reflectiveCallType)
new ConstantCallSite(MethodHandles.constant(classOf[StructuralCallSite], structuralCallSite))
}
}
|
scala/scala
|
src/library/scala/runtime/StructuralCallSite.scala
|
Scala
|
apache-2.0
| 1,332 |