| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 5–1M | stringlengths 5–109 | stringlengths 6–208 | stringclasses 1 value | stringclasses 15 values | int64 5–1M |
package lila.api
import play.api.libs.json._
import chess.format.Forsyth
import lila.common.paginator.Paginator
import lila.common.PimpedJson._
import lila.game.{ Game, PerfPicker }
final class UserGameApi(bookmarkApi: lila.bookmark.BookmarkApi) {
import lila.round.JsonView._
def filter(filterName: String, pag: Paginator[Game])(implicit ctx: Context): JsObject = {
val bookmarkedIds = ctx.userId ?? bookmarkApi.gameIds
implicit val gameWriter = Writes[Game] { g =>
write(g, bookmarkedIds(g.id))
}
Json.obj(
"filter" -> filterName,
"paginator" -> lila.common.paginator.PaginatorJson(pag)
)
}
private def write(g: Game, bookmarked: Boolean) = Json.obj(
"id" -> g.id,
"rated" -> g.rated,
"variant" -> g.variant,
"speed" -> g.speed.key,
"perf" -> PerfPicker.key(g),
"timestamp" -> g.createdAt.getDate,
"turns" -> g.turns,
"status" -> g.status,
"clock" -> g.clock,
"correspondence" -> g.daysPerTurn.map { d =>
Json.obj("daysPerTurn" -> d)
},
"opening" -> g.opening,
"players" -> JsObject(g.players map { p =>
p.color.name -> Json.obj(
"userId" -> p.userId,
"name" -> p.name,
"aiLevel" -> p.aiLevel,
"rating" -> p.rating,
"ratingDiff" -> p.ratingDiff
).noNull
}),
"fen" -> Forsyth.exportBoard(g.toChess.board),
"lastMove" -> g.castleLastMoveTime.lastMoveString,
"opening" -> g.opening.map { o =>
Json.obj("code" -> o.code, "name" -> o.name)
},
"winner" -> g.winnerColor.map(_.name),
"bookmarks" -> g.bookmarks,
"bookmarked" -> bookmarked.option(true)
).noNull
}
| Happy0/lila | modules/api/src/main/UserGameApi.scala | Scala | mit | 1,660 |
/****************************************************************************
* Copyright Fabien Sartor
* Contributors: Fabien Sartor ([email protected])
* http://fasar.fr
*
* This software is a computer program whose purpose is to compute differences
* between two files.
*
****************************************************************************
*
* This software is governed by the CeCILL license under French law and
* abiding by the rules of distribution of free software. You can use,
* modify and/ or redistribute the software under the terms of the CeCILL
* license as circulated by CEA, CNRS and INRIA at the following URL:
* "http://www.cecill.info".
*
* As a counterpart to the access to the source code and rights to copy,
* modify and redistribute granted by the license, users are provided only
* with a limited warranty and the software's author, the holder of the
* economic rights, and the successive licensors have only limited
* liability.
*
* In this respect, the user's attention is drawn to the risks associated
* with loading, using, modifying and/or developing or reproducing the
* software by the user in light of its specific status of free software,
* that may mean that it is complicated to manipulate, and that also
* therefore means that it is reserved for developers and experienced
* professionals having in-depth computer knowledge. Users are therefore
* encouraged to load and test the software's suitability as regards their
* requirements in conditions enabling the security of their systems and/or
* data to be ensured and, more generally, to use and operate it in the
* same conditions as regards security.
*
* The fact that you are presently reading this means that you have had
* knowledge of the CeCILL license and that you accept its terms.
*
****************************************************************************
*/
package fsart.diffTools.script.scriptEngineImpl
import fsart.diffTools.csvModel.CsvData
import java.util.Calendar
import org.apache.commons.logging.{LogFactory, Log}
import fsart.diffTools.script.{Interpreter, InputOutputData, ScriptEngine}
/**
*
* User: fabien
* Date: 20/07/12
* Time: 18:42
*
*/
object DefaultScriptEngine extends ScriptEngine {
private val log: Log = LogFactory.getLog(this.getClass)
def process(ioData: InputOutputData) {
val file1 = ioData.file1
val file2 = ioData.file2
val trans = ioData.translator
val outputDriver = ioData.outputDriver
val dateInitFile = Calendar.getInstance.getTimeInMillis
import fsart.diffTools.csvDsl.CsvBuilderDsl._
val csv1:CsvData[String] = file1 toCsv() firstLineAsHeader(true)//(firstLineAsHeader)
val csv2:CsvData[String] = file2 toCsv() firstLineAsHeader(true)//(firstLineAsHeader)
val dateGenerateCsvData = Calendar.getInstance.getTimeInMillis
log.debug("It takes " + (dateGenerateCsvData - dateInitFile) + " secondes to create csv data")
import fsart.diffTools.csvDsl.CsvRulesDsl._
log.debug("Generate differences between two files")
val csvDiff:DiffData = modificationsMade by csv2 withRef csv1
val csvAdd:DiffData = additionsMade by csv2 withRef csv1
val csvSuppr:DiffData = suppressionsMade by csv2 withRef csv1
val csvRes = csvDiff concatWith csvSuppr concatWith csvAdd
val dateGenerateCsvDiffData = Calendar.getInstance.getTimeInMillis
log.debug("It takes " + (dateGenerateCsvDiffData - dateGenerateCsvData) + " secondes to generate differences")
val myres = trans.translate(csvRes)
outputDriver.addCsvTable("Comparison1", myres)
}
}
| fasar/diffTools | core/console/src/main/scala/fsart/diffTools/script/scriptEngineImpl/DefaultScriptEngine.scala | Scala | apache-2.0 | 3,685 |
package com.malliina.bundler
import com.malliina.bundler.ClientPlugin.autoImport.writeAssets
import com.malliina.live.LiveReloadPlugin.autoImport.refreshBrowsers
import com.malliina.live.LiveRevolverPlugin
import org.scalajs.sbtplugin.ScalaJSPlugin.autoImport.{fastOptJS, fullOptJS, scalaJSStage}
import org.scalajs.sbtplugin.Stage
import sbt.Keys._
import sbt._
import spray.revolver.RevolverPlugin.autoImport.reStart
import spray.revolver.GlobalState
object ServerPlugin extends AutoPlugin {
override def requires: Plugins = LiveRevolverPlugin && FileInputPlugin
object autoImport {
val start = Keys.start
val clientProject = settingKey[Project]("Scala.js project")
}
import autoImport._
override def projectSettings: Seq[Def.Setting[_]] = Seq(
start := Def.taskIf {
val log = streams.value.log
val changes = start.inputFileChanges
// Restarts if a) not running, or b) input files have changed
val isRunning = GlobalState.get().getProcess(thisProjectRef.value).isDefined
val word = if (isRunning) "" else "not "
val fileWord = if (changes.hasChanges) "" else "not "
log.debug(s"${name.value} ${word}running. Files ${fileWord}changed.")
if (changes.hasChanges || !isRunning) {
reStart.toTask(" ").value
} else {
Def.task(streams.value.log.info(s"No changes to ${name.value}, no restart.")).value
}
}.value,
start := Def.taskIf {
if (start.inputFileChanges.hasChanges) {
refreshBrowsers.value
} else {
Def.task(streams.value.log.info("No backend changes."))
}
}.dependsOn(start).value,
Compile / sourceGenerators := (Compile / sourceGenerators).value :+ Def
.taskDyn[Seq[File]] {
val sjsStage = (clientProject / scalaJSStage).value match {
case Stage.FastOpt => fastOptJS
case Stage.FullOpt => fullOptJS
}
clientProject.value / Compile / sjsStage / writeAssets
}
.taskValue
)
}
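// Illustrative usage sketch only (the project names below are assumptions, not part of
// this repository): in a build definition, the plugin is enabled on the backend project
// and pointed at the Scala.js module via the clientProject setting, after which `start`
// restarts the backend only when its input files changed, refreshes browsers, and adds
// the client's written assets as generated sources.
//
//   lazy val client = project.in(file("client"))           // Scala.js module
//   lazy val server = project.in(file("server"))
//     .enablePlugins(ServerPlugin)
//     .settings(clientProject := client)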
| malliina/sbt-utils | bundler/src/main/scala/com/malliina/bundler/ServerPlugin.scala | Scala | mit | 1,992 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.testsuite.niobuffer
import java.nio._
import org.scalajs.testsuite.niobuffer.ByteBufferFactories._
abstract class ShortBufferTest extends BaseBufferTest {
type Factory = BufferFactory.ShortBufferFactory
class AllocShortBufferFactory extends Factory {
def allocBuffer(capacity: Int): ShortBuffer =
ShortBuffer.allocate(capacity)
}
class WrappedShortBufferFactory extends Factory with BufferFactory.WrappedBufferFactory {
def baseWrap(array: Array[Short]): ShortBuffer =
ShortBuffer.wrap(array)
def baseWrap(array: Array[Short], offset: Int, length: Int): ShortBuffer =
ShortBuffer.wrap(array, offset, length)
}
class ByteBufferShortViewFactory(
byteBufferFactory: BufferFactory.ByteBufferFactory,
order: ByteOrder)
extends Factory with BufferFactory.ByteBufferViewFactory {
require(!byteBufferFactory.createsReadOnly)
def baseAllocBuffer(capacity: Int): ShortBuffer =
byteBufferFactory.allocBuffer(capacity * 2).order(order).asShortBuffer()
}
}
class AllocShortBufferTest extends ShortBufferTest {
val factory: Factory = new AllocShortBufferFactory
}
class WrappedShortBufferTest extends ShortBufferTest {
val factory: Factory = new WrappedShortBufferFactory
}
class WrappedShortReadOnlyBufferTest extends ShortBufferTest {
val factory: Factory =
new WrappedShortBufferFactory with BufferFactory.ReadOnlyBufferFactory
}
class AllocShortSlicedBufferTest extends ShortBufferTest {
val factory: Factory =
new AllocShortBufferFactory with BufferFactory.SlicedBufferFactory
}
// Short views of byte buffers
abstract class ShortViewOfByteBufferTest(
byteBufferFactory: BufferFactory.ByteBufferFactory, order: ByteOrder)
extends ShortBufferTest {
val factory: BufferFactory.ShortBufferFactory =
new ByteBufferShortViewFactory(byteBufferFactory, order)
}
class ShortViewOfAllocByteBufferBigEndianTest
extends ShortViewOfByteBufferTest(new AllocByteBufferFactory, ByteOrder.BIG_ENDIAN)
class ShortViewOfWrappedByteBufferBigEndianTest
extends ShortViewOfByteBufferTest(new WrappedByteBufferFactory, ByteOrder.BIG_ENDIAN)
class ShortViewOfSlicedAllocByteBufferBigEndianTest
extends ShortViewOfByteBufferTest(new SlicedAllocByteBufferFactory, ByteOrder.BIG_ENDIAN)
class ShortViewOfAllocByteBufferLittleEndianTest
extends ShortViewOfByteBufferTest(new AllocByteBufferFactory, ByteOrder.LITTLE_ENDIAN)
class ShortViewOfWrappedByteBufferLittleEndianTest
extends ShortViewOfByteBufferTest(new WrappedByteBufferFactory, ByteOrder.LITTLE_ENDIAN)
class ShortViewOfSlicedAllocByteBufferLittleEndianTest
extends ShortViewOfByteBufferTest(new SlicedAllocByteBufferFactory, ByteOrder.LITTLE_ENDIAN)
// Read only Short views of byte buffers
abstract class ReadOnlyShortViewOfByteBufferTest(
byteBufferFactory: BufferFactory.ByteBufferFactory, order: ByteOrder)
extends ShortBufferTest {
val factory: BufferFactory.ShortBufferFactory = {
new ByteBufferShortViewFactory(byteBufferFactory, order)
with BufferFactory.ReadOnlyBufferFactory
}
}
class ReadOnlyShortViewOfAllocByteBufferBigEndianTest
extends ReadOnlyShortViewOfByteBufferTest(new AllocByteBufferFactory, ByteOrder.BIG_ENDIAN)
class ReadOnlyShortViewOfWrappedByteBufferBigEndianTest
extends ReadOnlyShortViewOfByteBufferTest(new WrappedByteBufferFactory, ByteOrder.BIG_ENDIAN)
class ReadOnlyShortViewOfSlicedAllocByteBufferBigEndianTest
extends ReadOnlyShortViewOfByteBufferTest(new SlicedAllocByteBufferFactory, ByteOrder.BIG_ENDIAN)
class ReadOnlyShortViewOfAllocByteBufferLittleEndianTest
extends ReadOnlyShortViewOfByteBufferTest(new AllocByteBufferFactory, ByteOrder.LITTLE_ENDIAN)
class ReadOnlyShortViewOfWrappedByteBufferLittleEndianTest
extends ReadOnlyShortViewOfByteBufferTest(new WrappedByteBufferFactory, ByteOrder.LITTLE_ENDIAN)
class ReadOnlyShortViewOfSlicedAllocByteBufferLittleEndianTest
extends ReadOnlyShortViewOfByteBufferTest(new SlicedAllocByteBufferFactory, ByteOrder.LITTLE_ENDIAN)
| scala-js/scala-js | test-suite/shared/src/test/scala/org/scalajs/testsuite/niobuffer/ShortBufferTest.scala | Scala | apache-2.0 | 4,344 |
package mot.unit
import org.scalatest.FunSuite
import mot.Context
import mot.Client
import mot.Server
import mot.Address
import mot.util.UnaryPromise
import mot.IncomingResponse
import mot.Message
import java.util.concurrent.TimeUnit
import org.scalatest.BeforeAndAfterAll
import org.scalatest.ConfigMap
import scala.concurrent.duration.Duration
import java.text.SimpleDateFormat
import scala.collection.mutable.ListBuffer
import mot.dump.Event
import mot.dump.Direction
import mot.dump.MotEvent
import mot.IncomingMessage
import java.util.concurrent.Executors
class Test extends FunSuite with BeforeAndAfterAll {
val port = 5000
val target = Address("localhost", port)
val invalidTarget = Address("localhost", port + 1)
val timeoutMs = 6000
val pollMs = 5000
val iterations = 1000
test(s"(request + response) * $iterations") {
val ctx = new Context
val client = new Client(ctx, "client")
val request = Message.fromString("the-request")
val response = Message.fromString("the-response")
def handler(incomingMessage: IncomingMessage): Unit = {
assertResult(request.stringBody)(incomingMessage.message.stringBody)
val responseSuccess = incomingMessage.responder.offer(response)
assert(responseSuccess)
}
val server = new Server(ctx, "server", Executors.newSingleThreadExecutor, handler, bindPort = port)
for (i <- 1 to iterations) {
val promise = new UnaryPromise[IncomingResponse]
val requestSuccess = client.offerRequest(target, request, timeoutMs, promise)
assert(requestSuccess)
val incomingResponse = promise.result(pollMs, TimeUnit.MILLISECONDS).get
val receivedResponse = incomingResponse.message.get
assertResult(response.stringBody)(receivedResponse.stringBody)
}
ctx.close()
}
test(s"invalid request with pessimistic error") {
val ctx = new Context
val pessimisticClient = new Client(ctx, "pessimistic-client", tolerance = Duration(100, TimeUnit.MILLISECONDS))
val request = Message.fromString("the-request")
val firstSuccess = pessimisticClient.offerRequest(invalidTarget, request, timeoutMs, new UnaryPromise[IncomingResponse])
assert(firstSuccess)
Thread.sleep(200)
intercept[Exception] {
pessimisticClient.offerRequest(invalidTarget, request, timeoutMs, new UnaryPromise[IncomingResponse])
}
pessimisticClient.close()
ctx.close()
}
test("dump") {
val ctx = new Context(monitoringPort = 4002, dumpPort = 6002)
val listener = ctx.dumper.listen(bufferSize = 1000)
val client = new Client(ctx, "client-dump")
def handle(incomingMessage: IncomingMessage): Unit = {
assert(incomingMessage != null)
val response = Message.fromString("the-response")
val responseSuccess = incomingMessage.responder.offer(response)
assert(responseSuccess)
}
val server = new Server(ctx, "server-dump", Executors.newSingleThreadExecutor, handle, bindPort = port + 2)
val target = Address("localhost", port + 2)
val request = Message.fromString("the-request")
val promise = new UnaryPromise[IncomingResponse]
val requestSuccess = client.offerRequest(target, request, timeoutMs, promise)
assert(requestSuccess)
val receivedResponse = promise.result(pollMs, TimeUnit.MILLISECONDS).get.message.get
val sdf = new SimpleDateFormat("HH:mm:ss.SSS'Z'")
var event = listener.queue.poll()
client.close()
server.close()
val events = ListBuffer[Event]()
while (event != null) {
events += event
event = listener.queue.poll()
}
ctx.close()
val groups = events.groupBy(_.direction)
val incoming = groups(Direction.Incoming)
val outgoing = groups(Direction.Outgoing)
val incomingMessages = for (MotEvent(conn, dir, msg) <- incoming) yield msg
val outgoingMessages = for (MotEvent(conn, dir, msg) <- outgoing) yield msg
// transform into sets because hello frames can appear in a different order on each side of the connection
assert(incomingMessages.toSet == outgoingMessages.toSet)
}
}
| marianobarrios/mot | src/test/scala/mot/unit/Test.scala | Scala | bsd-2-clause | 4,067 |
package backend.shared
import akka.stream.BidiShape
import akka.stream.io.Framing
import akka.stream.scaladsl.{BidiFlow, Flow, FlowGraph}
import akka.util.ByteString
/**
* Basic TCP Framing stage
*/
object FramingStage {
def apply() = BidiFlow.fromGraph(FlowGraph.create() { b =>
val delimiter = ByteString("\n")
val in = b.add(Framing.delimiter(delimiter, 256, allowTruncation = false))
val out = b.add(Flow[ByteString].map(_ ++ delimiter))
BidiShape.fromFlows(in, out)
})
}
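// Rough usage sketch (assumes a protocol Flow[ByteString, ByteString, _] named `logic`
// and the same pre-2.4 Akka Streams API used above): joining the bidi stage with the
// protocol flow yields a framed flow that can handle a TCP connection, de-framing
// inbound bytes and re-appending the delimiter on the way out.
//
//   val framed = FramingStage().join(logic)
//   connection.handleWith(framed)   // `connection` is an incoming Tcp connection (assumed)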
| intelix/activator-reactive-fx | app/backend/shared/FramingStage.scala | Scala | apache-2.0 | 502 |
import akka.actor.ActorRef
import codebook.runtime.packet.Packet
import codebook.runtime.protocol.{Decoder, Request}
import codebook.runtime.server.UserServiceActorBase
object State extends Enumeration {
val Running = Value
}
class UserServiceActor(val socket:ActorRef) extends UserServiceActorBase[State.Value,Int] {
import codebook.runtime.io.PortConversion._
override def decoder: Decoder = DummyDecoder
when(State.Running) {
case Event(pkt:Packet,_) =>
println(s"PKT:${pkt.length}")
socket.send(pkt)
stay()
}
setupHandlers()
startWith(State.Running,0)
}
| RustyRaven/CodebookRuntime | scala/tlstest/src/main/scala/UserServiceActor.scala | Scala | mit | 601 |
/**
* @author Petri Kivikangas
* @date 1.2.2011
*
*/
package cdl.query
import scala.collection.mutable.StringBuilder
import org.neo4j.cypher.ExecutionResult
import org.slf4j.LoggerFactory
import cdl.objects.{ Arc, Concept, DefinitionLabel, Statement }
import cdl.wrappers.NeoWrapper
import cdl.parser.CDLParser
object CDLQuery {
private val logger = LoggerFactory.getLogger(this.getClass)
def it = this // convenience method for Java
def getQuery(cdlQuery: String): CDLQuery = {
val parsedStatement = CDLParser.parseDocument(cdlQuery).entities(0).asInstanceOf[Statement]
var qvars = List[Concept]()
var concepts = List[Concept]()
parsedStatement.entities.foreach(entity => {
if (entity.asInstanceOf[Concept].uw.startsWith("?")) {
qvars ::= entity.asInstanceOf[Concept]
} else {
concepts ::= entity.asInstanceOf[Concept]
}
})
return new CDLQuery(concepts.reverse, parsedStatement.arcs, qvars.reverse)
}
def getCypher(cdlQuery: String, expansion: Int): String = {
return getQuery(cdlQuery).toCypher(expansion)
}
/**
* Converts CDL query into Cypher query.
*
* TODO: allow use of attributes and inner entities
*
* @param cdlQuery
* @return Cypher query string
*/
def getCypher(cdlQuery: CDLQuery, expansion: Int = 0): String = {
if (cdlQuery.entities.isEmpty) return ""
var cypherQuery = new StringBuilder("START\tx"+cdlQuery.entities(0).rlabel+"=node:concepts(uw='"+cdlQuery.entities.head.uw+"')")
if (cdlQuery.arcs.nonEmpty) {
cypherQuery ++= "\nMATCH"
cdlQuery.arcs.map(arc => "\t"+arc.toCypherString).addString(cypherQuery, ",\n")
}
cypherQuery ++= "\nWHERE"
cdlQuery.entities.map(entity => entity match {
case s: Statement => "" // TODO: allow use of inner entities in the query
case c: Concept => {
/* The level of query expansion */
expansion match {
/* Do exact concept matching */
case 0 => "\tx"+c.rlabel+".uw! = '"+c.uw+"'"
/* Expand query with hyponyms */
case 1 => {
val hyponyms = NeoWrapper.getHyponyms(c)
if (hyponyms.isEmpty) {
logger.info("Didn't find hyponyms for ["+c.uw + ']')
"\\tx"+c.rlabel+".uw! = '"+c.uw+"'"
} else {
val sb = new StringBuilder("\\t(x"+c.rlabel+".uw! = '"+c.uw+"' OR ")
hyponyms.map(h => "x"+c.rlabel+".uw! = '"+h+"'").addString(sb, " OR ")
sb += ')'
sb.toString
}
}
/* TODO: utilize also other semantic relations in the query */
}
}
}).addString(cypherQuery, " AND\n")
if (cdlQuery.queryVars.nonEmpty) {
cypherQuery ++= "\\nRETURN"
cdlQuery.queryVars.map(qvar => "\\tx"+qvar.rlabel).addString(cypherQuery, ",\\n")
} else {
cypherQuery ++= "\\nRETURN"
cdlQuery.entities.map(e => "\\tx"+e.rlabel).addString(cypherQuery, ",\\n")
}
return cypherQuery.toString
}
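/* Illustrative only (the concept label and UW below are made up): for a query with a
 * single non-variable concept whose rlabel is "1" and uw is "dog", no arcs and no query
 * variables, expansion level 0 yields roughly
 *
 *   START  x1=node:concepts(uw='dog')
 *   WHERE  x1.uw! = 'dog'
 *   RETURN  x1
 *
 * (tabs shown as spaces); a MATCH clause is added only when arcs are present, and its
 * exact shape depends on Arc.toCypherString.
 */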
}
class CDLQuery(override val entities: List[Concept], override val arcs: List[Arc], val queryVars: List[Concept])
extends Statement("", DefinitionLabel.Null, entities ::: queryVars, arcs) {
def toCypher(expansion: Int): String = CDLQuery.getCypher(this, expansion)
def execute: ExecutionResult = {
val result = NeoWrapper.query(toCypher(0))
CDLQuery.logger.info("Query returned: "+result)
return result
}
/* TODO: implement maybe
def resultToCDL(result: ExecutionResult): Concept = {
val res = result.javaIterator
var cons = List[Concept]()
while(res.hasNext) {
val c = res.next
val rlabel = c.get(Neo4jCDLWrapper.nodeProperties("rlabel")).asInstanceOf[String]
val uw = c.get(Neo4jCDLWrapper.nodeProperties("uw")).asInstanceOf[String]
cons ::= new Concept(rlabel, uw)
}
cons(0)
}*/
}
| Valafar/cdl-toolkit | src/main/scala/cdl/query/CDLQuery.scala | Scala | gpl-3.0 | 3,876 |
package org.opensplice.mobile.dev.paxos
import nuvo.nio.prelude._
import nuvo.nio._
import nuvo.core.Tuple
import org.opensplice.mobile.dev.common.Uuid
object PaxosSamplesTypeRegistration {
val typeList = List("org.opensplice.mobile.dev.paxos.Propose", "org.opensplice.mobile.dev.paxos.Adopt", "org.opensplice.mobile.dev.paxos.Adopted", "org.opensplice.mobile.dev.paxos.Accept", "org.opensplice.mobile.dev.paxos.Accepted", "org.opensplice.mobile.dev.paxos.Decide", "org.opensplice.mobile.dev.paxos.RejectedVersion", "org.opensplice.mobile.dev.paxos.RejectedEpoch")
var registerTypeOK = { typeList.foreach(nuvo.nio.SerializerCache.registerType(_)); true }
}
object ProposeHelper {
val typeHash = (-5913457992674831318L, 827129285365610529L) // org.opensplice.mobile.dev.paxos.Propose
def serialize(buf: RawBuffer, t: Propose, format: SerializationFormat) {
format match {
case NuvoSF => serializeNuvoSF(buf, t)
}
}
def serializeKey(buf: RawBuffer, t: Propose, format: SerializationFormat) {
format match {
case NuvoSF => serializeNuvoSF(buf, t)
}
}
final def serializeNuvoSF(buf: RawBuffer, t: Propose) {
buf.order(ByteOrder.nativeOrder)
val __nuvoc_startPosition = buf.position
buf.position(__nuvoc_startPosition + 4)
buf.putLong(typeHash._1)
buf.putLong(typeHash._2)
org.opensplice.mobile.dev.common.serializeUuid(buf, t.client)
//buf.putObject(t.client)
buf.putLong(t.statekey)
buf.putInt(t.op)
buf.putInt(t.epoch)
buf.putObject(t.value)
val __nuvoc_serializedDataLength = buf.position - __nuvoc_startPosition - 4
val __nuvoc_MEL = (buf.order.value << 24) | (__nuvoc_serializedDataLength & 0x00ffffff)
buf.order(ByteOrder.littleEndian)
buf.putInt(__nuvoc_startPosition, __nuvoc_MEL)
}
final def serializeKeyNuvoSF(buf: RawBuffer, t: Propose) = ()
def deserialize(buf: RawBuffer, format: SerializationFormat): Propose = {
format match {
case NuvoSF => deserializeNuvoSF(buf)
}
}
def deserializeKey(buf: RawBuffer, format: SerializationFormat) = {
format match {
case NuvoSF => deserializeNuvoSF(buf)
}
}
final def deserializeNuvoSF(buf: RawBuffer): Propose = {
buf.order(LittleEndian)
val __nuvoc_MEL = buf.getInt()
val __nuvoc_endianess = (__nuvoc_MEL >> 24).toByte
val __nuvoc_serializeDataLength = (__nuvoc_MEL & 0x00ffffff)
buf.order(__nuvoc_endianess match { case LittleEndian.value => LittleEndian; case BigEndian.value => BigEndian; case _ => { buf.position(buf.position + __nuvoc_serializeDataLength); throw new RuntimeException("Invalid Format") } })
val __nuvoc_startPosition = buf.position
val wireTypeHash = (buf.getLong, buf.getLong)
if (typeHash != wireTypeHash) throw new RuntimeException("Mismatching TypeHash, you may be trying to deserialize using the wrong helper")
val client = org.opensplice.mobile.dev.common.deserializeUuid(buf)
//val client = buf.getObject[Uuid]()
val statekey = buf.getLong()
val op = buf.getInt()
val epoch = buf.getInt()
val value = buf.getObject[WirePaxosData]()
buf.position(__nuvoc_startPosition + __nuvoc_serializeDataLength)
new Propose(client, statekey, op, epoch, value)
}
def deserializeNoHeaderNuvoSF(buf: RawBuffer): Propose = {
val client = org.opensplice.mobile.dev.common.deserializeUuid(buf)
//val client = buf.getObject[Uuid]()
val statekey = buf.getLong()
val op = buf.getInt()
val epoch = buf.getInt()
val value = buf.getObject[WirePaxosData]()
new Propose(client, statekey, op, epoch, value)
}
final def deserializeKeyNuvoSF(buf: RawBuffer) = ()
final def deserializeKeyNoHeaderNuvoSF(buf: RawBuffer) = ()
}
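// Worked example of the MEL word used by the helpers in this file (the tag byte value
// is illustrative; it actually comes from buf.order.value): for 40 bytes of serialized
// data and an endianness tag of 0x01,
//   MEL = (0x01 << 24) | (40 & 0x00ffffff) = 0x01000028
// so the top 8 bits carry the byte order and the low 24 bits the payload length,
// which is exactly what deserializeNuvoSF unpacks with `>> 24` and `& 0x00ffffff`.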
object AdoptHelper {
val typeHash = (4064821272009572339L, 1309856541390262730L) // org.opensplice.mobile.dev.paxos.Adopt
def serialize(buf: RawBuffer, t: Adopt, format: SerializationFormat) {
format match {
case NuvoSF => serializeNuvoSF(buf, t)
}
}
def serializeKey(buf: RawBuffer, t: Adopt, format: SerializationFormat) {
format match {
case NuvoSF => serializeNuvoSF(buf, t)
}
}
final def serializeNuvoSF(buf: RawBuffer, t: Adopt) {
buf.order(ByteOrder.nativeOrder)
val __nuvoc_startPosition = buf.position
buf.position(__nuvoc_startPosition + 4)
buf.putLong(typeHash._1)
buf.putLong(typeHash._2)
buf.putLong(t.statekey)
buf.putInt(t.serialNumber)
org.opensplice.mobile.dev.common.serializeUuid(buf, t.proposer)
//buf.putObject(t.proposer)
buf.putInt(t.epoch)
val __nuvoc_serializedDataLength = buf.position - __nuvoc_startPosition - 4
val __nuvoc_MEL = (buf.order.value << 24) | (__nuvoc_serializedDataLength & 0x00ffffff)
buf.order(ByteOrder.littleEndian)
buf.putInt(__nuvoc_startPosition, __nuvoc_MEL)
}
final def serializeKeyNuvoSF(buf: RawBuffer, t: Adopt) = ()
def deserialize(buf: RawBuffer, format: SerializationFormat): Adopt = {
format match {
case NuvoSF => deserializeNuvoSF(buf)
}
}
def deserializeKey(buf: RawBuffer, format: SerializationFormat) = {
format match {
case NuvoSF => deserializeNuvoSF(buf)
}
}
final def deserializeNuvoSF(buf: RawBuffer): Adopt = {
buf.order(LittleEndian)
val __nuvoc_MEL = buf.getInt()
val __nuvoc_endianess = (__nuvoc_MEL >> 24).toByte
val __nuvoc_serializeDataLength = (__nuvoc_MEL & 0x00ffffff)
buf.order(__nuvoc_endianess match { case LittleEndian.value => LittleEndian; case BigEndian.value => BigEndian; case _ => { buf.position(buf.position + __nuvoc_serializeDataLength); throw new RuntimeException("Invalid Format") } })
val __nuvoc_startPosition = buf.position
val wireTypeHash = (buf.getLong, buf.getLong)
if (typeHash != wireTypeHash) throw new RuntimeException("Mismatching TypeHash, you may be trying to deserialize using the wrong helper")
val statekey = buf.getLong()
val serialNumber = buf.getInt()
val proposer = org.opensplice.mobile.dev.common.deserializeUuid(buf)
//val proposer = buf.getObject[Uuid]()
val epoch = buf.getInt()
buf.position(__nuvoc_startPosition + __nuvoc_serializeDataLength)
new Adopt(statekey, serialNumber, proposer, epoch)
}
def deserializeNoHeaderNuvoSF(buf: RawBuffer): Adopt = {
val statekey = buf.getLong()
val serialNumber = buf.getInt()
val proposer = org.opensplice.mobile.dev.common.deserializeUuid(buf)
//val proposer = buf.getObject[Uuid]()
val epoch = buf.getInt()
new Adopt(statekey, serialNumber, proposer, epoch)
}
final def deserializeKeyNuvoSF(buf: RawBuffer) = ()
final def deserializeKeyNoHeaderNuvoSF(buf: RawBuffer) = ()
}
object AdoptedHelper {
val typeHash = (331608093171274555L, -3986708890607512155L) // org.opensplice.mobile.dev.paxos.Adopted
def serialize(buf: RawBuffer, t: Adopted, format: SerializationFormat) {
format match {
case NuvoSF => serializeNuvoSF(buf, t)
}
}
def serializeKey(buf: RawBuffer, t: Adopted, format: SerializationFormat) {
format match {
case NuvoSF => serializeNuvoSF(buf, t)
}
}
final def serializeNuvoSF(buf: RawBuffer, t: Adopted) {
buf.order(ByteOrder.nativeOrder)
val __nuvoc_startPosition = buf.position
buf.position(__nuvoc_startPosition + 4)
buf.putLong(typeHash._1)
buf.putLong(typeHash._2)
buf.putLong(t.statekey)
org.opensplice.mobile.dev.common.serializeUuid(buf, t.acceptor)
//buf.putObject(t.acceptor)
buf.putInt(t.serialNumber)
org.opensplice.mobile.dev.common.serializeUuid(buf, t.proposer)
//buf.putObject(t.proposer)
buf.putInt(t.epoch)
buf.putObject(t.value)
buf.putInt(t.previousSerialNumber)
org.opensplice.mobile.dev.common.serializeUuid(buf, t.previousProposer)
//buf.putObject(t.previousProposer)
val __nuvoc_serializedDataLength = buf.position - __nuvoc_startPosition - 4
val __nuvoc_MEL = (buf.order.value << 24) | (__nuvoc_serializedDataLength & 0x00ffffff)
buf.order(ByteOrder.littleEndian)
buf.putInt(__nuvoc_startPosition, __nuvoc_MEL)
}
final def serializeKeyNuvoSF(buf: RawBuffer, t: Adopted) = ()
def deserialize(buf: RawBuffer, format: SerializationFormat): Adopted = {
format match {
case NuvoSF => deserializeNuvoSF(buf)
}
}
def deserializeKey(buf: RawBuffer, format: SerializationFormat) = {
format match {
case NuvoSF => deserializeNuvoSF(buf)
}
}
final def deserializeNuvoSF(buf: RawBuffer): Adopted = {
buf.order(LittleEndian)
val __nuvoc_MEL = buf.getInt()
val __nuvoc_endianess = (__nuvoc_MEL >> 24).toByte
val __nuvoc_serializeDataLength = (__nuvoc_MEL & 0x00ffffff)
buf.order(__nuvoc_endianess match { case LittleEndian.value => LittleEndian; case BigEndian.value => BigEndian; case _ => { buf.position(buf.position + __nuvoc_serializeDataLength); throw new RuntimeException("Invalid Format") } })
val __nuvoc_startPosition = buf.position
val wireTypeHash = (buf.getLong, buf.getLong)
if (typeHash != wireTypeHash) throw new RuntimeException("Mismatching TypeHash, you may be trying to deserialize using the wrong helper")
val statekey = buf.getLong()
val acceptor = org.opensplice.mobile.dev.common.deserializeUuid(buf)
//acceptor = buf.getObject[Uuid]()
val serialNumber = buf.getInt()
val proposer = org.opensplice.mobile.dev.common.deserializeUuid(buf)
//val proposer = buf.getObject[Uuid]()
val epoch = buf.getInt()
val value = buf.getObject[WirePaxosData]()
val previousSerialNumber = buf.getInt()
val previousProposer = org.opensplice.mobile.dev.common.deserializeUuid(buf)
//val previousProposer = buf.getObject[Uuid]()
buf.position(__nuvoc_startPosition + __nuvoc_serializeDataLength)
new Adopted(statekey, acceptor, serialNumber, proposer, epoch, value, previousSerialNumber, previousProposer)
}
def deserializeNoHeaderNuvoSF(buf: RawBuffer): Adopted = {
val statekey = buf.getLong()
val acceptor = org.opensplice.mobile.dev.common.deserializeUuid(buf)
//val acceptor = buf.getObject[Uuid]()
val serialNumber = buf.getInt()
val proposer = org.opensplice.mobile.dev.common.deserializeUuid(buf)
//val proposer = buf.getObject[Uuid]()
val epoch = buf.getInt()
val value = buf.getObject[WirePaxosData]()
val previousSerialNumber = buf.getInt()
val previousProposer = org.opensplice.mobile.dev.common.deserializeUuid(buf)
//val previousProposer = buf.getObject[Uuid]()
new Adopted(statekey, acceptor, serialNumber, proposer, epoch, value, previousSerialNumber, previousProposer)
}
final def deserializeKeyNuvoSF(buf: RawBuffer) = ()
final def deserializeKeyNoHeaderNuvoSF(buf: RawBuffer) = ()
}
object AcceptHelper {
val typeHash = (-5312736960000419235L, 8905248054417495962L) // org.opensplice.mobile.dev.paxos.Accept
def serialize(buf: RawBuffer, t: Accept, format: SerializationFormat) {
format match {
case NuvoSF => serializeNuvoSF(buf, t)
}
}
def serializeKey(buf: RawBuffer, t: Accept, format: SerializationFormat) {
format match {
case NuvoSF => serializeNuvoSF(buf, t)
}
}
final def serializeNuvoSF(buf: RawBuffer, t: Accept) {
buf.order(ByteOrder.nativeOrder)
val __nuvoc_startPosition = buf.position
buf.position(__nuvoc_startPosition + 4)
buf.putLong(typeHash._1)
buf.putLong(typeHash._2)
buf.putLong(t.statekey)
buf.putInt(t.serialNumber)
org.opensplice.mobile.dev.common.serializeUuid(buf, t.proposer)
//buf.putObject(t.proposer)
buf.putInt(t.epoch)
buf.putObject(t.value)
val __nuvoc_serializedDataLength = buf.position - __nuvoc_startPosition - 4
val __nuvoc_MEL = (buf.order.value << 24) | (__nuvoc_serializedDataLength & 0x00ffffff)
buf.order(ByteOrder.littleEndian)
buf.putInt(__nuvoc_startPosition, __nuvoc_MEL)
}
final def serializeKeyNuvoSF(buf: RawBuffer, t: Accept) = ()
def deserialize(buf: RawBuffer, format: SerializationFormat): Accept = {
format match {
case NuvoSF => deserializeNuvoSF(buf)
}
}
def deserializeKey(buf: RawBuffer, format: SerializationFormat) = {
format match {
case NuvoSF => deserializeNuvoSF(buf)
}
}
final def deserializeNuvoSF(buf: RawBuffer): Accept = {
buf.order(LittleEndian)
val __nuvoc_MEL = buf.getInt()
val __nuvoc_endianess = (__nuvoc_MEL >> 24).toByte
val __nuvoc_serializeDataLength = (__nuvoc_MEL & 0x00ffffff)
buf.order(__nuvoc_endianess match { case LittleEndian.value => LittleEndian; case BigEndian.value => BigEndian; case _ => { buf.position(buf.position + __nuvoc_serializeDataLength); throw new RuntimeException("Invalid Format") } })
val __nuvoc_startPosition = buf.position
val wireTypeHash = (buf.getLong, buf.getLong)
if (typeHash != wireTypeHash) throw new RuntimeException("Mismatching TypeHash, you may be trying to deserialize using the wrong helper")
val statekey = buf.getLong()
val serialNumber = buf.getInt()
val proposer = org.opensplice.mobile.dev.common.deserializeUuid(buf)
//val proposer = buf.getObject[Uuid]()
val epoch = buf.getInt()
val value = buf.getObject[WirePaxosData]()
buf.position(__nuvoc_startPosition + __nuvoc_serializeDataLength)
new Accept(statekey, serialNumber, proposer, epoch, value)
}
def deserializeNoHeaderNuvoSF(buf: RawBuffer): Accept = {
val statekey = buf.getLong()
val serialNumber = buf.getInt()
val proposer = org.opensplice.mobile.dev.common.deserializeUuid(buf)
//val proposer = buf.getObject[Uuid]()
val epoch = buf.getInt()
val value = buf.getObject[WirePaxosData]()
new Accept(statekey, serialNumber, proposer, epoch, value)
}
final def deserializeKeyNuvoSF(buf: RawBuffer) = ()
final def deserializeKeyNoHeaderNuvoSF(buf: RawBuffer) = ()
}
object AcceptedHelper {
val typeHash = (3952933611115212986L, 2100986163802431951L) // org.opensplice.mobile.dev.paxos.Accepted
def serialize(buf: RawBuffer, t: Accepted, format: SerializationFormat) {
format match {
case NuvoSF => serializeNuvoSF(buf, t)
}
}
def serializeKey(buf: RawBuffer, t: Accepted, format: SerializationFormat) {
format match {
case NuvoSF => serializeNuvoSF(buf, t)
}
}
final def serializeNuvoSF(buf: RawBuffer, t: Accepted) {
buf.order(ByteOrder.nativeOrder)
val __nuvoc_startPosition = buf.position
buf.position(__nuvoc_startPosition + 4)
buf.putLong(typeHash._1)
buf.putLong(typeHash._2)
buf.putLong(t.statekey)
org.opensplice.mobile.dev.common.serializeUuid(buf, t.acceptor)
//buf.putObject(t.acceptor)
buf.putInt(t.serialNumber)
org.opensplice.mobile.dev.common.serializeUuid(buf, t.proposer)
//buf.putObject(t.proposer)
buf.putInt(t.epoch)
val __nuvoc_serializedDataLength = buf.position - __nuvoc_startPosition - 4
val __nuvoc_MEL = (buf.order.value << 24) | (__nuvoc_serializedDataLength & 0x00ffffff)
buf.order(ByteOrder.littleEndian)
buf.putInt(__nuvoc_startPosition, __nuvoc_MEL)
}
final def serializeKeyNuvoSF(buf: RawBuffer, t: Accepted) = ()
def deserialize(buf: RawBuffer, format: SerializationFormat): Accepted = {
format match {
case NuvoSF => deserializeNuvoSF(buf)
}
}
def deserializeKey(buf: RawBuffer, format: SerializationFormat) = {
format match {
case NuvoSF => deserializeNuvoSF(buf)
}
}
final def deserializeNuvoSF(buf: RawBuffer): Accepted = {
buf.order(LittleEndian)
val __nuvoc_MEL = buf.getInt()
val __nuvoc_endianess = (__nuvoc_MEL >> 24).toByte
val __nuvoc_serializeDataLength = (__nuvoc_MEL & 0x00ffffff)
buf.order(__nuvoc_endianess match { case LittleEndian.value => LittleEndian; case BigEndian.value => BigEndian; case _ => { buf.position(buf.position + __nuvoc_serializeDataLength); throw new RuntimeException("Invalid Format") } })
val __nuvoc_startPosition = buf.position
val wireTypeHash = (buf.getLong, buf.getLong)
if (typeHash != wireTypeHash) throw new RuntimeException("Mismatching TypeHash, you may be trying to deserialize using the wrong helper")
val statekey = buf.getLong()
val acceptor = org.opensplice.mobile.dev.common.deserializeUuid(buf)
//val acceptor = buf.getObject[Uuid]()
val serialNumber = buf.getInt()
val proposer = org.opensplice.mobile.dev.common.deserializeUuid(buf)
//val proposer = buf.getObject[Uuid]()
val epoch = buf.getInt()
buf.position(__nuvoc_startPosition + __nuvoc_serializeDataLength)
new Accepted(statekey, acceptor, serialNumber, proposer, epoch)
}
def deserializeNoHeaderNuvoSF(buf: RawBuffer): Accepted = {
val statekey = buf.getLong()
val acceptor = org.opensplice.mobile.dev.common.deserializeUuid(buf)
//val acceptor = buf.getObject[Uuid]()
val serialNumber = buf.getInt()
val proposer = org.opensplice.mobile.dev.common.deserializeUuid(buf)
//val proposer = buf.getObject[Uuid]()
val epoch = buf.getInt()
new Accepted(statekey, acceptor, serialNumber, proposer, epoch)
}
final def deserializeKeyNuvoSF(buf: RawBuffer) = ()
final def deserializeKeyNoHeaderNuvoSF(buf: RawBuffer) = ()
}
object DecideHelper {
val typeHash = (3836235823849512753L, -6207857136567549040L) // org.opensplice.mobile.dev.paxos.Decide
def serialize(buf: RawBuffer, t: Decide, format: SerializationFormat) {
format match {
case NuvoSF => serializeNuvoSF(buf, t)
}
}
def serializeKey(buf: RawBuffer, t: Decide, format: SerializationFormat) {
format match {
case NuvoSF => serializeNuvoSF(buf, t)
}
}
final def serializeNuvoSF(buf: RawBuffer, t: Decide) {
buf.order(ByteOrder.nativeOrder)
val __nuvoc_startPosition = buf.position
buf.position(__nuvoc_startPosition + 4)
buf.putLong(typeHash._1)
buf.putLong(typeHash._2)
buf.putLong(t.statekey)
buf.putInt(t.op)
buf.putInt(t.epoch)
buf.putObject(t.value)
val __nuvoc_serializedDataLength = buf.position - __nuvoc_startPosition - 4
val __nuvoc_MEL = (buf.order.value << 24) | (__nuvoc_serializedDataLength & 0x00ffffff)
buf.order(ByteOrder.littleEndian)
buf.putInt(__nuvoc_startPosition, __nuvoc_MEL)
}
final def serializeKeyNuvoSF(buf: RawBuffer, t: Decide) = ()
def deserialize(buf: RawBuffer, format: SerializationFormat): Decide = {
format match {
case NuvoSF => deserializeNuvoSF(buf)
}
}
def deserializeKey(buf: RawBuffer, format: SerializationFormat) = {
format match {
case NuvoSF => deserializeNuvoSF(buf)
}
}
final def deserializeNuvoSF(buf: RawBuffer): Decide = {
buf.order(LittleEndian)
val __nuvoc_MEL = buf.getInt()
val __nuvoc_endianess = (__nuvoc_MEL >> 24).toByte
val __nuvoc_serializeDataLength = (__nuvoc_MEL & 0x00ffffff)
buf.order(__nuvoc_endianess match { case LittleEndian.value => LittleEndian; case BigEndian.value => BigEndian; case _ => { buf.position(buf.position + __nuvoc_serializeDataLength); throw new RuntimeException("Invalid Format") } })
val __nuvoc_startPosition = buf.position
val wireTypeHash = (buf.getLong, buf.getLong)
if (typeHash != wireTypeHash) throw new RuntimeException("Mismatching TypeHash, you may be trying to deserialize using the wrong helper")
val statekey = buf.getLong()
val op = buf.getInt()
val epoch = buf.getInt()
val value = buf.getObject[WirePaxosData]()
buf.position(__nuvoc_startPosition + __nuvoc_serializeDataLength)
new Decide(statekey, op, epoch, value)
}
def deserializeNoHeaderNuvoSF(buf: RawBuffer): Decide = {
val statekey = buf.getLong()
val op = buf.getInt()
val epoch = buf.getInt()
val value = buf.getObject[WirePaxosData]()
new Decide(statekey, op, epoch, value)
}
final def deserializeKeyNuvoSF(buf: RawBuffer) = ()
final def deserializeKeyNoHeaderNuvoSF(buf: RawBuffer) = ()
}
object RejectedVersionHelper {
val typeHash = (-8206506404484259573L, -2226406451472359675L) // org.opensplice.mobile.dev.paxos.RejectedVersion
def serialize(buf: RawBuffer, t: RejectedVersion, format: SerializationFormat) {
format match {
case NuvoSF => serializeNuvoSF(buf, t)
}
}
def serializeKey(buf: RawBuffer, t: RejectedVersion, format: SerializationFormat) {
format match {
case NuvoSF => serializeNuvoSF(buf, t)
}
}
final def serializeNuvoSF(buf: RawBuffer, t: RejectedVersion) {
buf.order(ByteOrder.nativeOrder)
val __nuvoc_startPosition = buf.position
buf.position(__nuvoc_startPosition + 4)
buf.putLong(typeHash._1)
buf.putLong(typeHash._2)
buf.putLong(t.statekey)
buf.putObject(t.acceptor)
buf.putInt(t.serialNumber)
buf.putObject(t.proposer)
buf.putInt(t.epoch)
val __nuvoc_serializedDataLength = buf.position - __nuvoc_startPosition - 4
val __nuvoc_MEL = (buf.order.value << 24) | (__nuvoc_serializedDataLength & 0x00ffffff)
buf.order(ByteOrder.littleEndian)
buf.putInt(__nuvoc_startPosition, __nuvoc_MEL)
}
final def serializeKeyNuvoSF(buf: RawBuffer, t: RejectedVersion) = ()
def deserialize(buf: RawBuffer, format: SerializationFormat): RejectedVersion = {
format match {
case NuvoSF => deserializeNuvoSF(buf)
}
}
def deserializeKey(buf: RawBuffer, format: SerializationFormat) = {
format match {
case NuvoSF => deserializeNuvoSF(buf)
}
}
final def deserializeNuvoSF(buf: RawBuffer): RejectedVersion = {
buf.order(LittleEndian)
val __nuvoc_MEL = buf.getInt()
val __nuvoc_endianess = (__nuvoc_MEL >> 24).toByte
val __nuvoc_serializeDataLength = (__nuvoc_MEL & 0x00ffffff)
buf.order(__nuvoc_endianess match { case LittleEndian.value => LittleEndian; case BigEndian.value => BigEndian; case _ => { buf.position(buf.position + __nuvoc_serializeDataLength); throw new RuntimeException("Invalid Format") } })
val __nuvoc_startPosition = buf.position
val wireTypeHash = (buf.getLong, buf.getLong)
if (typeHash != wireTypeHash) throw new RuntimeException("Mismatching TypeHash, you may be trying to deserialize using the wrong helper")
val statekey = buf.getLong()
val acceptor = buf.getObject[Uuid]()
val serialNumber = buf.getInt()
val proposer = buf.getObject[Uuid]()
val epoch = buf.getInt()
buf.position(__nuvoc_startPosition + __nuvoc_serializeDataLength)
new RejectedVersion(statekey, acceptor, serialNumber, proposer, epoch)
}
def deserializeNoHeaderNuvoSF(buf: RawBuffer): RejectedVersion = {
val statekey = buf.getLong()
val acceptor = buf.getObject[Uuid]()
val serialNumber = buf.getInt()
val proposer = buf.getObject[Uuid]()
val epoch = buf.getInt()
new RejectedVersion(statekey, acceptor, serialNumber, proposer, epoch)
}
final def deserializeKeyNuvoSF(buf: RawBuffer) = ()
final def deserializeKeyNoHeaderNuvoSF(buf: RawBuffer) = ()
}
object RejectedEpochHelper {
val typeHash = (4654629398526763083L, -825127635025370105L) // org.opensplice.mobile.dev.paxos.RejectedEpoch
def serialize(buf: RawBuffer, t: RejectedEpoch, format: SerializationFormat) {
format match {
case NuvoSF => serializeNuvoSF(buf, t)
}
}
def serializeKey(buf: RawBuffer, t: RejectedEpoch, format: SerializationFormat) {
format match {
case NuvoSF => serializeNuvoSF(buf, t)
}
}
final def serializeNuvoSF(buf: RawBuffer, t: RejectedEpoch) {
buf.order(ByteOrder.nativeOrder)
val __nuvoc_startPosition = buf.position
buf.position(__nuvoc_startPosition + 4)
buf.putLong(typeHash._1)
buf.putLong(typeHash._2)
buf.putLong(t.statekey)
buf.putInt(t.epoch)
val __nuvoc_serializedDataLength = buf.position - __nuvoc_startPosition - 4
val __nuvoc_MEL = (buf.order.value << 24) | (__nuvoc_serializedDataLength & 0x00ffffff)
buf.order(ByteOrder.littleEndian)
buf.putInt(__nuvoc_startPosition, __nuvoc_MEL)
}
final def serializeKeyNuvoSF(buf: RawBuffer, t: RejectedEpoch) = ()
def deserialize(buf: RawBuffer, format: SerializationFormat): RejectedEpoch = {
format match {
case NuvoSF => deserializeNuvoSF(buf)
}
}
def deserializeKey(buf: RawBuffer, format: SerializationFormat) = {
format match {
case NuvoSF => deserializeNuvoSF(buf)
}
}
final def deserializeNuvoSF(buf: RawBuffer): RejectedEpoch = {
buf.order(LittleEndian)
val __nuvoc_MEL = buf.getInt()
val __nuvoc_endianess = (__nuvoc_MEL >> 24).toByte
val __nuvoc_serializeDataLength = (__nuvoc_MEL & 0x00ffffff)
buf.order(__nuvoc_endianess match { case LittleEndian.value => LittleEndian; case BigEndian.value => BigEndian; case _ => { buf.position(buf.position + __nuvoc_serializeDataLength); throw new RuntimeException("Invalid Format") } })
val __nuvoc_startPosition = buf.position
val wireTypeHash = (buf.getLong, buf.getLong)
if (typeHash != wireTypeHash) throw new RuntimeException("Mismatching TypeHash, you may be trying to deserialize using the wrong helper")
val statekey = buf.getLong()
val epoch = buf.getInt()
buf.position(__nuvoc_startPosition + __nuvoc_serializeDataLength)
new RejectedEpoch(statekey, epoch)
}
def deserializeNoHeaderNuvoSF(buf: RawBuffer): RejectedEpoch = {
val statekey = buf.getLong()
val epoch = buf.getInt()
new RejectedEpoch(statekey, epoch)
}
final def deserializeKeyNuvoSF(buf: RawBuffer) = ()
final def deserializeKeyNoHeaderNuvoSF(buf: RawBuffer) = ()
}
| levitha/levitha | src/main/generated/org/opensplice/mobile/dev/paxos/PaxosSamplesHelper.scala | Scala | apache-2.0 | 25,555 |
package controllers
import javax.inject.Inject
import play.api.mvc.{AbstractController, ControllerComponents}
class HomeController @Inject()(cc: ControllerComponents) extends AbstractController(cc) {
def index() = Action {
Ok(views.html.index())
}
}
| play2-maven-plugin/play2-maven-test-projects | play28/scala/streaming-example/app/controllers/HomeController.scala | Scala | apache-2.0 | 263 |
package visceljs.connection
import org.scalajs.dom
import org.scalajs.dom.ServiceWorkerContainer
import rescala.default.{Events, Signal, _}
import viscel.shared.Log
import scala.concurrent.Future
import scala.scalajs.concurrent.JSExecutionContext.Implicits.queue
import scala.scalajs.js
import scala.scalajs.js.timers._
import scala.util.{Failure, Success}
object ServiceWorker {
def getDefined[T](ts: T*): Option[T] = ts.find(v => v != null && !scalajs.js.isUndefined(v))
val serviceWorkerOption: Option[ServiceWorkerContainer] = {
val workerSupported_? = dom.window.navigator.serviceWorker
getDefined(workerSupported_?)
}
def register(): Signal[String] = {
transaction() { implicit at =>
Events.fromCallback[String] { cb =>
serviceWorkerOption match {
case None =>
// this is a bit sad, we can not call cb within the starting transaction,
// as that forces a new transaction …
setTimeout(0) { cb("none") }
case Some(serviceworker) =>
serviceworker.register("serviceworker.js").toFuture.onComplete {
case Success(registration) =>
cb("registered")
registration.addEventListener(
"updatefound",
(event: js.Any) => {
val newWorker = registration.installing
newWorker.addEventListener(
"statechange",
(event: js.Any) => {
cb(newWorker.state)
}
)
}
)
case Failure(error) =>
Log.JS.error(s"serviceworker failed", error)
cb("failed")
}
}
}.event.latest("init")
}
}
def unregister(): Future[List[Boolean]] = {
scribe.warn("trying to unregister service worker")
val res = serviceWorkerOption.map { swc =>
swc.getRegistrations().toFuture.flatMap { registrations =>
Future.sequence(registrations.toList.map { sw =>
sw.unregister().toFuture
})
}
}.getOrElse(Future.successful(List()))
res.onComplete {
case Failure(error) =>
Log.JS.error(s"could not unregister serviceworker", error)
case _ =>
}
res
}
}
| rmgk/viscel | code/js/src/main/scala/visceljs/connection/ServiceWorker.scala | Scala | agpl-3.0 | 2,336 |
package com.mesosphere.cosmos.label.v1.circe
import com.mesosphere.cosmos.label
import com.mesosphere.universe.v2.circe.Decoders._
import io.circe.Decoder
import io.circe.generic.semiauto._
object Decoders {
implicit val decodeLabelV1PackageMetadata: Decoder[label.v1.model.PackageMetadata] = {
deriveFor[label.v1.model.PackageMetadata].decoder
}
}
| movicha/cosmos | cosmos-json/src/main/scala/com/mesosphere/cosmos/label/v1/circe/Decoders.scala | Scala | apache-2.0 | 361 |
object One {
val value = 1
}
| mscoutermarsh/exercism_coveralls | test/fixtures/scala/one/example.scala | Scala | agpl-3.0 | 31 |
package net.sansa_stack.examples.flink.rdf
import net.sansa_stack.rdf.flink.io._
import org.apache.flink.api.scala.ExecutionEnvironment
object TripleWriter {
def main(args: Array[String]) {
parser.parse(args, Config()) match {
case Some(config) =>
run(config.in, config.out)
case None =>
println(parser.usage)
}
}
def run(input: String, output: String): Unit = {
println("======================================")
println("| Triple writer example |")
println("======================================")
val env = ExecutionEnvironment.getExecutionEnvironment
val triples = env.rdf(Lang.NTRIPLES)(input)
triples.saveAsNTriplesFile(output)
env.execute(s"Triple writer example ($input)")
}
case class Config(
in: String = "",
out: String = "")
val parser = new scopt.OptionParser[Config]("Triple writer example ") {
head("Triple writer example ")
opt[String]('i', "input").required().valueName("<path>").
action((x, c) => c.copy(in = x)).
text("path to file that contains the data (in N-Triples format)")
opt[String]('o', "out").required().valueName("<directory>").
action((x, c) => c.copy(out = x)).
text("the output directory")
help("help").text("prints this usage text")
}
}
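// Example invocation (paths are placeholders): the scopt options defined above translate to
//   TripleWriter -i /path/to/input.nt -o /path/to/output-dir
// which reads the N-Triples file with the Flink RDF loader and writes it back out.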
| SANSA-Stack/SANSA-RDF | sansa-examples/sansa-examples-flink/src/main/scala/net/sansa_stack/examples/flink/rdf/TripleWriter.scala | Scala | apache-2.0 | 1,325 |
package ru.vsu.amm.problems.entities
import main.ru.vsu.amm.problems.entities.{IntMatrix, Matrix}
import scala.collection.mutable
import scala.util.Try
/**
* Created by FManukovskiy on 14.11.17.
*/
class AssignmentMatrixBad(matrix: Seq[Seq[Int]]) extends AssignmentMatrix(matrix) {
protected def crossRows(matrix: Matrix[(Int, Int)], numberOfCrossings: Int): Matrix[(Int, Int)] = {
// println("row") //todo
val rowIndex = matrix.data
.indices
.find(matrix.row(_).count(_ == (0, 0)) == numberOfCrossings)
.get
// println(rowIndex)
val indices = matrix.data
.indices
.filter(i => matrix.row(rowIndex).slice(i, matrix.size).indexOf((0, 0)) == 0)
// matrix.data
// .indices
// .foreach(i => println(matrix.row(rowIndex).slice(i, matrix.size).indexOf((0, 0))))
// println(indices)
var resultMatrix = matrix
for (ind <- indices) {
resultMatrix = resultMatrix
.mapCol(ind, { case (x: Int, y: Int) => (x, y + 1) })
}
// println(resultMatrix)
resultMatrix
}
protected def crossColumns(matrix: Matrix[(Int, Int)], numberOfCrossings: Int): Matrix[(Int, Int)] = {
// println("col") //todo
val colIndex = matrix.data
.indices
.find(matrix.col(_).count(_ == (0, 0)) == numberOfCrossings)
.get
val indices = matrix.data
.indices
.filter(i => matrix.col(colIndex).slice(i, matrix.size).indexOf((0, 0)) == 0)
var resultMatrix = matrix
for (ind <- indices) {
resultMatrix = resultMatrix
.mapRow(ind, { case (x: Int, y: Int) => (x, y + 1) })
}
resultMatrix
}
protected lazy val crossAndCountIndependentZeros: (Matrix[(Int, Int)], Int) = {
var matrix = reduced.map((_, 0))
var count = 0
var continueFor = true
var continueWhile = true
while (count != matrix.size && continueWhile) {
continueFor = true
for (numberOfCrossings <- 1 to matrix.size if continueFor) {
// println(matrix) // todo
// println(s"$numberOfCrossings -- $count")
val a = Try(crossRows(matrix, numberOfCrossings))
if (a.isSuccess) {
matrix = a.get
count += numberOfCrossings
continueFor = false
} else {
val a = Try(crossColumns(matrix, numberOfCrossings))
if (a.isSuccess) {
matrix = a.get
count += numberOfCrossings
continueFor = false
} else {
continueFor = true
}
}
if (numberOfCrossings == matrix.size) {
continueWhile = false
}
}
}
(matrix, count)
}
lazy val crossed: Matrix[(Int, Int)] = crossAndCountIndependentZeros._1
lazy val independentZerosNumber: Int = crossAndCountIndependentZeros._2
protected def crossRowsWithIndices(matrix: Matrix[(Int, Int)], numberOfCrossings: Int): (Matrix[(Int, Int)], (Int, Int)) = {
// println("row") //todo
val rowIndex = matrix.data
.indices
.find(matrix.row(_).count(_ == (0, 0)) == numberOfCrossings)
.get
val colIndex = matrix.data
.indices
.find(i => matrix.row(rowIndex).slice(i, matrix.size).indexOf((0, 0)) == 0)
.get
(matrix
.mapCol(colIndex, { case (x: Int, y: Int) => (x, y + 1) })
.mapRow(rowIndex, { case (x: Int, y: Int) => (x, y + 1) }),
(rowIndex, colIndex))
}
protected def crossColumnsWithIndices(matrix: Matrix[(Int, Int)], numberOfCrossings: Int): (Matrix[(Int, Int)], (Int, Int)) = {
// println("col") //todo
val colIndex = matrix.data
.indices
.find(matrix.col(_).count(_ == (0, 0)) == numberOfCrossings)
.get
val rowIndex = matrix.data
.indices
.find(i => matrix.col(colIndex).slice(i, matrix.size).indexOf((0, 0)) == 0)
.get
(matrix
.mapCol(colIndex, { case (x: Int, y: Int) => (x, y + 1) })
.mapRow(rowIndex, { case (x: Int, y: Int) => (x, y + 1) }),
(rowIndex, colIndex))
}
protected lazy val independentZerosIndices: Seq[(Int, Int)] = {
var matrix = reduced.map((_, 0))
val zerosIndices = new mutable.ListBuffer[(Int, Int)]()
var count = 0
var continueFor = true
var continueWhile = true
while (count != matrix.size && continueWhile) {
continueFor = true
for (numberOfCrossings <- 1 to matrix.size if continueFor) {
// println(matrix) // todo
// println(s"$numberOfCrossings -- $count")
val a = Try(crossRowsWithIndices(matrix, numberOfCrossings))
if (a.isSuccess) {
a.get match {
case (m, indices) =>
matrix = m
zerosIndices.append(indices)
}
count += 1
continueFor = false
} else {
val a = Try(crossColumnsWithIndices(matrix, numberOfCrossings))
if (a.isSuccess) {
a.get match {
case (m, indices) =>
matrix = m
zerosIndices.append(indices)
}
count += 1
continueFor = false
} else {
continueFor = true
}
}
if (numberOfCrossings == matrix.size) {
continueWhile = false
}
}
}
zerosIndices
}
lazy val withNewZeros: IntMatrix = {
val min = crossed.minBy({
case (x, 0) => x
case (_, _) => Int.MaxValue
})._1
crossed.map({
case (value, count) if count == 0 =>
value - min
case (value, count) if count == 1 =>
value
case (value, count) if count == 2 =>
value + min
})
}
lazy val solvedIndices: Seq[(Int, Int)] = {
var m: AssignmentMatrix = this.reduced
while (m.independentZerosNumber != m.size && m != m.withNewZeros) {
m = m.withNewZeros
// println(s"${m.independentZerosNumber}-${m.size}")
// println(m)
// println()
}
m.independentZerosIndices
}
lazy val solved: IntMatrix = Matrix.matrixOfIndices(size)
.map(ind => if (solvedIndices.contains(ind)) 1 else 0)
}
| FeodorM/amm_code | problems/assignment-problem/src/main/ru/vsu/amm/problems/entities/AssignmentMatrixBad.scala | Scala | mit | 6,153 |
/**
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda
import scala.actors.Actor
import scala.actors.TIMEOUT
import org.slf4j.LoggerFactory
import org.joda.time.DateTime
class ElectorPoller(elector: Elector) extends Actor {
val logger = LoggerFactory.getLogger(getClass)
override def toString = elector + " poller"
override def act() = {
var keepLooping = true
Actor.self.loopWhile(keepLooping) {
implicit val req = RequestId(Utils.uuid + " poller")
Actor.self.reactWithin(elector.pollCycle.get.toInt) {
case got @ StateMachine.Stop(from) => keepLooping = false
case got @ TIMEOUT => {
if (logger.isDebugEnabled) logger.debug(s"$req$this received: $got")
val msg = Elector.RunElection(Actor.self)
if (logger.isDebugEnabled) logger.debug(s"$req$this sending: $msg -> $elector")
elector ! msg
}
}
}
}
override def exceptionHandler = {
case e: Exception => if (logger.isErrorEnabled) logger.error(this + " failed to setup election poller", e)
}
def stop()(implicit req: RequestId) {
val msg = StateMachine.Stop(this)
if (logger.isDebugEnabled) logger.debug(s"$req$this sending: $msg -> $this")
this ! msg
}
}
| gitlon/edda | src/main/scala/com/netflix/edda/ElectorPoller.scala | Scala | apache-2.0 | 1,807 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import SharedHelpers.{createTempDirectory, thisLineNumber}
import java.io.File
import Matchers._
import exceptions.TestFailedException
class ShouldBeWritableSpec extends Spec {
val tempDir = createTempDirectory()
val writableFile = File.createTempFile("writable", "me", tempDir)
writableFile.setWritable(true)
val secretFile = new File(tempDir, "secret")
secretFile.setWritable(false)
val fileName: String = "ShouldBeWritableSpec.scala"
def wasNotWritable(left: Any): String =
FailureMessages.wasNotWritable(left)
def wasWritable(left: Any): String =
FailureMessages.wasWritable(left)
def `writableFile should be writable, secretFile should not be writable` {
assert(writableFile.canWrite === true)
assert(secretFile.canWrite === false)
}
def allError(left: Any, message: String, lineNumber: Int): String = {
val messageWithIndex = UnquotedString(" " + FailureMessages.forAssertionsGenTraversableMessageWithStackDepth(0, UnquotedString(message), UnquotedString(fileName + ":" + lineNumber)))
FailureMessages.allShorthandFailed(messageWithIndex, left)
}
object `writable matcher` {
object `when work with 'file should be (writable)'` {
def `should do nothing when file is writable` {
writableFile should be (writable)
}
def `should throw TestFailedException with correct stack depth when file is not writable` {
val caught1 = intercept[TestFailedException] {
secretFile should be (writable)
}
assert(caught1.message === Some(wasNotWritable(secretFile)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
object `when work with 'file should not be writable'` {
def `should do nothing when file is not writable` {
secretFile should not be writable
}
def `should throw TestFailedException with correct stack depth when file is writable` {
val caught1 = intercept[TestFailedException] {
writableFile should not be writable
}
assert(caught1.message === Some(wasWritable(writableFile)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
object `when work with 'file shouldBe writable'` {
def `should do nothing when file is writable` {
writableFile shouldBe writable
}
def `should throw TestFailedException with correct stack depth when file is not writable` {
val caught1 = intercept[TestFailedException] {
secretFile shouldBe writable
}
assert(caught1.message === Some(wasNotWritable(secretFile)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
object `when work with 'file shouldNot be (writable)'` {
def `should do nothing when file is not writable` {
secretFile shouldNot be (writable)
}
def `should throw TestFailedException with correct stack depth when file is writable` {
val caught1 = intercept[TestFailedException] {
writableFile shouldNot be (writable)
}
assert(caught1.message === Some(wasWritable(writableFile)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
object `when work with 'all(xs) should be (writable)'` {
def `should do nothing when all(xs) is writable` {
all(List(writableFile)) should be (writable)
}
def `should throw TestFailedException with correct stack depth when all(xs) is not writable` {
val left1 = List(secretFile)
val caught1 = intercept[TestFailedException] {
all(left1) should be (writable)
}
assert(caught1.message === Some(allError(left1, wasNotWritable(secretFile), thisLineNumber - 2)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
object `when work with 'all(xs) should not be writable'` {
def `should do nothing when all(xs) is not writable` {
all(List(secretFile)) should not be writable
}
def `should throw TestFailedException with correct stack depth when all(xs) is writable` {
val left1 = List(writableFile)
val caught1 = intercept[TestFailedException] {
all(left1) should not be writable
}
assert(caught1.message === Some(allError(left1, wasWritable(writableFile), thisLineNumber - 2)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
object `when work with 'all(xs) shouldBe writable'` {
def `should do nothing when all(xs) is writable` {
all(List(writableFile)) shouldBe writable
}
def `should throw TestFailedException with correct stack depth when all(xs) is not writable` {
val left1 = List(secretFile)
val caught1 = intercept[TestFailedException] {
all(left1) shouldBe writable
}
assert(caught1.message === Some(allError(left1, wasNotWritable(secretFile), thisLineNumber - 2)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
object `when work with 'all(xs) shouldNot be (writable)'` {
def `should do nothing when all(xs) is not writable` {
all(List(secretFile)) shouldNot be (writable)
}
def `should throw TestFailedException with correct stack depth when all(xs) is writable` {
val left1 = List(writableFile)
val caught1 = intercept[TestFailedException] {
all(left1) shouldNot be (writable)
}
assert(caught1.message === Some(allError(left1, wasWritable(writableFile), thisLineNumber - 2)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
}
}
|
cheeseng/scalatest
|
scalatest-test/src/test/scala/org/scalatest/ShouldBeWritableSpec.scala
|
Scala
|
apache-2.0
| 7,060 |
package org.hammerlab.bgzf.block
import java.io.{ IOException, InputStream }
import java.nio.ByteBuffer
import org.hammerlab.channel.ByteChannel
/**
* BGZF-block header
*
* @param size size of header, in bytes
* @param compressedSize compressed size of block, parsed from header
*/
case class Header(size: Int, compressedSize: Int)
object Header {
// 18 bytes is enough to learn what we need to know: sizes of header and compressed block
val EXPECTED_HEADER_SIZE = 18
def apply(ch: ByteChannel)(implicit buf: ByteBuffer): Header = {
buf.limit(EXPECTED_HEADER_SIZE)
ch.readFully(buf)
implicit val arr = buf.array
val header = make
buf.clear()
ch.skip(header.size - EXPECTED_HEADER_SIZE)
buf.position(header.size)
header
}
def apply(is: InputStream)(implicit buffer: Array[Byte]): Header = {
val headerBytesRead = is.read(buffer, 0, EXPECTED_HEADER_SIZE)
if (headerBytesRead != EXPECTED_HEADER_SIZE)
throw new IOException(
s"Expected $EXPECTED_HEADER_SIZE header bytes, got $headerBytesRead"
)
val header = make
is.skip(header.size - EXPECTED_HEADER_SIZE)
header
}
def make(implicit bytes: Array[Byte]): Header = {
def check(idx: Int, expected: Byte): Unit = {
val actual = bytes(idx)
if (actual != expected)
throw HeaderParseException(
idx,
actual,
expected
)
}
// GZip magic bytes
check(0, 31)
check(1, 139.toByte)
check(2, 8)
check(3, 4)
val xlen = getShort(10)
// We expect 6 bytes of `xlen`; anything more is considered "extra" and added to the expected 18-byte header size
val extraHeaderBytes = xlen - 6
val actualHeaderSize = EXPECTED_HEADER_SIZE + extraHeaderBytes
// BAM-specific GZip-flags
check(12, 66)
check(13, 67)
check(14, 2)
val compressedSize = getShort(16) + 1
Header(
actualHeaderSize,
compressedSize
)
}
def getShort(idx: Int)(implicit buffer: Array[Byte]): Int =
(buffer(idx) & 0xff) |
((buffer(idx + 1) & 0xff) << 8)
}
|
ryan-williams/spark-bam
|
bgzf/src/main/scala/org/hammerlab/bgzf/block/Header.scala
|
Scala
|
apache-2.0
| 2,114 |
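A standalone sketch (not from the repository; object and variable names invented) that lays out the 18 bytes `Header.make` expects and decodes the XLEN/BSIZE fields with the same little-endian arithmetic, so the size computation above is visible in isolation.

object BgzfHeaderSketch extends App {
  def getShort(bytes: Array[Byte], idx: Int): Int =
    (bytes(idx) & 0xff) | ((bytes(idx + 1) & 0xff) << 8)

  val bytes = new Array[Byte](18)
  bytes(0) = 31; bytes(1) = 139.toByte            // gzip magic bytes
  bytes(2) = 8; bytes(3) = 4                      // DEFLATE, FLG.FEXTRA set
  bytes(10) = 6                                   // XLEN = 6: a single "BC" subfield
  bytes(12) = 66; bytes(13) = 67; bytes(14) = 2   // 'B', 'C', SLEN = 2
  bytes(16) = 42                                  // BSIZE = total block size - 1

  val xlen           = getShort(bytes, 10)
  val headerSize     = 18 + (xlen - 6)            // extra subfields would enlarge the header
  val compressedSize = getShort(bytes, 16) + 1
  println(s"headerSize=$headerSize compressedSize=$compressedSize")  // 18 and 43
}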
/*
* Copyright (c) 2013 koiroha.org.
* All sources and related resources are available under Apache License 2.0.
* http://www.apache.org/licenses/LICENSE-2.0.html
*/
package com.kazzla.asterisk
import scala.concurrent.{Future, Promise}
import scala.collection._
import javax.net.ssl._
import java.util.concurrent.atomic.AtomicBoolean
import org.slf4j.LoggerFactory
import java.io._
import java.security.cert.{CertificateException, X509Certificate}
import java.security.KeyStore
import scala.Some
import java.util.concurrent.LinkedBlockingQueue
import org.asterisque.codec.Codec
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Wire
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
/**
 * Serializes/deserializes [[com.kazzla.asterisk.Message]] instances and transfers them over the communication channel.
* @author Takami Torao
*/
trait Wire extends Closeable {
import Wire._
/**
 * Flag indicating whether this `Wire` has started delivering receive notifications.
*/
private[this] val active = new AtomicBoolean(false)
/**
 * Flag indicating whether this `Wire` has already been closed.
*/
private[this] val closed = new AtomicBoolean(false)
/**
 * Buffer that holds messages internally when a subclass reports message arrival before this `Wire` has been
 * started. Messages held in this buffer are delivered to all event handlers when `start()` is called.
*/
private[this] val buffer = new mutable.ArrayBuffer[Message]()
/**
 * Event handlers notified when this `Wire` receives a message. Receive notifications are delivered only while
 * online, that is after `start()` has been called and before `stop()` is called. Messages received while offline
 * are buffered inside the `Wire` and delivered the next time `start()` is called.
*/
val onReceive = new EventHandlers[Message]()
/**
 * Event handlers notified when this `Wire` is closed.
*/
val onClosed = new EventHandlers[Wire]()
/**
 * Returns true when the endpoint represented by this `Wire` is the server side.
 * The flag is used to issue unique IDs without negotiating with the peer. For example, it decides whether the
 * most-significant bit is set in the ID of a newly opened pipe, which lets both sides issue unique pipe IDs
 * without prior agreement. Because of this convention the value must differ between the two ends of a connection.
 * @return true if this is the server side
*/
def isServer:Boolean
/**
 * @return true if this `Wire` has been closed
*/
def isClosed:Boolean = closed.get()
/**
 * @return true while this `Wire` is delivering receive notifications
*/
def isActive:Boolean = active.get()
/**
 * @return a human-readable string identifying the peer
*/
def peerName:String = ""
/**
 * @return the SSL session of the authenticated peer
*/
def tls:Future[Option[SSLSession]] = Promise.successful(None).future
// ==============================================================================================
// Sending messages
// ==============================================================================================
/**
 * Sends the specified message.
 * @param msg the message to send
*/
def send(msg:Message):Unit
// ==============================================================================================
// Receiving messages
// ==============================================================================================
/**
 * Called when a message has been received from the underlying network implementation.
 * @param msg the received message
*/
protected def receive(msg:Message):Unit = if(! isClosed){
if(active.get()) {
onReceive(msg)
} else buffer.synchronized {
buffer.append(msg)
logger.debug(s"message buffered on deactive wire: $msg")
}
} else {
logger.debug(s"message disposed on closed wire: $msg")
}
// ==============================================================================================
// Starting receive notification
// ==============================================================================================
/**
 * Starts delivering messages received on this `Wire` to the `onReceive` event handlers.
 * The intent is to buffer received messages until the event handlers are properly registered, for cases where
 * the underlying asynchronous I/O implementation starts receiving messages as soon as the `Wire` is constructed.
*/
def start():Unit = if(active.compareAndSet(false, true)){
buffer.foreach{ msg => onReceive(msg) }
buffer.clear()
}
// ==============================================================================================
// Stopping receive notification
// ==============================================================================================
/**
 * Stops message delivery on this `Wire`.
 * Messages received while stopped are kept in the internal buffer and delivered the next time delivery starts.
*/
def stop():Unit = if(active.compareAndSet(true, false)){
/* */
}
// ==============================================================================================
// Close
// ==============================================================================================
/**
 * Closes this `Wire`. The `onClosed` event handlers are called back the first time this method is invoked on the
 * instance. Calling the method again after that has no effect.
*/
def close():Unit = if(closed.compareAndSet(false, true)){
onClosed(this)
}
}
object Wire {
import java.security._
private[Wire] val logger = LoggerFactory.getLogger(classOf[Wire])
// ==============================================================================================
// Creating a pipe
// ==============================================================================================
/**
 * Builds a pair of pipe-style Wires that exchange messages without going through a [[Codec]].
 * @return a tuple holding both ends of the pipe Wire
*/
def newPipe():(Wire, Wire) = {
val queue = new LinkedBlockingQueue[()=>Unit]()
val thread = new Thread("PipedWire Message Pump"){
override def run() = try {
while(true){
queue.take().apply()
}
} catch {
case ex:InterruptedException => None
case ex:ThreadDeath => throw ex
case ex:Throwable => logger.error("piped wired ", ex)
}
}
lazy val wires = new Array[PipedWire](2)
class PipedWire(i:Int) extends Wire {
val other = (i + 1) & 0x01
val isServer = (i & 0x01) == 0
def send(m:Message) = if(isClosed){
throw new IOException("pipe closed")
} else {
queue.put({ () => wires(other).receive(m) })
}
//def post(m:Message) = onReceive(m)
override def close() = {
thread.interrupt()
super.close()
}
onClosed ++ { _ => wires(other).close() }
}
wires(0) = new PipedWire(0)
wires(1) = new PipedWire(1)
thread.start()
(wires(0), wires(1))
}
// ==============================================================================================
// Loading certificates
// ==============================================================================================
/**
 * Utility for building an SSL context from the specified certificate stores.
 *
 * To change the TrustManager, use the system property `javax.net.ssl.trustStore`.
 * http://docs.oracle.com/javase/jp/6/technotes/guides/security/jsse/JSSERefGuide.html#TrustManagerFactory
 *
 * @param cert keystore file containing the certificate
 * @param ksPassword password of the certificate keystore
 * @param pkPassword password of the private key
 * @param trust keystore file containing the trusted CA certificates
 * @param trustPassword password of the trusted CA certificate keystore
*/
def loadSSLContext(cert:File, ksPassword:String, pkPassword:String, trust:File, trustPassword:String):SSLContext = {
val algorithm = Option(Security.getProperty("ssl.KeyManagerFactory.algorithm")).getOrElse("SunX509")
val targetKeyStore = loadKeyStore(cert, ksPassword)
val kmf = KeyManagerFactory.getInstance(algorithm)
kmf.init(targetKeyStore, pkPassword.toCharArray)
val trustKeyStore = loadKeyStore(trust, trustPassword)
val tmf = TrustManagerFactory.getInstance("SunX509")
tmf.init(trustKeyStore)
val context = SSLContext.getInstance("TLS")
context.init(kmf.getKeyManagers, tmf.getTrustManagers, null)
context
}
// ==============================================================================================
// Loading a keystore
// ==============================================================================================
/**
 * Utility function that loads a keystore from the specified file.
 *
 * @param file KeyStore file in JKS format
 * @param ksPassword password of the KeyStore
 * @param ksType type of the keystore
*/
private[this] def loadKeyStore(file:File, ksPassword:String, ksType:String = "JKS"):KeyStore = using(new BufferedInputStream(new FileInputStream(file))){ in =>
val keyStore = KeyStore.getInstance(ksType)
keyStore.load(in, ksPassword.toCharArray)
keyStore
}
}
object DummyTrustManager extends X509TrustManager {
private[this] val logger = LoggerFactory.getLogger(this.getClass)
def getAcceptedIssuers:Array[X509Certificate] = {
logger.debug(s"DummyTrustManager.getAcceptedIssuers")
Array()
}
def checkClientTrusted(certs:Array[X509Certificate], authType:String) = {
logger.debug(s"DummyTrustManager.checkClientTrusted(${certs.mkString("[",",","]")},$authType)")
}
def checkServerTrusted(certs:Array[X509Certificate], authType:String) = {
logger.debug(s"DummyTrustManager.checkServerTrusted(${certs.mkString("[",",","]")},$authType)")
}
}
class TrustManager extends X509TrustManager {
private[this] val defaultTrustManager = {
val ks = KeyStore.getInstance("JKS")
ks.load(new FileInputStream("trustedCerts"), "passphrase".toCharArray)
val tmf = TrustManagerFactory.getInstance("PKIX")
tmf.init(ks)
tmf.getTrustManagers.find{ _.isInstanceOf[X509TrustManager] } match {
case Some(tm) => tm.asInstanceOf[X509TrustManager]
case None => throw new IllegalStateException("x509 certificate not found on default TrustManager")
}
}
def checkClientTrusted(chain:Array[X509Certificate], authType:String):Unit = try {
defaultTrustManager.checkClientTrusted(chain, authType)
} catch {
case ex:CertificateException =>
}
def checkServerTrusted(chain:Array[X509Certificate], authType:String):Unit = try {
defaultTrustManager.checkServerTrusted(chain, authType)
} catch {
case ex:CertificateException =>
}
def getAcceptedIssuers:Array[X509Certificate] = {
defaultTrustManager.getAcceptedIssuers
}
}
|
torao/asterisque
|
core-scala/src_back/main/scala/com/kazzla/asterisk/Wire.scala
|
Scala
|
apache-2.0
| 11,786 |
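A usage sketch (not from the repository) for `Wire.newPipe` above. `PipeDemo` and `someMessage` are invented names; `someMessage` is only a placeholder for whatever concrete `Message` the caller has, since the file does not show how messages are constructed.

import com.kazzla.asterisk.{Message, Wire}

object PipeDemo {
  def demo(someMessage: Message): Unit = {
    val (w1, w2) = Wire.newPipe()
    w2.onReceive ++ { msg => println(s"received: $msg") }  // register a handler before starting
    w2.start()                                             // delivers anything buffered so far
    w1.send(someMessage)                                   // pumped asynchronously to w2's onReceive handlers
    w1.close()                                             // closing one end also closes the other
  }
}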
def f(a: Int, a: Int) {}
f(/* resolved: false */ a = 1)
|
ilinum/intellij-scala
|
testdata/resolve2/element/clash/NamedParameter.scala
|
Scala
|
apache-2.0
| 58 |
package scaldi.jsr330
import scaldi._
/** Injector that creates JSR 330 compliant bindings on-demand (when they are injected) */
class OnDemandAnnotationInjector
extends MutableInjectorUser
with InjectorWithLifecycle[OnDemandAnnotationInjector]
with ShutdownHookLifecycleManager {
private var bindings: List[BindingWithLifecycle] = Nil
private var lifecycleManager: Option[LifecycleManager] = None
def getBindingInternal(identifiers: List[Identifier]): Option[BindingWithLifecycle] =
identifiers
.collect { case TypeTagIdentifier(tpe) => tpe }
.map(tpe => tpe -> AnnotationBinding.extractIdentifiers(tpe)) match {
case (tpe, resultingIdentifiers) :: Nil if Identifier.sameAs(resultingIdentifiers, identifiers) =>
bindings.find(_ isDefinedFor identifiers) orElse {
this.synchronized {
bindings.find(_ isDefinedFor identifiers) orElse {
val binding = new AnnotationBinding(
Right(tpe),
() => injector,
resultingIdentifiers
)
// doesn't appear possible for `isEager` to be true, remove this code?
if (binding.isEager) {
lifecycleManager map binding.get getOrElse (throw new InjectException(
"Attempt to inject binding before OnDemandAnnotationInjector was initialized"
))
}
bindings = bindings :+ binding
Some(binding)
}
}
}
case _ => None
}
def getBindingsInternal(identifiers: List[Identifier]): List[BindingWithLifecycle] =
getBindingInternal(identifiers).toList
protected def init(lifecycleManager: LifecycleManager): () => Unit = {
this.lifecycleManager = Some(lifecycleManager)
() => ()
}
}
|
scaldi/scaldi-jsr330
|
src/main/scala/scaldi/jsr330/OnDemandAnnotationInjector.scala
|
Scala
|
apache-2.0
| 1,831 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.datasource.mongodb.schema
import java.text.SimpleDateFormat
import java.util.Locale
import com.stratio.datasource.MongodbTestConstants
import com.stratio.datasource.mongodb.config.{MongodbConfig, MongodbConfigBuilder}
import com.stratio.datasource.mongodb.partitioner.MongodbPartitioner
import com.stratio.datasource.mongodb.rdd.MongodbRDD
import com.stratio.datasource.mongodb._
import org.apache.spark.sql.mongodb.{TemporaryTestSQLContext, TestSQLContext}
import org.apache.spark.sql.types.{ArrayType, StringType, StructField, TimestampType}
import org.junit.runner.RunWith
import org.scalatest._
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class MongodbSchemaIT extends FlatSpec
with Matchers
with MongoEmbedDatabase
with TestBsonData
with MongodbTestConstants {
private val host: String = "localhost"
private val collection: String = "testCol"
private val readPreference = "secondaryPreferred"
val testConfig = MongodbConfigBuilder()
.set(MongodbConfig.Host,List(host + ":" + mongoPort))
.set(MongodbConfig.Database,db)
.set(MongodbConfig.Collection,collection)
.set(MongodbConfig.SamplingRatio,1.0)
.set(MongodbConfig.ReadPreference, readPreference)
.build()
val mongodbPartitioner = new MongodbPartitioner(testConfig)
val mongodbRDD = new MongodbRDD(TemporaryTestSQLContext, testConfig, mongodbPartitioner)
behavior of "A schema"
it should "be inferred from rdd with primitives" + scalaBinaryVersion in {
withEmbedMongoFixture(primitiveFieldAndType) { mongodProc =>
val schema = MongodbSchema(mongodbRDD, 1.0).schema()
schema.fields should have size 7
schema.fieldNames should contain allOf("string", "integer", "long", "double", "boolean", "null")
schema.printTreeString()
}
}
it should "be inferred from rdd with complex fields" + scalaBinaryVersion in {
withEmbedMongoFixture(complexFieldAndType1) { mongodProc =>
val schema = MongodbSchema(mongodbRDD, 1.0).schema()
schema.fields should have size 13
schema.fields filter {
case StructField(name, ArrayType(StringType, _), _, _) => Set("arrayOfNull", "arrayEmpty") contains name
case _ => false
} should have size 2
schema.printTreeString()
}
}
it should "resolve type conflicts between fields" + scalaBinaryVersion in {
withEmbedMongoFixture(primitiveFieldValueTypeConflict) { mongodProc =>
val schema = MongodbSchema(mongodbRDD, 1.0).schema()
schema.fields should have size 7
schema.printTreeString()
}
}
it should "be inferred from rdd with more complex fields" + scalaBinaryVersion in {
withEmbedMongoFixture(complexFieldAndType2) { mongodProc =>
val schema = MongodbSchema(mongodbRDD, 1.0).schema()
schema.fields should have size 5
schema.printTreeString()
}
}
it should "read java.util.Date fields as timestamptype" + scalaBinaryVersion in {
val dfunc = (s: String) => new SimpleDateFormat("EEE MMM dd HH:mm:ss Z yyyy", Locale.ENGLISH).parse(s)
import com.mongodb.casbah.Imports.DBObject
val stringAndDate = List(DBObject("string" -> "this is a simple string.", "date" -> dfunc("Mon Aug 10 07:52:49 EDT 2015")))
withEmbedMongoFixture(stringAndDate) { mongodProc =>
val schema = MongodbSchema(mongodbRDD, 1.0).schema()
schema.fields should have size 3
schema.fields.filter(_.name == "date").head.dataType should equal(TimestampType)
schema.printTreeString()
}
}
}
|
Stratio/spark-mongodb
|
spark-mongodb/src/test/scala/com/stratio/datasource/mongodb/schema/MongodbSchemaIT.scala
|
Scala
|
apache-2.0
| 4,157 |
package net.revenj.server
import net.revenj.patterns.ServiceLocator
import net.revenj.serialization.Serialization
import scala.concurrent.Future
trait ServerCommand {
def execute[TInput, TOutput](
locator: ServiceLocator,
input: Serialization[TInput],
output: Serialization[TOutput],
data: TInput): Future[CommandResult[TOutput]]
}
trait ReadOnlyServerCommand extends ServerCommand
|
ngs-doo/revenj
|
scala/revenj-akka/src/main/scala/net/revenj/server/ServerCommand.scala
|
Scala
|
bsd-3-clause
| 420 |
package ua.ds.persistent.iteration3
sealed trait PersistentList[+T] {
import PersistentList._
def exists(predicate: (T) => Boolean): Boolean = this match {
case Empty => false
case Cons(head, tail) => predicate(head) || tail.exists(predicate)
}
def takeWhile(predicate: T => Boolean): PersistentList[T] = this match {
case Cons(head, tail) if predicate(head) => head :: tail.takeWhile(predicate)
case _ => Empty
}
def take(size: Int): PersistentList[T] = this match {
case Cons(head, tail) if size != 0 => head :: tail.take(size - 1)
case _ => Empty
}
def reverse: PersistentList[T] = fold(apply[T]())((list, e) => e :: list)
def map[R](mapper: T => R): PersistentList[R] = reverse.fold(apply[R]())((list, e) => mapper(e) :: list)
    def filter(predicate: T => Boolean): PersistentList[T] = reverse.fold(apply[T]())((list, e) => if (predicate(e)) e :: list else list)
def flatMap[R]()(flatter: T => PersistentList[R]): PersistentList[R] = reverse.fold(apply[R]())((list, e) => flatter(e) ++ list)
def zip[E, R](that: PersistentList[E])(zipper: (T, E) => R): PersistentList[R] = (this, that) match {
case (Cons(thisHead, thisTail), Cons(thatHead, thatTail)) => zipper(thisHead, thatHead) :: thisTail.zip(thatTail)(zipper)
case (_, _) => Empty
}
def dropWhile(predicate: T => Boolean): PersistentList[T] = this match {
case Cons(head, tail) if predicate(head) => tail.dropWhile(predicate)
case _ => this
}
def drop(size: Int): PersistentList[T] = this match {
case Cons(_, tail) if size > 0 => tail.drop(size - 1)
case _ => this
}
def :+[E >: T](newTail: E): PersistentList[E] = reverse.fold(newTail :: Empty)((list, elem) => elem :: list)
def ++[E >: T](that: PersistentList[E]): PersistentList[E] = reverse.fold(that)((list, elem) => elem :: list)
def tail: PersistentList[T] = this match {
case Empty => Empty
case Cons(_, tail) => tail
}
def head: Option[T] = this match {
case Empty => None
case Cons(e, _) => Some(e)
}
def ::[E >: T](e: E): PersistentList[E] = Cons(e, this)
override def toString: String = {
"[" + fold(new StringBuilder)(
(builder, e) => {
if (builder.nonEmpty)
builder.append(", ")
builder.append(e)
}
).toString() + "]"
}
def fold[A](init: A)(func: (A, T) => A): A = this match {
case Empty => init
case Cons(head, tail) => tail.fold(func(init, head))(func)
}
}
object PersistentList {
def apply[T](): PersistentList[T] = Empty
private case object Empty extends PersistentList[Nothing]
private case class Cons[+T](h: T, t: PersistentList[T]) extends PersistentList[T]
}
|
Alex-Diez/persistent-data-sturctures
|
data-structures-practices/persistent-list/src/main/scala/ua/ds/persistent/iteration3/PersistentList.scala
|
Scala
|
mit
| 2,875 |
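A short usage sketch (not from the repository; the demo object is invented) exercising the list above; the printed forms follow its toString implementation.

import ua.ds.persistent.iteration3.PersistentList

object PersistentListDemo extends App {
  val xs = 1 :: 2 :: 3 :: PersistentList[Int]()
  println(xs)                     // [1, 2, 3]
  println(xs.map(_ * 10))         // [10, 20, 30]
  println(xs.filter(_ % 2 == 1))  // [1, 3]
  println(xs.fold(0)(_ + _))      // 6
  println((xs :+ 4).reverse)      // [4, 3, 2, 1]
}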
/* (c) rtm-scala contributors, 2012. All rights reserved. */
package net.combinatory.rtm
import org.joda.time.DateTime
case class TaskSeries(
id: String,
created: DateTime,
modified: DateTime,
name: String,
source: String,
url: Option[String],
locationId: Option[String]
)
case class Task(
id: String,
due: Option[DateTime],
hasDueTime: Boolean,
added: DateTime,
completed: Option[DateTime],
deleted: Option[DateTime],
priority: Option[Priority],
wasPostponed: Boolean,
source: String,
estimate: Option[DateTime]
)
sealed trait Priority
case object High extends Priority
case object Medium extends Priority
case object Low extends Priority
|
comb/rtm-scala
|
src/main/scala/net/combinatory/rtm/Tasks.scala
|
Scala
|
apache-2.0
| 678 |
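A construction sketch for the model above (all field values and the demo object name are invented for illustration), showing how the optional fields and the Priority ADT fit together.

import org.joda.time.DateTime
import net.combinatory.rtm.{High, Task}

object TaskDemo extends App {
  val now = DateTime.now
  val task = Task(
    id           = "t-1",
    due          = Some(now.plusDays(1)),
    hasDueTime   = true,
    added        = now,
    completed    = None,
    deleted      = None,
    priority     = Some(High),
    wasPostponed = false,
    source       = "api",
    estimate     = None
  )
  println(task.priority)  // Some(High)
}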
package pt.pimentelfonseca.activatorexample.db.dao
import scala.slick.driver.SQLiteDriver.simple._
import pt.pimentelfonseca.activatorexample.db.tables.TableDatabaseVersions
class DatabaseVersions(tag: Tag) extends TableDatabaseVersions(tag) {
// Extra DAO logic here.
}
|
luismfonseca/agile-scala-android-example
|
src/main/scala/pt/pimentelfonseca/activatorexample/db/dao/DatabaseVersions.scala
|
Scala
|
cc0-1.0
| 287 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
/** An annotation to designate that the annotated entity
* should not be considered for additional compiler checks.
* Specific applications include annotating the subject of
* a match expression to suppress exhaustiveness warnings, and
* annotating a type argument in a match case to suppress
* unchecked warnings.
*
* Such suppression should be used with caution, without which
* one may encounter [[scala.MatchError]] or [[java.lang.ClassCastException]]
* at runtime. In most cases one can and should address the
* warning instead of suppressing it.
*
* {{{
* object Test extends App {
* // This would normally warn "match is not exhaustive"
* // because `None` is not covered.
* def f(x: Option[String]) = (x: @unchecked) match { case Some(y) => y }
* // This would normally warn "type pattern is unchecked"
* // but here will blindly cast the head element to String.
* def g(xs: Any) = xs match { case x: List[String @unchecked] => x.head }
* }
* }}}
*/
final class unchecked extends scala.annotation.Annotation {}
|
martijnhoekstra/scala
|
src/library/scala/unchecked.scala
|
Scala
|
apache-2.0
| 1,393 |
/**
* Copyright (C) 2013 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.function.xxforms
import org.orbeon.oxf.xforms.function.{FunctionSupport, XFormsFunction}
import org.orbeon.saxon.expr.XPathContext
class XXFormsClientId extends XFormsFunction with FunctionSupport {
override def evaluateItem(xpathContext: XPathContext) =
resolveStaticOrAbsoluteId(argument.lift(0))(xpathContext)
}
|
ajw625/orbeon-forms
|
src/main/scala/org/orbeon/oxf/xforms/function/xxforms/XXFormsClientId.scala
|
Scala
|
lgpl-2.1
| 1,006 |
// code-examples/Traits/trait-construction-script.scala
trait T1 {
println( " in T1: x = " + x )
val x=1
println( " in T1: x = " + x )
}
trait T2 {
println( " in T2: y = " + y )
val y="T2"
println( " in T2: y = " + y )
}
class Base12 {
println( " in Base12: b = " + b )
val b="Base12"
println( " in Base12: b = " + b )
}
class C12 extends Base12 with T1 with T2 {
println( " in C12: c = " + c )
val c="C12"
println( " in C12: c = " + c )
}
println( "Creating C12:" )
new C12
println( "After Creating C12" )
println( "Creating D12:" )
class D12 extends Base12 {
println(" in D12: d = " + d)
val d = "D12"
println(" in D12: d = " + d)
}
new D12 with T1 with T2 {
println("in anonymous class")
}
println( "After Creating D12" )
|
foomango/scalaex
|
code-examples/Traits/trait-construction-script.scala
|
Scala
|
mit
| 771 |
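For reference, the construction order the script above demonstrates: superclass first, then traits left to right, then the class body, with each val printing its JVM default (null or 0) before initialization. A typical Scala 2 run of the C12 section prints roughly the following (reconstructed from the semantics, not captured from this repository):

Creating C12:
 in Base12: b = null
 in Base12: b = Base12
 in T1: x = 0
 in T1: x = 1
 in T2: y = null
 in T2: y = T2
 in C12: c = null
 in C12: c = C12
After Creating C12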
package org.separatepackage
import org.coroutines._
import org.scalatest._
import scala.util.Failure
class SeparatePackageTest extends FunSuite with Matchers {
test("should declare and run a coroutine") {
val rube = coroutine { (x: Int) =>
yieldval(x * 2)
if (x > 0) yieldval(x)
else yieldval(-x)
x + 1
}
val c0 = call(rube(2))
assert(c0.resume)
assert(c0.value == 4)
assert(c0.resume)
assert(c0.value == 2)
assert(!c0.resume)
assert(c0.result == 3)
assert(c0.isCompleted)
val c1 = call(rube(-2))
assert(c1.resume)
assert(c1.value == -4)
assert(c1.resume)
assert(c1.value == 2)
assert(!c1.resume)
assert(c1.result == -1)
assert(c1.isCompleted)
}
test("Another coroutine must be invoked without syntax sugar") {
val inc = coroutine { (x: Int) => x + 1 }
val rube = coroutine { () =>
inc(3)
}
val c = call(rube())
assert(!c.resume)
assert(c.result == 4)
assert(c.isCompleted)
}
}
|
storm-enroute/coroutines
|
src/test/scala/org/separatepackage/SeparatePackageTest.scala
|
Scala
|
bsd-3-clause
| 1,026 |
/*
* Copyright (C) 2014 - 2017 Contributors as noted in the AUTHORS.md file
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.wegtam.tensei.agent.helpers
import java.nio.file.{ Files, StandardCopyOption }
import java.io.InputStream
import org.dfasdl.utils.exceptions.{ XmlValidationErrorException, XmlValidationFatalException }
import com.wegtam.tensei.agent.DefaultSpec
class DFASDLValidator$Test extends DefaultSpec {
describe("DFASDLValidator") {
describe("validateString") {
describe("when given an empty String") {
it("should throw a SAXParseException") {
intercept[XmlValidationFatalException] {
DFASDLValidator.validateString("")
}
}
}
describe("when given a minimal valid dfasdl") {
it("should be valid") {
val xml =
"""<?xml version="1.0" encoding="UTF-8"?><dfasdl xmlns="http://www.dfasdl.org/DFASDL" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" default-encoding="utf-8" semantic="niem"></dfasdl>"""
DFASDLValidator.validateString(xml)
}
}
}
describe("validate") {
it("must validate the local file") {
val filename = Files.createTempFile("test-", ".xml")
val in: InputStream = getClass.getResourceAsStream(
"/com/wegtam/tensei/agent/middleware/complex-definition.xml"
)
Files.copy(in, filename, StandardCopyOption.REPLACE_EXISTING)
DFASDLValidator.validate(filename.toFile)
}
it("must throw an exception when it does not validate") {
intercept[XmlValidationErrorException] {
val filename = Files.createTempFile("test-", ".xml")
val in: InputStream = getClass.getResourceAsStream(
"/com/wegtam/tensei/agent/middleware/complex-definition-wrong.xml"
)
Files.copy(in, filename, StandardCopyOption.REPLACE_EXISTING)
DFASDLValidator.validate(filename.toFile)
}
}
}
describe("validateLocal") {
it("must validate the local file") {
DFASDLValidator.validateLocal("/com/wegtam/tensei/agent/middleware/complex-definition.xml")
}
it("must throw an exception when it does not validate") {
intercept[XmlValidationErrorException] {
DFASDLValidator.validateLocal(
"/com/wegtam/tensei/agent/middleware/complex-definition-wrong.xml"
)
}
}
}
}
}
|
Tensei-Data/tensei-agent
|
src/test/scala/com/wegtam/tensei/agent/helpers/DFASDLValidator$Test.scala
|
Scala
|
agpl-3.0
| 3,072 |
package eventstreams
/*
* Copyright 2014-15 Intelix Pty Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import eventstreams.core.components.cluster.ClusterManagerActor._
import eventstreams.support.{DummyNodeTestContext, SharedActorSystem}
import org.scalatest.FlatSpec
class ClusterManagerTest
extends FlatSpec with DummyNodeTestContext with SharedActorSystem {
val expectedPeersListInitial = "dummy1"
val expectedPeersListComplete = "dummy1,dummy2,dummy3,dummy4,dummy5"
"Cluster" should "start with 5 nodes and all peers should be discovered" in new WithDummyNode1
with WithDummyNode2 with WithDummyNode3 with WithDummyNode4 with WithDummyNode5 {
expectSomeEventsWithTimeout(30000, ClusterStateChanged, 'Peers -> expectedPeersListInitial, 'Node -> "dummy1")
expectSomeEventsWithTimeout(30000, ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy1")
expectOneOrMoreEvents(ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy2")
expectOneOrMoreEvents(ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy3")
expectOneOrMoreEvents(ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy4")
expectOneOrMoreEvents(ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy5")
}
it should "consistently" in new WithDummyNode1
with WithDummyNode2 with WithDummyNode3 with WithDummyNode4 with WithDummyNode5 {
expectSomeEventsWithTimeout(30000, ClusterStateChanged, 'Peers -> expectedPeersListInitial, 'Node -> "dummy1")
expectSomeEventsWithTimeout(30000, ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy1")
expectOneOrMoreEvents(ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy2")
expectOneOrMoreEvents(ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy3")
expectOneOrMoreEvents(ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy4")
expectOneOrMoreEvents(ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy5")
}
it should "and again (testing SharedActorSystem)" in new WithDummyNode1
with WithDummyNode2 with WithDummyNode3 with WithDummyNode4 with WithDummyNode5 {
expectSomeEventsWithTimeout(30000, ClusterStateChanged, 'Peers -> expectedPeersListInitial, 'Node -> "dummy1")
expectSomeEventsWithTimeout(30000, ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy1")
expectOneOrMoreEvents(ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy2")
expectOneOrMoreEvents(ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy3")
expectOneOrMoreEvents(ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy4")
expectOneOrMoreEvents(ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy5")
}
it should "and again ... (testing SharedActorSystem)"in new WithDummyNode1
with WithDummyNode2 with WithDummyNode3 with WithDummyNode4 with WithDummyNode5 {
expectSomeEventsWithTimeout(30000, ClusterStateChanged, 'Peers -> expectedPeersListInitial, 'Node -> "dummy1")
expectSomeEventsWithTimeout(30000, ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy1")
expectOneOrMoreEvents(ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy2")
expectOneOrMoreEvents(ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy3")
expectOneOrMoreEvents(ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy4")
expectOneOrMoreEvents(ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy5")
}
it should s"recover if one node fails" in new WithDummyNode1
with WithDummyNode2 with WithDummyNode3 with WithDummyNode4 with WithDummyNode5 {
expectSomeEventsWithTimeout(30000, ClusterStateChanged, 'Peers -> expectedPeersListInitial, 'Node -> "dummy1")
expectSomeEventsWithTimeout(30000, 5, ClusterStateChanged, 'Peers -> expectedPeersListComplete)
clearEvents()
restartDummyNode4()
expectSomeEventsWithTimeout(30000, 1, ClusterStateChanged, 'Peers -> expectedPeersListComplete, 'Node -> "dummy4")
}
}
|
intelix/eventstreams
|
es-core/es-api/src/test/scala/eventstreams/ClusterManagerTest.scala
|
Scala
|
apache-2.0
| 4,770 |
package tk.monnef.mcmapper
import java.io.File
object Utils {
implicit final class ForwardPipe[T](val x: T) extends AnyVal {
def |>[B](f: (T) => B) = f(x)
}
implicit final class OrElseCrash[T](val x: Option[T]) extends AnyVal {
def orElseCrash: T = x match {
case None => throw new McMapperException
case Some(a) => a
}
def orElseCrash(msg: String): T = x match {
case None => throw new McMapperException(msg)
case Some(a) => a
}
}
def getCurrentPath() = new File(".").getAbsolutePath
}
|
mnn/mcMapperLib
|
src/main/scala/tk/monnef/mcmapper/Utils.scala
|
Scala
|
apache-2.0
| 548 |
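A small usage sketch (not from the repository; the demo object is invented) for the two enrichment classes above.

import tk.monnef.mcmapper.Utils._

object UtilsDemo extends App {
  println(5 |> (_ * 2) |> (_ + 1))   // 11: values are piped left to right through the functions
  println(Some("hit").orElseCrash)   // hit
  // (None: Option[String]).orElseCrash("boom") would throw McMapperException with that message
}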
package se.gigurra.util.serialization.json.writer
/**
* Created by johan on 2015-03-27.
*/
object ScalaMapWriter extends TypeWriter {
override def apply(x: Any)(implicit buffer: StringBuilder, writer: JsonWriter): Unit = {
if (x != null) {
val m = x.asInstanceOf[collection.Map[_, _]]
buffer.append('{')
for ((k, v) <- m) {
SimpleTypeWriter.apply(k)
buffer.append(':')
writer.write(v)
buffer.append(',')
}
writeClassName(x.getClass)
buffer.append('}')
} else {
writer.writeNull()
}
}
}
|
GiGurra/gigurra-game-challenge
|
ext/gigurra-util/src/main/scala/se/yabs/util/serialization/json/writer/ScalaMapWriter.scala
|
Scala
|
gpl-2.0
| 579 |
package dog
package autodoc
import scalaz._, Id._, Free._
import httpz._
import argonaut.DecodeJson
trait AutodocMarker {
def generate(title: String, format: Autodoc.Format): String
}
sealed abstract class Autodoc[A: Show] extends AutodocMarker {
def description: Option[String]
def request: Request
def response: Response[A]
// not Functor
def map[B: Show](f: A => B): Autodoc[B]
}
object Autodoc {
sealed abstract class Format
final case class Markdown(
generate: (String, Option[String], RequestDocument, ResponseDocument) => play.twirl.api.Content
= dog.autodoc.templates.md.document.apply _
) extends Format
final case class Html(
generate: (String, Option[String], RequestDocument, ResponseDocument) => play.twirl.api.Content
= html.document.apply _
) extends Format
def apply[A: Show](interpreter: Interpreter[Id], p: ActionNel[Autodoc[A]])(test: Response[A] => TestCaseAp[Unit]): TestCase[Autodoc[A]] = {
val t: TestCaseAp[Autodoc[A]] = TestCase {
interpreter.run(p) match {
case -\/(es) => TestCase.handle(es.list.map(_.asInstanceOf[Throwable]))
case \/-(a) => test(a.response).map(Function.const(a))
}
}
t.monadic
}
private[this] final case class AutodocImpl[A: Show](
override val description: Option[String],
override val request: Request,
override val response: Response[A]) extends Autodoc[A] {
override def generate(title: String, format: Format) = {
val req = RequestDocument.from(request)
val res = ResponseDocument.from(implicitly[Show[A]].show(response))
(format match {
case Markdown(generate) => generate
case Html(generate) => generate
})(title, description, req, res).body.trim
}
override def map[B: Show](f: A => B) = AutodocImpl(description, request, response.map(f))
}
private[this] def descriptionOption[A](description: String): Option[String] =
if(description.trim.isEmpty) None else Some(description)
def json[A <: JsonToString[A]: DecodeJson](req: Request, description: String = "")
: EitherT[({type l[a] = Free[RequestF, a]})#l, Error, Autodoc[A]] =
Core.jsonResponse(req).map[Autodoc[A]](res =>
AutodocImpl(descriptionOption(description), req, res))
def string(req: Request, description: String = "")
: EitherT[({type l[a] = Free[RequestF, a]})#l, Throwable, Autodoc[String]] =
Core.stringResponse(req).map[Autodoc[String]](res =>
AutodocImpl(descriptionOption(description), req, res))
def raw(req: Request, description: String = "")
: EitherT[({type l[a] = Free[RequestF, a]})#l, Throwable, Autodoc[ByteArray]] =
Core.raw(req).map[Autodoc[ByteArray]](res =>
AutodocImpl(descriptionOption(description), req, res))
}
|
pocketberserker/dog-autodoc
|
core/src/main/scala/dog/autodoc/Autodoc.scala
|
Scala
|
mit
| 2,775 |
package au.com.dius.pact.consumer
import au.com.dius.pact.model.{RequestResponseInteraction, _}
import difflib.DiffUtils
import groovy.json.JsonOutput
@Deprecated
object PrettyPrinter {
//TODO: allow configurable context lines
val defaultContextLines = 3
def print(session: PactSessionResults): String = {
printAlmost(session.almostMatched) + printMissing(session.missing) + printUnexpected(session.unexpected)
}
def printDiff(label: String, expected: List[String], actual: List[String], contextLines: Int = defaultContextLines): Seq[String] = {
import scala.collection.JavaConversions._
val patch = DiffUtils.diff(expected, actual)
val uDiff = DiffUtils.generateUnifiedDiff(label, "", expected, patch, contextLines)
uDiff.toSeq
}
def printMapMismatch[A, B](label: String, expected: Map[A, B], actual: Map[A, B])(implicit oA: Ordering[A]): Seq[String] = {
def stringify(m: Map[A,B]): List[String] = m.toList.sortBy(_._1).map(t => t._1+ " = " + t._2)
printDiff(label, stringify(expected), stringify(actual))
}
def printStringMismatch(label: String, expected: Any, actual: Any): Seq[String] = {
def stringify(s: String) = s.toString.split("\\n").toList
def anyToString(a: Any) : String = {
a match {
case None => ""
case Some(s) => anyToString(s)
case _ => a.toString
}
}
printDiff(label, stringify(anyToString(expected)), stringify(anyToString(actual)))
}
def printProblem(interaction:Interaction, partial: Seq[RequestPartMismatch]): String = {
partial.flatMap {
case HeaderMismatch(key, expected, actual, mismatch) => printStringMismatch("Header " + key, expected, actual)
case BodyMismatch(expected, actual, mismatch, path, _) => printStringMismatch("Body",
JsonOutput.prettyPrint(expected.toString), JsonOutput.prettyPrint(actual.toString))
case CookieMismatch(expected, actual) => printDiff("Cookies", expected.sorted, actual.sorted)
case PathMismatch(expected, actual, _) => printDiff("Path", List(expected), List(actual), 0)
case MethodMismatch(expected, actual) => printDiff("Method", List(expected), List(actual), 0)
}.mkString("\\n")
}
def printAlmost(almost: List[PartialRequestMatch]): String = {
def partialRequestMatch(p:PartialRequestMatch): Iterable[String] = {
val map: Map[Interaction, Seq[RequestPartMismatch]] = p.problems
map.flatMap {
case (_, Nil) => None
case (i, mismatches) => Some(printProblem(i, mismatches))
}
}
almost.flatMap(partialRequestMatch).mkString("\\n")
}
def printMissing(missing: List[Interaction]) = {
if(missing.isEmpty) {
""
} else {
s"missing:\\n ${missing.map(_.asInstanceOf[RequestResponseInteraction].getRequest).mkString("\\n")}"
}
}
def printUnexpected(unexpected: List[Request]) = {
if(unexpected.isEmpty) {
""
} else {
s"unexpected:\\n${unexpected.mkString("\\n")}"
}
}
}
|
Fitzoh/pact-jvm
|
pact-jvm-consumer/src/main/scala/au/com/dius/pact/consumer/PrettyPrinter.scala
|
Scala
|
apache-2.0
| 2,986 |
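A usage sketch (not from the repository; the demo object and header values are invented, and it assumes the pact-jvm dependencies such as difflib are on the classpath) for the map-diff helper above.

import au.com.dius.pact.consumer.PrettyPrinter

object HeaderDiffDemo extends App {
  val expected = Map("Content-Type" -> "application/json", "Accept" -> "*/*")
  val actual   = Map("Content-Type" -> "text/plain",       "Accept" -> "*/*")
  // Renders a unified diff of the two maps, one "key = value" line per entry, sorted by key.
  PrettyPrinter.printMapMismatch("Headers", expected, actual).foreach(println)
}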
//======================================================================================================================
// Facsimile: A Discrete-Event Simulation Library
// Copyright © 2004-2020, Michael J Allen.
//
// This file is part of Facsimile.
//
// Facsimile is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later
// version.
//
// Facsimile is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
// details.
//
// You should have received a copy of the GNU Lesser General Public License along with Facsimile. If not, see:
//
// http://www.gnu.org/licenses/lgpl.
//
// The developers welcome all comments, suggestions and offers of assistance. For further information, please visit the
// project home page at:
//
// http://facsim.org/
//
// Thank you for your interest in the Facsimile project!
//
// IMPORTANT NOTE: All patches (modifications to existing files and/or the addition of new files) submitted for
// inclusion as part of the official Facsimile code base, must comply with the published Facsimile Coding Standards. If
// your code fails to comply with the standard, then your patches will be rejected. For further information, please
// visit the coding standards at:
//
// http://facsim.org/Documentation/CodingStandards/
//======================================================================================================================
//======================================================================================================================
// Scala source file belonging to the org.facsim.util.log package.
//======================================================================================================================
package org.facsim.util
/** ''Facsimile Utility'' library logging root package.
*
* This library contains tools for streaming log messages.
*
* @since 0.2
*/
package object log
|
MichaelJAllen/facsimile
|
facsimile-util/src/main/scala/org/facsim/util/log/package.scala
|
Scala
|
lgpl-3.0
| 2,206 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.3
* @date Sun Mar 2 20:37:18 EST 2014
* @see LICENSE (MIT style license file).
*/
package apps.event
import scalation.model.Modelable
import scalation.event.{Entity, Event, Model, WaitQueue}
import scalation.linalgebra.{MatrixD, VectorD}
import scalation.queueingnet.MMc_Queue
import scalation.random.{Exponential, Variate}
import scalation.stat.Statistic
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Machine` object defines a particular scenario under which to execute the
* Machine model.
* @see scalation.event.ModelTest for another example of test code.
* > run-main apps.event.Machine
*/
object Machine extends App with Modelable
{
val stream = 1 // random number stream (0 to 99)
val lambda = 6.0 // part arrival rate (per hr)
val mu = 7.5 // part service rate (per hr)
val maxParts = 10 // stopping rule: at maxParts
val iArrivalRV = Exponential (HOUR/lambda, stream) // inter-arrival time random var
val serviceRV = Exponential (HOUR/mu, stream) // service time random variate
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Run the simulation of `MachineModel`.
* @param startTime the start time for the simulation
*/
def simulate (startTime: Double)
{
new MachineModel ("Machine", maxParts, iArrivalRV, serviceRV)
} // simulate
simulate (0.0)
} // Machine object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `MachineModel` class defines an Event-Scheduling model of a two-stage
* manufacturing process, which consists of two machines in series M1 and M2.
* @param name the name of the simulation model
* @param nArrivals the number of arrivals to generate (stopping condition)
* @param iArrivalRV the inter-arrival time distribution
* @param serviceRV the service time distribution
*/
class MachineModel (name: String, nArrivals: Int, iArrivalRV: Variate, serviceRV: Variate)
extends Model (name)
{
val t_a_stat = new Statistic ("t_a") // time between Arrivals statistics
val t_s1_stat = new Statistic ("t_s1") // time in Service statistics for M1
    val t_s2_stat = new Statistic ("t_s2") // time in Service statistics for M2
val waitQueue_M1 = WaitQueue (this, "1", 10) // waiting queue for M1 that collects stats
val waitQueue_M2 = WaitQueue (this, "2", 10) // waiting queue for M2 that collects stats
var nArr = 0 // number of parts that have arrived
var nIn = 0 // number of parts in the system
var nM1 = 0 // number of parts in service at M1
    var nM2 = 0 // number of parts in service at M2
var nOut = 0 // number of parts that finished & left
addStats (t_a_stat, t_s1_stat, t_s2_stat)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** `Arrival` is a subclass of `Event` for handling arrival events.
* The 'occur' method triggers future events and updates the current state.
* @param part the entity that arrives, in this case a part
* @param delay the time delay for this event's occurrence
*/
case class Arrival (part: Entity, delay: Double)
extends Event (part, this, delay, t_a_stat)
{
def occur ()
{
if (nArr < nArrivals-1) {
val toArrive = Entity (iArrivalRV.gen, serviceRV.gen, MachineModel.this)
schedule (Arrival (toArrive, toArrive.iArrivalT))
} // if
if (nM1 == 0) {
nM1 = 1
schedule (Departure_M1 (part, part.serviceT))
} else {
waitQueue_M1 += part // collects time in Queue statistics
} // if
nArr += 1 // update the current state
nIn += 1
} // occur
} // Arrival class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** `Departure_M1` is a subclass of `Event` for handling departure events
* from machine M1.
* The 'occur' method triggers future events and updates the current state.
* @param part the entity that departs, in this case a part
* @param delay the time delay for this event's occurrence
*/
case class Departure_M1 (part: Entity, delay: Double)
extends Event (part, this, delay, t_s1_stat)
{
def occur ()
{
if (waitQueue_M1.isEmpty) {
nM1 = 0
} else {
val nextService = waitQueue_M1.dequeue () // first part in queue
schedule (Departure_M1 (nextService, nextService.serviceT))
} // if
if (nM2 == 0) {
nM2 = 1
schedule (Departure_M2 (part, part.serviceT))
} else {
waitQueue_M2 += part // collects time in Queue statistics
} // if
} // occur
} // Departure_M1 class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** `Departure_M2` is a subclass of `Event` for handling departure events.
* from machine M2.
* The 'occur' method triggers future events and updates the current state.
* @param part the entity that departs, in this case a part
* @param delay the time delay for this event's occurrence
*/
case class Departure_M2 (part: Entity, delay: Double)
extends Event (part, this, delay, t_s2_stat)
{
def occur ()
{
leave (part) // collects time in sYstem statistics
if (waitQueue_M2.isEmpty) {
nM2 = 0
} else {
val nextService = waitQueue_M2.dequeue () // first part in queue
schedule (Departure_M2 (nextService, nextService.serviceT))
} // if
nIn -= 1 // update the current state
nOut += 1
} // occur
} // Departure_M2 class
//:: start the simulation after scheduling the first priming event
val firstArrival = Entity (iArrivalRV.gen, serviceRV.gen, this)
schedule (Arrival (firstArrival, firstArrival.iArrivalT)) // first priming event
simulate () // start simulating
val nScrap = waitQueue_M1.barred + waitQueue_M2.barred
report (("nArr", nArr), ("nIn", nIn), ("nOut", nOut), ("nScrap", nScrap))
reportStats
} // MachineModel class
|
scalation/fda
|
scalation_1.3/scalation_models/src/main/scala/apps/event/Machine.scala
|
Scala
|
mit
| 7,059 |
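As an aside (not part of the repository; the object name is invented), the chosen rates make each machine an M/M/1 station with utilisation 0.8 when viewed in isolation, so a hand calculation gives a rough idea of the congestion the simulation should report; the finite queues of capacity 10 and the resulting scrapped parts mean the simulated figures will sit somewhat below these values.

object MachineSanityCheck extends App {
  val lambda = 6.0                       // part arrivals per hour
  val mu     = 7.5                       // service rate per hour at each machine
  val rho    = lambda / mu               // utilisation: 0.8
  val lq     = rho * rho / (1.0 - rho)   // M/M/1 expected number waiting: 3.2
  val wq     = lq / lambda               // expected wait in queue: ~0.53 h
  println(f"rho=$rho%.2f Lq=$lq%.2f Wq=$wq%.3f h")
}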
package warsztat
import org.scalatest.{FlatSpec, Matchers}
class RichByteArrayTest extends FlatSpec with Matchers {
it should "render byte array" in {
import warsztat.RichByteArray.RichByteArray
val in = "some data"
in.getBytes.render("|") shouldBe s"|$in"
}
}
|
bjankie1/warsztat-scala
|
src/test/scala/warsztat/RichByteArrayTest.scala
|
Scala
|
apache-2.0
| 281 |
package leo.modules.calculus
import leo.datastructures.Term
import leo.modules.HOLSignature.{<=>, o}
import leo.{Checked, LeoTestSuite}
/**
* Created by mwisnie on 1/14/16.
*/
class DefExpansionTest extends LeoTestSuite{
implicit val s = getFreshSignature
test("DefExpansion Equivalenz", Checked) {
val a = Term.mkAtom(s.addUninterpreted("a",o))
val b = Term.mkAtom(s.addUninterpreted("b",o))
val t = <=>(a,b)
val dt = DefExpSimp(t)
println(dt.pretty)
}
}
|
lex-lex/Leo-III
|
src/test/scala/leo/modules/calculus/DefExpansionTest.scala
|
Scala
|
bsd-3-clause
| 491 |
package org.clulab.sequences
import java.io._
import org.clulab.learning._
import org.clulab.processors.{Document, Sentence}
import org.clulab.sequences.SequenceTaggerLogger._
import org.clulab.struct.Counter
import org.clulab.utils.SeqUtils
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
/**
* Bidirectional MEMM sequence tagger
* User: mihais
* Date: 8/27/17
*/
abstract class BiMEMMSequenceTagger[L: ClassTag, F](
var order:Int,
var numFoldsFirstPass:Int, // if < 2, this reverts to a single-layer MEMM
var leftToRight:Boolean) extends SequenceTagger[L, F] {
  /** C'tor for a single-layer, left-to-right MEMM of order 2 */
def this(order:Int = 2, leftToRight:Boolean = true) {
this(order, -1, leftToRight)
}
var firstPassModel:Option[Classifier[L, F]] = None
var secondPassModel:Option[Classifier[L, F]] = None
override def train(docs:Iterator[Document]): Unit = {
val sentences = new ArrayBuffer[Sentence]()
for(doc <- docs; sent <- doc.sentences) {
sentences += sent
}
logger.info(s"Training on ${sentences.size} sentences using order $order.")
// count bigrams in the corpus, and keep a set of those that occur > BIGRAM_THRESHOLD times
FeatureExtractor.countBigrams(sentences, FeatureExtractor.BIGRAM_THRESHOLD)
var firstPassLabels:Option[Array[Array[L]]] = None
var acc = 0.0
if(numFoldsFirstPass > 1) {
// generate first-pass labels
// try to read them from cached file, if it exists
val firstPassFile = new File(FIRST_PASS_FILE)
firstPassLabels = if(firstPassFile.exists()) {
logger.debug(s"Found cached file with first-pass labels: $FIRST_PASS_FILE")
val source = scala.io.Source.fromFile(firstPassFile)
val labels = readFirstPassLabels(source)
source.close()
Some(labels)
} else {
logger.debug("Generating first-pass labels from scratch...")
val labels = mkFirstPassLabels(sentences)
val pw = new PrintWriter(new FileWriter(FIRST_PASS_FILE))
for(s <- labels) {
pw.println(s.mkString("\\t"))
}
pw.close()
Some(labels)
}
assert(firstPassLabels.get.length >= sentences.size)
// compute the accuracy of the first pass
acc = accuracy(sentences, firstPassLabels.get)
}
// make the second-pass classifier
logger.debug("Training the second-pass classifier on the whole data...")
secondPassModel = Some(buildClassifier(sentences, mkFullFold(sentences.size),
leftToRight, firstPassLabels))
if(numFoldsFirstPass > 1) {
// make the first-pass classifier on the whole data
logger.debug("Training the first-pass classifier on the whole data...")
firstPassModel = Some(buildClassifier(sentences, mkFullFold(sentences.size),
!leftToRight, None))
}
logger.info("Finished training.")
if(firstPassLabels.nonEmpty)
logger.info(s"The accuracy of the first pass classifier was $acc.")
}
private val FIRST_PASS_FILE = "first_pass_labels.tsv"
protected def readFirstPassLabels(source:scala.io.Source):Array[Array[L]]
def mkFirstPassLabels(sentences: ArrayBuffer[Sentence]): Array[Array[L]] = {
val folds = Datasets.mkFolds(numFoldsFirstPass, sentences.size)
// generate first-pass labels through cross validation
logger.debug("Generating first pass labels...")
val labels = new Array[Array[L]](sentences.size)
var foldCount = 1
for(fold <- folds) {
logger.debug(s"In fold $foldCount: ${fold.testFold}...")
foldCount += 1
val classifier = buildClassifier(sentences, fold, ! leftToRight, None)
for(si <- fold.testFold._1 until fold.testFold._2) {
labels(si) = classesOf(classifier, sentences(si), None, ! leftToRight)
}
}
labels
}
def accuracy(sentences: ArrayBuffer[Sentence], labels:Array[Array[L]]):Double = {
// check the accuracy of these labels
var total = 0
var correct = 0
for(i <- sentences.indices) {
val sent = sentences(i)
val gold = labelExtractor(sent)
val pred = labels(i)
assert(gold != null && pred != null)
assert(gold.length == pred.length)
total += gold.length
for(j <- gold.indices) {
if(gold(j) == pred(j)) correct += 1
}
}
val acc = 100.0 * correct.toDouble / total.toDouble
logger.info(s"Accuracy of first pass classifier: $acc% ($correct/$total)")
acc
}
def mkFeatures(features:Counter[F],
sentence:Sentence,
offset:Int,
history:Seq[L],
firstPassLabels:Option[Array[L]]): Unit = {
//
// add features from observed data
//
featureExtractor(features, sentence, offset)
//
// add history features:
// concatenate the labels of the previous <order> tokens to the features
// then store each example in the training dataset
//
addHistoryFeatures(features, order, history, offset)
//
// add features from first-pass labels (if any)
//
if (firstPassLabels.nonEmpty) {
addFirstPassFeatures(features, order, firstPassLabels.get, offset)
}
}
def buildClassifier(
sentences: ArrayBuffer[Sentence],
fold: DatasetFold,
leftToRight:Boolean,
firstPassLabels:Option[Array[Array[L]]]): Classifier[L, F] = {
// construct the dataset from the training partitions
val dataset = mkDataset
var sentCount = 0
for (trainFold <- fold.trainFolds; sentOffset <- trainFold._1 until trainFold._2) {
// original sentence
val origSentence = sentences(sentOffset)
// actual sentence to be used
val sentence = if (leftToRight) origSentence else origSentence.revert()
// labels to be learned
val labels =
if (leftToRight) labelExtractor(origSentence)
else SeqUtils.revert(labelExtractor(origSentence)).toArray
// labels from the first pass (if any)
val firstPass =
if(firstPassLabels.nonEmpty) {
if(leftToRight) Some(firstPassLabels.get(sentOffset))
else Some(SeqUtils.revert(firstPassLabels.get(sentOffset)).toArray)
} else {
None
}
val features = new Array[Counter[F]](sentence.size)
assert(labels.length == features.length)
for (i <- features.indices) features(i) = new Counter[F]()
for(i <- 0 until sentence.size) {
// add all features
mkFeatures(features(i), sentence, i, labels, firstPass)
// add one datum for each word in the sentence
val d = mkDatum(labels(i), features(i))
dataset += d
}
sentCount += 1
if (sentCount % 100 == 0) {
logger.debug(s"Processed $sentCount sentences...")
}
}
// train
val classifier = mkClassifier
classifier.train(dataset)
classifier
}
def classesOf(classifier: Classifier[L, F],
origSentence: Sentence,
firstPassLabels:Option[Array[L]],
leftToRight:Boolean): Array[L] = {
val sentence = if(leftToRight) origSentence else origSentence.revert()
val firstPass =
if(firstPassLabels.nonEmpty) {
if(leftToRight) firstPassLabels
else Some(SeqUtils.revert(firstPassLabels.get).toArray)
} else {
None
}
val history = new ArrayBuffer[L]()
for(i <- 0 until sentence.size) {
val feats = new Counter[F]
mkFeatures(feats, sentence, i, history, firstPass)
val d = mkDatum(null.asInstanceOf[L], feats)
val label = classifier.classOf(d)
history += label
}
if(leftToRight) history.toArray else SeqUtils.revert(history).toArray
}
override def classesOf(sentence: Sentence):Array[L] = {
var firstPassLabels:Option[Array[L]] = None
if(firstPassModel.nonEmpty)
firstPassLabels = Some(classesOf(firstPassModel.get, sentence, None, ! leftToRight))
val secondPassLabels = classesOf(secondPassModel.get, sentence, firstPassLabels, leftToRight)
secondPassLabels
}
private def mkDataset: Dataset[L, F] = new RVFDataset[L, F]()
private def mkDatum(label:L, features:Counter[F]): Datum[L, F] = new RVFDatum[L, F](label, features)
private def mkClassifier: Classifier[L, F] = new L1LogisticRegressionClassifier[L, F]() // TODO: add all classifiers
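/** Builds a pseudo-fold whose training partition spans the entire dataset and whose test partition is empty. */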
private def mkFullFold(size:Int): DatasetFold =
new DatasetFold(testFold = Tuple2(-1, -1), trainFolds = List(Tuple2(0, size)))
override def save(fn:File): Unit = {
// save meta data
var w = new PrintWriter(new FileWriter(fn))
w.println(order)
w.println(leftToRight)
// save second pass model
secondPassModel.get.saveTo(w)
w.close()
// save first pass model (if any)
w = new PrintWriter(new FileWriter(fn, true))
if(firstPassModel.nonEmpty) {
w.println(1)
firstPassModel.get.saveTo(w)
} else {
w.println(0)
}
w.close()
}
override def load(reader:BufferedReader) {
// load meta data
order = reader.readLine().toInt
leftToRight = reader.readLine().toBoolean
// load second pass classifier
secondPassModel = Some(LiblinearClassifier.loadFrom[L, F] (reader))
reader.readLine()
// load first pass classifier (if any)
val hasFirstPass = reader.readLine().toInt
if(hasFirstPass == 1) {
firstPassModel = Some(LiblinearClassifier.loadFrom[L, F](reader))
} else {
firstPassModel = None
}
reader.close()
}
}
|
sistanlp/processors
|
main/src/main/scala/org/clulab/sequences/BiMEMMSequenceTagger.scala
|
Scala
|
apache-2.0
| 9,503 |
package creational
/**
* Scala provides a concise, direct realization of the singleton pattern through the `object` keyword.
* Objects can inherit methods from classes or interfaces, and can be referenced directly or via an inherited interface.
* In Scala, objects are initialized on-demand.
*
* @author Daniel Leon
*/
object Singleton {
def main(args:Array[String]) {
object Cat extends Runnable {
def run() {
println("I'm a Scala cat running around !")
}
}
Cat.run()
}
}
|
LeonDaniel/DesignPatterns
|
ScalaPatterns/src/main/scala/creational/Singleton.scala
|
Scala
|
lgpl-3.0
| 507 |
package org.bitcoins.rpc.client.common
import java.util.UUID
import akka.actor.ActorSystem
import akka.http.javadsl.model.headers.HttpCredentials
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.stream.{ActorMaterializer, StreamTcpException}
import akka.util.ByteString
import org.bitcoins.core.config.{MainNet, NetworkParameters, RegTest, TestNet3}
import org.bitcoins.core.crypto.ECPrivateKey
import org.bitcoins.core.util.BitcoinSLogger
import org.bitcoins.rpc.config.BitcoindInstance
import org.bitcoins.rpc.serializers.JsonSerializers._
import org.bitcoins.rpc.util.AsyncUtil
import play.api.libs.json._
import scala.concurrent._
import scala.concurrent.duration.DurationInt
import scala.sys.process._
import scala.util.{Failure, Success, Try}
import java.nio.file.Files
import org.bitcoins.rpc.config.BitcoindAuthCredentials.CookieBased
import org.bitcoins.rpc.config.BitcoindAuthCredentials.PasswordBased
import java.nio.file.Path
import org.bitcoins.rpc.config.BitcoindAuthCredentials
import org.bitcoins.rpc.BitcoindException
/**
* This is the base trait for Bitcoin Core
* RPC clients. It defines no RPC calls
* except for a ping. It contains functionality
* and utilities useful when working with an RPC
* client, like data directories, log files
* and whether or not the client is started.
*/
trait Client extends BitcoinSLogger {
def version: BitcoindVersion
protected val instance: BitcoindInstance
/**
* The log file of the Bitcoin Core daemon
*/
lazy val logFile: Path = {
val prefix = instance.network match {
case MainNet => ""
case TestNet3 => "testnet"
case RegTest => "regtest"
}
instance.datadir.toPath.resolve(prefix).resolve("debug.log")
}
/** The configuration file of the Bitcoin Core daemon */
lazy val confFile: Path =
instance.datadir.toPath.resolve("bitcoin.conf")
implicit protected val system: ActorSystem
implicit protected val materializer: ActorMaterializer =
ActorMaterializer.create(system)
implicit protected val executor: ExecutionContext = system.getDispatcher
implicit protected val network: NetworkParameters = instance.network
/**
* This is here (and not in JsonWriters)
* so that the implicit network val is accessible
*/
implicit object ECPrivateKeyWrites extends Writes[ECPrivateKey] {
override def writes(o: ECPrivateKey): JsValue = JsString(o.toWIF(network))
}
implicit val eCPrivateKeyWrites: Writes[ECPrivateKey] = ECPrivateKeyWrites
implicit val importMultiAddressWrites: Writes[RpcOpts.ImportMultiAddress] =
Json.writes[RpcOpts.ImportMultiAddress]
implicit val importMultiRequestWrites: Writes[RpcOpts.ImportMultiRequest] =
Json.writes[RpcOpts.ImportMultiRequest]
private val resultKey: String = "result"
private val errorKey: String = "error"
def getDaemon: BitcoindInstance = instance
/** Starts bitcoind on the local system.
* @return a future that completes when bitcoind is fully started.
* This future times out after 60 seconds if the client
* cannot be started
*/
def start(): Future[Unit] = {
if (version != BitcoindVersion.Unknown) {
val foundVersion = instance.getVersion
if (foundVersion != version) {
throw new RuntimeException(
s"Wrong version for bitcoind RPC client! Expected $version, got $foundVersion")
}
}
val binaryPath = instance.binary.getAbsolutePath
val cmd = List(binaryPath,
"-datadir=" + instance.datadir,
"-rpcport=" + instance.rpcUri.getPort,
"-port=" + instance.uri.getPort)
logger.debug(
s"starting bitcoind with datadir ${instance.datadir} and binary path $binaryPath")
val _ = Process(cmd).run()
def isStartedF: Future[Boolean] = {
val started: Promise[Boolean] = Promise()
val pingF = bitcoindCall[Unit]("ping", printError = false)
pingF.onComplete {
case Success(_) => started.success(true)
case Failure(_) => started.success(false)
}
started.future
}
// if we're doing cookie based authentication, we might attempt
// to read the cookie file before it's written. this ensures
// we avoid that
val awaitCookie: BitcoindAuthCredentials => Future[Unit] = {
case cookie: CookieBased =>
val cookieExistsF =
AsyncUtil.retryUntilSatisfied(Files.exists(cookie.cookiePath))
cookieExistsF.onComplete {
case Failure(exception) =>
logger.error(s"Cookie filed was never created! $exception")
case _: Success[_] =>
}
cookieExistsF
case _: PasswordBased => Future.successful(())
}
val started = {
for {
_ <- awaitCookie(instance.authCredentials)
_ <- AsyncUtil.retryUntilSatisfiedF(() => isStartedF,
duration = 1.seconds,
maxTries = 60)
} yield ()
}
started.onComplete {
case Success(_) => logger.debug(s"started bitcoind")
case Failure(exc) =>
logger.info(
s"Could not start bitcoind instance! Message: ${exc.getMessage}")
// When we're unable to start bitcoind that's most likely
// either a configuration error or bug in Bitcoin-S. In either
// case it's much easier to debug this with conf and logs
// dumped somewhere. Especially in tests this is
// convenient, as our test framework deletes the data directories
// of our instances. We don't want to do this on mainnet,
// as both the logs and conf file most likely contain sensitive
// information
if (network != MainNet) {
val tempfile = Files.createTempFile("bitcoind-log-", ".dump")
val logfile = Files.readAllBytes(logFile)
Files.write(tempfile, logfile)
logger.info(s"Dumped debug.log to $tempfile")
val otherTempfile = Files.createTempFile("bitcoin-conf-", ".dump")
val conffile = Files.readAllBytes(confFile)
Files.write(otherTempfile, conffile)
logger.info(s"Dumped bitcoin.conf to $otherTempfile")
}
}
started
}
/**
* Checks whether the underlying bitcoind daemon is running
*/
def isStartedF: Future[Boolean] = {
def tryPing: Future[Boolean] = {
val request = buildRequest(instance, "ping", JsArray.empty)
val responseF = sendRequest(request)
val payloadF: Future[JsValue] =
responseF.flatMap(getPayload(_, command = "ping", request = request))
// Ping successful if no error can be parsed from the payload
val parsedF = payloadF.map { payload =>
(payload \ errorKey).validate[BitcoindException] match {
case _: JsSuccess[BitcoindException] => false
case _: JsError => true
}
}
parsedF.recover {
case exc: StreamTcpException
if exc.getMessage.contains("Connection refused") =>
false
}
}
instance.authCredentials match {
case cookie: CookieBased if Files.notExists(cookie.cookiePath) =>
// if the cookie file doesn't exist we're not started
Future.successful(false)
case (CookieBased(_, _) | PasswordBased(_, _)) => tryPing
}
}
/**
* Checks whether the underlying bitcoind daemon is stopped
*/
def isStoppedF: Future[Boolean] = {
isStartedF.map(started => !started)
}
// This RPC call is here to avoid a circular trait dependency
def ping(): Future[Unit] = {
bitcoindCall[Unit]("ping")
}
protected def bitcoindCall[T](
command: String,
parameters: List[JsValue] = List.empty,
printError: Boolean = true)(
implicit
reader: Reads[T]): Future[T] = {
val request = buildRequest(instance, command, JsArray(parameters))
val responseF = sendRequest(request)
val payloadF: Future[JsValue] =
responseF.flatMap(getPayload(_, command, request, parameters))
payloadF.map { payload =>
/**
* These lines are handy if you want to inspect what's being sent to and
* returned from bitcoind before it's parsed into a Scala type. However,
* there will sensitive material in some of those calls (private keys,
* XPUBs, balances, etc). It's therefore not a good idea to enable
* this logging in production.
*/
// logger.info(
// s"Command: $command ${parameters.map(_.toString).mkString(" ")}")
// logger.info(s"Payload: \n${Json.prettyPrint(payload)}")
parseResult(result = (payload \ resultKey).validate[T],
json = payload,
printError = printError,
command = command)
}
}
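/** Builds a JSON-RPC POST request carrying the method name, the params array and a random UUID id, authenticated with HTTP basic credentials. */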
protected def buildRequest(
instance: BitcoindInstance,
methodName: String,
params: JsArray): HttpRequest = {
val uuid = UUID.randomUUID().toString
val m: Map[String, JsValue] = Map("method" -> JsString(methodName),
"params" -> params,
"id" -> JsString(uuid))
val jsObject = JsObject(m)
// Would toString work?
val uri = "http://" + instance.rpcUri.getHost + ":" + instance.rpcUri.getPort
val username = instance.authCredentials.username
val password = instance.authCredentials.password
HttpRequest(
method = HttpMethods.POST,
uri,
entity = HttpEntity(ContentTypes.`application/json`, jsObject.toString()))
.addCredentials(
HttpCredentials.createBasicHttpCredentials(username, password))
}
protected def sendRequest(req: HttpRequest): Future[HttpResponse] = {
Http(materializer.system).singleRequest(req)
}
/** Parses the payload of the given response into JSON.
*
* The command, parameters and request are given as debug parameters,
* and only used for printing diagnostics if things go belly-up.
*/
protected def getPayload(
response: HttpResponse,
command: String,
request: HttpRequest,
parameters: List[JsValue] = List.empty): Future[JsValue] = {
val payloadF = response.entity.dataBytes.runFold(ByteString.empty)(_ ++ _)
payloadF.flatMap { payload =>
Try(Json.parse(payload.decodeString(ByteString.UTF_8))) match {
case Failure(err) =>
if (network != MainNet) {
logger.error(s"Error when parsing result of command: $command")
logger.error(s"Parameters: ${Json.stringify(JsArray(parameters))}")
logger.error(s"Sent HTTP request: $request")
logger.error(s"Received HTTP response: $response")
logger.error(s"Error: $err")
}
Future.failed(err)
case Success(js) => Future.successful(js)
}
}
}
// Should both logging and throwing be happening?
private def parseResult[T](
result: JsResult[T],
json: JsValue,
printError: Boolean,
command: String
): T = {
checkUnitError[T](result, json, printError)
result match {
case JsSuccess(value, _) => value
case res: JsError =>
(json \ errorKey).validate[BitcoindException] match {
case JsSuccess(err, _) =>
if (printError) {
logger.error(s"$err")
}
throw err
case _: JsError =>
val jsonResult = (json \ resultKey).get
val errString =
s"Error when parsing result of '$command': ${JsError.toJson(res).toString}!"
if (printError) logger.error(errString + s"JSON: $jsonResult")
throw new IllegalArgumentException(
s"Could not parse JsResult: $jsonResult! Error: $errString")
}
}
}
// Catches errors thrown by calls with Unit as the expected return type (which isn't handled by UnitReads)
private def checkUnitError[T](
result: JsResult[T],
json: JsValue,
printError: Boolean): Unit = {
if (result == JsSuccess(())) {
(json \ errorKey).validate[BitcoindException] match {
case JsSuccess(err, _) =>
if (printError) {
logger.error(s"$err")
}
throw err
case _: JsError =>
}
}
}
}
|
bitcoin-s/bitcoin-s-core
|
bitcoind-rpc/src/main/scala/org/bitcoins/rpc/client/common/Client.scala
|
Scala
|
mit
| 12,342 |
import sbt._
import Keys._
import play.Project._
object ApplicationBuild extends Build {
val appName = "WAMPlay"
val appVersion = "0.2.6-SNAPSHOT"
val appDependencies = Seq(
javaCore
)
val main = play.Project(appName, appVersion, appDependencies).settings(
organization := "ws.wamplay",
publishTo := Some(Resolver.file("Bo's Repo", Path.userHome / "code" / "blopker.github.com" / "maven-repo" asFile)),
publishMavenStyle := true,
// hack to suppress javadoc error, see: https://play.lighthouseapp.com/projects/82401/tickets/898-javadoc-error-invalid-flag-g-when-publishing-new-module-local#ticket-898-7
publishArtifact in(Compile, packageDoc) := false
)
}
|
blopker/WAMPlay
|
module/project/Build.scala
|
Scala
|
mit
| 708 |
package chrome.downloads.bindings
sealed trait InterruptReason
object InterruptReason {
val FILE_FAILED: InterruptReason = "FILE_FAILED".asInstanceOf[InterruptReason]
val FILE_ACCESS_DENIED: InterruptReason = "FILE_ACCESS_DENIED".asInstanceOf[InterruptReason]
val FILE_NO_SPACE: InterruptReason = "FILE_NO_SPACE".asInstanceOf[InterruptReason]
val FILE_NAME_TOO_LONG: InterruptReason = "FILE_NAME_TOO_LONG".asInstanceOf[InterruptReason]
val FILE_TOO_LARGE: InterruptReason = "FILE_TOO_LARGE".asInstanceOf[InterruptReason]
val FILE_VIRUS_INFECTED: InterruptReason = "FILE_VIRUS_INFECTED".asInstanceOf[InterruptReason]
val FILE_TRANSIENT_ERROR: InterruptReason = "FILE_TRANSIENT_ERROR".asInstanceOf[InterruptReason]
val FILE_BLOCKED: InterruptReason = "FILE_BLOCKED".asInstanceOf[InterruptReason]
val FILE_SECURITY_CHECK_FAILED: InterruptReason = "FILE_SECURITY_CHECK_FAILED".asInstanceOf[InterruptReason]
val FILE_TOO_SHORT: InterruptReason = "FILE_TOO_SHORT".asInstanceOf[InterruptReason]
val FILE_HASH_MISMATCH: InterruptReason = "FILE_HASH_MISMATCH".asInstanceOf[InterruptReason]
val NETWORK_FAILED: InterruptReason = "NETWORK_FAILED".asInstanceOf[InterruptReason]
val NETWORK_TIMEOUT: InterruptReason = "NETWORK_TIMEOUT".asInstanceOf[InterruptReason]
val NETWORK_DISCONNECTED: InterruptReason = "NETWORK_DISCONNECTED".asInstanceOf[InterruptReason]
val NETWORK_SERVER_DOWN: InterruptReason = "NETWORK_SERVER_DOWN".asInstanceOf[InterruptReason]
val NETWORK_INVALID_REQUEST: InterruptReason = "NETWORK_INVALID_REQUEST".asInstanceOf[InterruptReason]
val SERVER_FAILED: InterruptReason = "SERVER_FAILED".asInstanceOf[InterruptReason]
val SERVER_NO_RANGE: InterruptReason = "SERVER_NO_RANGE".asInstanceOf[InterruptReason]
val SERVER_BAD_CONTENT: InterruptReason = "SERVER_BAD_CONTENT".asInstanceOf[InterruptReason]
val SERVER_UNAUTHORIZED: InterruptReason = "SERVER_UNAUTHORIZED".asInstanceOf[InterruptReason]
val SERVER_CERT_PROBLEM: InterruptReason = "SERVER_CERT_PROBLEM".asInstanceOf[InterruptReason]
val SERVER_FORBIDDEN: InterruptReason = "SERVER_FORBIDDEN".asInstanceOf[InterruptReason]
val SERVER_UNREACHABLE: InterruptReason = "SERVER_UNREACHABLE".asInstanceOf[InterruptReason]
val USER_CANCELED: InterruptReason = "USER_CANCELED".asInstanceOf[InterruptReason]
val USER_SHUTDOWN: InterruptReason = "USER_SHUTDOWN".asInstanceOf[InterruptReason]
val CRASH: InterruptReason = "CRASH".asInstanceOf[InterruptReason]
}
|
lucidd/scala-js-chrome
|
bindings/src/main/scala/chrome/downloads/bindings/InterruptReason.scala
|
Scala
|
mit
| 2,471 |
package com.yiguang.mcdb
import java.util.concurrent.CountDownLatch
import com.yiguang.mcdb.storage.Leveldb
import scala.util.Random
import com.yiguang.util.StringUtils._
/**
* Created by yigli on 14-12-11.
*/
object Benchmark extends App {
val nThread = 100
val leveldbConfig = new Leveldb.Config
val leveldb = new Leveldb(Config.Leveldb.directory,leveldbConfig)
leveldb.init
val count = 10000
val latch = new CountDownLatch(nThread)
val r = new Runnable {
override def run(): Unit = {
var c:Int = 0
var time:Long = 0L
while(c < count){
val s = System.currentTimeMillis()
leveldb.put(Random.nextString(8),Random.nextString(8))
time += (System.currentTimeMillis() - s)
c+=1
}
println("resp:"+ (time / c))
latch.countDown()
}
}
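// Each thread performs `count` puts of random 8-character keys and values and prints its mean per-call latency in milliseconds.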
val start = System.currentTimeMillis()
for(i <- 0 until nThread ){
new Thread(r).start()
}
latch.await()
leveldb.close
val time = System.currentTimeMillis() - start
val qps = count * nThread * 1000 / time
println("Average QPS="+qps)
}
|
liyiguang/memcachedb
|
src/test/scala/com/yiguang/mcdb/Benchmark.scala
|
Scala
|
apache-2.0
| 1,092 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600e.v2
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalInteger, Input}
case class E9(value: Option[Int]) extends CtBoxIdentifier("Gift Aid or Millennium Gift Aid") with CtOptionalInteger with Input
|
ahudspith-equalexperts/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/ct600e/v2/E9.scala
|
Scala
|
apache-2.0
| 837 |
import org.apache.spark.mllib.classification.SVMModel
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.optimization.{HingeGradient,LBFGS, SquaredL2Updater}
import org.apache.spark.mllib.regression.GeneralizedLinearAlgorithm
import org.apache.spark.mllib.util.DataValidators
/**
* Created by diego on 4/12/15.
*/
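/** Linear SVM trained with L-BFGS on the hinge loss and a squared-L2 regularizer, as an alternative to MLlib's SGD-based SVMWithSGD. */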
class SVMWithBFGS extends GeneralizedLinearAlgorithm[SVMModel] with Serializable {
override val optimizer = new LBFGS(new HingeGradient, new SquaredL2Updater)
override protected val validators = List(DataValidators.binaryLabelValidator)
override protected def createModel(weights: Vector, intercept: Double) = {
new SVMModel(weights, intercept)
}
}
|
wxhC3SC6OPm8M1HXboMy/spark-ml-optimization
|
src/main/scala/SVMWithBFGS.scala
|
Scala
|
apache-2.0
| 704 |
package lore.compiler.transformation
import lore.compiler.core.Position
import lore.compiler.semantics.expressions.Expression
import lore.compiler.semantics.functions.MultiFunctionDefinition
import lore.compiler.semantics.scopes.{Binding, StructConstructorBinding, TypedBinding}
import lore.compiler.typing.InferenceVariable
object BindingProcessors {
/**
* A binding processor that coerces multi-functions and constructors to function values.
*/
def accessCoercion(position: Position): Binding => Option[Expression] = {
case mf: MultiFunctionDefinition =>
// Multi-functions which aren't directly used in a simple call must be converted to function values immediately.
Some(Expression.MultiFunctionValue(mf, new InferenceVariable, position))
case binding: StructConstructorBinding =>
if (binding.isConstant) {
Some(Expression.ConstructorValue(binding, binding.underlyingType, position))
} else {
Some(Expression.UntypedConstructorValue(binding, new InferenceVariable, position))
}
case binding: TypedBinding => Some(Expression.BindingAccess(binding, position))
}
}
|
marcopennekamp/lore
|
compiler/src/lore/compiler/transformation/BindingProcessors.scala
|
Scala
|
mit
| 1,142 |
/**
* Copyright (c) 2014-2016 Tim Bruijnzeels
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of this software, nor the names of its contributors, nor
* the names of the contributors' employers may be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package nl.bruijnzeels.tim.rpki.ca.provisioning
import java.security.KeyPair
import java.util.UUID
import javax.security.auth.x500.X500Principal
import net.ripe.rpki.commons.provisioning.x509.{ProvisioningIdentityCertificate, ProvisioningIdentityCertificateBuilder}
import nl.bruijnzeels.tim.rpki.common.domain.KeyPairSupport
case class MyIdentity(id: UUID, identityCertificate: ProvisioningIdentityCertificate, keyPair: KeyPair) {
def toChildXml() = {
import net.ripe.rpki.commons.provisioning.identity._
new ChildIdentitySerializer().serialize(new ChildIdentity(id.toString, identityCertificate))
}
}
object MyIdentity {
def create(id: UUID) = {
val kp = KeyPairSupport.createRpkiKeyPair
val cert = new ProvisioningIdentityCertificateBuilder()
.withSelfSigningKeyPair(kp)
.withSelfSigningSubject(new X500Principal("CN=" + id.toString))
.build()
MyIdentity(id = id, identityCertificate = cert, keyPair = kp)
}
}
|
timbru/rpki-ca
|
src/main/scala/nl/bruijnzeels/tim/rpki/ca/provisioning/MyIdentity.scala
|
Scala
|
bsd-3-clause
| 2,606 |
/**
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.trustedanalytics.sparktk.frame.internal.ops.timeseries
import org.apache.commons.lang.StringUtils
import org.trustedanalytics.sparktk.frame.internal.{ FrameState, FrameSummarization, BaseFrame }
import com.cloudera.sparkts.stats.TimeSeriesStatisticalTests
trait TimeSeriesAugmentedDickeyFullerTestSummarization extends BaseFrame {
/**
* Performs the Augmented Dickey-Fuller (ADF) Test, which tests the null hypothesis of whether a unit root is present
* in a time series sample. The test statistic that is returned is a negative number; the lower the value, the
* stronger the rejection of the hypothesis that there is a unit root, at some level of confidence.
*
* @param tsColumn Name of the column that contains the time series values to use with the ADF test.
* @param maxLag The lag order to calculate the test statistic.
* @param regression The method of regression that was used. Following MacKinnon's notation, this can be "c" for
* constant, "nc" for no constant, "ct" for constant and trend, and "ctt" for constant, trend,
* and trend-squared.
* @return Object that contains the ADF test statistic and p-value
*/
def timeSeriesAugmentedDickeyFullerTest(tsColumn: String,
maxLag: Int,
regression: String = "c"): AdfTestReturn = {
execute(TimeSeriesAugmentedDickeyFullerTest(tsColumn, maxLag, regression))
}
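// Usage sketch (the frame value and column name below are illustrative assumptions, not defined in this file):
//   val adf = frame.timeSeriesAugmentedDickeyFullerTest(tsColumn = "logins", maxLag = 1, regression = "c")
//   println(s"ADF statistic = ${adf.testStat}, p-value = ${adf.pValue}")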
}
case class TimeSeriesAugmentedDickeyFullerTest(tsColumn: String,
maxLag: Int,
regression: String) extends FrameSummarization[AdfTestReturn] {
require(StringUtils.isNotEmpty(tsColumn), "tsColumn name must not be null or empty.")
require(StringUtils.isNotEmpty(regression), "regression string must not be null or empty.")
override def work(state: FrameState): AdfTestReturn = {
val tsVector = TimeSeriesFunctions.getVectorFromFrame(state, tsColumn)
val dftResult = TimeSeriesStatisticalTests.adftest(tsVector, maxLag, regression)
AdfTestReturn(dftResult._1, dftResult._2)
}
}
/**
*
* Return value for the AugmentedDickeyFullerTest
*
* @param testStat Augmented Dickey–Fuller (ADF) statistic
* @param pValue p-value
*/
case class AdfTestReturn(testStat: Double, pValue: Double)
|
ashaarunkumar/spark-tk
|
sparktk-core/src/main/scala/org/trustedanalytics/sparktk/frame/internal/ops/timeseries/AugmentedDickeyFullerTest.scala
|
Scala
|
apache-2.0
| 3,092 |
package audits
import doobie.imports._
import javax.inject.Inject
import play.api.Logger
import play.api.db.Database
/** Doobie implementation of the events DAO */
class EventDaoDoobie @Inject() (db: Database) extends EventDao {
import daos.doobie.DoobieTransactor.transactor
import EventDaoDoobie.qWrite
def write(description: String): Unit = {
Logger.debug(description)
qWrite(description).run.transact(transactor(db)).unsafePerformIO
}
}
object EventDaoDoobie {
def qWrite(description: String): Update0 =
sql"""insert into events(description, moment)
values($description, now())""".update
}
|
kdoomsday/kaminalapp
|
app/audits/EventDaoDoobie.scala
|
Scala
|
mit
| 641 |
/*
smile-df.scala
Testing the use of Smile DataFrames
*/
object SmileApp {
def main(args: Array[String]): Unit = {
val df2 = smile.read.csv("../r/cars93.csv")
val df3 = df2.filter{ _("EngineSize").asInstanceOf[Double] <= 4.0 }
val w = df3.select("Weight")
val wkg = w map {_(0).asInstanceOf[Int] * 0.453592}
val wkgdf = smile.data.DataFrame.of(wkg.toArray.map(Array(_)),"WKG")
val adf = df3 merge wkgdf
smile.write.csv(adf,"cars-smile.csv")
// read it back for good measure...
val rdf = smile.read.csv("cars-smile.csv")
println(rdf)
println(rdf.summary)
}
}
// eof
|
darrenjw/scala-course
|
examples/C6-DataFrames/smiledf/src/main/scala/smile-df.scala
|
Scala
|
gpl-3.0
| 625 |
package chrome.webRequest.bindings
import scala.scalajs.js
@js.native
trait WebResponseErrorDetails extends WebResponseCacheDetails {
/**
* The error description. This string is not guaranteed to remain backwards compatible between releases. You must
* not parse and act based upon its content.
*/
val error: String = js.native
}
|
lucidd/scala-js-chrome
|
bindings/src/main/scala/chrome/webRequest/bindings/WebResponseErrorDetails.scala
|
Scala
|
mit
| 348 |
package model.blog
import org.joda.time.DateTime
import org.joda.time.format.DateTimeFormat
import model.DBEntity
/**
* A file reference to store information of uploaded files to the database
*
* @author Stefan Bleibinhaus
*
*/
case class FileRef(
override val id: Option[String],
val name: String,
val uploaded: DateTime)
extends DBEntity {
private val dateStringformat = DateTimeFormat.forPattern("MMMMM d, yyyy")
private val dateRelUrlformat = DateTimeFormat.forPattern("yyyy/MM")
/**
* @return The uploaded date as string
*/
def uploadedString(): String = dateStringformat.print(uploaded)
/**
* @return The link associated with the upload date (all posts of the corresponding year and month)
*/
def uploadedPostsLink(): String = "/" + dateRelUrlformat.print(uploaded)
}
object FileRef {
def apply(name: String): FileRef = FileRef(None, name, new DateTime)
}
|
ExNexu/scablo
|
app/model/blog/FileRef.scala
|
Scala
|
bsd-2-clause
| 919 |
package org.openapitools.models
import io.circe._
import io.finch.circe._
import io.circe.generic.semiauto._
import io.circe.java8.time._
import org.openapitools._
import org.openapitools.models.Link
/**
*
* @param self
* @param actions
* @param runs
* @param queue
* @param Underscoreclass
*/
case class BranchImpllinks(self: Option[Link],
actions: Option[Link],
runs: Option[Link],
queue: Option[Link],
Underscoreclass: Option[String]
)
object BranchImpllinks {
/**
* Creates the codec for converting BranchImpllinks from and to JSON.
*/
implicit val decoder: Decoder[BranchImpllinks] = deriveDecoder
implicit val encoder: ObjectEncoder[BranchImpllinks] = deriveEncoder
}
|
cliffano/swaggy-jenkins
|
clients/scala-finch/generated/src/main/scala/org/openapitools/models/BranchImpllinks.scala
|
Scala
|
mit
| 792 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dbis.pig.plan
import scala.collection.mutable.{Set => MutableSet}
import scala.util.Random
/** Generates unique random pipe names.
*
*/
object PipeNameGenerator {
/** The length of the generated pipe names.
*
*/
final val length = 10
/** Characters allowed in the generated pipe names.
*
*/
final val characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
/** The number of characters in [[characters]].
*
*/
final lazy val numChars = characters.length
/** The prefix of the pipe name.
*
*/
final val prefix = "pipe"
private val generated: MutableSet[String] = MutableSet.empty
/** Clears the internal set of already generated pipenames.
*
* Note that this means that a future call to [[generate()]] can now return a previously returned value
*/
def clearGenerated = generated.clear
/** Generate a pipe name of the specified `length`.
*
* @param length
* @return
*/
def generate(length: Int): String = {
var generatedName = prefix ++ recGenerate(length)
while(generated contains generatedName) {
generatedName = prefix ++ recGenerate(length)
}
generated += generatedName
generatedName
}
private def recGenerate(length: Int): String = {
length match {
case 0 => ""
case n => characters(Random.nextInt(numChars)).toString ++ recGenerate(n - 1)
}
}
/** Generate a pipe name of length [[length]].
*
* @return
*/
def generate(): String = generate(length)
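// Usage sketch: generate() returns "pipe" followed by 10 random letters, e.g. "pipeKxTqaZbWcd" (illustrative value);
// a name is never returned twice until clearGenerated is called, and names registered via addKnownName are skipped too.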
def addKnownName(name: String) = generated += name
}
|
ksattler/piglet
|
src/main/scala/dbis/pig/plan/PipeNameGenerator.scala
|
Scala
|
apache-2.0
| 2,401 |
/**
* FILE: BuildingWithLevelDAOTest.scala
* PATH: /Codice/sgad/servertier/src/test/scala/sgad/servertier/dataaccess/databaseaccess/shareddatadao
* CREATION DATE: 20 February 2014
* AUTHOR: ProTech
* EMAIL: [email protected]
*
* This file is the property of the ProTech group and is released under the Apache v2 license.
*
* CHANGE LOG:
* 2014-02-20 - Class created - Segantin Fabio
*/
import sgad.servertier.dataaccess.data.shareddata._
import com.mongodb.casbah.commons.Imports.MongoDBObject
import org.scalatest._
import org.joda.time.IllegalFieldValueException
import sgad.servertier.dataaccess.databaseaccess.shareddatadao.{ProductedResourceDAO, BuildingWithLevelDAO, CostDAO, BonusDAO}
/**
* Test class for the BuildingWithLevelDAO object
*/
class BuildingWithLevelDAOTest extends FlatSpec {
DataFactory.setUnits(Map())
DataFactory.setResources(Map())
DataFactory.setBuildings(Map())
var preconditions = Vector[BuildingWithLevel]()
var bonus = new Bonus("bonus1", 2, 3.0F)
var gold = new Resource("oro")
var potion = new Resource("pozione")
DataFactory.setResources(Map("oro" -> gold, "pozione" -> potion))
var quantityResourceVector = Vector(new QuantityResource(gold, 100), new QuantityResource(potion, 300))
var cost = new Cost(1000, quantityResourceVector)
var productedResource1 = new ProductedResource(gold, 1223, 1, 2)
var productedUnit1 = Vector[`Unit`]()
var productedUnit2 = new `Unit`("soldato2", 1, 3, cost, true)
DataFactory.setUnits(Map("soldato2" -> productedUnit2))
var Tower = new BuildingWithLevel(true, bonus, cost, 2, "Torre", preconditions, productedResource1, productedUnit1, 2, false)
DataFactory.setBuildings(Map(Tower.getKey -> Tower))
var mongoObject = MongoDBObject(
"bonus" -> BonusDAO.getMongoObject(bonus),
"cost" -> CostDAO.getMongoObject(cost),
"isConstructible" -> true,
"level" -> 2,
"nameBuilding" -> "Torre",
"precondition" -> preconditions,
"productedResource" -> ProductedResourceDAO.getMongoObject(productedResource1),
"productedUnits" -> productedUnit1,
"unitsSpace" -> 2,
"isDestructible" -> false
)
productedUnit1 = Vector[`Unit`](productedUnit2)
preconditions = Vector[BuildingWithLevel](Tower)
var cave = new BuildingWithLevel(true, bonus, cost, 2, "Miniera", preconditions, productedResource1, productedUnit1, 2, false)
var vectorkeys = preconditions.map((b: BuildingWithLevel) => {
b.getKey
})
var vectorUnit = productedUnit1.map((u: `Unit`) => {
u.getKey
})
var mongoObject2 = MongoDBObject(
"bonus" -> BonusDAO.getMongoObject(bonus),
"cost" -> CostDAO.getMongoObject(cost),
"isConstructible" -> true,
"level" -> 2,
"nameBuilding" -> "Miniera",
"precondition" -> vectorkeys,
"productedResource" -> ProductedResourceDAO.getMongoObject(productedResource1),
"productedUnits" -> vectorUnit,
"unitsSpace" -> 2,
"isDestructible" -> true
)
"BuildingWithLevelDAO" must "creare un MongoObject adeguato" in {
assert(mongoObject == BuildingWithLevelDAO.getMongoObject(Tower))
assert(mongoObject != BuildingWithLevelDAO.getMongoObject(cave))
}
it must "creare un building relativo al mongoObject giusto" in {
assert(Tower == BuildingWithLevelDAO.getObject(mongoObject))
assert(Tower != BuildingWithLevelDAO.getObject(mongoObject2))
}
it must "lanciare una eccezione se il mongoObject non ha le informazioni relative" in {
intercept[IllegalFieldValueException] {
val mongoObject2 = MongoDBObject(
"cost" -> CostDAO.getMongoObject(cost),
"isConstructible" -> true,
"level" -> 2,
"nameBuilding" -> "Miniera",
"precondition" -> vectorkeys,
"productedResource" -> ProductedResourceDAO.getMongoObject(productedResource1),
"productedUnits" -> vectorUnit,
"unitsSpace" -> 2
)
BuildingWithLevelDAO.getObject(mongoObject2)
}
intercept[IllegalFieldValueException] {
val mongoObject2 = MongoDBObject(
"bonus" -> BonusDAO.getMongoObject(bonus),
"isConstructible" -> true,
"level" -> 2,
"nameBuilding" -> "Miniera",
"precondition" -> vectorkeys,
"productedResource" -> ProductedResourceDAO.getMongoObject(productedResource1),
"productedUnits" -> vectorUnit,
"unitsSpace" -> 2
)
BuildingWithLevelDAO.getObject(mongoObject2)
}
intercept[IllegalFieldValueException] {
val mongoObject2 = MongoDBObject(
"bonus" -> BonusDAO.getMongoObject(bonus),
"cost" -> CostDAO.getMongoObject(cost),
"level" -> 2,
"nameBuilding" -> "Miniera",
"precondition" -> vectorkeys,
"productedResource" -> ProductedResourceDAO.getMongoObject(productedResource1),
"productedUnits" -> vectorUnit,
"unitsSpace" -> 2
)
BuildingWithLevelDAO.getObject(mongoObject2)
}
intercept[IllegalFieldValueException] {
val mongoObject2 = MongoDBObject(
"bonus" -> BonusDAO.getMongoObject(bonus),
"cost" -> CostDAO.getMongoObject(cost),
"isConstructible" -> true,
"nameBuilding" -> "Miniera",
"precondition" -> vectorkeys,
"productedResource" -> ProductedResourceDAO.getMongoObject(productedResource1),
"productedUnits" -> vectorUnit,
"unitsSpace" -> 2
)
BuildingWithLevelDAO.getObject(mongoObject2)
}
intercept[IllegalFieldValueException] {
val mongoObject2 = MongoDBObject(
"bonus" -> BonusDAO.getMongoObject(bonus),
"cost" -> CostDAO.getMongoObject(cost),
"isConstructible" -> true,
"level" -> 2,
"precondition" -> vectorkeys,
"productedResource" -> ProductedResourceDAO.getMongoObject(productedResource1),
"productedUnits" -> vectorUnit,
"unitsSpace" -> 2
)
BuildingWithLevelDAO.getObject(mongoObject2)
}
intercept[IllegalFieldValueException] {
val mongoObject2 = MongoDBObject(
"bonus" -> BonusDAO.getMongoObject(bonus),
"cost" -> CostDAO.getMongoObject(cost),
"isConstructible" -> true,
"level" -> 2,
"nameBuilding" -> "Miniera",
"productedResource" -> ProductedResourceDAO.getMongoObject(productedResource1),
"productedUnits" -> vectorUnit,
"unitsSpace" -> 2
)
BuildingWithLevelDAO.getObject(mongoObject2)
}
intercept[IllegalFieldValueException] {
val mongoObject2 = MongoDBObject(
"bonus" -> BonusDAO.getMongoObject(bonus),
"cost" -> CostDAO.getMongoObject(cost),
"isConstructible" -> true,
"level" -> 2,
"nameBuilding" -> "Miniera",
"precondition" -> vectorkeys,
"productedResource" -> ProductedResourceDAO.getMongoObject(productedResource1),
"unitsSpace" -> 2
)
BuildingWithLevelDAO.getObject(mongoObject2)
}
intercept[IllegalFieldValueException] {
val mongoObject2 = MongoDBObject(
"bonus" -> BonusDAO.getMongoObject(bonus),
"cost" -> CostDAO.getMongoObject(cost),
"isConstructible" -> true,
"level" -> 2,
"nameBuilding" -> "Miniera",
"precondition" -> vectorkeys,
"productedResource" -> ProductedResourceDAO.getMongoObject(productedResource1),
"productedUnits" -> vectorUnit
)
BuildingWithLevelDAO.getObject(mongoObject2)
}
}
it must "lanciare una eccezione se il tipo dei dati non è corretto" in {
intercept[IllegalFieldValueException] {
val mongoObject3 = MongoDBObject(
"bonus" -> "{}",
"cost" -> CostDAO.getMongoObject(cost),
"isConstructible" -> true,
"level" -> 2,
"nameBuilding" -> "Miniera",
"precondition" -> vectorkeys,
"productedResource" -> "[]",
"productedUnits" -> vectorUnit,
"unitsSpace" -> 2
)
BuildingWithLevelDAO.getObject(mongoObject3)
}
intercept[IllegalFieldValueException] {
val mongoObject3 = MongoDBObject(
"bonus" -> BonusDAO.getMongoObject(bonus),
"cost" -> "{}",
"isConstructible" -> true,
"level" -> 2,
"nameBuilding" -> "Miniera",
"precondition" -> vectorkeys,
"productedResource" -> "[]",
"productedUnits" -> vectorUnit,
"unitsSpace" -> 2
)
BuildingWithLevelDAO.getObject(mongoObject3)
}
intercept[IllegalFieldValueException] {
val mongoObject3 = MongoDBObject(
"bonus" -> BonusDAO.getMongoObject(bonus),
"cost" -> CostDAO.getMongoObject(cost),
"isConstructible" -> 15,
"level" -> 2,
"nameBuilding" -> "Miniera",
"precondition" -> vectorkeys,
"productedResource" -> "[]",
"productedUnits" -> vectorUnit,
"unitsSpace" -> 2
)
BuildingWithLevelDAO.getObject(mongoObject3)
}
intercept[IllegalFieldValueException] {
val mongoObject3 = MongoDBObject(
"bonus" -> BonusDAO.getMongoObject(bonus),
"cost" -> CostDAO.getMongoObject(cost),
"isConstructible" -> true,
"level" -> 2.5,
"nameBuilding" -> "Miniera",
"precondition" -> vectorkeys,
"productedResource" -> "[]",
"productedUnits" -> vectorUnit,
"unitsSpace" -> 2
)
BuildingWithLevelDAO.getObject(mongoObject3)
}
intercept[IllegalFieldValueException] {
val mongoObject3 = MongoDBObject(
"bonus" -> BonusDAO.getMongoObject(bonus),
"cost" -> CostDAO.getMongoObject(cost),
"isConstructible" -> true,
"level" -> 2,
"nameBuilding" -> 15,
"precondition" -> vectorkeys,
"productedResource" -> "[]",
"productedUnits" -> vectorUnit,
"unitsSpace" -> 2
)
BuildingWithLevelDAO.getObject(mongoObject3)
}
intercept[IllegalFieldValueException] {
val mongoObject3 = MongoDBObject(
"bonus" -> BonusDAO.getMongoObject(bonus),
"cost" -> CostDAO.getMongoObject(cost),
"isConstructible" -> true,
"level" -> 2,
"nameBuilding" -> "Miniera",
"precondition" -> vectorkeys,
"productedResource" -> "[]",
"productedUnits" -> vectorUnit,
"unitsSpace" -> 2.5
)
BuildingWithLevelDAO.getObject(mongoObject3)
}
}
}
|
protechunipd/SGAD
|
Codice/sgad/servertier/src/test/scala/sgad/servertier/dataaccess/databaseaccess/shareddatadao/BuildingWithLevelDAOTest.scala
|
Scala
|
apache-2.0
| 9,641 |
/* - Coeus web framework -------------------------
*
* Licensed under the Apache License, Version 2.0.
*
* Author: Spiros Tzavellas
*/
package com.tzavellas.coeus.i18n.locale
import java.util.Locale
import javax.servlet.http.Cookie
import org.junit.{Test, Before}
import org.junit.Assert._
import org.springframework.mock.web._
class CookieLocaleResolverTest {
val requestLocale = Locale.UK
val request = new MockHttpServletRequest
val response = new MockHttpServletResponse
val resolver = new CookieLocaleResolver
@Before
def setLocaleInHttpRequest() {
request.addPreferredLocale(requestLocale)
}
@Test
def use_default_locale_if_no_cookie_in_request() {
assertEquals(Locale.US, new CookieLocaleResolver(Some(Locale.US)).resolve(request))
}
@Test
def use_request_locale_if_no_cookie_in_request_and_default_not_specified() {
assertEquals(Locale.UK, resolver.resolve(request))
}
@Test
def read_the_locale_from_cookie() {
request.setCookies(new Cookie(resolver.COOKIE_NAME, Locale.US.toString))
assertEquals(Locale.US, resolver.resolve(request))
}
@Test
def set_the_locale_to_a_cookie() {
resolver.setLocale(request, response, Locale.US)
assertEquals(Locale.US, resolver.resolve(request))
assertNotNull(response.getCookie(resolver.COOKIE_NAME))
}
@Test
def setting_the_locale_to_null_resets_to_default_locale() {
resolver.setLocale(request, response, Locale.US)
assertEquals(Locale.US, resolver.resolve(request))
resolver.setLocale(request, response, null)
assertEquals(requestLocale, resolver.resolve(request))
}
}
|
sptz45/coeus
|
src/test/scala/com/tzavellas/coeus/i18n/locale/CookieLocaleResolverTest.scala
|
Scala
|
apache-2.0
| 1,639 |
package org.scalastyle.scalariform
import java.util.regex.Pattern
import org.scalastyle.CombinedAst
import org.scalastyle.CombinedChecker
import org.scalastyle.PositionError
import org.scalastyle.ScalastyleError
import scalariform.lexer.Tokens
/**
* Checks comments for TODO or FIXME markers
*/
class TodoCommentChecker extends CombinedChecker {
val errorKey = "todo.comment"
val defaultWords = "TODO|FIXME"
def verify(ast: CombinedAst): List[ScalastyleError] = {
val words = getString("words", defaultWords)
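// Build a regex matching a comment opener (//, /*, /** or a leading * of a block-comment line), an optional
// space, one of the configured words, an optional colon and at least one whitespace character.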
val split = words.split("\|").map(Pattern.quote).mkString("|")
val regex = ("""(?i)(//|/\*|/\*\*|\*)\s?(""" + split + """)(:?)\s+""").r
for {
t <- ast.compilationUnit.tokens
at <- t.associatedWhitespaceAndComments
if Tokens.COMMENTS.contains(at.token.tokenType)
if at.text.split("\n").exists(s => regex.findFirstIn(s).isDefined)
} yield PositionError(at.token.offset, List(words))
}
}
|
asaitov/scalastyle
|
src/main/scala/org/scalastyle/scalariform/TodoCommentChecker.scala
|
Scala
|
apache-2.0
| 964 |
package redis.api.scripting
import java.io.File
import java.security.MessageDigest
import redis.protocol.{MultiBulk, Bulk}
import redis._
import akka.util.ByteString
object RedisScript {
def fromFile(file: File): RedisScript = {
val source = scala.io.Source.fromFile(file)
val lines = try source.mkString.stripMargin.replaceAll("[\\n\\r]","") finally source.close()
RedisScript(lines)
}
def fromResource(path: String): RedisScript = {
val source = scala.io.Source.fromURL(getClass.getResource(path))
val lines = try source.mkString.stripMargin.replaceAll("[\\n\\r]","") finally source.close()
RedisScript(lines)
}
}
case class RedisScript(script: String) {
lazy val sha1 = {
val messageDigestSha1 = MessageDigest.getInstance("SHA-1")
messageDigestSha1.digest(script.getBytes("UTF-8")).map("%02x".format(_)).mkString
}
}
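// Usage sketch (only command construction is shown; sending through an actual rediscala client connection is assumed):
//   val script = RedisScript("return redis.call('GET', KEYS[1])")
//   callers typically try Evalsha(script.sha1, keys = Seq("mykey")) first and fall back to
//   Eval(script.script, keys = Seq("mykey")) when the server has not cached the script yet.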
trait EvaledScript extends {
val isMasterOnly = true
def encodeRequest[KK, KA](
encoder: ((String, Seq[ByteString]) => ByteString),
command: String,
param: String,
keys: Seq[KK],
args: Seq[KA],
keySerializer: ByteStringSerializer[KK],
argSerializer: ByteStringSerializer[KA]): ByteString = {
encoder(command,
(ByteString(param)
+: ByteString(keys.length.toString)
+: keys.map(keySerializer.serialize)) ++ args.map(argSerializer.serialize))
}
}
case class Eval[R, KK, KA](script: String, keys: Seq[KK] = Seq(), args: Seq[KA] = Seq())(implicit redisKeys: ByteStringSerializer[KK], redisArgs: ByteStringSerializer[KA], deserializerR: RedisReplyDeserializer[R])
extends RedisCommandRedisReplyRedisReply[R]
with EvaledScript {
val encodedRequest: ByteString = encodeRequest(encode, "EVAL", script, keys, args, redisKeys, redisArgs)
val deserializer: RedisReplyDeserializer[R] = deserializerR
}
case class Evalsha[R, KK, KA](sha1: String, keys: Seq[KK] = Seq(), args: Seq[KA] = Seq())(implicit redisKeys: ByteStringSerializer[KK], redisArgs: ByteStringSerializer[KA], deserializerR: RedisReplyDeserializer[R])
extends RedisCommandRedisReplyRedisReply[R]
with EvaledScript {
val encodedRequest: ByteString = encodeRequest(encode, "EVALSHA", sha1, keys, args, redisKeys, redisArgs)
val deserializer: RedisReplyDeserializer[R] = deserializerR
}
case object ScriptFlush extends RedisCommandStatusBoolean {
val isMasterOnly = true
val encodedRequest: ByteString = encode("SCRIPT", Seq(ByteString("FLUSH")))
}
case object ScriptKill extends RedisCommandStatusBoolean {
val isMasterOnly = true
val encodedRequest: ByteString = encode("SCRIPT", Seq(ByteString("KILL")))
}
case class ScriptLoad(script: String) extends RedisCommandBulk[String] {
val isMasterOnly = true
val encodedRequest: ByteString = encode("SCRIPT", Seq(ByteString("LOAD"), ByteString(script)))
def decodeReply(bulk: Bulk) = bulk.toString
}
case class ScriptExists(sha1: Seq[String]) extends RedisCommandMultiBulk[Seq[Boolean]] {
val isMasterOnly = true
val encodedRequest: ByteString = encode("SCRIPT", ByteString("EXISTS") +: sha1.map(ByteString(_)))
def decodeReply(mb: MultiBulk) = MultiBulkConverter.toSeqBoolean(mb)
}
|
mspielberg/rediscala
|
src/main/scala/redis/api/Scripting.scala
|
Scala
|
apache-2.0
| 3,260 |
package org.zalando.nakadi.client.scala
import scala.concurrent.duration.DurationInt
import org.scalatest.Matchers
import org.scalatest.WordSpec
import org.zalando.nakadi.client.scala.model._
import org.zalando.nakadi.client.scala.model.ScalaJacksonJsonMarshaller
import org.zalando.nakadi.client.utils.AkkaConfig
import org.zalando.nakadi.client.utils.TestScalaEntity
import com.fasterxml.jackson.core.`type`.TypeReference
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.JsonScalaEnumeration
import org.zalando.nakadi.client.Deserializer
import org.zalando.nakadi.client.Serializer
/**
* Tests marshalling and unmarshalling of the same JSON object, in this sequence: 1. marshal, then 2. unmarshal. <br>
* This is a simple round-trip test that should break when custom
* marshallers/unmarshallers are used and produce different,
* unexpected results.
*/
class SerializerDeserializerTest extends WordSpec with Matchers with AkkaConfig {
import ScalaJacksonJsonMarshaller._
import TestScalaEntity._
"When an entity(scala object) is marshalled and unmarshalled it" should {
val testName = "always result in the same entity"
s"$testName(eventMetadata)" in {
checkSerializationDeserializationProcess("eventMetadata", eventMetadata)
}
"EventType" in {
println(" ####### " + EventTypeCategory.withName("business"))
}
s"$testName(problem)" in {
checkSerializationDeserializationProcess("problem", problem)
}
s"$testName(metrics)" in {
checkSerializationDeserializationProcess("metrics", metrics)
}
s"$testName(partition)" in {
checkSerializationDeserializationProcess("partition", partition)
}
s"$testName(cursor)" in {
checkSerializationDeserializationProcess("cursor", cursor)
}
s"$testName(eventTypeSchema)" in {
checkSerializationDeserializationProcess("eventTypeSchema", eventTypeSchema)
}
s"$testName(partitionResolutionStrategy)" in {
checkSerializationDeserializationProcess("partitionResolutionStrategy", partitionResolutionStrategy)
}
s"$testName(eventEnrichmentStrategy)" in {
checkSerializationDeserializationProcess("eventEnrichmentStrategy", partitionResolutionStrategy)
}
// s"$testName(dataChangeEvent)" in {
// checkSerializationDeserializationProcess("dataChangeEvent", dataChangeEvent)
// }
s"$testName(eventType)" in {
checkSerializationDeserializationProcess("eventType", eventType)
}
// s"$testName(event)" in {
// checkSerializationDeserializationProcess("event", event)
// }
s"$testName(eventStreamBatch)" in {
implicit val myEventStreamBatchTR: TypeReference[EventStreamBatch[MyEvent]] = new TypeReference[EventStreamBatch[MyEvent]] {}
checkSerializationDeserializationProcess("eventStreamBatch", eventStreamBatch)
}
s"$testName(eventTypeStatistics)" in {
checkSerializationDeserializationProcess("eventTypeStatistics", eventTypeStatistics)
}
s"$testName(batchItemResponse)" in {
checkSerializationDeserializationProcess("batchItemResponse", batchItemResponse)
}
}
def checkSerializationDeserializationProcess[T](key: String, value: T)(implicit ser: Serializer[T], des: Deserializer[T]) {
val jsonEntity = ser.to(value) // Marshal
println("#### Json-Entity:" + jsonEntity)
val scalaEntity = des.from(jsonEntity) //Unmarshal
println("#### Scala-Entity:" + scalaEntity)
assert(scalaEntity == value, s"Failed to marshall $key correctly!!!")
}
}
|
zalando/nakadi-klients
|
client/src/test/scala/org/zalando/nakadi/client/scala/SerializerDeserializerTest.scala
|
Scala
|
mit
| 3,589 |
/*
* Copyright 2015 Shao Tian-Chen (Austin)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.au9ustine.puzzles.s99
/**
*
* Problem 09: Pack consecutive duplicates of list elements into sublists.
*
* If a list contains repeated elements they should be placed in separate sublists.
*
* Example:
*
* scala> pack(List('a, 'a, 'a, 'a, 'b, 'c, 'c, 'a, 'a, 'd, 'e, 'e, 'e, 'e))
* res0: List[List[Symbol]] = List(List('a, 'a, 'a, 'a), List('b), List('c, 'c), List('a, 'a), List('d), List('e, 'e, 'e, 'e))
*
*/
object P09 {
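// foldLeft builds the packed list in reverse: a new sublist is started when the accumulator is empty or the
// current element differs from the last element of the most recent sublist; the final reverse restores input order.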
def pack[A](lst: List[A]): List[List[A]] = lst.foldLeft(List[List[A]]()) {
(ret, x) => x match {
case x_different if ret.isEmpty || x_different != ret.head.last => List(x) :: ret
case _ => (x :: ret.head) :: ret.tail
}
}.reverse
}
|
au9ustine/org.au9ustine.puzzles.s99
|
src/main/scala/org/au9ustine/puzzles/s99/P09.scala
|
Scala
|
apache-2.0
| 1,310 |
package org.elasticmq.actor
import akka.actor.{ActorRef, Props}
import org.elasticmq._
import org.elasticmq.actor.queue.{QueueActor, QueueEvent}
import org.elasticmq.actor.reply._
import org.elasticmq.msg._
import org.elasticmq.util.{Logging, NowProvider}
import scala.reflect._
class QueueManagerActor(nowProvider: NowProvider, limits: Limits, queueEventListener: Option[ActorRef])
extends ReplyingActor
with Logging {
type M[X] = QueueManagerMsg[X]
val ev: ClassTag[QueueManagerMsg[Unit]] = classTag[M[Unit]]
private val queues = collection.mutable.HashMap[String, ActorRef]()
def receiveAndReply[T](msg: QueueManagerMsg[T]): ReplyAction[T] =
msg match {
case CreateQueue(queueData) =>
if (queues.contains(queueData.name)) {
logger.debug(s"Cannot create queue, as it already exists: $queueData")
Left(new QueueAlreadyExists(queueData.name))
} else {
logger.info(s"Creating queue $queueData")
Limits.verifyQueueName(queueData.name, queueData.isFifo, limits) match {
case Left(error) =>
Left(QueueCreationError(queueData.name, error))
case Right(_) =>
val actor = createQueueActor(nowProvider, queueData, queueEventListener)
queues(queueData.name) = actor
queueEventListener.foreach(_ ! QueueEvent.QueueCreated(queueData))
Right(actor)
}
}
case DeleteQueue(queueName) =>
logger.info(s"Deleting queue $queueName")
queues.remove(queueName).foreach(context.stop)
queueEventListener.foreach(_ ! QueueEvent.QueueDeleted(queueName))
case LookupQueue(queueName) =>
val result = queues.get(queueName)
logger.debug(s"Looking up queue $queueName, found?: ${result.isDefined}")
result
case ListQueues() => queues.keySet.toSeq
}
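/** Creates the actor for a new queue, resolving its optional dead-letter, copy-messages-to and move-messages-to targets against the queues already registered. */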
protected def createQueueActor(
nowProvider: NowProvider,
queueData: QueueData,
queueEventListener: Option[ActorRef]
): ActorRef = {
val deadLetterQueueActor = queueData.deadLettersQueue.flatMap { qd => queues.get(qd.name) }
val copyMessagesToQueueActor = queueData.copyMessagesTo.flatMap { queueName => queues.get(queueName) }
val moveMessagesToQueueActor = queueData.moveMessagesTo.flatMap { queueName => queues.get(queueName) }
context.actorOf(
Props(
new QueueActor(
nowProvider,
queueData,
deadLetterQueueActor,
copyMessagesToQueueActor,
moveMessagesToQueueActor,
queueEventListener
)
)
)
}
}
|
adamw/elasticmq
|
core/src/main/scala/org/elasticmq/actor/QueueManagerActor.scala
|
Scala
|
apache-2.0
| 2,622 |
package com.kolor.docker.api.entities
import play.api.libs.json._
sealed case class ContainerConfiguration(
image: Option[String] = None,
cmd: Option[Seq[String]] = None,
hostname: Option[String] = None,
user: Option[String] = None,
memory: Option[Long] = None,
memorySwap: Option[Long] = None,
attachStdin: Option[Boolean] = None,
attachStdout: Option[Boolean] = None,
attachStderr: Option[Boolean] = None,
//portSpecs: Option[Seq[String]] = None, // deprec
tty: Option[Boolean] = None,
openStdin: Option[Boolean] = None,
stdinOnce: Option[Boolean] = None,
env: Option[Seq[String]] = None,
dns: Option[String] = None,
volumes: Option[Map[String, DockerVolume]] = None,
volumesFrom: Option[ContainerId] = None,
workingDir: Option[String] = None,
exposedPorts: Option[Map[String, DockerPortBinding]] = None,
entryPoint: Option[Seq[String]] = None,
networkDisabled: Option[Boolean] = Some(false),
onBuild: Option[Seq[String]] = None
) extends DockerEntity
object ContainerConfig {
def apply(json: JsObject)(implicit fmt: Format[ContainerConfiguration]): ContainerConfiguration = {
val res = Json.fromJson[ContainerConfiguration](json)(fmt)
res.asOpt match {
case Some(c) => c
      case _ => throw new RuntimeException(s"failed to deserialize container config from json: " + Json.prettyPrint(json))
}
}
def apply(json: String)(implicit fmt: Format[ContainerConfiguration]): ContainerConfiguration = {
val res = Json.fromJson[ContainerConfiguration](Json.parse(json))(fmt)
res.asOpt match {
case Some(c) => c
      case _ => throw new RuntimeException(s"failed to deserialize container config from json: " + json)
}
}
def apply(image: String, cmd: Seq[String]): ContainerConfiguration = ContainerConfiguration(Some(image), Some(cmd))
}
|
tldeti/reactive-docker
|
src/main/scala/com/kolor/docker/api/entities/ContainerConfiguration.scala
|
Scala
|
mit
| 1,870 |
object Test extends App {
val xs = List.tabulate(4)(List(_))
val i = xs.map(_.iterator).reduce { (a,b) =>
a.hasNext
a ++ b
}
val r1 = i.toList
val r2 = xs.flatten.toList
assert(r1 == r2, r1)
}
|
som-snytt/dotty
|
tests/run/t8428.scala
|
Scala
|
apache-2.0
| 215 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2016 Algolia
* http://www.algolia.com/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package algolia.dsl
import algolia.definitions.HadPendingMappingsDefinition
import algolia.responses.HasPendingMappings
import org.json4s.Formats
import algolia.{AlgoliaClient, Executable}
import scala.concurrent.{ExecutionContext, Future}
trait HasDsl {
implicit val formats: Formats
case object has {
def pendingMappings(pending: Boolean) =
HadPendingMappingsDefinition(pending)
def pendingMappings() = HadPendingMappingsDefinition()
}
implicit object HadPendingMappingsDefinitionExecutable
extends Executable[HadPendingMappingsDefinition, HasPendingMappings] {
override def apply(
client: AlgoliaClient,
query: HadPendingMappingsDefinition
)(implicit executor: ExecutionContext): Future[HasPendingMappings] = {
client.request[HasPendingMappings](query.build())
}
}
}
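// Illustrative sketch (not part of the original source): inside a client that mixes in
// HasDsl, `has pendingMappings true` builds a HadPendingMappingsDefinition(true); the
// implicit Executable above is what lets it be run against an AlgoliaClient.
//
//   val query = has pendingMappings true   // HadPendingMappingsDefinition(true)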
|
algolia/algoliasearch-client-scala
|
src/main/scala/algolia/dsl/HasDsl.scala
|
Scala
|
mit
| 2,010 |
/**
* Generated by API Builder - https://www.apibuilder.io
* Service version: 0.14.85
* apibuilder 0.14.93 app.apibuilder.io/apicollective/apibuilder-common/latest/anorm_2_8_parsers
*/
import anorm._
package io.apibuilder.common.v0.anorm.parsers {
import io.apibuilder.common.v0.anorm.conversions.Standard._
import io.apibuilder.common.v0.anorm.conversions.Types._
object Audit {
def parserWithPrefix(prefix: String, sep: String = "_"): RowParser[io.apibuilder.common.v0.models.Audit] = parser(prefixOpt = Some(s"$prefix$sep"))
def parser(
createdAt: String = "created_at",
createdByPrefix: String = "created_by",
updatedAt: String = "updated_at",
updatedByPrefix: String = "updated_by",
prefixOpt: Option[String] = None
): RowParser[io.apibuilder.common.v0.models.Audit] = {
SqlParser.get[_root_.org.joda.time.DateTime](prefixOpt.getOrElse("") + createdAt) ~
io.apibuilder.common.v0.anorm.parsers.ReferenceGuid.parserWithPrefix(prefixOpt.getOrElse("") + createdByPrefix) ~
SqlParser.get[_root_.org.joda.time.DateTime](prefixOpt.getOrElse("") + updatedAt) ~
io.apibuilder.common.v0.anorm.parsers.ReferenceGuid.parserWithPrefix(prefixOpt.getOrElse("") + updatedByPrefix) map {
case createdAt ~ createdBy ~ updatedAt ~ updatedBy => {
io.apibuilder.common.v0.models.Audit(
createdAt = createdAt,
createdBy = createdBy,
updatedAt = updatedAt,
updatedBy = updatedBy
)
}
}
}
}
object Healthcheck {
def parserWithPrefix(prefix: String, sep: String = "_"): RowParser[io.apibuilder.common.v0.models.Healthcheck] = parser(prefixOpt = Some(s"$prefix$sep"))
def parser(
status: String = "status",
prefixOpt: Option[String] = None
): RowParser[io.apibuilder.common.v0.models.Healthcheck] = {
SqlParser.str(prefixOpt.getOrElse("") + status) map {
case status => {
io.apibuilder.common.v0.models.Healthcheck(
status = status
)
}
}
}
}
object Reference {
def parserWithPrefix(prefix: String, sep: String = "_"): RowParser[io.apibuilder.common.v0.models.Reference] = parser(prefixOpt = Some(s"$prefix$sep"))
def parser(
guid: String = "guid",
key: String = "key",
prefixOpt: Option[String] = None
): RowParser[io.apibuilder.common.v0.models.Reference] = {
SqlParser.get[_root_.java.util.UUID](prefixOpt.getOrElse("") + guid) ~
SqlParser.str(prefixOpt.getOrElse("") + key) map {
case guid ~ key => {
io.apibuilder.common.v0.models.Reference(
guid = guid,
key = key
)
}
}
}
}
object ReferenceGuid {
def parserWithPrefix(prefix: String, sep: String = "_"): RowParser[io.apibuilder.common.v0.models.ReferenceGuid] = parser(prefixOpt = Some(s"$prefix$sep"))
def parser(
guid: String = "guid",
prefixOpt: Option[String] = None
): RowParser[io.apibuilder.common.v0.models.ReferenceGuid] = {
SqlParser.get[_root_.java.util.UUID](prefixOpt.getOrElse("") + guid) map {
case guid => {
io.apibuilder.common.v0.models.ReferenceGuid(
guid = guid
)
}
}
}
}
}
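// Illustrative usage (not part of the original source; the SQL statement and table name
// are examples only): the generated RowParsers compose with anorm queries in the usual
// way, e.g.
//
//   // given an implicit java.sql.Connection in scope:
//   val references = SQL("select guid, key from references")
//     .as(io.apibuilder.common.v0.anorm.parsers.Reference.parser().*)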
|
mbryzek/apidoc
|
api/app/generated/ApicollectiveApibuilderCommonV0Parsers.scala
|
Scala
|
mit
| 3,318 |
/*
* Copyright 2015
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package influxdbreporter.core.metrics.pull
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicLong
import influxdbreporter.core.utils.ClockOpt.toClockOpt
import com.codahale.metrics.Clock
import scala.concurrent.{ExecutionContext, Future}
abstract class PullingCachedGauge[V] protected (clock: Clock,
timeout: Long,
timeoutUnit: TimeUnit) extends PullingGauge[V] {
private val reloadAt = new AtomicLong(0)
private val timeoutNS = timeoutUnit.toNanos(timeout)
@volatile private var valueFuture: Future[List[ValueByTag[V]]] = _
protected def this(timeout: Long, timeoutUnit: TimeUnit) = {
this(Clock.defaultClock(), timeout, timeoutUnit)
}
protected def loadValue()(implicit ec: ExecutionContext): Future[List[ValueByTag[V]]]
override def getValues(implicit ec: ExecutionContext): Future[List[ValueByTag[V]]] = {
if (shouldLoad()) {
valueFuture = loadValue()
}
valueFuture
}
private def shouldLoad(): Boolean = {
val time = clock.getTimeInNanos
val current = reloadAt.get
time > current && reloadAt.compareAndSet(current, time + timeoutNS)
}
}
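// Illustrative sketch (not part of the original source; CpuGauge is a hypothetical
// subclass): loadValue is re-invoked at most once per 30 seconds, however often
// getValues is called in between.
//
//   class CpuGauge extends PullingCachedGauge[Double](30, TimeUnit.SECONDS) {
//     override protected def loadValue()(implicit ec: ExecutionContext): Future[List[ValueByTag[Double]]] =
//       Future.successful(List.empty)   // a real gauge would query a data source here
//   }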
|
TouK/influxdb-reporter
|
core/src/main/scala/influxdbreporter/core/metrics/pull/PullingCachedGauge.scala
|
Scala
|
apache-2.0
| 1,812 |
package org.bitcoins.dlc.wallet.models
import org.bitcoins.core.currency.CurrencyUnit
import org.bitcoins.core.protocol.BlockTimeStamp
import org.bitcoins.core.protocol.dlc.models._
import org.bitcoins.core.protocol.tlv.{ContractDescriptorTLV, OracleParamsV0TLV}
import org.bitcoins.crypto._
/** This table contains all the meta information about a DLC.
* This includes various identifiers as well as state and a BIP 32 key path.
*/
case class DLCContractDataDb(
dlcId: Sha256Digest,
oracleThreshold: Int,
oracleParamsTLVOpt: Option[OracleParamsV0TLV],
contractDescriptorTLV: ContractDescriptorTLV,
contractMaturity: BlockTimeStamp,
contractTimeout: BlockTimeStamp,
totalCollateral: CurrencyUnit
) {
lazy val dlcTimeouts: DLCTimeouts =
DLCTimeouts(contractMaturity, contractTimeout)
}
|
bitcoin-s/bitcoin-s
|
dlc-wallet/src/main/scala/org/bitcoins/dlc/wallet/models/DLCContractDataDb.scala
|
Scala
|
mit
| 826 |
package com.ubirch.user.util.server
import java.net.URLEncoder
import java.util.UUID
/**
* author: cvandrei
* since: 2017-03-22
*/
object RouteConstants {
final val apiPrefix = "api"
final val serviceName = "userService"
final val currentVersion = "v1"
final val check = "check"
final val deepCheck = "deepCheck"
final val context = "context"
final val byName = "byName"
final val user = "user"
final val group = "group"
final val allowedUsers = "allowedUsers"
final val memberOf = "memberOf"
final val initData = "initData"
final val externalIdExists = "extIdExists"
final val recreate = "recreate"
final val info = "info"
final val activation = "activation"
final val register = "register"
val pathPrefix = s"/$apiPrefix/$serviceName/$currentVersion"
val pathCheck = s"$pathPrefix/$check"
val pathDeepCheck = s"$pathPrefix/$deepCheck"
val pathContext = s"$pathPrefix/$context"
def pathContextWithId(id: UUID) = s"$pathContext/$id"
def pathContextFindByName(name: String): String = {
val encodedName = URLEncoder.encode(name, "UTF-8")
s"$pathContext/$byName/$encodedName"
}
val pathUser = s"$pathPrefix/$user"
val pathRecreate = s"$pathPrefix/$user/$recreate"
def pathUserFind(providerId: String, externalUserId: String): String = {
val providerEncoded = URLEncoder.encode(providerId, "UTF-8")
val userEncoded = URLEncoder.encode(externalUserId, "UTF-8")
s"$pathUser/$providerEncoded/$userEncoded"
}
def pathUserUpdate(providerId: String, externalUserId: String): String = {
val providerEncoded = URLEncoder.encode(providerId, "UTF-8")
val userEncoded = URLEncoder.encode(externalUserId, "UTF-8")
s"$pathUser/$providerEncoded/$userEncoded"
}
def pathUserDelete(providerId: String, externalUserId: String): String = {
val providerEncoded = URLEncoder.encode(providerId, "UTF-8")
val userEncoded = URLEncoder.encode(externalUserId, "UTF-8")
s"$pathUser/$providerEncoded/$userEncoded"
}
def pathExternalIdExists(externalId: String): String = {
val userEncoded = URLEncoder.encode(externalId, "UTF-8")
s"$pathUser/$externalIdExists/$userEncoded"
}
val pathGroup = s"$pathPrefix/$group"
def pathGroupWithId(id: UUID) = s"$pathGroup/$id"
val pathGroupAllowedUsers = s"$pathGroup/$allowedUsers"
def pathGroupMemberOf(contextName: String,
providerId: String,
externalUserId: String
): String = {
val contextEncoded = URLEncoder.encode(contextName, "UTF-8")
val providerEncoded = URLEncoder.encode(providerId, "UTF-8")
val userEncoded = URLEncoder.encode(externalUserId, "UTF-8")
s"$pathPrefix/$group/$memberOf/$contextEncoded/$providerEncoded/$userEncoded"
}
final def pathInitData(env: String) = s"$pathPrefix/$initData/$env"
final val pathRegister = s"$pathPrefix/$register"
private val pathUserInfo = s"$pathUser/$info"
final def pathUserInfoGET(context: String, providerId: String, userId: String): String = {
val contextEncoded = URLEncoder.encode(context, "UTF-8")
val providerEncoded = URLEncoder.encode(providerId, "UTF-8")
val userEncoded = URLEncoder.encode(userId, "UTF-8")
s"$pathUserInfo/$contextEncoded/$providerEncoded/$userEncoded"
}
final val pathUserInfoPUT = pathUserInfo
}
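// Illustrative examples (not part of the original source; the provider and user ids are
// placeholders): the helpers above concatenate URL-encoded segments onto the common
// prefix, for instance
//
//   RouteConstants.pathPrefix                          // "/api/userService/v1"
//   RouteConstants.pathUserFind("google", "user-42")   // "/api/userService/v1/user/google/user-42"
//   RouteConstants.pathExternalIdExists("ext-1")       // "/api/userService/v1/user/extIdExists/ext-1"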
|
ubirch/ubirch-user-service
|
util/src/main/scala/com/ubirch/user/util/server/RouteConstants.scala
|
Scala
|
apache-2.0
| 3,382 |
package mesosphere.util
import java.util.concurrent.TimeUnit
import com.codahale.metrics.{ MetricRegistry, Timer }
/**
* Utils for timer metrics collection.
*/
object TimerUtils {
class ScalaTimer(val timer: Timer) {
def apply[T](block: => T): T = {
val startTime = System.nanoTime()
try {
block
}
finally {
timer.update(System.nanoTime() - startTime, TimeUnit.NANOSECONDS)
}
}
}
def timer[T](registry: MetricRegistry, clazz: Class[T], name: String): ScalaTimer = {
val wrappedTimer = registry.timer(MetricRegistry.name(clazz, name))
new ScalaTimer(wrappedTimer)
}
}
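// Illustrative usage (not part of the original source; MyService and the metric name are
// hypothetical): obtain a ScalaTimer once, then time arbitrary blocks with it.
//
//   val registry = new MetricRegistry()
//   val requestTimer = TimerUtils.timer(registry, classOf[MyService], "requests")
//   val result = requestTimer {
//     // work to be measured
//     42
//   }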
|
quamilek/marathon
|
src/main/scala/mesosphere/util/TimerUtils.scala
|
Scala
|
apache-2.0
| 645 |
package org.jetbrains.plugins.scala.failed.typeInference
import org.jetbrains.plugins.scala.lang.typeConformance.TypeConformanceTestBase
/**
* @author mucianm
* @since 25.03.16.
*/
class PrimitivesConformanceTest extends TypeConformanceTestBase{
override protected def shouldPass: Boolean = false
def testSCL5358(): Unit = doTest(
"""
|final val x = 0
|val y: Byte = x
|/* True */
""".stripMargin)
}
|
JetBrains/intellij-scala
|
scala/scala-impl/test/org/jetbrains/plugins/scala/failed/typeInference/PrimitivesConformanceTest.scala
|
Scala
|
apache-2.0
| 451 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.eval
import cats.CommutativeApplicative
import cats.laws.discipline.CommutativeApplicativeTests
import monix.catnap.internal.ParallelApplicative
import monix.eval.instances.CatsParallelForTask
object TypeClassLawsForParallelApplicativeSuite extends BaseLawsSuite {
implicit val ap: CommutativeApplicative[Task] =
ParallelApplicative(new CatsParallelForTask)
test("instance is valid") {
val ev = implicitly[CommutativeApplicative[Task]]
assertEquals(ev, ap)
}
checkAllAsync("ParallelApplicative[Task]") { implicit ec =>
CommutativeApplicativeTests[Task].commutativeApplicative[Int, Int, Int]
}
}
|
alexandru/monifu
|
monix-eval/shared/src/test/scala/monix/eval/TypeClassLawsForParallelApplicativeSuite.scala
|
Scala
|
apache-2.0
| 1,314 |
package org.denigma.kappa
import fastparse.core.Parsed
import org.denigma.kappa.notebook.parsers.{PaperParser, PaperSelection}
import org.denigma.kappa.parsers.AST
import org.scalatest.{Inside, Matchers, WordSpec}
class PaperParserSuite extends WordSpec with Matchers with Inside {
"parse IRIs and prefixes" in {
lazy val paper = ":paper"
lazy val hello = "<http://helloworld>"
lazy val page = ":page"
val parser = new PaperParser
inside(parser.IRI.parse(hello)) { case Parsed.Success(_, 19) =>}
inside(parser.IRI.parse("http helloworld")) { case f: Parsed.Failure[_,_] =>}
inside(parser.IRIREF.parse(paper)) { case f: Parsed.Failure[_,_] =>}
inside(parser.IRI.parse(paper)) { case Parsed.Success(_, 6)=>}
inside(parser.IRI.parse(page)) { case Parsed.Success(_, 5)=>}
}
"parse pages, chunks and tokens" in {
val parser = new PaperParser
inside(parser.page.parse(":page 20")){
case Parsed.Success(20, _)=>
}
inside(parser.page.parse(":on_page 1")){
case Parsed.Success(1, _)=>
}
inside(parser.page.parse("kappa:on_page 4")){
case Parsed.Success(4, _)=>
}
inside(parser.page.parse("on_page 20")){
case f: Parsed.Failure[_,_]=>
}
inside(parser.page.parse(":from_token 20")){
case f: Parsed.Failure[_,_]=>
}
inside(parser.page.parse(":page hello")){
case f: Parsed.Failure[_,_]=>
}
inside(parser.from_chunk.parse(":from_chunk 20")){
case Parsed.Success(20, _)=>
}
inside(parser.to_chunk.parse(":to_chunk 20")){
case Parsed.Success(20, _)=>
}
inside(parser.from_token.parse(":from_token 1")){
case Parsed.Success(1, _)=>
}
inside(parser.to_token.parse(":to_token 1")){
case Parsed.Success(1, _)=>
}
inside(parser.to_token.parse(":to_token hello")){
case f: Parsed.Failure[_,_]=>
}
}
"parse papers" in {
val parser = new PaperParser
inside(parser.paper.parse(":in_paper <http://helloworld/paper.pdf>")){
case Parsed.Success(AST.IRI("http://helloworld/paper.pdf"), _) =>
}
inside(parser.paper.parse(":in_paper files:paper.pdf")){
case Parsed.Success(iri @ AST.IRI("files:paper.pdf"), _) if iri.namespace =="files" && iri.local =="paper.pdf" =>
}
}
"parse whole line" in {
val parser = new PaperParser
inside(parser.annotation.parse(":in_paper <http://helloworld/paper.pdf>; :on_page 15 ; :from_chunk 11 ; :to_chunk 30 ; :from_token 1 ; :to_token 2")){
case Parsed.Success(PaperSelection(AST.IRI("http://helloworld/paper.pdf"), 15, 11, 30, Some(1), Some(2), _), _) =>
}
inside(parser.annotation.parse(":in_paper :hello; :page 10 ; :from_chunk 0 ; :to_chunk 3 ; :from_token 1 ; :to_token 2")){
case Parsed.Success(PaperSelection(AST.IRI(":hello"), 10, 0, 3, Some(1), Some(2), _), _) =>
}
inside(parser.annotation.parse(":in_paper :hello; :page 10 ; :from_chunk 0 ; :to_chunk 3 ; :from_token 1 ; :to_token 2")){
case Parsed.Success(PaperSelection(AST.IRI(":hello"), 10, 0, 3, Some(1), Some(2), _), _) =>
}
}
"parse complex whole line" in {
val parser = new PaperParser
inside(parser.annotation.parse(":in_paper :repressilator/Kappa%20in%20Synthetic%20Biology.pdf; :on_page 1; :from_chunk 65; :to_chunk 74; :from_token 1; :to_token 1 .")){
case Parsed.Success(PaperSelection(AST.IRI(":repressilator/Kappa%20in%20Synthetic%20Biology.pdf"), 1, 65, 74, Some(1), Some(1), _), _) =>
}
inside(parser.annotation.parse(":comment \\"This is an interesting paper!\\"; :in_paper :repressilator/Kappa%20in%20Synthetic%20Biology.pdf; :on_page 1; :from_chunk 65; :to_chunk 74; :from_token 1; :to_token 1 .")){
case Parsed.Success(PaperSelection(AST.IRI(":repressilator/Kappa%20in%20Synthetic%20Biology.pdf"), 1, 65, 74, Some(1), Some(1), _), _) =>
}
}
}
|
antonkulaga/kappa-notebook
|
app/js/src/test/scala/org/denigma/kappa/PaperParserSuite.scala
|
Scala
|
mpl-2.0
| 3,875 |
package org.jetbrains.plugins.scala.failed.annotator
import org.jetbrains.plugins.scala.PerfCycleTests
import org.jetbrains.plugins.scala.base.ScalaLightCodeInsightFixtureTestAdapter
import org.junit.experimental.categories.Category
/**
* Created by kate on 3/29/16.
*/
@Category(Array(classOf[PerfCycleTests]))
class ApplicationNotTakeParamImplicit extends ScalaLightCodeInsightFixtureTestAdapter {
def testSCL10352(): Unit = {
checkTextHasNoErrors(
"""
|abstract class BlockModel[T <: Block[_]] (implicit c: scala.reflect.ClassTag[T]){}
|
|class Block[R <: BlockModel[_]](implicit val repr: R) {???}
|
|abstract class Screen[R <: BlockModel[_]](override implicit val repr: R) extends Block[R] {}
|
|object TabsDemoScreen {
| implicit object TabsDemoScreenModel extends BlockModel[TabsDemoScreen] {
| }
|}
|class TabsDemoScreen extends Screen[TabsDemoScreen.TabsDemoScreenModel.type] {}
""".stripMargin)
}
def testSCL10902(): Unit = {
checkTextHasNoErrors(
"""
|object Test extends App {
| class A { def apply[Z] = 42 }
| def create = new A
|
| create[String]
|}
""".stripMargin)
}
}
|
ilinum/intellij-scala
|
test/org/jetbrains/plugins/scala/failed/annotator/ApplicationNotTakeParamImplicit.scala
|
Scala
|
apache-2.0
| 1,273 |
/*
* Copyright (c) 2009 Michel Alexandre Salim. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. The names of the authors may not be used to endorse or promote
* products derived from this software without specific, prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
package info.hircus.kanren
import info.hircus.kanren.MiniKanren._
object Substitution {
/**
* An empty simple substitution
*/
object EmptySubst extends Subst {
/**
* Extending an empty substitution always succeeds, producing a simple substitution
* with one binding, v -> x
*
* @param v a logical variable
     * @param x a value to bind v to
*/
def extend(v: Var, x: Any) = Some(SimpleSubst(v,x,this))
/**
* Looking up in an empty substitution always fails
*
* @param v a logical variable
*/
def lookup(v: Var) = None
/**
* The length of an empty substitution is zero
*/
def length: Int = 0
}
/**
* A non-empty simple substitution
*/
case class SimpleSubst(v: Var, x: Any, s: Subst) extends Subst {
/**
* Extending a simple substitution always succeeds, producing a new substitution
* linked with the current one
*
* @param v a logical variable
     * @param x a value to bind to v
*/
def extend(v: Var, x: Any) = Some(SimpleSubst(v,x,this))
/**
* Looking up a variable succeeds immediately if it is at the head of the substitution.
* Otherwise, the linked substitution is queried.
*
* @param v a logical variable
*/
def lookup(v: Var) = if (this.v == v) Some(x) else s.lookup(v)
/**
* The length of a non-empty substitution is one more than its linked substitution
*/
def length: Int = 1 + s.length
}
abstract class ConstraintSubst extends Subst {
/**
     * In a constrained substitution, two walked terms are only unifiable if neither is listed in
* the other's constraints
*/
override def unify(term1: Any, term2: Any): Option[Subst] = {
val v1 = walk(term1, this)
val v2 = walk(term2, this)
if (v1.isInstanceOf[Var] && (this.constraints(v1.asInstanceOf[Var]) contains v2)) None
else if (v2.isInstanceOf[Var] && (this.constraints(v2.asInstanceOf[Var]) contains v1)) None
else super.unify(v1, v2)
}
}
private def c_lookup(v: Var, c: Constraints): List[Any] = c match {
case Nil => Nil
case (w, cls) :: c2 => if (v==w) cls else c_lookup(v, c2)
}
private def c_insert(v: Var, x: Any, c: Constraints): Constraints = c match {
case Nil => List((v, List(x)))
case (w, cls) :: c2 => if (v==w) ((w, if (cls contains x) cls
else x::cls) :: c2)
else (w,cls) :: c_insert(v,x,c2)
}
case class ConstraintSubst0(c: Constraints) extends Subst {
/**
* extending a constraint substitution creates a new constraint substitution
* with the extension done in the simple substitution part
*/
def extend(v: Var, x: Any) =
if (this.constraints(v) contains x) None
else Some(ConstraintSubstN(SimpleSubst(v,x,this), c))
override def c_extend(v: Var, x: Any) = ConstraintSubst0(c_insert(v,x,c))
/**
* Looking up a variable in an empty constraint substitution always returns None
*
* @param v a logical variable
* @return None
*/
def lookup(v: Var) = None
override def constraints(v: Var) = c_lookup(v, c)
/**
* The length of an empty constraint substitution is zero
*/
def length: Int = 0
}
case class ConstraintSubstN(s: SimpleSubst, c: Constraints) extends Subst {
/**
* Constraint checking is performed here, since it is not needed with
* simple substitutions. Doing it in unify would be less efficient
*/
def extend(v: Var, x: Any) =
if (this.constraints(v) contains x) None
else Some(ConstraintSubstN(SimpleSubst(v,x,s), c))
override def c_extend(v: Var, x: Any) = ConstraintSubstN(s, c_insert(v,x,c))
/**
* Looking up a variable in a constraint substitution looks it up in the
* simple substitution
*
* @param v a logical variable
*/
def lookup(v: Var) = s.lookup(v)
override def constraints(v: Var) = c_lookup(v, c)
/**
     * The length of a constraint substitution is the length of its simple substitution
*/
def length: Int = s.length
}
/**
* <p>Uses an immutable map to store the substitution.<br>
* If the computation is lookup-heavy, this should be faster.</p>
*
* <p>Not used by default as memory consumption is heavy -- palprod_o
* causes heap OOM exception.</p>
*/
case class MSubst(m: Map[Var, Any]) extends Subst {
def extend(v: Var, x: Any) = Some(MSubst(m(v) = x))
def lookup(v: Var) = m.get(v)
def length = m.size
}
val empty_msubst = MSubst(Map())
import clojure.lang.IPersistentMap
import clojure.lang.PersistentHashMap
/**
* A substitution based on Clojure's PersistentHashMap
* (earlier based on Odersky's colleague's work at EPFL!)
*
* Requires a modified Clojure, because right now the
* MapEntry interface exposes a val() getter which clashes
* with the Scala keyword
*/
case class CljSubst(m: IPersistentMap) extends Subst {
def extend(v: Var, x: Any) = Some(CljSubst(m.assoc(v, x)))
def lookup(v: Var) = {
val res = m.entryAt(v)
if (res != null) Some(res.`val`)
else None
}
def length = m.count
}
val empty_cljsubst = CljSubst(PersistentHashMap.EMPTY)
}
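// Illustrative sketch (not part of the original source): how extend/lookup compose on
// the simple substitutions above, assuming `v` is some logical Var from MiniKanren.
//
//   val s1 = Substitution.EmptySubst.extend(v, 42).get   // SimpleSubst(v, 42, EmptySubst)
//   s1.lookup(v)                                          // Some(42)
//   s1.length                                             // 1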
|
michel-slm/minikanren-scala
|
src/info/hircus/kanren/Subst.scala
|
Scala
|
bsd-3-clause
| 6,821 |
package org.bitcoins.rpc.bitcoincore.blockchain
import org.bitcoins.rpc.bitcoincore.blockchain.softforks.SoftForks
/**
* Created by Tom on 1/11/2016.
*/
trait BlockchainInfo {
def chain : String
def blockCount : Int
def headerCount : Int
def bestBlockHash : String
def difficulty : Double
def verificationProgress : Double
def chainWork : String
//def softForks : Seq[SoftForks]
}
case class BlockChainInfoImpl(chain : String, blockCount: Int, headerCount: Int, bestBlockHash: String, difficulty: Double,
verificationProgress : Double, chainWork : String /*,softForks : Seq[SoftForks]*/) extends BlockchainInfo
|
bitcoin-s/bitcoin-s-rpc-client
|
src/main/scala/org/bitcoins/rpc/bitcoincore/blockchain/BlockchainInfo.scala
|
Scala
|
mit
| 642 |
/*
* Copyright 2013 Eike Kettner
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.eknet.publet.sharry
import com.google.inject.{Inject, Singleton}
import org.eknet.publet.vfs.util.ByteSize
import org.eknet.publet.web.Config
import org.quartz.Scheduler
/**
* @author Eike Kettner [email protected]
* @since 21.04.13 17:31
*/
@Singleton
class SharryServiceMBeanImpl @Inject() (sharry: SharryService, config: Config, scheduler: Scheduler) extends SharryServiceMBean {
private def sharryImpl = sharry.asInstanceOf[SharryServiceImpl]
def getFolderSize = ByteSize.bytes.normalizeString(sharryImpl.sharry.folderSize)
def getMaxUploadSize = ByteSize.bytes.normalizeString(maxUploadSize(config))
def getArchiveCount = sharry.listArchives.size
def removeOutdatedFiles() {
scheduler.triggerJob(jobdef.getKey)
}
def removeAllArchives() {
sharry.removeFiles(_ => true)
sharryImpl.sharry.removeFiles(_ => true)
}
}
|
eikek/publet-sharry
|
src/main/scala/org/eknet/publet/sharry/SharryServiceMBeanImpl.scala
|
Scala
|
apache-2.0
| 1,472 |
/*
* MIT License
*
* Copyright (c) 2016 Gonçalo Marques
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.byteslounge.slickrepo.version
import com.byteslounge.slickrepo.datetime.DateTimeHelper
import org.joda.time.Instant
object JodaTimeVersionImplicits {
implicit val instantVersionGenerator = new VersionGenerator[Instant]{
def initialVersion(): Instant = {
currentInstant()
}
def nextVersion(currentVersion: Instant): Instant = {
currentInstant()
}
private def currentInstant(): Instant = {
new Instant(DateTimeHelper.currentInstant.toEpochMilli)
}
}
}
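// Illustrative usage (not part of the original source): with the implicit above in
// scope, a VersionGenerator[Instant] can be summoned and used to produce versions.
//
//   import JodaTimeVersionImplicits._
//   val gen = implicitly[VersionGenerator[Instant]]
//   val v0: Instant = gen.initialVersion()
//   val v1: Instant = gen.nextVersion(v0)   // both just capture the current instant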
|
gonmarques/slick-repo
|
src/test/scala/com/byteslounge/slickrepo/version/JodaTimeVersionImplicits.scala
|
Scala
|
mit
| 1,654 |
/*
* LazyValuesTest.scala
* Lazy range computation tests.
*
* Created By: Avi Pfeffer ([email protected])
* Creation Date: Dec 27, 2013
*
* Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email [email protected] for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.test.algorithm.lazyfactored
import org.scalatest.Matchers
import org.scalatest.WordSpec
import com.cra.figaro.algorithm.lazyfactored.LazyValues
import com.cra.figaro.algorithm.lazyfactored.Regular
import com.cra.figaro.language.Apply
import com.cra.figaro.language.Chain
import com.cra.figaro.language.Constant
import com.cra.figaro.language.Dist
import com.cra.figaro.language.Element
import com.cra.figaro.language.ElementCollection
import com.cra.figaro.language.Flip
import com.cra.figaro.language.Name
import com.cra.figaro.language.NonCachingChain
import com.cra.figaro.language.Select
import com.cra.figaro.language.Universe
import com.cra.figaro.library.atomic.continuous
import com.cra.figaro.library.atomic.discrete
/*
* The tests for unbounded depth values computation are in com.cra.figaro.test.algorithm.ValuesTest.
*/
class LazyValuesTest extends WordSpec with Matchers {
"Calling values()" should {
// This should be changed to a non-atomic element, since we now automatically sample atomic elements
"given a non-enumerable element" should {
"return the value set consisting only of *" in {
class testElem(name: Name[Int], collection: ElementCollection) extends Element[Int](name, collection) {
type Randomness = Int
def args = List()
def generateValue(rand: Randomness) = 1
def generateRandomness() = 1
}
val universe = Universe.createNew()
val vs = LazyValues(universe)(new testElem("", universe), 1)
vs.regularValues should equal (Set())
vs.hasStar should equal (true)
}
}
"use the old result when called twice on the same complete element" in {
val universe = Universe.createNew()
var a = 0
def f(x: Int) = { a += 1; x }
val elem1 = Apply(Constant(2), f)
val elem2 = Apply(Constant(3), f)
a = 0
val values = LazyValues(universe)
values(elem1, 1)
values(elem1, 2)
values(elem2, 1)
a should equal (2)
}
"use the old result when called twice on the same incomplete element with the same or lesser depth" in {
/*
* This test is tricky. We want to count the number of times the function inside the chain is called while computing
* values. It must be a non-caching chain, since a caching chain would memoize the function anyway even if values
* wasn't working correctly, so it wouldn't have a good test. The parent must have at least two values, because even
* a non-caching chain does not recompute the function if the parent value hasn't changed. So, we make the parent
* have two values. The final trick is to make sure that the first parent value is the current value when values is
* called on the chain, so we know the function is not recomputed the first time, and is only recomputed for the
* second parent value. That way, we can be sure the function should only be called once in the first call to
* values. Since values is memoized, the function should not be called at all in subsequent times, so the total
* number of times it is called should be 1.
*/
val universe = Universe.createNew()
var a = 0
val parent = Select(0.5 -> 2, 0.5 -> 5)
val elem1 = NonCachingChain(parent, (i: Int) => {
a += 1
Chain(Constant(i + 1), (j: Int) => Constant(j + 1))
})
parent.set(2)
a = 0
val values = LazyValues(universe)
values(elem1, 1)
values(elem1, 1)
values(elem1, 0)
a should equal (2)
}
"use the old result when called twice on the same universe" in {
val universe = Universe.createNew()
var a = 0
def f(x: Int) = { a += 1; x }
val elem1 = Apply(Constant(2), f)
val elem2 = Apply(Constant(3), f)
a = 0
val values = LazyValues(universe)
LazyValues(universe)(elem1, 1)
LazyValues(universe)(elem1, 1)
LazyValues(universe)(elem2, 1)
a should equal (2)
}
"not compute values of unneeded elements" in {
val universe = Universe.createNew()
val e1 = Flip(0.1)
var a = 0
val e2 = Apply(e1, (b: Boolean) => { a += 1; b })
a = 0
val values = LazyValues(universe)
values(e1, 1)
a should equal (0)
}
"with a bounded depth expansion whose depth is less than the depth of the model produce correct starred results" in {
val universe = Universe.createNew()
val flip1 = Flip(0.1)
val flip2 = Flip(0.2)
val uniform1 = discrete.Uniform(1,2)
val uniform2 = discrete.Uniform(2,3)
val uniform3 = discrete.Uniform(3,5)
val c1 = Chain(flip2, (b: Boolean) => if (b) uniform2; else uniform3)
val c2 = Chain(flip1, (b: Boolean) => if (b) uniform1; else c1)
val lv = LazyValues(universe)(c2, 1)
lv.regularValues should equal (Set(1,2))
lv.hasStar should equal (true)
}
"with a bounded depth expansion not expand unreached elements" in {
val universe = Universe.createNew()
val flip1 = Flip(0.1)
val flip2 = Flip(0.2)
val uniform1 = discrete.Uniform(1,2)
val uniform2 = discrete.Uniform(2,3)
var a = 0
val apply1 = Apply(discrete.Uniform(3,5), (i: Int) => { a += 1; i })
val c1 = Chain(flip2, (b: Boolean) => if (b) uniform2; else apply1)
val c2 = Chain(flip1, (b: Boolean) => if (b) uniform1; else c1)
a = 0
LazyValues(universe)(c2, 1)
a should equal (0)
}
"use the maximum depth values for all elements, even when query elements use them at different depths" in {
/*
* This test is meant to catch a subtle case that occurs in lazy values computation.
* Suppose we have two query elements X and Y that both depend on an element Z.
       * Suppose that X depends on Z directly, while Y depends on Z indirectly.
* If Y is expanded first, there is a danger that it will use a lesser depth of values of Z than X,
* which can result in inconsistencies down the road.
* It is necessary for lazy values computation to make sure that the same depth of all elements is
* used consistently.
*/
val universe = Universe.createNew()
val select1 = Select(0.1 -> 1, 0.9 -> 2)
val select2 = Select(0.2 -> 3, 0.8 -> 4)
val apply1 = Apply(select1, (i: Int) => i + 1) // range is { 2, 3 }
val apply2 = Apply(select2, (i: Int) => i + 1) // range is { 4, 5 }
val x = Dist(0.3 -> apply1, 0.7 -> select2) // range should be { 2, 3, 4 }
val y = Dist(0.4 -> select1, 0.6 -> apply2) // range should be { 1, 2, 4, 5 }
val values = LazyValues(universe)
values.expandAll(Set((x, 1), (y, 1)))
values.storedValues(x).xvalues should equal (Set(Regular(2), Regular(3), Regular(4)))
values.storedValues(y).xvalues should equal (Set(Regular(1), Regular(2), Regular(4), Regular(5)))
}
"not include elements from other universes" in {
Universe.createNew()
val v1 = Flip(0.5)
Universe.createNew()
val v2 = Constant(v1)
val lv = LazyValues()
lv.expandAll(Set((v2, Int.MaxValue)))
lv.storedValues(v1).xvalues should be(empty)
}
}
}
|
agarbuno/figaro
|
Figaro/src/test/scala/com/cra/figaro/test/algorithm/lazyfactored/LazyValuesTest.scala
|
Scala
|
bsd-3-clause
| 7,683 |
package com.qbert65536
// TODO: Rewrite RelationAST to be Option[Relation], not Relation(Option,Option )
// TODO: Stop matching on Option, use high order functions as in FPIS
import java.net.URL
import codechecker.ModelChecker
import codegenerator._
import com.qbert65536.sql.{SQLConfigFactory, SQLManipulator}
import com.qbert65536.sqlgenerator.SQLGenerator
import parsers._
import java.io.{BufferedInputStream, File, InputStream, PrintWriter}
import java.nio.file.{Files, Paths}
import com.typesafe.config.{Config, ConfigFactory, ConfigValue}
import scala.collection.immutable.HashMap
import scala.collection.mutable
object Compiler extends ModelParser with App {
def requiredConfigOptions = List("database_name", "database_vendor", "database_host", "model_file", "output_directory", "output_language")
def ensureHasAllConfigs(config: Config): Boolean = {
var ret = true
requiredConfigOptions.foreach(key =>
if (!config.hasPath(key)) {
if (ret) {
Reporter.error("SQUALL CONFIGURATION ERRORS")
Reporter.error("=================================")
}
Reporter.error(s"${key} configuration is required but not found")
ret = false
}
)
Reporter.newline
ret
}
def loadConfigFile(config: Config): scala.collection.mutable.Map[String, String] = {
var map = scala.collection.mutable.Map[String, String]()
val configIterator = config.entrySet().iterator()
while (configIterator.hasNext) {
val setting = configIterator.next()
val key: String = setting.getKey
if (!key.contains(".")) map.put(key, setting.getValue.unwrapped.toString)
}
// DEFAULTS
map.put("database_driver", SQLConfigFactory.getDbDriver(map("database_vendor")))
map.put("connection_string", SQLConfigFactory.getConnectionString(map.toMap))
if (map.get("debug").isEmpty) map.put("debug", "false")
if (map.get("database_user").isEmpty) map.put("database_user", "")
if (map.get("database_password").isEmpty) map.put("database_password", "")
if (map.get("skip_confirmation").isEmpty) map.put("skip_confirmation", "")
map.put("database_vendor_capitalized", map("database_vendor").toUpperCase)
Reporter.newline
Reporter.debug("Using these configuration options")
Reporter.debug("=================================")
Reporter.newline
map.foreach(kv => Reporter.debug(kv._1 + " = " + kv._2))
Reporter.newline
map
}
def compileProject(configFile: String): Unit = {
if (new File(configFile).exists()) {
Reporter.info("Using configuration file at " + configFile)
ConfigFactory.invalidateCaches()
System.getProperties.setProperty("config.file", configFile);
val config = ConfigFactory.load();
val settings = loadConfigFile(config)
// if (!Files.exists(Paths.get(settings("model_file")))) {
// throw new Exception("The model file specifed at path " + new File(".").getCanonicalPath() + "/" + settings("model_file") + " does not exist! ");
// }
val outputLanguage: String = settings("output_language")
val model_file = scala.io.Source.fromFile(settings("model_file")).mkString
val outputDir: String = settings("output_directory")
if (ensureHasAllConfigs(config)) {
parseAll(program, model_file) match {
case res if res.successful => {
val program: ProgramAST = res.get
populateOppositeRelations(program);
if (settings("debug").equals("true")) prettyPrint(program)
ModelChecker.getErrors(program) match {
case errors if errors.nonEmpty => {
errors foreach {
s => Reporter.error(s)
}
}
case _ => {
Reporter.debug("Code check clean, beginning code generation.")
val generator: SQLGenerator = SQLGenerator.getGenerator(settings.toMap, program)
CodeGenerator.getGenerator(outputLanguage, new File(outputDir), settings.toMap, program, generator) match {
case Some(generator) => {
generator.convertLanguageSpecificTypes(program)
generator.generate() match {
case false => {
Reporter.error("Code generation failed for some inexplicable reason")
}
case _ => {
Reporter.newline
// if (Reporter.prompt("Should I start altering the databases with the changes? I backup it all before I begin")) {
Reporter.debug("Now altering database " + settings("database_name"))
if (config.hasPath("skip_database") || config.hasPath("skip_db")) {
Reporter.debug("SKIPPING DATABASE CREATION BECAUSE SKIP_DB was specified in squall.conf")
}
else {
val sql = new SQLManipulator(settings.toMap, program)
if (!settings("skip_confirmation").isEmpty) {
Reporter.debug("Skipping confirmation because skip_confirmation was set.")
sql.alterDatabase()
}
else {
if (Reporter.prompt("Are you sure you want to drop the database and recreate it? Y|n")) {
sql.alterDatabase()
}
}
}
// }
Reporter.newline
BuildFileGenerator.createBuildFile(outputLanguage, settings.toMap, program);
}
}
}
case _ => Reporter.error(s"No Code generator found for $outputLanguage")
}
}
}
if (config.hasPath("pretty_print")) prettyPrint(program)
}
case res => {
Reporter.error("Parsing failed" + res);
}
}
}
else {
Reporter.error("Quitting because of configuration errors");
}
}
else {
Reporter.error("Failed to find the config file " + configFile + " ( it should exist relative to where you are trying to run this executable ), bailing out now");
}
}
override def main(args: Array[String]) {
var configFile = System.getProperty("user.dir") + "/squall.conf"
if (args.length > 0) configFile = args(0)
compileProject(configFile)
Reporter.debug("Code generation successful")
}
}
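// Illustrative usage (not part of the original source; the config path is a
// placeholder): with no argument the compiler looks for squall.conf in the current
// working directory, otherwise the first argument is used as the config file path.
//
//   Compiler.main(Array("/path/to/squall.conf"))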
|
qorrect/squall
|
src/main/scala/com/qbert65536/Compiler.scala
|
Scala
|
apache-2.0
| 6,740 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnet.module
import java.io.{BufferedInputStream, BufferedOutputStream, FileInputStream, FileOutputStream}
import org.apache.mxnet.DType.DType
import org.apache.mxnet._
import org.apache.mxnet.module.DataParallelExecutorGroup.Builder
import org.apache.mxnet.optimizer.SGD
import org.slf4j.LoggerFactory
import scala.annotation.varargs
/**
 * Module is a basic module that wraps a `Symbol`. It is functionally the same
* as the `FeedForward` model, except under the module API.
* @param symbolVar : Symbol definition.
* @param dataNames Input data names.
* @param labelNames Input label names
* @param contexts Default is cpu().
* @param workLoadList Default `None`, indicating uniform workload.
* @param fixedParamNames Default `None`, indicating no network parameters are fixed.
*/
class Module(symbolVar: Symbol,
val dataNames: IndexedSeq[String] = IndexedSeq("data"),
labelNames: IndexedSeq[String] = IndexedSeq("softmax_label"),
contexts: Array[Context] = Context.cpu(),
workLoadList: Option[IndexedSeq[Float]] = None,
fixedParamNames: Option[Set[String]] = None) extends BaseModule {
private val logger = LoggerFactory.getLogger(classOf[Module])
require(symbolVar != null, "Undefined symbol")
this.symbol = symbolVar
private val workLoads = workLoadList.getOrElse(contexts.map(_ => 1f).toIndexedSeq)
require(workLoads.size == contexts.length,
s"workloads size (${workLoads.size}) do not match number of contexts ${contexts.length}")
private val labelNameList = if (labelNames == null) IndexedSeq.empty[String] else labelNames
private val argNames = symbol.listArguments()
private val inputNames = dataNames ++ labelNameList
private val paramNames = argNames.filterNot(inputNames.toSet)
private val auxNames = symbol.listAuxiliaryStates()
private val outputNamesVar = symbol.listOutputs()
private[module] var paramsDirty = false
private var optimizer: Optimizer = null
private var kvstore: Option[KVStore] = None
private var updateOnKVStore: Boolean = false
private var updater: Option[MXKVStoreUpdater] = None
private var preloadOptStates: Option[String] = None
private var dataShapesVar: IndexedSeq[DataDesc] = null
private var labelShapesVar: Option[IndexedSeq[DataDesc]] = None
override def dataShapes: IndexedSeq[DataDesc] = {
require(binded, "bind() must be called first.")
dataShapesVar
}
override def labelShapes: IndexedSeq[DataDesc] = {
require(binded, "bind() must be called first.")
labelShapesVar.orNull
}
override def outputShapes: IndexedSeq[(String, Shape)] = {
require(binded, "bind() must be called first.")
execGroup.getOutputShapes
}
def outputNames: IndexedSeq[String] = outputNamesVar
/**
* Get current parameters.
   * Returns `(argParams, auxParams)`, each a dictionary mapping parameter names to
   * `NDArray` values.
*/
override def getParams: (Map[String, NDArray], Map[String, NDArray]) = {
require(binded && paramsInitialized, "bind() and initParams() must be called first.")
if (paramsDirty) {
syncParamsFromDevices()
}
(argParams, auxParams)
}
/**
* Initialize the parameters and auxiliary states.
* @param initializer Called to initialize parameters if needed.
* @param argParams If not None, should be a dictionary of existing arg_params.
* Initialization will be copied from that.
* @param auxParams If not None, should be a dictionary of existing aux_params.
* Initialization will be copied from that.
* @param allowMissing If true, params could contain missing values,
* and the initializer will be called to fill those missing params.
* @param forceInit If true, will force re-initialize even if already initialized.
* @param allowExtra Whether allow extra parameters that are not needed by symbol.
* If this is True, no error will be thrown when argParams or auxParams
* contain extra parameters that is not needed by the executor.
*/
override def initParams(initializer: Initializer = new Uniform(0.01f),
argParams: Map[String, NDArray] = null,
auxParams: Map[String, NDArray] = null,
allowMissing: Boolean = false,
forceInit: Boolean = false,
allowExtra: Boolean = false): Unit = {
if (!paramsInitialized || forceInit) {
require(binded, "call bind before initializing the parameters")
if (this.argParams == null) {
val paramArrays =
execGroup.paramArrays.map(nds => NDArray.zeros(nds(0).shape, dtype = nds(0).dtype))
this.argParams = this.paramNames.zip(paramArrays).toMap
}
if (this.auxParams == null) {
val auxArrays =
execGroup.auxArrays.map(nds => NDArray.zeros(nds(0).shape, dtype = nds(0).dtype))
this.auxParams = this.auxNames.zip(auxArrays).toMap
}
this.argParams.foreach { case (name, arr) =>
impl(name, arr, allowMissing, Option(initializer), argParams)
}
this.auxParams.foreach { case (name, arr) =>
impl(name, arr, allowMissing, Option(initializer), auxParams)
}
this.paramsInitialized = true
this.paramsDirty = false
// copy the initialized parameters to devices
this.execGroup.setParams(this.argParams, this.auxParams, allowExtra = allowExtra)
}
}
// Internal helper for parameter initialization
private def impl(name: String, arr: NDArray, allowMissing: Boolean,
initializer: Option[Initializer] = None,
cache: Map[String, NDArray] = null): Unit = {
if (cache != null) {
if (cache.contains(name)) {
val cacheArr = cache(name) // just in case the cached array is just the target itself
if (cacheArr ne arr) {
cacheArr.copyTo(arr)
}
} else {
require(allowMissing, s"$name is not presented")
initializer.foreach(inst => inst(name, arr))
}
} else {
initializer.foreach(inst => inst(name, arr))
}
}
/**
* Assign parameter and aux state values.
* argParams : dict
* Dictionary of name to value (`NDArray`) mapping.
* auxParams : dict
* Dictionary of name to value (`NDArray`) mapping.
* allowMissing : bool
* If true, params could contain missing values, and the initializer will be
* called to fill those missing params.
* forceInit : bool
* If true, will force re-initialize even if already initialized.
* allowExtra : bool
* Whether allow extra parameters that are not needed by symbol.
* If this is True, no error will be thrown when argParams or auxParams
* contain extra parameters that is not needed by the executor.
*/
override def setParams(argParams: Map[String, NDArray],
auxParams: Map[String, NDArray],
allowMissing: Boolean = false,
forceInit: Boolean = true,
allowExtra: Boolean = false): Unit = {
if (!allowMissing) {
this.initParams(null, argParams, auxParams, allowMissing, forceInit, allowExtra)
} else if (this.paramsInitialized && !forceInit) {
logger.warn("Parameters already initialized and forceInit=false. " +
"setParams call ignored.")
} else {
this.execGroup.setParams(argParams, auxParams, allowExtra)
// because we didn't update self._arg_params, they are dirty now.
this.paramsDirty = true
this.paramsInitialized = true
}
}
// Internal function to reset binded state.
private def resetBind(): Unit = {
binded = false
execGroup = null
dataShapesVar = null
labelShapesVar = None
}
/**
* Bind the symbols to construct executors. This is necessary before one
* can perform computation with the module.
* @param dataShapes Typically is `dataIter.provideData`.
* @param labelShapes Typically is `data_iter.provide_label`.
* @param forTraining Default is `true`. Whether the executors should be bind for training.
* @param inputsNeedGrad Default is `false`.
* Whether the gradients to the input data need to be computed.
* Typically this is not needed.
* But this might be needed when implementing composition of modules.
* @param forceRebind Default is `false`.
* This function does nothing if the executors are already binded.
* But with this `true`, the executors will be forced to rebind.
* @param sharedModule Default is `None`. This is used in bucketing.
* When not `None`, the shared module essentially corresponds to
* a different bucket -- a module with different symbol
* but with the same sets of parameters
* (e.g. unrolled RNNs with different lengths).
*/
override def bind(dataShapes: IndexedSeq[DataDesc],
labelShapes: Option[IndexedSeq[DataDesc]] = None,
forTraining: Boolean = true, inputsNeedGrad: Boolean = false,
forceRebind: Boolean = false, sharedModule: Option[BaseModule] = None,
gradReq: String = "write"): Unit = {
    // force rebinding is typically used when one wants to switch from training to prediction phase.
if (forceRebind) {
resetBind()
}
if (binded) {
logger.warn("Already binded, ignoring bind()")
} else {
this.forTraining = forTraining
this.inputsNeedGrad = inputsNeedGrad
this.binded = true
if (!forTraining) {
require(!inputsNeedGrad, "Invalid inputsNeedGrad (cannot be true if not forTraining)")
} else {
        // this is not true, as some modules might not contain a loss function
// that consumes the labels
// require(labelShapes != None)
}
this.dataShapesVar = dataShapes
this.labelShapesVar = labelShapes
val sharedGroup =
sharedModule.map(sharedModuleInst => {
require(sharedModuleInst.binded && sharedModuleInst.paramsInitialized,
s"bind() and initParams() must be called first on shared module.")
sharedModuleInst.execGroup
})
val inputTypes = this.dataShapesVar.map(dataDesc => (dataDesc.name, dataDesc.dtype)).toMap ++
labelShapes.map(shapes => shapes.map(dataDesc => (dataDesc.name, dataDesc.dtype)).toMap)
.getOrElse(Map.empty[String, DType])
execGroup = new Builder(symbol, contexts, paramNames)
.setWorkLoadList(workLoads)
.setDataShapes(dataShapes)
.setLabelShapes(labelShapes.orNull)
.setForTraining(forTraining)
.setInputsNeedGrad(inputsNeedGrad)
.setSharedGroup(sharedGroup.orNull)
.setFixedParamNames(fixedParamNames.orNull)
.setGradReq(gradReq)
.setInputTypes(inputTypes)
.build()
if (sharedModule.isDefined) {
paramsInitialized = true
argParams = sharedModule.get.argParams
auxParams = sharedModule.get.auxParams
} else if (paramsInitialized) {
// if the parameters are already initialized, we are re-binding
// so automatically copy the already initialized params
execGroup.setParams(argParams, auxParams)
}
sharedModule.foreach {
case sharedModuleInst: Module =>
if (sharedModuleInst.optimizerInitialized) {
borrowOptimizer(sharedModuleInst)
}
case _ =>
}
}
}
/**
* Check that input names matches input data descriptors.
*/
@throws(classOf[IllegalArgumentException])
private def _checkNamesMatch(dataNames: IndexedSeq[String], dataShapes: IndexedSeq[DataDesc],
name: String, throwEx: Boolean): Unit = {
val actual = dataShapes.map(_.name)
if (dataNames.sorted != actual.sorted) {
val msg = s"Data provided by ${name}_shapes don't match names specified by " +
s"${name}_names (${dataShapes.mkString(", ")} vs. ${dataNames.mkString(", ")})"
if (throwEx) throw new IllegalArgumentException(msg)
else logger.warn(msg)
}
}
/**
* parse data_attrs into DataDesc format and check that names match
*/
@throws(classOf[IllegalArgumentException])
private def _parseDataDesc(dataNames: IndexedSeq[String], labelNames: IndexedSeq[String],
dataShapes: IndexedSeq[DataDesc], labelShapes: Option[IndexedSeq[DataDesc]]):
(IndexedSeq[DataDesc], Option[IndexedSeq[DataDesc]]) = {
_checkNamesMatch(dataNames, dataShapes, "data", true)
if (labelShapes != None) _checkNamesMatch(labelNames, labelShapes.get, "label", false)
(dataShapes, labelShapes)
}
/**
* Reshapes the module for new input shapes.
* @param dataShapes Typically is `dataIter.provideData`.
* @param labelShapes Typically is `dataIter.provideLabel`.
*/
def reshape(dataShapes: IndexedSeq[DataDesc],
labelShapes: Option[IndexedSeq[DataDesc]] = None): Unit = {
require(this.binded, "bind() must be called first.")
val (tdataShapes, tlabelShapes) = this._parseDataDesc(
this.dataNames, this.labelNames, dataShapes, labelShapes)
this.dataShapesVar = tdataShapes
this.labelShapesVar = tlabelShapes
this.execGroup.reshape(tdataShapes, tlabelShapes)
}
/**
* Install and initialize optimizers.
* @param kvstore
* @param optimizer
* @param resetOptimizer Default `True`, indicating whether we should set `rescaleGrad`
* & `idx2name` for optimizer according to executorGroup
* @param forceInit Default `False`, indicating whether we should force re-initializing
* the optimizer in the case an optimizer is already installed.
*/
def initOptimizer(kvstore: String = "local", optimizer: Optimizer = new SGD(),
resetOptimizer: Boolean = true, forceInit: Boolean = false): Unit = {
require(binded && paramsInitialized, "bind() and initParams() must be called first.")
if (optimizerInitialized && !forceInit) {
logger.warn("optimizer already initialized, ignoring ...")
} else {
val (kvstoreInst, updateOnKVStore) = Model.createKVStore(kvstore, contexts.length, argParams)
val batchSize = execGroup.getBatchSize * (
if (kvstoreInst != None && kvstoreInst.get.`type` == "dist_sync") {
kvstoreInst.get.numWorkers
} else {
1
})
if (resetOptimizer) {
val idx2name =
if (updateOnKVStore) {
execGroup.paramNames.zipWithIndex.map { case (name, i) => (i, name) }.toMap
} else {
(0 until contexts.length).flatMap(k =>
execGroup.paramNames.zipWithIndex.map { case (name, i) =>
(i * contexts.length + k, name)
}
).toMap
}
optimizer.setIdx2Name(idx2name)
optimizer.setRescaleGrad(1f / batchSize)
}
this.optimizer = optimizer
this.kvstore = kvstoreInst
this.updateOnKVStore = updateOnKVStore
kvstoreInst.foreach(kv =>
// copy initialized local parameters to kvstore
Model.initializeKVStore(kv, execGroup.paramArrays,
argParams, paramNames, updateOnKVStore)
)
updater =
if (updateOnKVStore) {
kvstoreInst.foreach(_.setOptimizer(this.optimizer))
None
} else {
Some(Optimizer.getUpdater(optimizer))
}
optimizerInitialized = true
preloadOptStates.foreach { optStates =>
loadOptimizerStates(optStates)
}
preloadOptStates = None
}
}
/**
* Borrow optimizer from a shared module. Used in bucketing, where exactly the same
* optimizer (esp. kvstore) is used.
* @param sharedModule
*/
def borrowOptimizer(sharedModule: Module): Unit = {
require(sharedModule.optimizerInitialized,
"initOptimizer() must be called first for shared module")
optimizer = sharedModule.optimizer
kvstore = sharedModule.kvstore
updateOnKVStore = sharedModule.updateOnKVStore
updater = sharedModule.updater
optimizerInitialized = true
}
/**
* Forward computation.
* @param dataBatch input data
* @param isTrain Default is `None`, which means `is_train` takes the value of `for_training`.
*/
def forward(dataBatch: DataBatch, isTrain: Option[Boolean] = None): Unit = {
require(binded && paramsInitialized, "bind() and initParams() must be called first.")
val currDataShapes = this.dataShapes.map(_.shape)
val newDataShapes = dataBatch.data.map(_.shape)
if (currDataShapes != newDataShapes) {
val newDShapes: IndexedSeq[DataDesc] =
if (dataBatch.provideDataDesc != null) dataBatch.provideDataDesc
else {
this.dataShapes.zip(newDataShapes).map { case (i, shape) =>
DataDesc(i.name, shape, i.dtype, i.layout)
}
}
val newLShapes: Option[IndexedSeq[DataDesc]] =
if (dataBatch.provideLabelDesc != null) Some(dataBatch.provideLabelDesc)
else if (dataBatch.label != null && dataBatch.label.length > 0
&& this.labelShapes != null) {
Some(this.labelShapes.zip(dataBatch.label).map { case (i, j) =>
DataDesc(i.name, j.shape, i.dtype, i.layout)
})
} else None
this.reshape(newDShapes, newLShapes)
}
execGroup.forward(dataBatch, isTrain)
}
/**
* Backward computation.
* @param outGrads Gradient on the outputs to be propagated back.
* This parameter is only needed when bind is called
* on outputs that are not a loss function.
*/
def backward(outGrads: Array[NDArray] = null): Unit = {
require(binded && paramsInitialized, "bind() and initParams() must be called first.")
execGroup.backward(outGrads)
}
// Update parameters according to the installed optimizer and the gradients computed
// in the previous forward-backward batch.
def update(): Unit = {
require(binded && paramsInitialized && optimizerInitialized,
"bind(), initParams() and initOptimizer() must be called first.")
paramsDirty = true
if (updateOnKVStore) {
Model.updateParamsOnKVStore(execGroup.paramArrays,
execGroup.gradArrays, kvstore, execGroup.paramNames)
} else {
require(updater.isDefined, "Undefined updater")
Model.updateParams(execGroup.paramArrays,
execGroup.gradArrays, updater.orNull, contexts.length, execGroup.paramNames, kvstore)
}
}
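  // Training-step sketch (illustrative only): forward(), backward() and update()
  // wired together in the order the methods above require, with updateMetric()
  // accumulating an evaluation metric; `trainIter` and `metric` are assumptions.
  //
  //   while (trainIter.hasNext) {
  //     val batch = trainIter.next()
  //     mod.forward(batch, isTrain = Some(true))
  //     mod.backward()
  //     mod.update()
  //     mod.updateMetric(metric, batch.label)
  //   }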
/**
* Get outputs of the previous forward computation.
* @return In the case when data-parallelism is used,
* the outputs will be collected from multiple devices.
* The results will look like `[ [out1_dev1, out1_dev2], [out2_dev1, out2_dev2] ]`,
* those `NDArray` might live on different devices.
*/
def getOutputs(): IndexedSeq[IndexedSeq[NDArray]] = {
require(binded && paramsInitialized, "bind() and initParams() must be called first.")
execGroup.getOutputs()
}
/**
* Get outputs of the previous forward computation.
* @return In the case when data-parallelism is used,
* the outputs will be merged from multiple devices,
* as they look like from a single executor.
* The results will look like `[out1, out2]`
*/
def getOutputsMerged(): IndexedSeq[NDArray] = {
require(binded && paramsInitialized, "bind() and initParams() must be called first.")
execGroup.getOutputsMerged()
}
/**
* Get the gradients to the inputs, computed in the previous backward computation.
* @return In the case when data-parallelism is used,
* the grads will be collected from multiple devices.
* The results will look like `[ [grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2] ]`,
* those `NDArray` might live on different devices.
*/
def getInputGrads(): IndexedSeq[IndexedSeq[NDArray]] = {
require(binded && paramsInitialized, "bind() and initParams() must be called first.")
require(inputsNeedGrad, "Call to getInputGrads() but inputsNeedGrad is false")
execGroup.getInputGrads()
}
/**
* Get the gradients to the inputs, computed in the previous backward computation.
* @return In the case when data-parallelism is used,
* the grads will be merged from multiple devices,
* as they look like from a single executor.
* The results will look like `[grad1, grad2]`
*/
def getInputGradsMerged(): IndexedSeq[NDArray] = {
require(binded && paramsInitialized, "bind() and initParams() must be called first.")
require(inputsNeedGrad, "Call to getInputGradsMerged() but inputsNeedGrad is false")
execGroup.getInputGradsMerged()
}
/**
* Evaluate and accumulate evaluation metric on outputs of the last forward computation.
   * @param evalMetric the evaluation metric to update.
   * @param labels ground-truth labels matching the outputs of the last forward pass.
*/
def updateMetric(evalMetric: EvalMetric, labels: IndexedSeq[NDArray]): Unit = {
execGroup.updateMetric(evalMetric, labels)
}
  // Synchronize parameters from devices to CPU. This function should be called after
  // calling `update` that updates the parameters on the devices, before one can read
  // the latest parameters from `argParams` and `auxParams`.
private def syncParamsFromDevices(): Unit = {
execGroup.getParams(argParams, auxParams)
}
// Install monitor on all executors
def installMonitor(monitor: Monitor): Unit = {
require(binded, "bind() must be called first.")
execGroup.installMonitor(monitor)
}
/**
* Save optimizer (updater) state to file
* @param fname Path to output states file.
*/
def saveOptimizerStates(fname: String): Unit = {
require(optimizerInitialized, "Optimizer should be initialized before saving.")
if (updateOnKVStore) {
kvstore.foreach(_.saveOptimizerStates(fname))
} else {
updater.foreach {
case cachedStates: MXKVStoreCachedStates =>
val target = new BufferedOutputStream(new FileOutputStream(fname))
try {
target.write(cachedStates.serializeState())
} finally {
target.close()
}
case _ =>
logger.warn("Updater does not have states, skip saving to {}", fname)
}
}
}
/**
* Load optimizer (updater) state from file
* @param fname Path to input states file.
*/
def loadOptimizerStates(fname: String): Unit = {
require(optimizerInitialized, "Optimizer should be initialized before loading.")
if (updateOnKVStore) {
kvstore.foreach(_.loadOptimizerStates(fname))
} else {
updater.foreach {
case cachedStates: MXKVStoreCachedStates =>
val bis = new BufferedInputStream(new FileInputStream(fname))
try {
val bArray = Stream.continually(bis.read).takeWhile(-1 !=).map(_.toByte).toArray
cachedStates.deserializeState(bArray)
} finally {
bis.close()
}
case _ =>
logger.warn("Updater does not have states, skip loading from {}", fname)
}
}
}
/**
* Save current progress to checkpoint.
* Use mx.callback.module_checkpoint as epoch_end_callback to save during training.
* @param prefix The file prefix to checkpoint to
* @param epoch The current epoch number
* @param saveOptStates Whether to save optimizer states for continue training
*/
def saveCheckpoint(prefix: String, epoch: Int, saveOptStates: Boolean = false): Unit = {
symbol.save(s"$prefix-symbol.json")
val paramName = "%s-%04d.params".format(prefix, epoch)
saveParams(paramName)
logger.info("Saved checkpoint to {}", paramName)
if (saveOptStates) {
val stateName = "%s-%04d.states".format(prefix, epoch)
saveOptimizerStates(stateName)
logger.info("Saved optimizer state to {}", stateName)
}
}
}
object Module {
/**
* Create a model from previously saved checkpoint.
* @param prefix Path prefix of saved model files. You should have "prefix-symbol.json",
* "prefix-xxxx.params", and optionally "prefix-xxxx.states",
* where xxxx is the epoch number.
* @param epoch Epoch to load.
* @param loadOptimizerStates Whether to load optimizer states.
* Checkpoint needs to have been made with saveOptimizerStates=True
* @param dataNames Input data names.
* @param labelNames Input label names
* @param contexts Default is cpu().
* @param workLoadList Default `None`, indicating uniform workload.
* @param fixedParamNames Default `None`, indicating no network parameters are fixed.
*/
def loadCheckpoint(prefix: String, epoch: Int, loadOptimizerStates: Boolean = false,
dataNames: IndexedSeq[String] = IndexedSeq("data"),
labelNames: IndexedSeq[String] = IndexedSeq("softmax_label"),
contexts: Array[Context] = Context.cpu(),
workLoadList: Option[IndexedSeq[Float]] = None,
fixedParamNames: Option[Set[String]] = None): Module = {
val (sym, args, auxs) = Model.loadCheckpoint(prefix, epoch)
val mod = new Module(symbolVar = sym,
dataNames, labelNames, contexts, workLoadList, fixedParamNames)
mod.argParams = args
mod.auxParams = auxs
mod.paramsInitialized = true
if (loadOptimizerStates) {
mod.preloadOptStates = Some("%s-%04d.states".format(prefix, epoch))
}
mod
}
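  // Resume sketch (hedged; the prefix/epoch values are placeholders): loading files
  // written by saveCheckpoint(prefix, epoch, saveOptStates = true). The preloaded
  // optimizer states recorded above are applied when initOptimizer() runs.
  //
  //   val mod = Module.loadCheckpoint("mymodel", 10, loadOptimizerStates = true)
  //   mod.bind(trainIter.provideDataDesc, Some(trainIter.provideLabelDesc))
  //   mod.initOptimizer()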
/**
* Builder class for Module.
* @param modelDef model definition in Symbol.
*/
class Builder(private val modelDef: Symbol) {
private var dataNames: IndexedSeq[String] = IndexedSeq("data")
private var labelNames: IndexedSeq[String] = IndexedSeq("softmax_label")
private var contexts: Array[Context] = Array(Context.cpu())
private var workLoadList: IndexedSeq[Float] = _
private var fixedParamNames: Set[String] = _
/**
* Set the context for execution.
* @param ctx a list of contexts.
* @return this.
*/
@varargs def setContext(ctx: Context*): Builder = {
contexts = ctx.toArray
this
}
/**
* Set the input data names.
* @param name a list of data names. Cannot be null.
* @return this.
*/
@varargs def setDataNames(name: String*): Builder = {
dataNames = name.toVector
this
}
/**
* Set the label names.
* @param name a list of label names.
* Set to null if no label is required.
* @return this.
*/
@varargs def setLabelNames(name: String*): Builder = {
labelNames = if (name == null) IndexedSeq.empty[String] else name.toVector
this
}
/**
* Set the workloads.
* @param workloads a list of workloads
* @return this.
*/
@varargs def setWorkLoadList(workloads: Float*): Builder = {
workLoadList = workloads.toVector
this
}
/**
     * Specify the parameters that need to be fixed.
* @param name a list of parameter names.
* @return this.
*/
@varargs def setFixedParamNames(name: String*): Builder = {
fixedParamNames = name.toSet
this
}
def build(): Module = {
new Module(modelDef, dataNames, labelNames, contexts,
Option(workLoadList), Option(fixedParamNames))
}
}
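  // Builder sketch: `net` stands for any Symbol; the setters are those defined above.
  //
  //   val mod = new Module.Builder(net)
  //     .setDataNames("data")
  //     .setLabelNames("softmax_label")
  //     .build()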
}
|
zhreshold/mxnet
|
scala-package/core/src/main/scala/org/apache/mxnet/module/Module.scala
|
Scala
|
apache-2.0
| 28,456 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.blueprints.jena
import java.lang.Iterable
import org.apache.jena.rdf.model._
import com.tinkerpop.blueprints.util.{StringFactory, DefaultVertexQuery}
import com.tinkerpop.blueprints.{VertexQuery, Edge, Direction, Vertex}
import scala.collection.JavaConversions._
import scala.collection.mutable
import Extensions._
class JenaVertex(model1: Model, rdfNode1: RDFNode) extends JenaElement(model1, rdfNode1) with Vertex {
override def getId = rdfNode.id
override def getEdges(direction: Direction, labels: String*): Iterable[Edge] = {
val labelsSet = labels.toSet
val edges = mutable.Set[Edge]()
val statements = model.listStatements(getSelector(direction))
while (statements.hasNext) {
val statement = statements.next
if(!statement.getObject.isLiteral) {
val edge = new JenaEdge(model, statement.getPredicate, statement.getObject, statement.getSubject)
if (labelsSet.isEmpty || labelsSet(edge.getLabel))
edges += edge
}
}
edges
}
override def getVertices(direction: Direction, labels: String*): Iterable[Vertex] = {
val labelsSet = labels.toSet
val vertices = mutable.Set[Vertex]()
val statements = model.listStatements(getSelector(direction))
while (statements.hasNext) {
val statement = statements.next
if (labelsSet.isEmpty || labelsSet(statement.getPredicate.id.toString)) {
val s = statement.getSubject
val o = statement.getObject
vertices += new JenaVertex(model, if(rdfNode==s) o else s)
}
}
vertices
}
override def query(): VertexQuery = new DefaultVertexQuery(this)
override def toString = StringFactory.vertexString(this)
override def addEdge(s: String, vertex: Vertex): Edge = throw new UnsupportedOperationException("Current implementation is for a ReadOnly graph")
override def remove(): Unit = throw new UnsupportedOperationException("Current implementation is for a ReadOnly graph")
override def removeProperty[T](key: String): T = throw new UnsupportedOperationException("Current implementation is for a ReadOnly graph")
private def getSelector(dir: Direction): Selector = dir match {
case Direction.IN => new SimpleSelector {
override def selects(s: Statement): Boolean = s.getObject.isSameAs(rdfNode) && !s.getObject.isLiteral
}
case Direction.OUT => new SimpleSelector {
override def selects(s: Statement): Boolean = s.getSubject.isSameAs(rdfNode) && !s.getObject.isLiteral
}
case Direction.BOTH => new SimpleSelector {
override def selects(s: Statement): Boolean = (s.getSubject.isSameAs(rdfNode) || s.getObject.isSameAs(rdfNode)) && !s.getObject.isLiteral
}
case _ => throw new IllegalArgumentException("Direction")
}
}
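// Usage sketch (the model setup is an assumption, not part of this file): wrap a
// Jena resource and walk its outgoing, non-literal statements as Blueprints edges.
//
//   import org.apache.jena.rdf.model.ModelFactory
//   import com.tinkerpop.blueprints.Direction
//   val model = ModelFactory.createDefaultModel()
//   val v = new JenaVertex(model, model.createResource("http://example.org/a"))
//   val outEdges = v.getEdges(Direction.OUT)        // edges for all predicates
//   val neighbours = v.getVertices(Direction.BOTH)  // subjects/objects on either side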
|
nruppin/CM-Well
|
server/cmwell-plugin-gremlin/src/main/scala/cmwell/blueprints/jena/JenaVertex.scala
|
Scala
|
apache-2.0
| 3,387 |
import org.atnos.site._
object index extends UserGuidePage { def is = "eff".title ^ s2"""
Extensible effects are an alternative to monad transformers for computing with effects in a functional way.
This library is based on the "free-er" monad and an "open union" of effects described by
Oleg Kiselyov in [Freer monads, more extensible effects](http://okmij.org/ftp/Haskell/extensible/more.pdf).
You can learn more in the following sections:
- ${"installation and imports" ~ Installation}
- ${"your first effects" ~ Introduction}
- ${"standard effects in eff" ~ OutOfTheBox}: `Reader`, `Writer`, `Eval`, `State`,...
- ${"tutorial" ~ Tutorial}
- ${"create your own effects" ~ CreateEffects}
- ${"interpret and manipulate effect stacks" ~ TransformStack}
- ${"use Member implicits" ~ MemberImplicits }
- ${"use an applicative evaluation" ~ ApplicativeEvaluation}
- ${"tips and tricks" ~ Cookbook}
### Contributing
`eff` is a [Typelevel](http://typelevel.org) project. This means we embrace pure, typeful, functional programming,
and provide a safe and friendly environment for teaching, learning, and contributing as described in the [Typelevel Code of Conduct](http://typelevel.org/conduct.html).
Feel free to open an issue if you notice a bug, have an idea for a feature, or have a question about the code. Pull requests are also gladly accepted.
"""
}
|
etorreborre/eff
|
jvm/src/test/scala/org/atnos/site/index.scala
|
Scala
|
mit
| 1,370 |
// scalastyle:off line.size.limit
/*
* Ported by Alistair Johnson from
* https://github.com/gwtproject/gwt/blob/master/user/test/com/google/gwt/emultest/java/math/BigIntegerNotTest.java
*/
// scalastyle:on line.size.limit
package org.scalajs.testsuite.javalib.math
import java.math.BigInteger
import org.scalajs.jasminetest.JasmineTest
object BigIntegerNotTest extends JasmineTest {
describe("BigIntegerNotTest") {
it("testAndNotNegNegFirstLonger") {
val aBytes = Array[Byte](-128, 9, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, -117, 23, 87, -25, -75)
val bBytes = Array[Byte](-2, -3, -4, -4, 5, 14, 23, 39, 48, 57, 66, 5, 14, 23)
val aSign = -1
val bSign = -1
val rBytes = Array[Byte](73, -92, -48, 4, 12, 6, 4, 32, 48, 64, 0, 8, 2)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.andNot(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
expect(resBytes(i)).toEqual(rBytes(i))
}
expect(result.signum()).toEqual(1)
}
it("testAndNotPosPosFirstLonger") {
val aBytes = Array[Byte](-128, 9, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, -117, 23, 87, -25, -75)
val bBytes = Array[Byte](-2, -3, -4, -4, 5, 14, 23, 39, 48, 57, 66, 5, 14, 23)
val aSign = 1
val bSign = 1
val rBytes = Array[Byte](0, -128, 9, 56, 100, 0, 0, 1, 1, 90, 1, -32, 0, 10, -126, 21, 82, -31, -96)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.andNot(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
expect(resBytes(i)).toEqual(rBytes(i))
}
expect(result.signum()).toEqual(1)
}
it("testAndNotPosPosFirstShorter") {
val aBytes = Array[Byte](-2, -3, -4, -4, 5, 14, 23, 39, 48, 57, 66, 5, 14, 23)
val bBytes = Array[Byte](-128, 9, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, -117, 23, 87, -25, -75)
val aSign = 1
val bSign = 1
val rBytes = Array[Byte](73, -92, -48, 4, 12, 6, 4, 32, 48, 64, 0, 8, 2)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.andNot(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
expect(resBytes(i)).toEqual(rBytes(i))
}
expect(result.signum()).toEqual(1)
}
it("testNegPosFirstLonger") {
val aBytes = Array[Byte](-128, 9, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, -117, 23, 87, -25, -75)
val bBytes = Array[Byte](-2, -3, -4, -4, 5, 14, 23, 39, 48, 57, 66, 5, 14, 23)
val aSign = -1
val bSign = 1
val rBytes = Array[Byte](-1, 127, -10, -57, -101, 1, 2, 2, 2, -96, -16, 8, -40, -59, 68, -88, -88, 16, 72)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.andNot(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
expect(resBytes(i)).toEqual(rBytes(i))
}
expect(result.signum()).toEqual(-1)
}
it("testNotNeg") {
val aBytes = Array[Byte](-128, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, -117)
val aSign = -1
val rBytes = Array[Byte](0, -128, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, -118)
val aNumber = new BigInteger(aSign, aBytes)
val result = aNumber.not()
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
expect(resBytes(i)).toEqual(rBytes(i))
}
expect(result.signum()).toEqual(1)
}
it("testNotOne") {
val rBytes = Array[Byte](-2)
val aNumber = BigInteger.ONE
val result = aNumber.not()
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
expect(resBytes(i)).toEqual(rBytes(i))
}
expect(result.signum()).toEqual(-1)
}
it("testNotPos") {
val aBytes = Array[Byte](-128, 56, 100, -2, -76, 89, 45, 91, 3, -15, 35, 26, -117)
val aSign = 1
val rBytes = Array[Byte](-1, 127, -57, -101, 1, 75, -90, -46, -92, -4, 14, -36, -27, 116)
val aNumber = new BigInteger(aSign, aBytes)
val result = aNumber.not()
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
expect(resBytes(i)).toEqual(rBytes(i))
}
expect(result.signum()).toEqual(-1)
}
it("testNotSpecialCase") {
val aBytes = Array[Byte](-1, -1, -1, -1)
val aSign = 1
val rBytes = Array[Byte](-1, 0, 0, 0, 0)
val aNumber = new BigInteger(aSign, aBytes)
val result = aNumber.not()
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
expect(resBytes(i)).toEqual(rBytes(i))
}
expect(result.signum()).toEqual(-1)
}
it("testNotZero") {
val rBytes = Array[Byte](-1)
val aNumber = BigInteger.ZERO
val result = aNumber.not()
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
expect(resBytes(i)).toEqual(rBytes(i))
}
expect(result.signum()).toEqual(-1)
}
}
}
|
doron123/scala-js
|
test-suite/src/test/scala/org/scalajs/testsuite/javalib/math/BigIntegerNotTest.scala
|
Scala
|
bsd-3-clause
| 5,726 |
package deaktator.cats.free.ex1.free
import deaktator.cats.free.ex1.support.Account
/**
* From Ch 5 of "Functional and Reactive Domain Modeling", Debasish Ghosh.
*
* @tparam A
*/
sealed trait AccountRepoA[+A]
case class Query(no: String) extends AccountRepoA[Account]
case class Store(account: Account) extends AccountRepoA[Unit]
case class Delete(no: String) extends AccountRepoA[Unit]
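// Hedged sketch (not part of this file): the book later lifts these constructors
// into a Free monad program; with cats that lifting would look roughly like this.
//
//   import cats.free.Free
//   type AccountRepo[A] = Free[AccountRepoA, A]
//   def query(no: String): AccountRepo[Account] = Free.liftF(Query(no))
//   def store(account: Account): AccountRepo[Unit] = Free.liftF(Store(account))
//   def delete(no: String): AccountRepo[Unit] = Free.liftF(Delete(no))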
|
deaktator/learn-cats
|
src/main/scala/deaktator/cats/free/ex1/free/AccountRepoA.scala
|
Scala
|
mit
| 398 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.util
import java.io.{Closeable, EOFException}
import java.nio.ByteBuffer
import org.apache.hadoop.conf.Configuration
import org.apache.spark.Logging
/**
* A reader for reading write ahead log files written using
* [[org.apache.spark.streaming.util.FileBasedWriteAheadLogWriter]]. This reads
 * the records (bytebuffers) in the log file sequentially and returns them as an
* iterator of bytebuffers.
*/
private[streaming] class FileBasedWriteAheadLogReader(path: String, conf: Configuration)
extends Iterator[ByteBuffer] with Closeable with Logging {
private val instream = HdfsUtils.getInputStream(path, conf)
private var closed = false
private var nextItem: Option[ByteBuffer] = None
override def hasNext: Boolean = synchronized {
if (closed) {
return false
}
if (nextItem.isDefined) { // handle the case where hasNext is called without calling next
true
} else {
try {
val length = instream.readInt()
val buffer = new Array[Byte](length)
instream.readFully(buffer)
nextItem = Some(ByteBuffer.wrap(buffer))
logTrace("Read next item " + nextItem.get)
true
} catch {
case e: EOFException =>
logDebug("Error reading next item, EOF reached", e)
close()
false
case e: Exception =>
logWarning("Error while trying to read data from HDFS.", e)
close()
throw e
}
}
}
override def next(): ByteBuffer = synchronized {
val data = nextItem.getOrElse {
close()
throw new IllegalStateException(
"next called without calling hasNext or after hasNext returned false")
}
nextItem = None // Ensure the next hasNext call loads new data.
data
}
override def close(): Unit = synchronized {
if (!closed) {
instream.close()
}
closed = true
}
}
|
practice-vishnoi/dev-spark-1
|
streaming/src/main/scala/org/apache/spark/streaming/util/FileBasedWriteAheadLogReader.scala
|
Scala
|
apache-2.0
| 2,716 |
package chandu0101.scalajs.react.components
import chandu0101.macros.tojs.JSMacro
import japgolly.scalajs.react._
import japgolly.scalajs.react.raw._
import japgolly.scalajs.react.vdom.VdomElement
import org.scalajs.dom.raw.HTMLElement
import scala.scalajs.js
import scala.scalajs.js.annotation.JSImport
@JSImport("react-infinite", JSImport.Default) @js.native
object ReactInfiniteRequire extends js.Any
case class ReactInfinite(
handleScroll: js.UndefOr[HTMLElement => Callback] = js.undefined,
preloadAdditionalHeight: js.UndefOr[Int] = js.undefined,
isInfiniteLoading: js.UndefOr[Boolean] = js.undefined,
preloadBatchSize: js.UndefOr[Int] = js.undefined,
containerHeight: Int,
ref: js.UndefOr[ReactInfiniteM => Unit] = js.undefined,
loadingSpinnerDelegate: js.UndefOr[ReactElement] = js.undefined,
timeScrollStateLastsForAfterUserScrolls: js.UndefOr[Int] = js.undefined,
elementHeight: Double,
key: js.UndefOr[String] = js.undefined,
className: js.UndefOr[String] = js.undefined,
infiniteLoadBeginBottomOffset: js.UndefOr[Int] = js.undefined,
onInfiniteLoad: js.UndefOr[Callback] = js.undefined
) {
def apply(children: Seq[VdomElement]) = {
val props = JSMacro[ReactInfinite](this)
val f = JsComponent[js.Object, Children.Varargs, Null](ReactInfiniteRequire)
f(props)(children: _*)
}
}
@js.native
trait ReactInfiniteM extends js.Object {
def getScrollTop(): Double = js.native
}
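// Usage sketch (assumed values): only `containerHeight` and `elementHeight` are
// required by the case class above; `children: Seq[VdomElement]` is assumed to
// exist at the call site and is passed through apply().
//
//   ReactInfinite(containerHeight = 400, elementHeight = 40.0)(children)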
|
chandu0101/scalajs-react-components
|
core/src/main/scala/chandu0101/scalajs/react/components/ReactInfinite.scala
|
Scala
|
apache-2.0
| 1,462 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.consumers
import monix.execution.Callback
import monix.execution.Ack.Stop
import monix.execution.{Ack, Scheduler}
import monix.execution.atomic.Atomic
import monix.execution.cancelables.AssignableCancelable
import scala.util.control.NonFatal
import monix.reactive.{Consumer, Observer}
import monix.reactive.observers.Subscriber
import scala.concurrent.Future
import scala.util.{Failure, Success, Try}
/** Implementation for [[monix.reactive.Consumer.fromObserver]]. */
private[reactive]
final class FromObserverConsumer[In](f: Scheduler => Observer[In])
extends Consumer[In, Unit] {
def createSubscriber(cb: Callback[Throwable, Unit], s: Scheduler): (Subscriber[In], AssignableCancelable) = {
Try(f(s)) match {
case Failure(ex) =>
Consumer.raiseError(ex).createSubscriber(cb,s)
case Success(out) =>
val sub = new Subscriber[In] { self =>
implicit val scheduler = s
private[this] val isDone = Atomic(false)
private def signal(ex: Throwable): Unit =
if (!isDone.getAndSet(true)) {
if (ex == null) {
try out.onComplete()
finally cb.onSuccess(())
}
else {
try out.onError(ex)
finally cb.onError(ex)
}
}
def onNext(elem: In): Future[Ack] = {
val ack = try out.onNext(elem) catch {
case ex if NonFatal(ex) => Future.failed(ex)
}
ack.syncOnComplete {
case Success(result) =>
if (result == Stop) signal(null)
case Failure(ex) =>
signal(ex)
}
ack
}
def onComplete(): Unit = signal(null)
def onError(ex: Throwable): Unit = signal(ex)
}
(sub, AssignableCancelable.dummy)
}
}
}
|
Wogan/monix
|
monix-reactive/shared/src/main/scala/monix/reactive/internal/consumers/FromObserverConsumer.scala
|
Scala
|
apache-2.0
| 2,589 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import org.apache.spark.sql.Row
import org.apache.spark.sql.hive.test.{TestHive, TestHiveSingleton}
import org.apache.spark.sql.hive.test.TestHive._
import org.apache.spark.sql.hive.test.TestHive.implicits._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SQLTestUtils
import org.apache.spark.util.Utils
class HiveTableScanSuite extends HiveComparisonTest with SQLTestUtils with TestHiveSingleton {
createQueryTest("partition_based_table_scan_with_different_serde",
"""
|CREATE TABLE part_scan_test (key STRING, value STRING) PARTITIONED BY (ds STRING)
|ROW FORMAT SERDE
|'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
|STORED AS RCFILE;
|
|FROM src
|INSERT INTO TABLE part_scan_test PARTITION (ds='2010-01-01')
|SELECT 100,100 LIMIT 1;
|
|ALTER TABLE part_scan_test SET SERDE
|'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
|
|FROM src INSERT INTO TABLE part_scan_test PARTITION (ds='2010-01-02')
|SELECT 200,200 LIMIT 1;
|
|SELECT * from part_scan_test;
""".stripMargin)
// In unit test, kv1.txt is a small file and will be loaded as table src
// Since the small file will be considered as a single split, we assume
// Hive / SparkSQL HQL has the same output even for SORT BY
createQueryTest("file_split_for_small_table",
"""
|SELECT key, value FROM src SORT BY key, value
""".stripMargin)
test("Spark-4041: lowercase issue") {
TestHive.sql("CREATE TABLE tb (KEY INT, VALUE STRING) STORED AS ORC")
TestHive.sql("insert into table tb select key, value from src")
TestHive.sql("select KEY from tb where VALUE='just_for_test' limit 5").collect()
TestHive.sql("drop table tb")
}
test("Spark-4077: timestamp query for null value") {
TestHive.sql("DROP TABLE IF EXISTS timestamp_query_null")
TestHive.sql(
"""
CREATE TABLE timestamp_query_null (time TIMESTAMP,id INT)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
LINES TERMINATED BY '\\n'
""".stripMargin)
val location =
Utils.getSparkClassLoader.getResource("data/files/issue-4077-data.txt").getFile()
TestHive.sql(s"LOAD DATA LOCAL INPATH '$location' INTO TABLE timestamp_query_null")
assert(TestHive.sql("SELECT time from timestamp_query_null limit 2").collect()
=== Array(Row(java.sql.Timestamp.valueOf("2014-12-11 00:00:00")), Row(null)))
TestHive.sql("DROP TABLE timestamp_query_null")
}
test("Spark-4959 Attributes are case sensitive when using a select query from a projection") {
withTable("spark_4959") {
sql("create table spark_4959 (col1 string)")
sql("""insert into table spark_4959 select "hi" from src limit 1""")
table("spark_4959").select(
'col1.as("CaseSensitiveColName"),
'col1.as("CaseSensitiveColName2")).createOrReplaceTempView("spark_4959_2")
assert(sql("select CaseSensitiveColName from spark_4959_2").head() === Row("hi"))
assert(sql("select casesensitivecolname from spark_4959_2").head() === Row("hi"))
}
}
private def checkNumScannedPartitions(stmt: String, expectedNumParts: Int): Unit = {
val plan = sql(stmt).queryExecution.sparkPlan
val numPartitions = plan.collectFirst {
case p: HiveTableScanExec => p.rawPartitions.length
}.getOrElse(0)
assert(numPartitions == expectedNumParts)
}
test("Verify SQLConf HIVE_METASTORE_PARTITION_PRUNING") {
val view = "src"
withTempView(view) {
spark.range(1, 5).createOrReplaceTempView(view)
val table = "table_with_partition"
withTable(table) {
sql(
s"""
|CREATE TABLE $table(id string)
|PARTITIONED BY (p1 string,p2 string,p3 string,p4 string,p5 string)
""".stripMargin)
sql(
s"""
|FROM $view v
|INSERT INTO TABLE $table
|PARTITION (p1='a',p2='b',p3='c',p4='d',p5='e')
|SELECT v.id
|INSERT INTO TABLE $table
|PARTITION (p1='a',p2='c',p3='c',p4='d',p5='e')
|SELECT v.id
""".stripMargin)
Seq("true", "false").foreach { hivePruning =>
withSQLConf(SQLConf.HIVE_METASTORE_PARTITION_PRUNING.key -> hivePruning) {
              // If the pruning predicate is used, getHiveQlPartitions should only return the
              // qualified partition; otherwise, it returns all the partitions.
val expectedNumPartitions = if (hivePruning == "true") 1 else 2
checkNumScannedPartitions(
stmt = s"SELECT id, p2 FROM $table WHERE p2 <= 'b'", expectedNumPartitions)
}
}
Seq("true", "false").foreach { hivePruning =>
withSQLConf(SQLConf.HIVE_METASTORE_PARTITION_PRUNING.key -> hivePruning) {
// If the pruning predicate does not exist, getHiveQlPartitions should always
// return all the partitions.
checkNumScannedPartitions(
stmt = s"SELECT id, p2 FROM $table WHERE id <= 3", expectedNumParts = 2)
}
}
}
}
}
test("SPARK-16926: number of table and partition columns match for new partitioned table") {
val view = "src"
withTempView(view) {
spark.range(1, 5).createOrReplaceTempView(view)
val table = "table_with_partition"
withTable(table) {
sql(
s"""
|CREATE TABLE $table(id string)
|PARTITIONED BY (p1 string,p2 string,p3 string,p4 string,p5 string)
""".stripMargin)
sql(
s"""
|FROM $view v
|INSERT INTO TABLE $table
|PARTITION (p1='a',p2='b',p3='c',p4='d',p5='e')
|SELECT v.id
|INSERT INTO TABLE $table
|PARTITION (p1='a',p2='c',p3='c',p4='d',p5='e')
|SELECT v.id
""".stripMargin)
val scan = getHiveTableScanExec(s"SELECT * FROM $table")
val numDataCols = scan.relation.dataCols.length
scan.rawPartitions.foreach(p => assert(p.getCols.size == numDataCols))
}
}
}
test("HiveTableScanExec canonicalization for different orders of partition filters") {
val table = "hive_tbl_part"
withTable(table) {
sql(
s"""
|CREATE TABLE $table (id int)
|PARTITIONED BY (a int, b int)
""".stripMargin)
val scan1 = getHiveTableScanExec(s"SELECT * FROM $table WHERE a = 1 AND b = 2")
val scan2 = getHiveTableScanExec(s"SELECT * FROM $table WHERE b = 2 AND a = 1")
assert(scan1.sameResult(scan2))
}
}
private def getHiveTableScanExec(query: String): HiveTableScanExec = {
sql(query).queryExecution.sparkPlan.collectFirst {
case p: HiveTableScanExec => p
}.get
}
}
|
bravo-zhang/spark
|
sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala
|
Scala
|
apache-2.0
| 7,686 |
package models.daos.tables
import java.util.UUID
import models.User
import slick.jdbc.JdbcProfile
import slick.lifted.ProvenShape.proveShapeOf
trait DBTableDefinitions {
protected val profile: JdbcProfile
import profile.api._
val users = TableQuery[Users]
val passwords = TableQuery[Passwords]
class Users(tag: Tag) extends Table[User](tag, "users") {
def * = (id, name, email) <>(User.tupled, User.unapply)
def id = column[UUID]("id", O.PrimaryKey)
def name = column[String]("name")
def email = column[String]("email")
}
case class DBPasswordInfo(hash: String, password: String, salt: Option[String], email: String)
class Passwords(tag: Tag) extends Table[DBPasswordInfo](tag, "passwords") {
def * = (hash, password, salt, email) <>(DBPasswordInfo.tupled, DBPasswordInfo.unapply)
def hash = column[String]("hash")
def password = column[String]("password")
def salt = column[Option[String]]("salt")
def email = column[String]("email", O.PrimaryKey)
}
}
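// Query sketch (illustrative; `db` is an assumed Slick Database instance): the
// TableQuery values above compose into ordinary Slick actions, e.g. looking up
// a password row by its primary-key email.
//
//   def passwordFor(email: String): Future[Option[DBPasswordInfo]] =
//     db.run(passwords.filter(_.email === email).result.headOption)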
|
wjglerum/bamboesmanager
|
app/models/daos/tables/DBTableDefinitions.scala
|
Scala
|
mit
| 1,021 |
package org.orbroker.config.dynamic
import org.orbroker._
import SQLStatement._
import org.orbroker.adapt.BrokerAdapter
import org.orbroker.callback.ExecutionCallback
import java.io.{ StringReader, StringWriter }
import freemarker.template._
private[orbroker] class FreeMarkerStatement(
id: Symbol,
freemarkerSQL: Seq[String],
trimSQL: Boolean,
callback: ExecutionCallback,
adapter: BrokerAdapter,
config: Configuration)
extends SQLStatement(id, callback, adapter) {
import FreeMarkerStatement._
private val template = {
val sql = if (usesINPredicate(freemarkerSQL)) {
val useSquareBrackets = freemarkerSQL.exists { line =>
FindFTLDirective.findFirstMatchIn(line).exists(_.group(1) == "[")
}
freemarkerSQL.mkString(EOL) + EOL + (if (useSquareBrackets) SeqExpansionMacroSquare else SeqExpansionMacroAngle)
} else {
freemarkerSQL.mkString(EOL)
}
new Template(id.name, sql, config)
}
override def statement(parms: Map[String, Any]) = {
val context = toJavaMap(parms)
val writer = new StringWriter
template.process(context, writer)
SQLStatement.parseSQL(id, writer, trimSQL, adapter)
}
}
private[dynamic] object FreeMarkerStatement {
  private val FindSeqExpansionMacro = """@IN\s+seq=""".r
def usesINPredicate(sql: Seq[String]) = sql.exists(FindSeqExpansionMacro.pattern.matcher(_).find)
val SeqExpansionMacroAngle = "<#macro IN seq>IN (<#list .globals[seq] as e><#if (e_index > 0)>,</#if>:${seq}[${e_index}]</#list>)</#macro>"
val SeqExpansionMacroSquare = "[#macro IN seq]IN ([#list .globals[seq] as e][#if (e_index > 0)],[/#if]:${seq}[${e_index}][/#list])[/#macro]"
  val FindFTLDirective = """([<\[])#ftl""".r
val isFreeMarkerAvailable = try {
Class.forName("freemarker.template.Template")
true
} catch {
case _: Throwable => false
}
def hasFreeMarkerConditionals(sq1: String) =
((sq1 contains "#if") && (sq1 contains "/#if")) ||
((sq1 contains "#list") && (sq1 contains "/#list"))
}
|
nilskp/orbroker
|
src/main/scala/org/orbroker/config/dynamic/FreeMarkerStatement.scala
|
Scala
|
mit
| 2,016 |
package edu.umd.mith.sga.mss
import java.io.File
import scala.xml._
trait TeiTemplates {
def cleanString(s: String): String = s
.replaceAll("\\u00A0", " ")
    .replaceAll("&", "&amp;")
    .replaceAll("--", "—")
    .replaceAll("<", "&lt;").replaceAll(">", "&gt;")
def spanString(span: Span): String = span match {
case PlainText(text) => cleanString(text)
case Unclear(spans) =>
"<unclear>" + spans.flatMap(spanString).mkString + "</unclear>"
case Deleted(spans) =>
"""<del rend="strikethrough">""" + spans.flatMap(spanString).mkString + "</del>"
}
def lineTemplate(line: Line) =
f"\\n <line>${ line.spans.map(spanString).mkString }%s</line>"
/** We're using a string-based template because the native XML support for
* processing instructions and custom indentation and line break rules in
* Scala is shamefully bad.
*/
def surfaceTemplate(
library: String,
shelfmark: String,
seq: Int,
folioLabel: String,
shelfmarkLabel: String,
size: Option[(Int, Int)],
pages: Seq[Page]
) = {
val id = f"$library%s-$shelfmark%s-$seq%04d"
val corners = size.fold("") {
case (w, h) => f"""\\n ulx="0" uly="0" lrx="$w%d" lry="$h%d""""
}
f"""<?xml version="1.0" encoding="ISO-8859-1"?><?xml-model href="../../../schemata/shelley-godwin-page.rnc"
type="application/relax-ng-compact-syntax"?><?xml-stylesheet type="text/xsl"
href="../../../xsl/page-proof.xsl"?>
<surface xmlns="http://www.tei-c.org/ns/1.0" xmlns:mith="http://mith.umd.edu/sc/ns1#"$corners%s
xml:id="$id%s" partOf="#$library%s-$shelfmark%s"
mith:shelfmark="$shelfmarkLabel%s" mith:folio="$folioLabel%s">
<graphic url="http://shelleygodwinarchive.org/images/$library%s/$id%s.jp2"/>
<zone type="main">${ pages.flatMap(_.lines.map(_._2)).map(lineTemplate).mkString }%s
</zone>
</surface>
"""
}
def teiTemplate(
library: String,
shelfmark: String,
shelfmarkLabel: String,
files: List[File],
works: List[(String, List[String])]
) = {
val id = f"$library%s-$shelfmark%s"
val fileIncludes = files.map { file =>
val path = file.getPath.split(File.separator).takeRight(2).mkString(File.separator)
<xi:include href={ path }/>
}
/** Here we need to combine consecutive identifiers into single locus
* elements. The logic is messy but not that complicated.
*/
val items = works.map {
case (title, ids) =>
val (loci, locus) = ids.foldLeft((List.empty[List[String]], List.empty[String])) {
case ((loci, locus @ (last :: _)), id)
if last.takeRight(4).toInt == id.takeRight(4).toInt - 1 =>
(loci, id :: locus)
case ((loci, locus), id) => ((loci :+ locus), List(id))
}
val allLoci = (loci :+ locus).filterNot(_.isEmpty).map { ids =>
<locus target={ ids.reverse.map("#" + _).mkString(" ") }/>
}
<msItem>
<bibl>
<title>{ title }</title>
</bibl>
<locusGrp>{ allLoci }</locusGrp>
</msItem>
}
<TEI xmlns="http://www.tei-c.org/ns/1.0"
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id={id}>
<teiHeader>
<fileDesc>
<titleStmt>
<title type="main">Frankenstein, Draft Notebook A</title>
<title type="sub">An electronic transcription</title>
</titleStmt>
<editionStmt>
<edition>Shelley-Godwin Archive edition, <date>2012-2014</date>
</edition>
</editionStmt>
<publicationStmt>
<distributor>Oxford University</distributor>
<address>
<addrLine>
<ref target="http://www.shelleygodwinarchive.org/">http://www.shelleygodwinarchive.org/</ref>
</addrLine>
</address>
<availability status="free">
<licence target="http://creativecommons.org/publicdomain/zero/1.0/">
<p>CC0 1.0 Universal.</p>
<p> To the extent possible under law, the creators of the metadata records for the Shelley-Godwin Archive
have waived all copyright and related or neighboring rights to this work.</p>
</licence>
</availability>
<pubPlace>Oxford, UK, and College Park, MD</pubPlace>
</publicationStmt>
<sourceDesc>
<msDesc>
<msIdentifier>
<settlement>Oxford</settlement>
<repository>Bodleian Library, University of Oxford</repository>
<idno type="Bod">{ shelfmarkLabel }</idno>
</msIdentifier>
<msContents>
<msItem xml:id={ id + "-works" }>
<bibl status="">
<author>Percy Shelley</author>
</bibl>
{ items }
</msItem>
</msContents>
<physDesc>
<handDesc>
<handNote xml:id="pbs"><persName>Percy Shelley</persName></handNote>
</handDesc>
</physDesc>
</msDesc>
</sourceDesc>
</fileDesc>
<revisionDesc>
</revisionDesc>
</teiHeader>
<sourceDoc>{ fileIncludes }
</sourceDoc>
</TEI>
}
}
|
umd-mith/pbs-mss
|
src/main/scala/TeiTemplates.scala
|
Scala
|
apache-2.0
| 5,590 |
package instrumentti
object LinkO {
val UNIDIRECTION: Int = 0
val BIDIRECTONAL: Int = 1
}
class Link(a: Node, b: Node, var dir: Int) extends Element(ElementCollection.getNextLinkId) {
ElementCollection.addLink(this)
def display() = {
InstrumenttiMain.stroke(0);
InstrumenttiMain.line(a.location, b.location);
}
def displaySelected() = {
}
}
|
transfluxus/PublicInstrumentti
|
src/instrumentti/Link.scala
|
Scala
|
mit
| 374 |
// scalac: '-Wconf:msg=shadowing a nested class of a parent is deprecated:s'
package a {
trait Test {
class Shadow
def test: Shadow = new Shadow
}
}
package b {
trait Test extends a.Test {
class Shadow extends super.Shadow
override def test: Shadow = super.test
}
}
|
scala/scala
|
test/files/neg/t8777.scala
|
Scala
|
apache-2.0
| 291 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.util
import java.nio.charset.Charset
import java.util.regex.Pattern
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
import scala.language.implicitConversions
import scala.util.control.Breaks.{break, breakable}
import org.apache.commons.lang3.{ArrayUtils, StringUtils}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.NullWritable
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
import org.apache.hadoop.mapreduce.security.TokenCache
import org.apache.spark.{Accumulator, SparkException}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.rdd.{NewHadoopRDD, RDD}
import org.apache.spark.sql._
import org.apache.spark.util.FileUtils
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.cache.dictionary.{Dictionary, DictionaryColumnUniqueIdentifier}
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.locks.{CarbonLockFactory, LockUsage}
import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, CarbonTableIdentifier, ColumnIdentifier}
import org.apache.carbondata.core.metadata.datatype.DataTypes
import org.apache.carbondata.core.metadata.encoder.Encoding
import org.apache.carbondata.core.metadata.schema.table.column.{CarbonDimension, ColumnSchema}
import org.apache.carbondata.core.reader.CarbonDictionaryReader
import org.apache.carbondata.core.service.CarbonCommonFactory
import org.apache.carbondata.core.statusmanager.SegmentStatus
import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil, DataTypeUtil}
import org.apache.carbondata.core.util.path.CarbonTablePath
import org.apache.carbondata.core.writer.CarbonDictionaryWriter
import org.apache.carbondata.processing.exception.DataLoadingException
import org.apache.carbondata.processing.loading.csvinput.{CSVInputFormat, StringArrayWritable}
import org.apache.carbondata.processing.loading.exception.NoRetryException
import org.apache.carbondata.processing.loading.model.CarbonLoadModel
import org.apache.carbondata.processing.util.CarbonLoaderUtil
import org.apache.carbondata.spark.CarbonSparkFactory
import org.apache.carbondata.spark.rdd._
import org.apache.carbondata.spark.tasks.{DictionaryWriterTask, SortIndexWriterTask}
/**
* A object which provide a method to generate global dictionary from CSV files.
*/
object GlobalDictionaryUtil {
private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
/**
* The default separator to use if none is supplied to the constructor.
*/
val DEFAULT_SEPARATOR: Char = ','
/**
* The default quote character to use if none is supplied to the
* constructor.
*/
val DEFAULT_QUOTE_CHARACTER: Char = '"'
/**
* find columns which need to generate global dictionary.
*
* @param dimensions dimension list of schema
* @param headers column headers
* @param columns column list of csv file
*/
def pruneDimensions(dimensions: Array[CarbonDimension],
headers: Array[String],
columns: Array[String]): (Array[CarbonDimension], Array[String]) = {
val dimensionBuffer = new ArrayBuffer[CarbonDimension]
val columnNameBuffer = new ArrayBuffer[String]
val dimensionsWithDict = dimensions.filter(hasEncoding(_, Encoding.DICTIONARY,
Encoding.DIRECT_DICTIONARY))
dimensionsWithDict.foreach { dim =>
breakable {
headers.zipWithIndex.foreach { h =>
if (dim.getColName.equalsIgnoreCase(h._1)) {
dimensionBuffer += dim
columnNameBuffer += columns(h._2)
break
}
}
}
}
(dimensionBuffer.toArray, columnNameBuffer.toArray)
}
/**
   * Use this method to judge whether a CarbonDimension uses some encoding or not.
*
* @param dimension carbonDimension
* @param encoding the coding way of dimension
* @param excludeEncoding the coding way to exclude
*/
def hasEncoding(dimension: CarbonDimension,
encoding: Encoding,
excludeEncoding: Encoding): Boolean = {
if (dimension.isComplex()) {
val children = dimension.getListOfChildDimensions
children.asScala.exists(hasEncoding(_, encoding, excludeEncoding))
} else {
dimension.hasEncoding(encoding) &&
(excludeEncoding == null || !dimension.hasEncoding(excludeEncoding))
}
}
def gatherDimensionByEncoding(carbonLoadModel: CarbonLoadModel,
dimension: CarbonDimension,
encoding: Encoding,
excludeEncoding: Encoding,
dimensionsWithEncoding: ArrayBuffer[CarbonDimension],
forPreDefDict: Boolean) {
if (dimension.isComplex) {
val children = dimension.getListOfChildDimensions.asScala
children.foreach { c =>
gatherDimensionByEncoding(carbonLoadModel, c, encoding, excludeEncoding,
dimensionsWithEncoding, forPreDefDict)
}
} else {
if (dimension.hasEncoding(encoding) &&
(excludeEncoding == null || !dimension.hasEncoding(excludeEncoding))) {
if ((forPreDefDict && carbonLoadModel.getPredefDictFilePath(dimension) != null) ||
(!forPreDefDict && carbonLoadModel.getPredefDictFilePath(dimension) == null)) {
dimensionsWithEncoding += dimension
}
}
}
}
def getPrimDimensionWithDict(carbonLoadModel: CarbonLoadModel,
dimension: CarbonDimension,
forPreDefDict: Boolean): Array[CarbonDimension] = {
val dimensionsWithDict = new ArrayBuffer[CarbonDimension]
gatherDimensionByEncoding(carbonLoadModel, dimension, Encoding.DICTIONARY,
Encoding.DIRECT_DICTIONARY,
dimensionsWithDict, forPreDefDict)
dimensionsWithDict.toArray
}
def generateParserForChildrenDimension(dim: CarbonDimension,
format: DataFormat,
mapColumnValuesWithId:
HashMap[String, HashSet[String]],
generic: GenericParser): Unit = {
val children = dim.getListOfChildDimensions.asScala
for (i <- children.indices) {
generateParserForDimension(Some(children(i)), format.cloneAndIncreaseIndex,
mapColumnValuesWithId) match {
case Some(childDim) =>
generic.addChild(childDim)
case None =>
}
}
}
def generateParserForDimension(dimension: Option[CarbonDimension],
format: DataFormat,
mapColumnValuesWithId: HashMap[String, HashSet[String]]): Option[GenericParser] = {
dimension match {
case None =>
None
case Some(dim) =>
if (DataTypes.isArrayType(dim.getDataType)) {
val arrDim = ArrayParser(dim, format)
generateParserForChildrenDimension(dim, format, mapColumnValuesWithId, arrDim)
Some(arrDim)
} else if (DataTypes.isStructType(dim.getDataType)) {
val stuDim = StructParser(dim, format)
generateParserForChildrenDimension(dim, format, mapColumnValuesWithId, stuDim)
Some(stuDim)
} else {
Some(PrimitiveParser(dim, mapColumnValuesWithId.get(dim.getColumnId)))
}
}
}
def createDataFormat(delimiters: Array[String]): DataFormat = {
if (ArrayUtils.isNotEmpty(delimiters)) {
val patterns = delimiters.map { d =>
Pattern.compile(if (d == null) {
""
} else {
CarbonUtil.delimiterConverter(d)
})
}
DataFormat(delimiters.map(CarbonUtil.delimiterConverter(_)), 0, patterns)
} else {
null
}
}
/**
   * create an instance of DictionaryLoadModel
*
* @param carbonLoadModel carbon load model
* @param table CarbonTableIdentifier
* @param dimensions column list
* @param dictFolderPath path of dictionary folder
*/
def createDictionaryLoadModel(
carbonLoadModel: CarbonLoadModel,
table: CarbonTableIdentifier,
dimensions: Array[CarbonDimension],
dictFolderPath: String,
forPreDefDict: Boolean): DictionaryLoadModel = {
val primDimensionsBuffer = new ArrayBuffer[CarbonDimension]
val isComplexes = new ArrayBuffer[Boolean]
for (i <- dimensions.indices) {
val dims = getPrimDimensionWithDict(carbonLoadModel, dimensions(i), forPreDefDict)
for (j <- dims.indices) {
primDimensionsBuffer += dims(j)
isComplexes += dimensions(i).isComplex
}
}
val primDimensions = primDimensionsBuffer.map { x => x }.toArray
val dictDetail = CarbonSparkFactory.getDictionaryDetailService.
getDictionaryDetail(dictFolderPath, primDimensions, carbonLoadModel.getTablePath)
val dictFilePaths = dictDetail.dictFilePaths
val dictFileExists = dictDetail.dictFileExists
val columnIdentifier = dictDetail.columnIdentifiers
val hdfsTempLocation = CarbonProperties.getInstance.
getProperty(CarbonCommonConstants.HDFS_TEMP_LOCATION, System.getProperty("java.io.tmpdir"))
val lockType = CarbonProperties.getInstance
.getProperty(CarbonCommonConstants.LOCK_TYPE, CarbonCommonConstants.CARBON_LOCK_TYPE_HDFS)
val zookeeperUrl = CarbonProperties.getInstance.getProperty(CarbonCommonConstants.ZOOKEEPER_URL)
val serializationNullFormat =
carbonLoadModel.getSerializationNullFormat.split(CarbonCommonConstants.COMMA, 2)(1)
// get load count
if (null == carbonLoadModel.getLoadMetadataDetails) {
carbonLoadModel.readAndSetLoadMetadataDetails()
}
val absoluteTableIdentifier = AbsoluteTableIdentifier.from(carbonLoadModel.getTablePath, table)
DictionaryLoadModel(
absoluteTableIdentifier,
dimensions,
carbonLoadModel.getTablePath,
dictFolderPath,
dictFilePaths,
dictFileExists,
isComplexes.toArray,
primDimensions,
carbonLoadModel.getDelimiters,
columnIdentifier,
carbonLoadModel.getLoadMetadataDetails.size() == 0,
hdfsTempLocation,
lockType,
zookeeperUrl,
serializationNullFormat,
carbonLoadModel.getDefaultTimestampFormat,
carbonLoadModel.getDefaultDateFormat)
}
/**
* load and prune dictionary Rdd from csv file or input dataframe
*
* @param sqlContext sqlContext
* @param carbonLoadModel carbonLoadModel
* @param inputDF input dataframe
* @param requiredCols names of dictionary column
* @param hadoopConf hadoop configuration
* @return rdd that contains only dictionary columns
*/
private def loadInputDataAsDictRdd(sqlContext: SQLContext,
carbonLoadModel: CarbonLoadModel,
inputDF: Option[DataFrame],
requiredCols: Array[String],
hadoopConf: Configuration): RDD[Row] = {
if (inputDF.isDefined) {
inputDF.get.select(requiredCols.head, requiredCols.tail : _*).rdd
} else {
CommonUtil.configureCSVInputFormat(hadoopConf, carbonLoadModel)
hadoopConf.set(FileInputFormat.INPUT_DIR, carbonLoadModel.getFactFilePath)
val headerCols = carbonLoadModel.getCsvHeaderColumns.map(_.toLowerCase)
val header2Idx = headerCols.zipWithIndex.toMap
// index of dictionary columns in header
val dictColIdx = requiredCols.map(c => header2Idx(c.toLowerCase))
val jobConf = new JobConf(hadoopConf)
SparkHadoopUtil.get.addCredentials(jobConf)
TokenCache.obtainTokensForNamenodes(jobConf.getCredentials,
Array[Path](new Path(carbonLoadModel.getFactFilePath)),
jobConf)
val dictRdd = new NewHadoopRDD[NullWritable, StringArrayWritable](
sqlContext.sparkContext,
classOf[CSVInputFormat],
classOf[NullWritable],
classOf[StringArrayWritable],
jobConf)
.setName("global dictionary")
.map[Row] { currentRow =>
val rawRow = currentRow._2.get()
val destRow = new Array[String](dictColIdx.length)
for (i <- dictColIdx.indices) {
// dictionary index in this row
val idx = dictColIdx(i)
// copy specific dictionary value from source to dest
if (idx < rawRow.length) {
System.arraycopy(rawRow, idx, destRow, i, 1)
}
}
Row.fromSeq(destRow)
}
dictRdd
}
}
/**
   * check whether the global dictionary has been generated successfully or not
   *
   * @param status per-column status used to check whether generation succeeded
*/
private def checkStatus(carbonLoadModel: CarbonLoadModel,
sqlContext: SQLContext,
model: DictionaryLoadModel,
status: Array[(Int, SegmentStatus)]) = {
var result = false
val tableName = model.table.getCarbonTableIdentifier.getTableName
status.foreach { x =>
val columnName = model.primDimensions(x._1).getColName
if (SegmentStatus.LOAD_FAILURE == x._2) {
result = true
LOGGER.error(s"table:$tableName column:$columnName generate global dictionary file failed")
}
}
if (result) {
LOGGER.error("generate global dictionary files failed")
throw new Exception("Failed to generate global dictionary files")
} else {
LOGGER.info("generate global dictionary successfully")
}
}
/**
   * get external columns and their dictionary file paths
*
* @param colDictFilePath external column dict file path
* @param table table identifier
* @param dimensions dimension columns
*/
private def setPredefinedColumnDictPath(carbonLoadModel: CarbonLoadModel,
colDictFilePath: String,
table: CarbonTableIdentifier,
dimensions: Array[CarbonDimension]) = {
val colFileMapArray = colDictFilePath.split(",")
for (colPathMap <- colFileMapArray) {
val colPathMapTrim = colPathMap.trim
val colNameWithPath = colPathMapTrim.split(":")
if (colNameWithPath.length == 1) {
LOGGER.error("the format of external column dictionary should be " +
"columnName:columnPath, please check")
throw new DataLoadingException("the format of predefined column dictionary" +
" should be columnName:columnPath, please check")
}
setPredefineDict(carbonLoadModel, dimensions, table, colNameWithPath(0),
FileUtils
.getPaths(CarbonUtil
.checkAndAppendHDFSUrl(colPathMapTrim.substring(colNameWithPath(0).length + 1))))
}
}
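  // Worked example for setPredefinedColumnDictPath (values are illustrative): a
  // COLUMNDICT value of "country:/dicts/country.csv,name:/dicts/name.csv" is split
  // on ',' and each entry on its first ':' into a (columnName, columnPath) pair.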
/**
* set pre defined dictionary for dimension
*
* @param dimensions all the dimensions
* @param table carbon table identifier
* @param colName user specified column name for predefined dict
* @param colDictPath column dictionary file path
   * @param parentDimName parent dimension for complex type
*/
def setPredefineDict(carbonLoadModel: CarbonLoadModel,
dimensions: Array[CarbonDimension],
table: CarbonTableIdentifier,
colName: String,
colDictPath: String,
parentDimName: String = "") {
val middleDimName = colName.split("\\.")(0)
val dimParent = parentDimName + {
colName match {
case "" => colName
case _ =>
if (parentDimName.isEmpty) {
middleDimName
} else {
"." + middleDimName
}
}
}
// judge whether the column is exists
val preDictDimensionOption = dimensions.filter(
_.getColName.equalsIgnoreCase(dimParent))
if (preDictDimensionOption.length == 0) {
LOGGER.error(s"Column $dimParent is not a key column " +
s"in ${ table.getDatabaseName }.${ table.getTableName }")
throw new DataLoadingException(s"Column $dimParent is not a key column. " +
s"Only key column can be part of dictionary " +
s"and used in COLUMNDICT option.")
}
val preDictDimension = preDictDimensionOption(0)
if (preDictDimension.isComplex) {
val children = preDictDimension.getListOfChildDimensions.asScala.toArray
      // for Array, the user sets ArrayField: path, while ArrayField has a child Array.val
val currentColName = {
if (DataTypes.isArrayType(preDictDimension.getDataType)) {
if (children(0).isComplex) {
"val." + colName.substring(middleDimName.length + 1)
} else {
"val"
}
} else {
colName.substring(middleDimName.length + 1)
}
}
setPredefineDict(carbonLoadModel, children, table, currentColName,
colDictPath, dimParent)
} else {
carbonLoadModel.setPredefDictMap(preDictDimension, colDictPath)
}
}
/**
* use external dimension column to generate global dictionary
*
* @param colDictFilePath external column dict file path
* @param table table identifier
* @param dimensions dimension column
* @param carbonLoadModel carbon load model
* @param sqlContext spark sql context
* @param dictFolderPath generated global dict file path
*/
def generatePredefinedColDictionary(colDictFilePath: String,
table: CarbonTableIdentifier,
dimensions: Array[CarbonDimension],
carbonLoadModel: CarbonLoadModel,
sqlContext: SQLContext,
dictFolderPath: String): Unit = {
// set pre defined dictionary column
setPredefinedColumnDictPath(carbonLoadModel, colDictFilePath, table, dimensions)
val dictLoadModel = createDictionaryLoadModel(carbonLoadModel, table, dimensions,
dictFolderPath, forPreDefDict = true)
// new RDD to achieve distributed column dict generation
val extInputRDD = new CarbonColumnDictGenerateRDD(carbonLoadModel, dictLoadModel,
sqlContext.sparkSession, table, dimensions, dictFolderPath)
.partitionBy(new ColumnPartitioner(dictLoadModel.primDimensions.length))
val statusList = new CarbonGlobalDictionaryGenerateRDD(sqlContext.sparkSession, extInputRDD,
dictLoadModel)
.collect()
// check result status
checkStatus(carbonLoadModel, sqlContext, dictLoadModel, statusList)
}
/**
* generate Dimension Parsers
*
* @param model dictionary load model
* @param distinctValuesList buffer collecting (column index, distinct value set) pairs
* @return dimensionParsers
*/
def createDimensionParsers(model: DictionaryLoadModel,
distinctValuesList: ArrayBuffer[(Int, HashSet[String])]): Array[GenericParser] = {
// local combine set
val dimNum = model.dimensions.length
val primDimNum = model.primDimensions.length
val columnValues = new Array[HashSet[String]](primDimNum)
val mapColumnValuesWithId = new HashMap[String, HashSet[String]]
for (i <- 0 until primDimNum) {
columnValues(i) = new HashSet[String]
distinctValuesList += ((i, columnValues(i)))
mapColumnValuesWithId.put(model.primDimensions(i).getColumnId, columnValues(i))
}
val dimensionParsers = new Array[GenericParser](dimNum)
for (j <- 0 until dimNum) {
dimensionParsers(j) = GlobalDictionaryUtil.generateParserForDimension(
Some(model.dimensions(j)),
GlobalDictionaryUtil.createDataFormat(model.delimiters),
mapColumnValuesWithId).get
}
dimensionParsers
}
/**
* parse records in dictionary file and validate record
*
* @param x raw record line read from a dictionary file
* @param accum accumulator counting bad records
* @param csvFileColumns column names from the csv file header
*/
private def parseRecord(x: String, accum: Accumulator[Int],
csvFileColumns: Array[String]): (String, String) = {
val tokens = x.split("" + DEFAULT_SEPARATOR)
var columnName: String = ""
var value: String = ""
    // malformed records such as "," or "" are treated as bad records
if (tokens.isEmpty) {
LOGGER.error("Read a bad dictionary record: " + x)
accum += 1
} else if (tokens.size == 1) {
      // single-token records without a separator, such as "1" or "john", are also bad
if (!x.contains(",")) {
accum += 1
} else {
try {
columnName = csvFileColumns(tokens(0).toInt)
} catch {
case _: Exception =>
LOGGER.error("Read a bad dictionary record: " + x)
accum += 1
}
}
} else {
try {
columnName = csvFileColumns(tokens(0).toInt)
value = tokens(1)
} catch {
case _: Exception =>
LOGGER.error("Read a bad dictionary record: " + x)
accum += 1
}
}
(columnName, value)
}
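  // Illustrative sketch, assuming DEFAULT_SEPARATOR is ',' and
  // csvFileColumns = Array("id", "name") (hypothetical header):
  //   parseRecord("1,john", accum, csvFileColumns) // returns ("name", "john")
  //   parseRecord("john", accum, csvFileColumns)   // no separator: counted as a bad record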
/**
* read all dictionary files and prune to the required columns
*
* @param sqlContext spark sql context
* @param csvFileColumns column names from the csv file header
* @param requireColumns columns that need a global dictionary
* @param allDictionaryPath path of the user-supplied dictionary files
* @param accumulator accumulator counting bad dictionary records
* @return allDictionaryRdd
*/
private def readAllDictionaryFiles(sqlContext: SQLContext,
csvFileColumns: Array[String],
requireColumns: Array[String],
allDictionaryPath: String,
accumulator: Accumulator[Int]) = {
var allDictionaryRdd: RDD[(String, Iterable[String])] = null
try {
      // read local dictionary file, and split into (columnIndex, columnValue)
val basicRdd = sqlContext.sparkContext.textFile(allDictionaryPath)
.map(x => parseRecord(x, accumulator, csvFileColumns))
// group by column index, and filter required columns
val requireColumnsList = requireColumns.toList
allDictionaryRdd = basicRdd
.groupByKey()
.filter(x => requireColumnsList.contains(x._1))
} catch {
case ex: Exception =>
LOGGER.error("Read dictionary files failed. Caused by: " + ex.getMessage)
throw ex
}
allDictionaryRdd
}
/**
* validate local dictionary files
*
* @param allDictionaryPath path (or wildcard pattern) of the dictionary files
* @return true if at least one non-empty dictionary file exists
*/
private def validateAllDictionaryPath(allDictionaryPath: String): Boolean = {
val fileType = FileFactory.getFileType(allDictionaryPath)
val filePath = new Path(allDictionaryPath)
val file = FileFactory.getCarbonFile(filePath.toString, fileType)
val parentFile = FileFactory.getCarbonFile(filePath.getParent.toString, fileType)
    // a wildcard file path looks like "/path/*.dictionary"
if (filePath.getName.startsWith("*")) {
val dictExt = filePath.getName.substring(1)
if (parentFile.exists()) {
val listFiles = parentFile.listFiles()
if (listFiles.exists(file =>
file.getName.endsWith(dictExt) && file.getSize > 0)) {
true
} else {
LOGGER.warn("No dictionary files found or empty dictionary files! " +
"Won't generate new dictionary.")
false
}
} else {
throw new DataLoadingException(
s"The given dictionary file path is not found : $allDictionaryPath")
}
} else {
if (file.exists()) {
if (file.getSize > 0) {
true
} else {
LOGGER.warn("No dictionary files found or empty dictionary files! " +
"Won't generate new dictionary.")
false
}
} else {
throw new DataLoadingException(
s"The given dictionary file path is not found : $allDictionaryPath")
}
}
}
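  // Illustrative sketch (hypothetical paths): "/dicts/*.dictionary" takes the wildcard
  // branch above and scans the parent directory for non-empty files ending in
  // ".dictionary"; a concrete path such as "/dicts/all.dictionary" only checks that the
  // file exists and is non-empty. A missing path raises DataLoadingException either way.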
/**
* generate global dictionary with SQLContext and CarbonLoadModel
*
* @param sqlContext sql context
* @param carbonLoadModel carbon load model
*/
def generateGlobalDictionary(
sqlContext: SQLContext,
carbonLoadModel: CarbonLoadModel,
hadoopConf: Configuration,
dataFrame: Option[DataFrame] = None): Unit = {
try {
val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
val carbonTableIdentifier = carbonTable.getAbsoluteTableIdentifier.getCarbonTableIdentifier
val dictfolderPath = CarbonTablePath.getMetadataPath(carbonLoadModel.getTablePath)
// columns which need to generate global dictionary file
val dimensions = carbonTable.getDimensionByTableName(
carbonTable.getTableName).asScala.toArray
// generate global dict from pre defined column dict file
carbonLoadModel.initPredefDictMap()
val allDictionaryPath = carbonLoadModel.getAllDictPath
if (StringUtils.isEmpty(allDictionaryPath)) {
LOGGER.info("Generate global dictionary from source data files!")
// load data by using dataSource com.databricks.spark.csv
val headers = carbonLoadModel.getCsvHeaderColumns.map(_.trim)
val colDictFilePath = carbonLoadModel.getColDictFilePath
if (colDictFilePath != null) {
// generate predefined dictionary
generatePredefinedColDictionary(colDictFilePath, carbonTableIdentifier,
dimensions, carbonLoadModel, sqlContext, dictfolderPath)
}
val headerOfInputData: Array[String] = if (dataFrame.isDefined) {
dataFrame.get.columns
} else {
headers
}
if (headers.length > headerOfInputData.length && !carbonTable.isHivePartitionTable) {
          val msg = "The number of columns in the file header does not match the " +
            "number of columns in the data file; either the delimiter " +
            "or the fileheader provided is not correct"
LOGGER.error(msg)
throw new DataLoadingException(msg)
}
// use fact file to generate global dict
val (requireDimension, requireColumnNames) = pruneDimensions(dimensions,
headers, headerOfInputData)
if (requireDimension.nonEmpty) {
// select column to push down pruning
val dictRdd = loadInputDataAsDictRdd(sqlContext, carbonLoadModel, dataFrame,
requireColumnNames, hadoopConf)
val model = createDictionaryLoadModel(carbonLoadModel, carbonTableIdentifier,
requireDimension, dictfolderPath, false)
// combine distinct value in a block and partition by column
val inputRDD = new CarbonBlockDistinctValuesCombineRDD(sqlContext.sparkSession, dictRdd,
model)
.partitionBy(new ColumnPartitioner(model.primDimensions.length))
// generate global dictionary files
val statusList = new CarbonGlobalDictionaryGenerateRDD(sqlContext.sparkSession,
inputRDD, model)
.collect()
// check result status
checkStatus(carbonLoadModel, sqlContext, model, statusList)
} else {
LOGGER.info("No column found for generating global dictionary in source data files")
}
} else {
generateDictionaryFromDictionaryFiles(
sqlContext,
carbonLoadModel,
carbonTableIdentifier,
dictfolderPath,
dimensions,
allDictionaryPath)
}
} catch {
case ex: Exception =>
if (ex.getCause != null && ex.getCause.isInstanceOf[NoRetryException]) {
LOGGER.error(ex.getCause, "generate global dictionary failed")
throw new Exception("generate global dictionary failed, " +
ex.getCause.getMessage)
}
ex match {
case spx: SparkException =>
LOGGER.error(spx, "generate global dictionary failed")
throw new Exception("generate global dictionary failed, " +
trimErrorMessage(spx.getMessage))
case _ =>
LOGGER.error(ex, "generate global dictionary failed")
throw ex
}
}
}
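  // Note on control flow: generateGlobalDictionary takes one of two paths -- when
  // carbonLoadModel.getAllDictPath is empty it scans the fact data (optionally seeded
  // with predefined column dictionaries) to collect distinct values; otherwise it
  // delegates to generateDictionaryFromDictionaryFiles to read user-supplied
  // dictionary files directly.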
def generateDictionaryFromDictionaryFiles(
sqlContext: SQLContext,
carbonLoadModel: CarbonLoadModel,
carbonTableIdentifier: CarbonTableIdentifier,
dictFolderPath: String,
dimensions: Array[CarbonDimension],
allDictionaryPath: String): Unit = {
LOGGER.info("Generate global dictionary from dictionary files!")
val allDictionaryPathAppended = CarbonUtil.checkAndAppendHDFSUrl(allDictionaryPath)
val isNonempty = validateAllDictionaryPath(allDictionaryPathAppended)
if (isNonempty) {
var headers = carbonLoadModel.getCsvHeaderColumns
headers = headers.map(headerName => headerName.trim)
// prune columns according to the CSV file header, dimension columns
val (requireDimension, requireColumnNames) = pruneDimensions(dimensions, headers, headers)
if (requireDimension.nonEmpty) {
val model = createDictionaryLoadModel(carbonLoadModel, carbonTableIdentifier,
requireDimension, dictFolderPath, false)
// check if dictionary files contains bad record
val accumulator = sqlContext.sparkContext.accumulator(0)
// read local dictionary file, and group by key
val allDictionaryRdd = readAllDictionaryFiles(sqlContext, headers,
requireColumnNames, allDictionaryPathAppended, accumulator)
        // read existing dictionaries and combine
val inputRDD = new CarbonAllDictionaryCombineRDD(sqlContext.sparkSession,
allDictionaryRdd, model)
.partitionBy(new ColumnPartitioner(model.primDimensions.length))
// generate global dictionary files
val statusList = new CarbonGlobalDictionaryGenerateRDD(sqlContext.sparkSession, inputRDD,
model)
.collect()
// check result status
checkStatus(carbonLoadModel, sqlContext, model, statusList)
        // if the dictionary contains wrongly formatted records, throw an exception
if (accumulator.value > 0) {
throw new DataLoadingException("Data Loading failure, dictionary values are " +
"not in correct format!")
}
} else {
        LOGGER.info("No columns need to generate a global dictionary")
}
}
}
// Get proper error message of TextParsingException
def trimErrorMessage(input: String): String = {
var errorMessage: String = null
if (input != null && input.contains("TextParsingException:")) {
if (input.split("Hint").length > 1 &&
input.split("Hint")(0).split("TextParsingException: ").length > 1) {
errorMessage = input.split("Hint")(0).split("TextParsingException: ")(1)
} else if (input.split("Parser Configuration:").length > 1) {
errorMessage = input.split("Parser Configuration:")(0)
}
} else if (input != null && input.contains("Exception:")) {
errorMessage = input.split("Exception: ")(1).split("\n")(0)
}
errorMessage
}
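  // Illustrative sketch (hypothetical message): for an input like
  //   "... TextParsingException: Length of parsed input exceeds limit ... Hint: enable ..."
  // trimErrorMessage returns the text between "TextParsingException: " and "Hint",
  // i.e. "Length of parsed input exceeds limit ... ".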
/**
* This method will write dictionary file, sortindex file and dictionary meta for new dictionary
* column with default value
*
* @param columnSchema schema of the newly added dictionary column
* @param absoluteTableIdentifier absolute table identifier
* @param defaultValue default value configured for the new column
*/
def loadDefaultDictionaryValueForNewColumn(
columnSchema: ColumnSchema,
absoluteTableIdentifier: AbsoluteTableIdentifier,
defaultValue: String): Unit = {
val dictLock = CarbonLockFactory
.getCarbonLockObj(absoluteTableIdentifier,
columnSchema.getColumnUniqueId + LockUsage.LOCK)
var isDictionaryLocked = false
try {
isDictionaryLocked = dictLock.lockWithRetries()
if (isDictionaryLocked) {
LOGGER.info(s"Successfully able to get the dictionary lock for ${
columnSchema.getColumnName
}")
} else {
      sys.error(s"Dictionary file ${
        columnSchema.getColumnName
      } is locked for update. Please try again after some time")
}
val columnIdentifier = new ColumnIdentifier(columnSchema.getColumnUniqueId,
null,
columnSchema.getDataType)
val dictionaryColumnUniqueIdentifier: DictionaryColumnUniqueIdentifier = new
DictionaryColumnUniqueIdentifier(
absoluteTableIdentifier,
columnIdentifier,
columnIdentifier.getDataType)
val parsedValue = DataTypeUtil.normalizeColumnValueForItsDataType(defaultValue, columnSchema)
val valuesBuffer = new mutable.HashSet[String]
if (null != parsedValue) {
valuesBuffer += parsedValue
}
val dictWriteTask = new DictionaryWriterTask(valuesBuffer,
dictionary = null,
dictionaryColumnUniqueIdentifier,
columnSchema,
false
)
val distinctValues = dictWriteTask.execute
LOGGER.info(s"Dictionary file writing is successful for new column ${
columnSchema.getColumnName
}")
if (distinctValues.size() > 0) {
val sortIndexWriteTask = new SortIndexWriterTask(
dictionaryColumnUniqueIdentifier,
columnSchema.getDataType,
dictionary = null,
distinctValues)
sortIndexWriteTask.execute()
}
LOGGER.info(s"SortIndex file writing is successful for new column ${
columnSchema.getColumnName
}")
// After sortIndex writing, update dictionaryMeta
dictWriteTask.updateMetaData()
LOGGER.info(s"Dictionary meta file writing is successful for new column ${
columnSchema.getColumnName
}")
} catch {
case ex: Exception =>
LOGGER.error(ex)
throw ex
} finally {
if (dictLock != null && isDictionaryLocked) {
if (dictLock.unlock()) {
LOGGER.info(s"Dictionary ${
columnSchema.getColumnName
} Unlocked Successfully.")
} else {
LOGGER.error(s"Unable to unlock Dictionary ${
columnSchema.getColumnName
}")
}
}
}
}
}
|
sgururajshetty/carbondata
|
integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
|
Scala
|
apache-2.0
| 33,886 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.contrib.matryoshka
import slamdata.Predef._
import scalaz._
import iotaz.{TListK, CopK, TNilK}
import iotaz.TListK.:::
/** Calculates the width of a typelevel union (coproduct). */
sealed abstract class UnionWidth[F[_]] {
val width: Int
}
object UnionWidth extends UWidthInstances
sealed abstract class UWidthInstances extends UWidthInstances0 {
implicit def copkUWidthInduct[F[_], LL <: TListK](implicit U: UnionWidth[CopK[LL, ?]]): UnionWidth[CopK[F ::: LL, ?]] =
new UnionWidth[CopK[F ::: LL, ?]] { val width = U.width + 1 }
implicit def coproductUWidth[F[_], G[_]](
implicit
F: UnionWidth[F],
G: UnionWidth[G]
): UnionWidth[Coproduct[F, G, ?]] =
new UnionWidth[Coproduct[F, G, ?]] {
val width = F.width + G.width
}
}
sealed abstract class UWidthInstances0 {
implicit def defaultUWidth[F[_]]: UnionWidth[F] =
new UnionWidth[F] { val width = 1 }
implicit def copkUWidthBase[F[_]]: UnionWidth[CopK[F ::: TNilK, ?]] =
new UnionWidth[CopK[F ::: TNilK, ?]] { val width = 1 }
}
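// Illustrative resolution sketch (types chosen arbitrarily): UnionWidth[Coproduct[Option, List, ?]]
// resolves via coproductUWidth to width 1 + 1 = 2, and UnionWidth[CopK[Option ::: List ::: TNilK, ?]]
// resolves via copkUWidthInduct over copkUWidthBase, also giving width 2.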
|
slamdata/quasar
|
foundation/src/test/scala/quasar/contrib/matryoshka/UnionWidth.scala
|
Scala
|
apache-2.0
| 1,654 |
//
// Copyright (c) 2015 IronCore Labs
//
package com.ironcorelabs.davenport
package syntax
import db.{ Key, DBProg, DBDocument }
import argonaut.{ DecodeJson, EncodeJson, CodecJson }
// The convention is for syntax objects to start with lower case, so they look
// like package names. Scalastyle doesn't care for this, so ignore the line.
final object key extends KeyOps // scalastyle:ignore
trait KeyOps {
implicit class OurKeyOps(key: Key) {
def dbGet[T](implicit codec: DecodeJson[T]): DBProg[DBDocument[T]] = DBDocument.get(key)(codec)
def dbRemove: DBProg[Unit] = DBDocument.remove(key)
def dbCreate[T](t: T)(implicit codec: EncodeJson[T]): DBProg[DBDocument[T]] = DBDocument.create(key, t)
def dbIncrementCounter(delta: Long): DBProg[Long] = db.incrementCounter(key, delta)
def dbGetCounter: DBProg[Long] = db.getCounter(key)
def dbModify[T](f: T => T)(implicit codec: CodecJson[T]): DBProg[DBDocument[T]] = DBDocument.modify(key, f)
}
}
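// Illustrative usage sketch (User is a hypothetical type): with
// `import com.ironcorelabs.davenport.syntax.key._` and some `k: Key` in scope,
//   k.dbGet[User]            // DBProg[DBDocument[User]], given an implicit DecodeJson[User]
//   k.dbIncrementCounter(1L) // DBProg[Long]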
|
BobWall23/davenport
|
src/main/scala/com/ironcorelabs/davenport/syntax/key.scala
|
Scala
|
mit
| 976 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.util.concurrent.CountDownLatch
import scala.concurrent.Future
import scala.util.Random
import scala.util.control.NonFatal
import org.scalatest.BeforeAndAfter
import org.scalatest.concurrent.Eventually._
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.time.Span
import org.scalatest.time.SpanSugar._
import org.apache.spark.SparkException
import org.apache.spark.sql.{AnalysisException, Dataset}
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.streaming.util.BlockingSource
import org.apache.spark.util.Utils
class StreamingQueryManagerSuite extends StreamTest with BeforeAndAfter {
import AwaitTerminationTester._
import testImplicits._
override val streamingTimeout = 20.seconds
before {
assert(spark.streams.active.isEmpty)
spark.streams.resetTerminated()
}
after {
assert(spark.streams.active.isEmpty)
spark.streams.resetTerminated()
}
testQuietly("listing") {
val (m1, ds1) = makeDataset
val (m2, ds2) = makeDataset
val (m3, ds3) = makeDataset
withQueriesOn(ds1, ds2, ds3) { queries =>
require(queries.size === 3)
assert(spark.streams.active.toSet === queries.toSet)
val (q1, q2, q3) = (queries(0), queries(1), queries(2))
assert(spark.streams.get(q1.id).eq(q1))
assert(spark.streams.get(q2.id).eq(q2))
assert(spark.streams.get(q3.id).eq(q3))
assert(spark.streams.get(java.util.UUID.randomUUID()) === null) // non-existent id
q1.stop()
assert(spark.streams.active.toSet === Set(q2, q3))
assert(spark.streams.get(q1.id) === null)
assert(spark.streams.get(q2.id).eq(q2))
m2.addData(0) // q2 should terminate with error
eventually(Timeout(streamingTimeout)) {
require(!q2.isActive)
require(q2.exception.isDefined)
}
assert(spark.streams.get(q2.id) === null)
assert(spark.streams.active.toSet === Set(q3))
}
}
testQuietly("awaitAnyTermination without timeout and resetTerminated") {
val datasets = Seq.fill(5)(makeDataset._2)
withQueriesOn(datasets: _*) { queries =>
require(queries.size === datasets.size)
assert(spark.streams.active.toSet === queries.toSet)
// awaitAnyTermination should be blocking
testAwaitAnyTermination(ExpectBlocked)
// Stop a query asynchronously and see if it is reported through awaitAnyTermination
val q1 = stopRandomQueryAsync(stopAfter = 100 milliseconds, withError = false)
testAwaitAnyTermination(ExpectNotBlocked)
require(!q1.isActive) // should be inactive by the time the prev awaitAnyTerm returned
// All subsequent calls to awaitAnyTermination should be non-blocking
testAwaitAnyTermination(ExpectNotBlocked)
// Resetting termination should make awaitAnyTermination() blocking again
spark.streams.resetTerminated()
testAwaitAnyTermination(ExpectBlocked)
// Terminate a query asynchronously with exception and see awaitAnyTermination throws
// the exception
val q2 = stopRandomQueryAsync(100 milliseconds, withError = true)
testAwaitAnyTermination(ExpectException[SparkException])
require(!q2.isActive) // should be inactive by the time the prev awaitAnyTerm returned
// All subsequent calls to awaitAnyTermination should throw the exception
testAwaitAnyTermination(ExpectException[SparkException])
// Resetting termination should make awaitAnyTermination() blocking again
spark.streams.resetTerminated()
testAwaitAnyTermination(ExpectBlocked)
// Terminate multiple queries, one with failure and see whether awaitAnyTermination throws
// the exception
val q3 = stopRandomQueryAsync(10 milliseconds, withError = false)
testAwaitAnyTermination(ExpectNotBlocked)
require(!q3.isActive)
val q4 = stopRandomQueryAsync(10 milliseconds, withError = true)
eventually(Timeout(streamingTimeout)) { require(!q4.isActive) }
// After q4 terminates with exception, awaitAnyTerm should start throwing exception
testAwaitAnyTermination(ExpectException[SparkException])
}
}
testQuietly("awaitAnyTermination with timeout and resetTerminated") {
val datasets = Seq.fill(6)(makeDataset._2)
withQueriesOn(datasets: _*) { queries =>
require(queries.size === datasets.size)
assert(spark.streams.active.toSet === queries.toSet)
// awaitAnyTermination should be blocking or non-blocking depending on timeout values
testAwaitAnyTermination(
ExpectBlocked,
awaitTimeout = 4 seconds,
expectedReturnedValue = false,
testBehaviorFor = 2 seconds)
testAwaitAnyTermination(
ExpectNotBlocked,
awaitTimeout = 50 milliseconds,
expectedReturnedValue = false,
testBehaviorFor = 1 second)
// Stop a query asynchronously within timeout and awaitAnyTerm should be unblocked
val q1 = stopRandomQueryAsync(stopAfter = 100 milliseconds, withError = false)
testAwaitAnyTermination(
ExpectNotBlocked,
awaitTimeout = 2 seconds,
expectedReturnedValue = true,
testBehaviorFor = 4 seconds)
require(!q1.isActive) // should be inactive by the time the prev awaitAnyTerm returned
// All subsequent calls to awaitAnyTermination should be non-blocking even if timeout is high
testAwaitAnyTermination(
ExpectNotBlocked, awaitTimeout = 4 seconds, expectedReturnedValue = true)
// Resetting termination should make awaitAnyTermination() blocking again
spark.streams.resetTerminated()
testAwaitAnyTermination(
ExpectBlocked,
awaitTimeout = 4 seconds,
expectedReturnedValue = false,
testBehaviorFor = 1 second)
// Terminate a query asynchronously with exception within timeout, awaitAnyTermination should
      // throw the exception
val q2 = stopRandomQueryAsync(100 milliseconds, withError = true)
testAwaitAnyTermination(
ExpectException[SparkException],
awaitTimeout = 4 seconds,
testBehaviorFor = 6 seconds)
require(!q2.isActive) // should be inactive by the time the prev awaitAnyTerm returned
// All subsequent calls to awaitAnyTermination should throw the exception
testAwaitAnyTermination(
ExpectException[SparkException],
awaitTimeout = 2 seconds,
testBehaviorFor = 4 seconds)
// Terminate a query asynchronously outside the timeout, awaitAnyTerm should be blocked
spark.streams.resetTerminated()
val q3 = stopRandomQueryAsync(2 seconds, withError = true)
testAwaitAnyTermination(
ExpectNotBlocked,
awaitTimeout = 100 milliseconds,
expectedReturnedValue = false,
testBehaviorFor = 4 seconds)
// After that query is stopped, awaitAnyTerm should throw exception
eventually(Timeout(streamingTimeout)) { require(!q3.isActive) } // wait for query to stop
testAwaitAnyTermination(
ExpectException[SparkException],
awaitTimeout = 100 milliseconds,
testBehaviorFor = 4 seconds)
// Terminate multiple queries, one with failure and see whether awaitAnyTermination throws
// the exception
spark.streams.resetTerminated()
val q4 = stopRandomQueryAsync(10 milliseconds, withError = false)
testAwaitAnyTermination(
ExpectNotBlocked, awaitTimeout = 2 seconds, expectedReturnedValue = true)
require(!q4.isActive)
val q5 = stopRandomQueryAsync(10 milliseconds, withError = true)
eventually(Timeout(streamingTimeout)) { require(!q5.isActive) }
// After q5 terminates with exception, awaitAnyTerm should start throwing exception
testAwaitAnyTermination(ExpectException[SparkException], awaitTimeout = 2 seconds)
}
}
test("SPARK-18811: Source resolution should not block main thread") {
failAfter(streamingTimeout) {
BlockingSource.latch = new CountDownLatch(1)
withTempDir { tempDir =>
// if source resolution was happening on the main thread, it would block the start call,
// now it should only be blocking the stream execution thread
val sq = spark.readStream
.format("org.apache.spark.sql.streaming.util.BlockingSource")
.load()
.writeStream
.format("org.apache.spark.sql.streaming.util.BlockingSource")
.option("checkpointLocation", tempDir.toString)
.start()
eventually(Timeout(streamingTimeout)) {
assert(sq.status.message.contains("Initializing sources"))
}
BlockingSource.latch.countDown()
sq.stop()
}
}
}
test("SPARK-19268: Adaptive query execution should be disallowed") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val e = intercept[AnalysisException] {
MemoryStream[Int].toDS.writeStream.queryName("test-query").format("memory").start()
}
assert(e.getMessage.contains(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key) &&
e.getMessage.contains("not supported"))
}
}
/** Run a body of code by defining a query on each dataset */
private def withQueriesOn(datasets: Dataset[_]*)(body: Seq[StreamingQuery] => Unit): Unit = {
failAfter(streamingTimeout) {
val queries = withClue("Error starting queries") {
datasets.zipWithIndex.map { case (ds, i) =>
var query: StreamingQuery = null
try {
val df = ds.toDF
val metadataRoot =
Utils.createTempDir(namePrefix = "streaming.checkpoint").getCanonicalPath
query =
df.writeStream
.format("memory")
.queryName(s"query$i")
.option("checkpointLocation", metadataRoot)
.outputMode("append")
.start()
} catch {
case NonFatal(e) =>
if (query != null) query.stop()
throw e
}
query
}
}
try {
body(queries)
} finally {
queries.foreach(_.stop())
}
}
}
/** Test the behavior of awaitAnyTermination */
private def testAwaitAnyTermination(
expectedBehavior: ExpectedBehavior,
expectedReturnedValue: Boolean = false,
awaitTimeout: Span = null,
testBehaviorFor: Span = 4 seconds
): Unit = {
def awaitTermFunc(): Unit = {
if (awaitTimeout != null && awaitTimeout.toMillis > 0) {
val returnedValue = spark.streams.awaitAnyTermination(awaitTimeout.toMillis)
assert(returnedValue === expectedReturnedValue, "Returned value does not match expected")
} else {
spark.streams.awaitAnyTermination()
}
}
AwaitTerminationTester.test(expectedBehavior, awaitTermFunc, testBehaviorFor)
}
/** Stop a random active query either with `stop()` or with an error */
private def stopRandomQueryAsync(stopAfter: Span, withError: Boolean): StreamingQuery = {
import scala.concurrent.ExecutionContext.Implicits.global
val activeQueries = spark.streams.active
val queryToStop = activeQueries(Random.nextInt(activeQueries.length))
Future {
Thread.sleep(stopAfter.toMillis)
if (withError) {
logDebug(s"Terminating query ${queryToStop.name} with error")
queryToStop.asInstanceOf[StreamingQueryWrapper].streamingQuery.logicalPlan.collect {
case StreamingExecutionRelation(source, _) =>
source.asInstanceOf[MemoryStream[Int]].addData(0)
}
} else {
logDebug(s"Stopping query ${queryToStop.name}")
queryToStop.stop()
}
}
queryToStop
}
private def makeDataset: (MemoryStream[Int], Dataset[Int]) = {
val inputData = MemoryStream[Int]
val mapped = inputData.toDS.map(6 / _)
(inputData, mapped)
}
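  // Note: the `6 / _` mapping above is what lets these tests fail a query on demand --
  // adding 0 to a MemoryStream triggers a division by zero inside the stream, so the
  // corresponding query terminates with an error (see stopRandomQueryAsync and the
  // m2.addData(0) call in the "listing" test).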
}
|
spark0001/spark2.1.1
|
sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryManagerSuite.scala
|
Scala
|
apache-2.0
| 12,781 |