code
stringlengths 5
1M
| repo_name
stringlengths 5
109
| path
stringlengths 6
208
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 5
1M
|
---|---|---|---|---|---|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.integration
import java.nio.ByteBuffer
import kafka.admin.AdminUtils
import kafka.api.{TopicMetadataRequest, TopicMetadataResponse}
import kafka.client.ClientUtils
import kafka.cluster.{Broker, BrokerEndPoint}
import kafka.common.ErrorMapping
import kafka.server.{KafkaConfig, KafkaServer, NotRunning}
import kafka.utils.TestUtils
import kafka.utils.TestUtils._
import kafka.zk.ZooKeeperTestHarness
import org.apache.kafka.common.protocol.SecurityProtocol
import org.junit.Assert._
import org.junit.{Test, After, Before}
class TopicMetadataTest extends ZooKeeperTestHarness {
private var server1: KafkaServer = null
var brokerEndPoints: Seq[BrokerEndPoint] = null
var adHocConfigs: Seq[KafkaConfig] = null
val numConfigs: Int = 4
@Before
override def setUp() {
super.setUp()
val props = createBrokerConfigs(numConfigs, zkConnect)
val configs: Seq[KafkaConfig] = props.map(KafkaConfig.fromProps)
adHocConfigs = configs.takeRight(configs.size - 1) // Started and stopped by individual test cases
server1 = TestUtils.createServer(configs.head)
brokerEndPoints = Seq(new Broker(server1.config.brokerId, server1.config.hostName, server1.boundPort()).getBrokerEndPoint(SecurityProtocol.PLAINTEXT))
}
@After
override def tearDown() {
server1.shutdown()
super.tearDown()
}
@Test
def testTopicMetadataRequest {
// create topic
val topic = "test"
AdminUtils.createTopic(zkClient, topic, 1, 1)
// create a topic metadata request
val topicMetadataRequest = new TopicMetadataRequest(List(topic), 0)
val serializedMetadataRequest = ByteBuffer.allocate(topicMetadataRequest.sizeInBytes + 2)
topicMetadataRequest.writeTo(serializedMetadataRequest)
serializedMetadataRequest.rewind()
val deserializedMetadataRequest = TopicMetadataRequest.readFrom(serializedMetadataRequest)
assertEquals(topicMetadataRequest, deserializedMetadataRequest)
}
@Test
def testBasicTopicMetadata {
// create topic
val topic = "test"
createTopic(zkClient, topic, numPartitions = 1, replicationFactor = 1, servers = Seq(server1))
var topicsMetadata = ClientUtils.fetchTopicMetadata(Set(topic), brokerEndPoints, "TopicMetadataTest-testBasicTopicMetadata",
2000,0).topicsMetadata
assertEquals(ErrorMapping.NoError, topicsMetadata.head.errorCode)
assertEquals(ErrorMapping.NoError, topicsMetadata.head.partitionsMetadata.head.errorCode)
assertEquals("Expecting metadata only for 1 topic", 1, topicsMetadata.size)
assertEquals("Expecting metadata for the test topic", "test", topicsMetadata.head.topic)
var partitionMetadata = topicsMetadata.head.partitionsMetadata
assertEquals("Expecting metadata for 1 partition", 1, partitionMetadata.size)
assertEquals("Expecting partition id to be 0", 0, partitionMetadata.head.partitionId)
assertEquals(1, partitionMetadata.head.replicas.size)
}
@Test
def testGetAllTopicMetadata {
// create topic
val topic1 = "testGetAllTopicMetadata1"
val topic2 = "testGetAllTopicMetadata2"
createTopic(zkClient, topic1, numPartitions = 1, replicationFactor = 1, servers = Seq(server1))
createTopic(zkClient, topic2, numPartitions = 1, replicationFactor = 1, servers = Seq(server1))
// issue metadata request with empty list of topics
var topicsMetadata = ClientUtils.fetchTopicMetadata(Set.empty, brokerEndPoints, "TopicMetadataTest-testGetAllTopicMetadata",
2000, 0).topicsMetadata
assertEquals(ErrorMapping.NoError, topicsMetadata.head.errorCode)
assertEquals(2, topicsMetadata.size)
assertEquals(ErrorMapping.NoError, topicsMetadata.head.partitionsMetadata.head.errorCode)
assertEquals(ErrorMapping.NoError, topicsMetadata.last.partitionsMetadata.head.errorCode)
val partitionMetadataTopic1 = topicsMetadata.head.partitionsMetadata
val partitionMetadataTopic2 = topicsMetadata.last.partitionsMetadata
assertEquals("Expecting metadata for 1 partition", 1, partitionMetadataTopic1.size)
assertEquals("Expecting partition id to be 0", 0, partitionMetadataTopic1.head.partitionId)
assertEquals(1, partitionMetadataTopic1.head.replicas.size)
assertEquals("Expecting metadata for 1 partition", 1, partitionMetadataTopic2.size)
assertEquals("Expecting partition id to be 0", 0, partitionMetadataTopic2.head.partitionId)
assertEquals(1, partitionMetadataTopic2.head.replicas.size)
}
@Test
def testAutoCreateTopic {
// auto create topic
val topic = "testAutoCreateTopic"
var topicsMetadata = ClientUtils.fetchTopicMetadata(Set(topic), brokerEndPoints, "TopicMetadataTest-testAutoCreateTopic",
2000,0).topicsMetadata
assertEquals(ErrorMapping.LeaderNotAvailableCode, topicsMetadata.head.errorCode)
assertEquals("Expecting metadata only for 1 topic", 1, topicsMetadata.size)
assertEquals("Expecting metadata for the test topic", topic, topicsMetadata.head.topic)
assertEquals(0, topicsMetadata.head.partitionsMetadata.size)
// wait for leader to be elected
TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, 0)
TestUtils.waitUntilMetadataIsPropagated(Seq(server1), topic, 0)
// retry the metadata for the auto created topic
topicsMetadata = ClientUtils.fetchTopicMetadata(Set(topic), brokerEndPoints, "TopicMetadataTest-testBasicTopicMetadata",
2000,0).topicsMetadata
assertEquals(ErrorMapping.NoError, topicsMetadata.head.errorCode)
assertEquals(ErrorMapping.NoError, topicsMetadata.head.partitionsMetadata.head.errorCode)
var partitionMetadata = topicsMetadata.head.partitionsMetadata
assertEquals("Expecting metadata for 1 partition", 1, partitionMetadata.size)
assertEquals("Expecting partition id to be 0", 0, partitionMetadata.head.partitionId)
assertEquals(1, partitionMetadata.head.replicas.size)
assertTrue(partitionMetadata.head.leader.isDefined)
}
@Test
def testAutoCreateTopicWithCollision {
// auto create topic
val topic1 = "testAutoCreate_Topic"
val topic2 = "testAutoCreate.Topic"
var topicsMetadata = ClientUtils.fetchTopicMetadata(Set(topic1, topic2), brokerEndPoints, "TopicMetadataTest-testAutoCreateTopic",
2000,0).topicsMetadata
assertEquals("Expecting metadata for 2 topics", 2, topicsMetadata.size)
assertEquals("Expecting metadata for topic1", topic1, topicsMetadata.head.topic)
assertEquals(ErrorMapping.LeaderNotAvailableCode, topicsMetadata.head.errorCode)
assertEquals("Expecting metadata for topic2", topic2, topicsMetadata(1).topic)
assertEquals("Expecting InvalidTopicCode for topic2 metadata", ErrorMapping.InvalidTopicCode, topicsMetadata(1).errorCode)
// wait for leader to be elected
TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic1, 0)
TestUtils.waitUntilMetadataIsPropagated(Seq(server1), topic1, 0)
// retry the metadata for the first auto created topic
topicsMetadata = ClientUtils.fetchTopicMetadata(Set(topic1), brokerEndPoints, "TopicMetadataTest-testBasicTopicMetadata",
2000,0).topicsMetadata
assertEquals(ErrorMapping.NoError, topicsMetadata.head.errorCode)
assertEquals(ErrorMapping.NoError, topicsMetadata.head.partitionsMetadata.head.errorCode)
var partitionMetadata = topicsMetadata.head.partitionsMetadata
assertEquals("Expecting metadata for 1 partition", 1, partitionMetadata.size)
assertEquals("Expecting partition id to be 0", 0, partitionMetadata.head.partitionId)
assertEquals(1, partitionMetadata.head.replicas.size)
assertTrue(partitionMetadata.head.leader.isDefined)
}
private def checkIsr(servers: Seq[KafkaServer]): Unit = {
val activeBrokers: Seq[KafkaServer] = servers.filter(x => x.brokerState.currentState != NotRunning.state)
val expectedIsr: Seq[BrokerEndPoint] = activeBrokers.map(
x => new BrokerEndPoint(x.config.brokerId,
if (x.config.hostName.nonEmpty) x.config.hostName else "localhost",
x.boundPort())
)
// Assert that topic metadata at new brokers is updated correctly
activeBrokers.foreach(x => {
var metadata: TopicMetadataResponse = new TopicMetadataResponse(Seq(), Seq(), -1)
waitUntilTrue(() => {
metadata = ClientUtils.fetchTopicMetadata(
Set.empty,
Seq(new BrokerEndPoint(
x.config.brokerId,
if (x.config.hostName.nonEmpty) x.config.hostName else "localhost",
x.boundPort())),
"TopicMetadataTest-testBasicTopicMetadata",
2000, 0)
metadata.topicsMetadata.nonEmpty &&
metadata.topicsMetadata.head.partitionsMetadata.nonEmpty &&
expectedIsr == metadata.topicsMetadata.head.partitionsMetadata.head.isr
},
"Topic metadata is not correctly updated for broker " + x + ".\\n" +
"Expected ISR: " + expectedIsr + "\\n" +
"Actual ISR : " + (if (metadata.topicsMetadata.nonEmpty &&
metadata.topicsMetadata.head.partitionsMetadata.nonEmpty)
metadata.topicsMetadata.head.partitionsMetadata.head.isr
else
""), 6000L)
})
}
@Test
def testIsrAfterBrokerShutDownAndJoinsBack {
val numBrokers = 2 //just 2 brokers are enough for the test
// start adHoc brokers
val adHocServers = adHocConfigs.take(numBrokers - 1).map(p => createServer(p))
val allServers: Seq[KafkaServer] = Seq(server1) ++ adHocServers
// create topic
val topic: String = "test"
AdminUtils.createTopic(zkClient, topic, 1, numBrokers)
// shutdown a broker
adHocServers.last.shutdown()
adHocServers.last.awaitShutdown()
// startup a broker
adHocServers.last.startup()
// check metadata is still correct and updated at all brokers
checkIsr(allServers)
// shutdown adHoc brokers
adHocServers.map(p => p.shutdown())
}
private def checkMetadata(servers: Seq[KafkaServer], expectedBrokersCount: Int): Unit = {
var topicMetadata: TopicMetadataResponse = new TopicMetadataResponse(Seq(), Seq(), -1)
// Get topic metadata from old broker
// Wait for metadata to get updated by checking metadata from a new broker
waitUntilTrue(() => {
topicMetadata = ClientUtils.fetchTopicMetadata(
Set.empty, brokerEndPoints, "TopicMetadataTest-testBasicTopicMetadata", 2000, 0)
topicMetadata.brokers.size == expectedBrokersCount},
"Alive brokers list is not correctly propagated by coordinator to brokers"
)
// Assert that topic metadata at new brokers is updated correctly
servers.filter(x => x.brokerState.currentState != NotRunning.state).foreach(x =>
waitUntilTrue(() =>
topicMetadata == ClientUtils.fetchTopicMetadata(
Set.empty,
Seq(new Broker(x.config.brokerId,
x.config.hostName,
x.boundPort()).getBrokerEndPoint(SecurityProtocol.PLAINTEXT)),
"TopicMetadataTest-testBasicTopicMetadata",
2000, 0), "Topic metadata is not correctly updated"))
}
@Test
def testAliveBrokerListWithNoTopics {
checkMetadata(Seq(server1), 1)
}
@Test
def testAliveBrokersListWithNoTopicsAfterNewBrokerStartup {
var adHocServers = adHocConfigs.takeRight(adHocConfigs.size - 1).map(p => createServer(p))
checkMetadata(adHocServers, numConfigs - 1)
// Add a broker
adHocServers = adHocServers ++ Seq(createServer(adHocConfigs.head))
checkMetadata(adHocServers, numConfigs)
adHocServers.map(p => p.shutdown())
}
@Test
def testAliveBrokersListWithNoTopicsAfterABrokerShutdown {
val adHocServers = adHocConfigs.map(p => createServer(p))
checkMetadata(adHocServers, numConfigs)
// Shutdown a broker
adHocServers.last.shutdown()
adHocServers.last.awaitShutdown()
checkMetadata(adHocServers, numConfigs - 1)
adHocServers.map(p => p.shutdown())
}
}
|
usakey/kafka
|
core/src/test/scala/unit/kafka/integration/TopicMetadataTest.scala
|
Scala
|
apache-2.0
| 12,970 |
package scalapb.textformat
class TokenizerSpec extends munit.FunSuite {
def tokenize(text: String): Vector[String] = {
val b = Vector.newBuilder[String]
val t = new Tokenizer(text)
while (t.hasNext) b += t.next()
b.result()
}
test("Tokenizer is tokenizing") {
assertEquals(tokenize("hello world"), Vector("hello", "world"))
assertEquals(tokenize("hello world\n\tbar\n"), Vector("hello", "world", "bar"))
assertEquals(tokenize("hello world foo # foo"), Vector("hello", "world", "foo"))
assertEquals(tokenize("hello world foo\n# foo"), Vector("hello", "world", "foo"))
assertEquals(
tokenize("hello world { foo }\n# foo"),
Vector("hello", "world", "{", "foo", "}")
)
assertEquals(
tokenize("this is a \"quoted string\" here"),
Vector("this", "is", "a", "\"quoted string\"", "here")
)
assertEquals(
tokenize("this is a 'quoted string' here"),
Vector("this", "is", "a", "'quoted string'", "here")
)
assertEquals(
tokenize("this is a 'quoted string \\\nfoo'"),
Vector("this", "is", "a", "'quoted string \\", "foo", "'")
)
assertEquals(
tokenize("repeated_foreign_enum: [FOREIGN_FOO, FOREIGN_BAR]"),
Vector("repeated_foreign_enum", ":", "[", "FOREIGN_FOO", ",", "FOREIGN_BAR", "]")
)
}
}
|
scalapb/ScalaPB
|
scalapb-runtime/src/test/scala/scalapb/textformat/TokenizerSpec.scala
|
Scala
|
apache-2.0
| 1,327 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogx.runtime.allocation
import cogx.platform.opencl.OpenCLDevice
import cogx.platform.types.{FieldMemoryLayoutImpl, VirtualFieldRegister}
import scala.collection.mutable.ArrayBuffer
import AllocateFieldRegisters._
import cogx.platform.cpumemory.BufferType
/** Holds the single shared FieldRegister for a set of kernels
*
* @param device The device on which the OpenCL buffer will be allocated
* @param firstVirtualRegister The kernel that prompted creation of this latch
* @param seal After adding this kernel, prevent further kernel additions.
* @param bufferType The type of cpu memory (pinned versus pageable) to be allocated for the buffers.
*
* @author Dick Carter
*/
class SharedLatch(device: OpenCLDevice, firstVirtualRegister: VirtualFieldRegister, seal: Boolean, bufferType: BufferType) {
/** The virtual registers sharing this latch */
val virtualRegisters = ArrayBuffer[VirtualFieldRegister]()
/** Are more virtual registers prohibited from sharing this latch? */
def isSealed = _isSealed
/** The last virtual register that was assigned to use this latch */
def lastSharingVirtualRegister = virtualRegisters.last
/** The kernels that must complete before reallocation to another virtual register */
def lastConsumers = lastSharingVirtualRegister.sinks
/** Add a kernel to the list of kernels that share this latch */
def addVirtualRegister(virtualRegister: VirtualFieldRegister, seal: Boolean) {
virtualRegisters += virtualRegister
val newUseBytes = new FieldMemoryLayoutImpl(virtualRegister.fieldType).longBufferSizeBytes
_isSealed ||= seal
_maxGlobalMemoryUseBytes = math.max(_maxGlobalMemoryUseBytes,
new FieldMemoryLayoutImpl(virtualRegister.fieldType).longBufferSizeBytes)
_minGlobalMemoryUseBytes =
if (virtualRegisters.size == 1)
newUseBytes
else
math.min(_minGlobalMemoryUseBytes, newUseBytes)
}
// Are more virtual registers prohibited from sharing this latch?
private var _isSealed = false
// Of the VirtualFieldRegister uses of this shared latch, what is the biggest size?
private var _maxGlobalMemoryUseBytes: Long = 0L
// Of the VirtualFieldRegister uses of this shared latch, what is the smallest size?
private var _minGlobalMemoryUseBytes: Long = 0L
/** Of the VirtualFieldRegister uses of this shared latch, what is the biggest size? */
def maxGlobalMemoryUseBytes = _maxGlobalMemoryUseBytes
/** Of the VirtualFieldRegister uses of this shared latch, what is the smallest size? */
def minGlobalMemoryUseBytes = _minGlobalMemoryUseBytes
addVirtualRegister(firstVirtualRegister, seal)
/** The single FieldRegister shared by this set of kernels */
lazy val register = allocateFieldLatch(device, lastSharingVirtualRegister.fieldType, bufferType, _maxGlobalMemoryUseBytes)
override def toString = "latch with contents: " + virtualRegisters.mkString(", ")
}
|
hpe-cct/cct-core
|
src/main/scala/cogx/runtime/allocation/SharedLatch.scala
|
Scala
|
apache-2.0
| 3,543 |
/* *\\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2018, Gary Keorkunian **
** **
\\* */
package squants.radio
import squants._
/**
* @author Hunter Payne
*
* @param value Double
*/
final class Activity private (
val value: Double, val unit: ActivityUnit)
extends Quantity[Activity] {
def dimension = Activity
def /(that: AreaTime): ParticleFlux = BecquerelsPerSquareMeterSecond(
this.toBecquerels / that.toSquareMeterSeconds)
def toCuries = to(Curies)
def toBecquerels = to(Becquerels)
def toRutherfords = to(Rutherfords)
}
object Activity extends Dimension[Activity] {
private[radio] def apply[A](n: A, unit: ActivityUnit)(
implicit num: Numeric[A]) =
new Activity(num.toDouble(n), unit)
def apply(value: Any) = parse(value)
def name = "Activity"
def primaryUnit = Becquerels
def siUnit = Becquerels
def units = Set(Becquerels, Curies, Rutherfords)
}
trait ActivityUnit
extends UnitOfMeasure[Activity] with UnitConverter {
def apply[A](n: A)(implicit num: Numeric[A]) = Activity(n, this)
}
object Curies extends ActivityUnit {
val conversionFactor = 3.7 * Math.pow(10, 10)
val symbol = "Ci"
}
object Rutherfords extends ActivityUnit {
val conversionFactor = 1000000.0
val symbol = "Rd"
}
object Becquerels extends ActivityUnit with PrimaryUnit with SiUnit {
val symbol = "Bq"
}
object ActivityConversions {
lazy val curie = Curies(1)
lazy val rutherford = Rutherfords(1)
lazy val becquerel = Becquerels(1)
implicit class ActivityConversions[A](n: A)(
implicit num: Numeric[A]) {
def curies = Curies(n)
def rutherfords = Rutherfords(n)
def becquerels = Becquerels(n)
}
implicit object ActivityNumeric
extends AbstractQuantityNumeric[Activity](Activity.primaryUnit)
}
|
garyKeorkunian/squants
|
shared/src/main/scala/squants/radio/Activity.scala
|
Scala
|
apache-2.0
| 2,216 |
object PatternGuards2 {
def test(y: Int): Int = {
var x = y
def foo(): Boolean = x > 10
x = x + 1
x match {
case z if foo() => x
case _ => 11
}
} ensuring(_ > 10)
}
|
regb/leon
|
src/test/resources/regression/verification/xlang/valid/PatternGuards2.scala
|
Scala
|
gpl-3.0
| 209 |
/**
* Copyright (C) 2011 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.event
import events._
import org.orbeon.oxf.xforms.control._
import org.orbeon.oxf.xforms.control.controls._
import org.orbeon.oxf.xforms.XFormsConstants._
import org.orbeon.oxf.xforms.XFormsContainingDocument
import org.orbeon.oxf.xforms.XFormsUtils._
import org.orbeon.oxf.common.OXFException
import org.orbeon.oxf.xml._
import java.util.{ArrayList, List ⇒ JList, Set ⇒ JSet, Collections ⇒ JCollections}
import dom4j.{LocationSAXContentHandler, Dom4jUtils}
import org.orbeon.oxf.pipeline.api._
import org.dom4j.{Document, Element}
import org.orbeon.oxf.xforms.state.XFormsStateManager
import org.orbeon.oxf.util.{IndentedLogger, Multipart, Logging}
import XFormsEvents._
import collection.JavaConverters._
import org.orbeon.oxf.xforms.analysis.controls.RepeatControl
import org.orbeon.oxf.xforms.event.XFormsEvent._
// Process events sent by the client, including sorting, filtering, and security
object ClientEvents extends Logging {
// Only a few events specify custom properties that can be set by the client
private val AllStandardProperties =
XXFormsDndEvent.StandardProperties ++
KeypressEvent.StandardProperties ++
XXFormsUploadDoneEvent.StandardProperties ++
XXFormsLoadEvent.StandardProperties
private val DummyEvent = List(new LocalEvent(Dom4jUtils.createElement("dummy"), false))
private case class LocalEvent(private val element: Element, trusted: Boolean) {
val name = element.attributeValue("name")
val targetEffectiveId = element.attributeValue("source-control-id")
val bubbles = element.attributeValue("bubbles") != "false" // default is true
val cancelable = element.attributeValue("cancelable") != "false" // default is true
def attributeValue(name: String) = element.attributeValue(name)
lazy val properties = Dom4j.elements(element, XXFORMS_PROPERTY_QNAME) map { e ⇒ (e.attributeValue("name"), Option(e.getText)) } toMap
lazy val value = if (properties.nonEmpty) "" else element.getText // for now we don't support both a value and properties
}
// Entry point called by the server: process a sequence of incoming client events.
def processEvents(
doc: XFormsContainingDocument,
clientEvents: JList[Element],
serverEvents: JList[Element]): (Boolean, JSet[String], String) = {
val allClientAndServerEvents = {
// Process events for noscript mode if needed
val clientEventsAfterNoscript =
if (doc.getStaticState.isNoscript)
reorderNoscriptEvents(clientEvents.asScala, doc)
else
clientEvents.asScala
// Decode encrypted server events
def decodeServerEvents(element: Element) = {
val document = decodeXML(element.getStringValue)
Dom4j.elements(document.getRootElement, XXFORMS_EVENT_QNAME)
}
// Decode global server events
val globalServerEvents: Seq[LocalEvent] = serverEvents.asScala flatMap decodeServerEvents map (LocalEvent(_, trusted = true))
// Gather all events including decoding action server events
globalServerEvents ++
(clientEventsAfterNoscript flatMap {
case element if element.attributeValue("name") == XXFORMS_SERVER_EVENTS ⇒
decodeServerEvents(element) map (LocalEvent(_, trusted = true))
case element ⇒
List(LocalEvent(element, trusted = false))
})
}
if (allClientAndServerEvents.nonEmpty) {
def filterEvents(events: Seq[LocalEvent]) = events filter {
case a if a.name == XXFORMS_ALL_EVENTS_REQUIRED ⇒ false
case a if (a.name eq null) && (a.targetEffectiveId eq null) ⇒
throw new OXFException("<event> element must either have source-control-id and name attributes, or no attribute.")
case _ ⇒ true
}
def combineValueEvents(events: Seq[LocalEvent]): Seq[XFormsEvent] = events match {
case Seq() ⇒ Seq()
case Seq(localEvent) ⇒ safelyCreateAndMapEvent(doc, localEvent).toList
case _ ⇒
// Grouping key for value change events
case class EventGroupingKey(name: String, targetId: String) {
def this(localEvent: LocalEvent) =
this(localEvent.name, localEvent.targetEffectiveId)
}
// Slide over the events so we can filter and compress them
// NOTE: Don't use Iterator.toSeq as that returns a Stream, which evaluates lazily. This would be great, except
// that we *must* first create all events, then dispatch them, so that references to XFormsTarget are obtained
// beforehand.
(events ++ DummyEvent).sliding(2).toList flatMap {
case Seq(a, b) ⇒
if (a.name != XXFORMS_VALUE || new EventGroupingKey(a) != new EventGroupingKey(b))
safelyCreateAndMapEvent(doc, a)
else
None
}
}
// Combine and process events
for (event ← combineValueEvents(filterEvents(allClientAndServerEvents)))
processEvent(doc, event)
// Gather some metadata about the events received to help with the response to the client
// Whether we got a request for all events
val gotAllEvents = allClientAndServerEvents exists
(_.name == XXFORMS_ALL_EVENTS_REQUIRED)
// Set of all control ids for which we got value events
val valueChangeControlIds = allClientAndServerEvents collect
{ case e if e.name == XXFORMS_VALUE ⇒ e.targetEffectiveId } toSet
// Last client focus event received
val clientFocusControlId = allClientAndServerEvents.reverse find
(_.name == XFORMS_FOCUS) map
(_.targetEffectiveId) orNull
(gotAllEvents, valueChangeControlIds.asJava, clientFocusControlId)
} else
(false, JCollections.emptySet[String], null)
}
// NOTE: Leave public for unit tests
def reorderNoscriptEvents(eventElements: Seq[Element], doc: XFormsContainingDocument): Seq[Element] = {
// Event categories
sealed trait Category
case object Other extends Category
case object ValueChange extends Category
case object SelectBlank extends Category
case object Activation extends Category
// All categories in the order we want them
val AllCategories = Seq(Other, ValueChange, SelectBlank, Activation)
// Group events in 3 categories
def getEventCategory(element: Element) = element match {
// Special event for noscript mode
case element if element.attributeValue("name") == XXFORMS_VALUE_OR_ACTIVATE ⇒
val sourceControlId = element.attributeValue("source-control-id")
element match {
// This is a value event
case element if doc.getStaticOps.isValueControl(sourceControlId) ⇒ ValueChange
// This is most likely a trigger or submit which will translate into a DOMActivate. We will move it
// to the end so that value change events are committed to instance data before that.
case _ ⇒ Activation
}
case _ ⇒ Other
}
// NOTE: map keys are not in predictable order, but map values preserve the order
val groups = eventElements groupBy getEventCategory
// Special handling of checkboxes blanking in noscript mode
val blankEvents = {
// Get set of all value change events effective ids
def getValueChangeIds = groups.get(ValueChange).toList.flatten map (_.attributeValue("source-control-id")) toSet
// Create <xxf:event name="xxforms-value-or-activate" source-control-id="my-effective-id"/>
def createBlankingEvent(control: XFormsControl) = {
val newEventElement = Dom4jUtils.createElement(XXFORMS_EVENT_QNAME)
newEventElement.addAttribute("name", XXFORMS_VALUE_OR_ACTIVATE)
newEventElement.addAttribute("source-control-id", control.getEffectiveId)
newEventElement
}
val selectFullControls = doc.getControls.getCurrentControlTree.getSelectFullControls
// Find all relevant and non-readonly select controls for which no value change event arrived. For each such
// control, create a new event that will blank its value.
selectFullControls.asScala.keySet -- getValueChangeIds map
(selectFullControls.get(_).asInstanceOf[XFormsSelectControl]) filter
(control ⇒ control.isRelevant && ! control.isReadonly) map
createBlankingEvent toSeq
}
// Return all events by category in the order we defined the categories
AllCategories flatMap ((groups + (SelectBlank → blankEvents)).get(_)) flatten
}
// Incoming ids can have the form `my-repeat⊙1` in order to target a repeat iteration. This is ambiguous without
// knowing that `my-repeat` refers to a repeat and without knowing the repeat hierarchy, so we should change it
// in the future, but in the meanwhile we map this id to `my-repeat~iteration⊙1` based on static information.
// NOTE: Leave public for unit tests
def adjustIdForRepeatIteration(doc: XFormsContainingDocument, effectiveId: String) =
doc.getStaticOps.getControlAnalysis(getPrefixedId(effectiveId)) match {
case repeat: RepeatControl if repeat.ancestorRepeatsAcrossParts.size == getEffectiveIdSuffixParts(effectiveId).size - 1 ⇒
getRelatedEffectiveId(effectiveId, repeat.iteration.get.staticId)
case _ ⇒
effectiveId
}
private def safelyCreateAndMapEvent(doc: XFormsContainingDocument, event: LocalEvent): Option[XFormsEvent] = {
implicit val CurrentLogger = doc.getIndentedLogger(LOGGING_CATEGORY)
// Get event target
val eventTarget = doc.getObjectByEffectiveId(deNamespaceId(doc, adjustIdForRepeatIteration(doc, event.targetEffectiveId))) match {
case eventTarget: XFormsEventTarget ⇒ eventTarget
case _ ⇒
debug("ignoring client event with invalid target id", Seq("target id" → event.targetEffectiveId, "event name" → event.name))
return None
}
// Check whether the external event is allowed on the given target.
def checkAllowedExternalEvents = {
// Whether an external event name is explicitly allowed by the configuration.
def isExplicitlyAllowedExternalEvent = {
val externalEventsMap = doc.getStaticState.getAllowedExternalEvents
! XFormsEventFactory.isBuiltInEvent(event.name) && externalEventsMap.contains(event.name)
}
// This is also a security measure that also ensures that somebody is not able to change values in an instance
// by hacking external events.
isExplicitlyAllowedExternalEvent || {
val explicitlyAllowed = eventTarget.allowExternalEvent(event.name)
if (! explicitlyAllowed)
debug("ignoring invalid client event on target", Seq("id" → eventTarget.getEffectiveId, "event name" → event.name))
explicitlyAllowed
}
}
// Check the event is allowed on target
if (event.trusted)
// Event is trusted, don't check if it is allowed
debug("processing trusted event", Seq("target id" → eventTarget.getEffectiveId, "event name" → event.name))
else if (! checkAllowedExternalEvents)
return None // event is not trusted and is not allowed
def mapEventName(event: LocalEvent, eventTarget: XFormsEventTarget) = event.name match {
// Rewrite event type. This is special handling of xxforms-value-or-activate for noscript mode.
// NOTE: We do this here, because we need to know the actual type of the target. Could do this statically if
// the static state kept type information for each control.
case XXFORMS_VALUE_OR_ACTIVATE ⇒
eventTarget match {
// Handler produces:
// <button type="submit" name="foobar" value="activate">...
// <input type="submit" name="foobar" value="Hi There">...
// <input type="image" name="foobar" value="Hi There" src="...">...
// IE 6/7 are terminally broken: they don't send the value back, but the contents of the label. So
// we must test for any empty content here instead of !"activate".equals(valueString). (Note that
// this means that empty labels won't work.) Further, with IE 6, all buttons are present when
// using <button>, so we use <input> instead, either with type="submit" or type="image". Bleh.
case triggerControl: XFormsTriggerControl if event.value.isEmpty ⇒ None
// Triggers get a DOM activation
case triggerControl: XFormsTriggerControl ⇒ Some(DOM_ACTIVATE)
// Other controls get a value change
case _ ⇒ Some(XXFORMS_VALUE)
}
case eventName ⇒ Some(eventName)
}
// Create event
mapEventName(event, eventTarget) map { eventName ⇒
def standardProperties =
for {
attributeNames ← AllStandardProperties.get(eventName).toList
attributeName ← attributeNames
attributeValue = event.attributeValue(attributeName)
if attributeValue ne null
} yield
attributeName → Option(attributeValue)
def eventValue = if (eventName == XXFORMS_VALUE) Seq("value" → Option(event.value)) else Seq()
XFormsEventFactory.createEvent(
eventName,
eventTarget,
event.properties ++ standardProperties ++ eventValue,
allowCustomEvents = true,
event.bubbles,
event.cancelable)
}
}
// Check for and handle events that don't need access to the document but can return an Ajax response rapidly.
def doQuickReturnEvents(
xmlReceiver: XMLReceiver,
request: ExternalContext.Request,
requestDocument: Document,
indentedLogger: IndentedLogger,
logRequestResponse: Boolean,
clientEvents: JList[Element],
session: ExternalContext.Session): Boolean = {
val eventElement = clientEvents.asScala(0)
// Helper to make it easier to output simple Ajax responses
def eventResponse(messageType: String, message: String)(block: ContentHandlerHelper ⇒ Unit): Boolean = {
implicit val CurrentLogger = indentedLogger
withDebug(message) {
// Hook-up debug content handler if we must log the response document
val (responseReceiver, debugContentHandler) =
if (logRequestResponse) {
val receivers = new ArrayList[XMLReceiver]
receivers.add(xmlReceiver)
val debugContentHandler = new LocationSAXContentHandler
receivers.add(debugContentHandler)
(new TeeXMLReceiver(receivers), Some(debugContentHandler))
} else
(xmlReceiver, None)
val helper = new ContentHandlerHelper(responseReceiver)
helper.startDocument()
helper.startPrefixMapping("xxf", XXFORMS_NAMESPACE_URI)
helper.startElement("xxf", XXFORMS_NAMESPACE_URI, "event-response")
block(helper)
helper.endElement()
helper.endPrefixMapping("xxf")
helper.endDocument()
debugContentHandler foreach
(ch ⇒ debugResults(Seq("ajax response" → Dom4jUtils.domToPrettyString(ch.getDocument))))
}
true
}
eventElement.attributeValue("name") match {
// Quick response for heartbeat
case XXFORMS_SESSION_HEARTBEAT ⇒
if (indentedLogger.isDebugEnabled) {
if (session != null)
indentedLogger.logDebug("heartbeat", "received heartbeat from client for session: " + session.getId)
else
indentedLogger.logDebug("heartbeat", "received heartbeat from client (no session available).")
}
// Output empty Ajax response
eventResponse("ajax response", "handling quick heartbeat Ajax response")(helper ⇒ ())
// Quick response for upload progress
case XXFORMS_UPLOAD_PROGRESS ⇒
// Output simple resulting document
eventResponse("ajax response", "handling quick upload progress Ajax response") { helper ⇒
val sourceControlId = eventElement.attributeValue("source-control-id")
Multipart.getUploadProgress(request, XFormsStateManager.getRequestUUID(requestDocument), sourceControlId) match {
case Some(progress) ⇒
helper.startElement("xxf", XXFORMS_NAMESPACE_URI, "action")
helper.startElement("xxf", XXFORMS_NAMESPACE_URI, "control-values")
helper.element("xxf", XXFORMS_NAMESPACE_URI, "control",
Array[String]("id", sourceControlId,
"progress-state", progress.state.name,
"progress-received", progress.receivedSize.toString,
"progress-expected", progress.expectedSize map (_.toString) orNull))
helper.endElement()
helper.endElement()
case _ ⇒
}
}
case _ ⇒ false
}
}
// Process an incoming client event. Preprocessing for noscript and encrypted events is assumed to have taken place.
// This handles checking for stale controls, relevance, readonly, and special cases like xf:output.
// NOTE: Leave public for unit tests
def processEvent(doc: XFormsContainingDocument, event: XFormsEvent) {
// Check whether an event can be be dispatched to the given object. This only checks:
// o the the target is still live
// o that the target is not a non-relevant or readonly control
def checkEventTarget(event: XFormsEvent): Boolean = {
val eventTarget = event.targetObject
val newReference = doc.getObjectByEffectiveId(eventTarget.getEffectiveId)
def warn(condition: String) = {
implicit val CurrentLogger = doc.indentedLogger
debug("ignoring invalid client event on " + condition, Seq(
"control id" → eventTarget.getEffectiveId,
"event name" → event.name)
)
false
}
if (eventTarget ne newReference) {
// Here, we check that the event's target is still a valid object. For example, a couple of events from the
// UI could target controls. The first event is processed, which causes a change in the controls tree. The
// second event would then refer to a control which no longer exist. In this case, we don't dispatch it.
// We used to check simply by effective id, but this is not enough in some cases. We want to handle
// controls that just "move" in a repeat. Scenario:
//
// o repeat with 2 iterations has xf:input and xf:trigger
// o assume repeat is sorted on input value
// o use changes value in input and clicks trigger
// o client sends 2 events to server
// o client processes value change and sets new value
// o refresh takes place and causes reordering of rows
// o client processes DOMActivate on trigger, which now has moved position, e.g. row 2 to row 1
// o DOMActivate is dispatched to proper control (i.e. same as input was on)
//
// On the other hand, if the repeat iteration has disappeared, or was removed and recreated, the event is
// not dispatched.
warn("ghost target")
} else eventTarget match {
// Controls accept event only if they are relevant
case control: XFormsControl if ! control.isRelevant ⇒
warn("non-relevant control")
// Output control not subject to readonly condition below
case control: XFormsOutputControl ⇒
true
// Single node controls accept event only if they are not readonly
case control: XFormsSingleNodeControl if control.isReadonly ⇒
warn("read-only control")
// Disallow focus if the control is no focusable
// Relevance and read-only above are already caught. This catches hidden controls, which must not be
// focusable from the client.
case control: XFormsControl if event.isInstanceOf[XFormsFocusEvent] && ! control.isFocusable ⇒
warn("non-focusable control")
case _ ⇒
true
}
}
def dispatchEventCheckTarget(event: XFormsEvent) =
if (checkEventTarget(event))
Dispatch.dispatchEvent(event)
implicit val CurrentLogger = doc.getIndentedLogger(LOGGING_CATEGORY)
val target = event.targetObject
val targetEffectiveId = target.getEffectiveId
val eventName = event.name
withDebug("handling external event", Seq("target id" → targetEffectiveId, "event name" → eventName)) {
// Optimize case where a value change event won't change the control value to actually change
(event, target) match {
case (valueChange: XXFormsValueEvent, target: XFormsValueControl) if target.getExternalValue == valueChange.value ⇒
// We completely ignore the event if the value in the instance is the same. This also saves dispatching xxforms-repeat-activate below.
debug("ignoring value change event as value is the same", Seq(
"control id" → targetEffectiveId,
"event name" → eventName,
"value" → target.getExternalValue)
)
return
case _ ⇒
}
// NOTES:
// 1. We used to dispatch xforms-focus here, but now we don't anymore: we assume that the client provides
// xforms-focus before value changes as needed. Also, value changes can occur without focus changes, in
// particular when the JavaScript API is used.
// 2. We also used to handle value controls here, but it makes more sense to do it via events.
// 3. Recalculate, revalidate and refresh are handled with the automatic deferred updates.
// 4. We used to do special handling for xf:output: upon click on xf:output, the client would send
// xforms-focus. We would translate that into DOMActivate. As of 2012-03-09 there doesn't seem to be a
// need for this so we are removing this behavior.
// Each event is within its own start/end outermost action handler
doc.startOutermostActionHandler()
// Handle repeat iteration if the event target is in a repeat
if (hasEffectiveIdSuffix(targetEffectiveId))
dispatchEventCheckTarget(new XXFormsRepeatActivateEvent(target, EmptyGetter))
// Interpret event
dispatchEventCheckTarget(event)
doc.endOutermostActionHandler()
}
}
}
|
evlist/orbeon-forms
|
src/main/scala/org/orbeon/oxf/xforms/event/ClientEvents.scala
|
Scala
|
lgpl-2.1
| 25,777 |
package es.um.nosql.streaminginference.spark.implicits
import scala.collection.JavaConversions.asScalaBuffer
import scala.collection.JavaConversions.seqAsJavaList
import scala.collection.mutable.Buffer
import scala.collection.mutable.HashMap
import org.apache.spark.sql.Row
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.ArrayType
import org.apache.spark.sql.types.BooleanType
import org.apache.spark.sql.types.DataType
import org.apache.spark.sql.types.DoubleType
import org.apache.spark.sql.types.StringType
import org.apache.spark.sql.types.StructField
import org.apache.spark.sql.types.StructType
import es.um.nosql.streaminginference.NoSQLSchema.Aggregate
import es.um.nosql.streaminginference.NoSQLSchema.Association
import es.um.nosql.streaminginference.NoSQLSchema.Attribute
import es.um.nosql.streaminginference.NoSQLSchema.EntityVersion
import es.um.nosql.streaminginference.NoSQLSchema.NoSQLSchema
import es.um.nosql.streaminginference.NoSQLSchema.PrimitiveType
import es.um.nosql.streaminginference.NoSQLSchema.Property
import es.um.nosql.streaminginference.NoSQLSchema.Reference
import es.um.nosql.streaminginference.NoSQLSchema.Tuple
import es.um.nosql.streaminginference.NoSQLSchema.Type
import es.um.nosql.streaminginference.spark.utils.EcoreHelper
object InferenceHelpers
{
private def getPrimitiveType(name: String): DataType =
name match
{
// TODO: Unify primitive types
case "Boolean" => BooleanType
case "Number" => DoubleType
case _ => StringType
}
implicit class PrimitiveExtend(val primitive: PrimitiveType) extends TypeExtend(primitive)
{
override def getSQLType: DataType = getPrimitiveType(primitive.getName)
}
implicit class TupleExtend(val tuple: Tuple) extends TypeExtend(tuple)
{
private def isHomogeneous: Boolean =
{
val elements = tuple.getElements
elements.size match
{
case 0 => true
// Nested tuples won't be homogeneous
case 1 => !elements.get(0).isInstanceOf[Tuple]
case _ =>
elements.map(elem => elem.getClass).distinct.size == 1 &&
// Nested tuples won't be homogeneous
elements.get(0).isInstanceOf[PrimitiveType]
}
}
override def getSQLType: DataType =
{
val elements = tuple.getElements.toSeq
if (elements.size == 0)
// FIXME: What is the type of an empty Tuple?
ArrayType(StringType)
if (isHomogeneous)
{
ArrayType(elements.get(0).getSQLType)
}
else
{
StructType(
elements
.zipWithIndex
.map { case (element, index) =>
StructField(index.toString, element.getSQLType)})
}
}
}
implicit class TypeExtend(val typ:Type)
{
def getSQLType: DataType =
{
if (typ.isInstanceOf[PrimitiveType])
typ.asInstanceOf[PrimitiveType].getSQLType
else
typ.asInstanceOf[Tuple].getSQLType
}
}
implicit class ReferenceExtend(val reference: Reference) extends AssociationExtend(reference)
{
override def getSQLType: DataType =
{
if (reference.getUpperBound == -1)
{
ArrayType(getPrimitiveType(reference.getOriginalType))
}
else
{
getPrimitiveType(reference.getOriginalType)
}
}
}
implicit class AssociationExtend(val association: Association) extends PropertyExtend(association)
{
override def getSQLType: DataType =
if (association.isInstanceOf[Reference])
association.asInstanceOf[Reference].getSQLType
else
association.asInstanceOf[Aggregate].getSQLType
}
implicit class AggregateExtend(val aggregate: Aggregate) extends AssociationExtend(aggregate)
{
private def isHomogeneous: Boolean =
{
val references = aggregate.getRefTo.toSeq
references.size match
{
case 0 => true
case 1 => true
case _ => references.forall(EcoreHelper.isEqual(_, references.head))
}
}
override def getSQLType: DataType = {
val references = aggregate.getRefTo.toSeq
if (references.size == 0)
// TODO: check empty aggregates
ArrayType(StringType)
else if (isHomogeneous)
ArrayType(references.head.getSQLType)
else
StructType(references
.zipWithIndex
.map {
case (reference, index) =>
StructField(index.toString, reference.getSQLType)})
}
}
implicit class AttributeExtend(val attribute: Attribute) extends PropertyExtend(attribute)
{
override def getSQLType: DataType = attribute.getType.getSQLType
}
implicit class PropertyExtend(val property: Property)
{
def getSQLType: DataType =
if (property.isInstanceOf[Attribute])
property.asInstanceOf[Attribute].getSQLType
else
property.asInstanceOf[Association].getSQLType
}
implicit class VersionExtend(val version: EntityVersion)
{
def getSQLType: StructType =
StructType(
version
.getProperties
.map(property => StructField(property.getName, property.getSQLType)))
}
implicit class NoSQLSchemaExtend(val schema: NoSQLSchema)
{
def toSparkSQLVersions() = {
val versions = HashMap[String, Buffer[StructType]]()
schema.getEntities.foreach(entity => {
versions +=
entity.getName -> entity.getEntityversions.map(version => version.getSQLType)
})
versions
}
def printSQLSchema(ss: SparkSession): Unit = {
schema
.toSparkSQLVersions
.map { case (entity, versions) =>
println(entity)
println("--------------")
versions.foreach(version => {
ss.createDataFrame(ss.sparkContext.emptyRDD[Row], version).printSchema
})
}
}
}
}
|
catedrasaes-umu/NoSQLDataEngineering
|
projects/es.um.nosql.streaminginference.json2dbschema/src/es/um/nosql/streaminginference/spark/implicits/InferenceSQLSchema.scala
|
Scala
|
mit
| 6,037 |
package kornell.server.ws.rs.writer
import javax.ws.rs.core.{MediaType, MultivaluedMap}
import javax.ws.rs.ext.{MessageBodyWriter, Provider}
@Provider
class BooleanWriter extends MessageBodyWriter[Boolean] {
override def getSize(b: Boolean,
aType: java.lang.Class[_],
genericType: java.lang.reflect.Type,
annotations: Array[java.lang.annotation.Annotation],
mediaType: MediaType): Long = -1L
override def isWriteable(aType: java.lang.Class[_],
genericType: java.lang.reflect.Type,
annotations: Array[java.lang.annotation.Annotation],
mediaType: MediaType): Boolean = "application/boolean".equalsIgnoreCase(mediaType.toString)
override def writeTo(b: Boolean,
aType: java.lang.Class[_],
genericType: java.lang.reflect.Type,
annotations: Array[java.lang.annotation.Annotation],
mediaType: MediaType,
httpHeaders: MultivaluedMap[java.lang.String, java.lang.Object],
out: java.io.OutputStream): Unit = {
out.write(b.toString.getBytes)
}
}
|
Craftware/Kornell
|
kornell-api/src/main/scala/kornell/server/ws/rs/writer/BooleanWriter.scala
|
Scala
|
apache-2.0
| 1,004 |
package xsbtWebApp
import scala.xml.{ Node => XmlNode, NodeSeq => XmlNodeSeq, _ }
import scala.xml.transform.{ RewriteRule, RuleTransformer }
import sbt._
import Keys.TaskStreams
import xsbtUtil.types._
import xsbtUtil.{ util => xu }
import xsbtClasspath.{ Asset => ClasspathAsset, ClasspathPlugin }
import xsbtClasspath.Import.classpathAssets
object Import {
val webapp = taskKey[File]("complete build, returns the created directory")
val webappAppDir = settingKey[File]("directory of the webapp to be built")
val webappWar = taskKey[File]("complete build, returns the created war file")
val webappWarFile = settingKey[File]("where to put the webapp's war file")
val webappPackageName = settingKey[String]("name of the package built")
val webappStage = taskKey[Seq[PathMapping]]("gathered webapp assets")
val webappAssetDir = settingKey[File]("directory with webapp contents")
val webappAssets = taskKey[Traversable[PathMapping]]("webapp contents")
val webappExtras = taskKey[Traversable[PathMapping]]("additional webapp contents+")
val webappDeploy = taskKey[Unit]("copy-deploy the webapp")
val webappDeployBase = settingKey[Option[File]]("target directory base for copy-deploy")
val webappDeployName = settingKey[String]("target directory name for copy-deploy")
val webappBuildDir = settingKey[File]("base directory of built files")
}
object WebAppPlugin extends AutoPlugin {
//------------------------------------------------------------------------------
//## exports
lazy val autoImport = Import
import autoImport._
override val requires:Plugins = ClasspathPlugin && plugins.JvmPlugin
override val trigger:PluginTrigger = noTrigger
override lazy val projectSettings:Seq[Def.Setting[_]] =
Vector(
webapp :=
buildTask(
streams = Keys.streams.value,
libs = classpathAssets.value,
assets = webappStage.value,
appDir = webappAppDir.value
),
webappAppDir := webappBuildDir.value / "output" / webappPackageName.value,
webappStage := webappAssets.value.toVector ++ webappExtras.value.toVector,
webappAssetDir := (Keys.sourceDirectory in Compile).value / "webapp",
webappAssets := xu.find allMapped webappAssetDir.value,
webappExtras := Seq.empty,
webappWar :=
warTask(
streams = Keys.streams.value,
webapp = webapp.value,
warFile = webappWarFile.value
),
webappWarFile := webappBuildDir.value / "output" / (webappPackageName.value + ".war"),
webappDeploy :=
deployTask(
streams = Keys.streams.value,
webapp = webapp.value,
deployBase = webappDeployBase.value,
deployName = webappDeployName.value
),
webappDeployBase := None,
webappDeployName := Keys.name.value,
webappPackageName := Keys.name.value + "-" + Keys.version.value,
webappBuildDir := Keys.crossTarget.value / "webapp",
Keys.watchSources := Keys.watchSources.value :+ WatchSource(webappAssetDir.value),
// disable standard artifact, xsbt-webapp publishes webappWar
Keys.publishArtifact in (Compile, Keys.packageBin) := false,
// add war artifact
Keys.artifact in (Compile, webappWar) ~= {
_ withType "war" withExtension "war"
},
// remove dependencies and repositories from pom
Keys.pomPostProcess := removeDependencies
) ++
addArtifact(Keys.artifact in (Compile, webappWar), webappWar)
//------------------------------------------------------------------------------
//## pom transformation
private def removeDependencies(node:XmlNode):XmlNode =
(new RuleTransformer(pomRewriteRule) transform node).head
private val pomRewriteRule =
new RewriteRule {
override def transform(node:XmlNode):XmlNodeSeq =
node match {
case el:Elem if el.label == "dependency" =>
val organization = childText(el, "groupId")
val artifact = childText(el, "artifactId")
val version = childText(el, "version")
val scope = childText(el, "scope")
Comment(s"$organization#$artifact;$version ($scope)")
case el:Elem if el.label == "repository" =>
/*
val id = childText(el, "id")
val name = childText(el, "name")
val url = childText(el, "url")
val layout = childText(el, "layout")
*/
Comment(s"redacted")
case _ =>
node
}
private def childText(el:Elem, label:String):String =
el.child filter { _.label == label } flatMap { _.text } mkString ""
}
//------------------------------------------------------------------------------
//## tasks
/** build webapp directory */
private def buildTask(
streams:TaskStreams,
libs:Seq[ClasspathAsset],
assets:Seq[PathMapping],
appDir:File
):File = {
streams.log info s"copying resources and libraries to ${appDir}"
val libsToCopy = libs map { _.flatPathMapping } map (xu.pathMapping modifyPath ("WEB-INF/lib/" + _))
xu.file mirror (appDir, assets ++ libsToCopy)
appDir
}
/** build webapp war */
private def warTask(
streams:TaskStreams,
webapp:File,
warFile:File
):File = {
streams.log info s"creating war file ${warFile}"
xu.zip create (
sources = xu.find allMapped webapp,
outputZip = warFile
)
warFile
}
/** copy-deploy webapp */
private def deployTask(
streams:TaskStreams,
webapp:File,
deployBase:Option[File],
deployName:String
):Unit = {
if (deployBase.isEmpty) {
xu.fail logging (streams, s"${webappDeployBase.key.label} must be initialized to deploy")
}
val deployBase1 = deployBase.get
val webappDir = deployBase1 / deployName
val warFile = deployBase1 / (deployName + ".war")
streams.log info s"deleting old war file ${warFile}"
IO delete warFile
streams.log info s"deploying webapp to ${webappDir}"
val webappFiles = xu.find allMapped webapp
xu.file mirror (webappDir, webappFiles)
}
}
|
ritschwumm/xsbt-webapp
|
src/main/scala/WebAppPlugin.scala
|
Scala
|
bsd-2-clause
| 5,820 |
package org.archive.archivespark.sparkling.io
import java.io.{FileInputStream, OutputStream}
import org.apache.hadoop.fs.Path
import org.archive.archivespark.sparkling.logging.{Log, LogContext}
import scala.util.Try
class HdfsFileWriter private(filename: String, append: Boolean, replication: Short) extends OutputStream {
implicit val logContext: LogContext = LogContext(this)
private val file = IOUtil.tmpFile
Log.info("Writing to temporary local file " + file.getCanonicalPath + " (" + filename + ")...")
val out = IOUtil.fileOut(file)
override def close(): Unit = {
Try { out.close() }
Log.info("Copying from temporary file " + file.getCanonicalPath + " to " + filename + "...")
if (append) {
val in = new FileInputStream(file)
val appendOut = HdfsIO.fs.append(new Path(filename))
IOUtil.copy(in, appendOut)
appendOut.close()
in.close()
file.delete()
} else HdfsIO.copyFromLocal(file.getCanonicalPath, filename, move = true, overwrite = true, replication)
Log.info("Done. (" + filename + ")")
}
override def write(b: Int): Unit = out.write(b)
override def write(b: Array[Byte]): Unit = out.write(b)
override def write(b: Array[Byte], off: Int, len: Int): Unit = out.write(b, off, len)
override def flush(): Unit = out.flush()
}
object HdfsFileWriter {
def apply(filename: String, overwrite: Boolean = false, append: Boolean = false, replication: Short = 0): HdfsFileWriter = {
if (!overwrite && !append) HdfsIO.ensureNewFile(filename)
new HdfsFileWriter(filename, append, replication)
}
}
|
helgeho/ArchiveSpark
|
src/main/scala/org/archive/archivespark/sparkling/io/HdfsFileWriter.scala
|
Scala
|
mit
| 1,591 |
package functions
import org.scalatest.FunSuite
import basics.functions.FunctionComposition
class FunctionCompositionTest extends FunSuite {
test("FunctionCompositionTest1") {
assert(FunctionComposition.computeComposed===12);
assert(FunctionComposition.computeComposed2===12);
}
}
|
szaqal/KitchenSink
|
Scala/01/src/test/scala/functions/FunctionCompositionTest.scala
|
Scala
|
gpl-3.0
| 296 |
package dotty.tools.dotc.util
/** A common class for lightweight mutable sets.
*/
abstract class MutableSet[T] extends ReadOnlySet[T]:
/** Add element `x` to the set */
def +=(x: T): Unit
/** Like `+=` but return existing element equal to `x` of it exists,
* `x` itself otherwise.
*/
def put(x: T): T
/** Remove element `x` from the set */
def -=(x: T): Unit
def clear(): Unit
def ++= (xs: IterableOnce[T]): Unit =
xs.iterator.foreach(this += _)
def --= (xs: IterableOnce[T]): Unit =
xs.iterator.foreach(this -= _)
|
dotty-staging/dotty
|
compiler/src/dotty/tools/dotc/util/MutableSet.scala
|
Scala
|
apache-2.0
| 557 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.util.{List => JList, Map => JMap}
import scala.reflect.runtime.universe.TypeTag
import org.apache.spark.{Accumulator, Logging}
import org.apache.spark.api.python.PythonBroadcast
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.api.java._
import org.apache.spark.sql.catalyst.ScalaReflection
import org.apache.spark.sql.catalyst.expressions.{Expression, ScalaUdf}
import org.apache.spark.sql.execution.PythonUDF
import org.apache.spark.sql.types.DataType
/**
* Functions for registering user-defined functions. Use [[SQLContext.udf]] to access this.
*/
class UDFRegistration private[sql] (sqlContext: SQLContext) extends Logging {
private val functionRegistry = sqlContext.functionRegistry
protected[sql] def registerPython(
name: String,
command: Array[Byte],
envVars: JMap[String, String],
pythonIncludes: JList[String],
pythonExec: String,
broadcastVars: JList[Broadcast[PythonBroadcast]],
accumulator: Accumulator[JList[Array[Byte]]],
stringDataType: String): Unit = {
log.debug(
s"""
| Registering new PythonUDF:
| name: $name
| command: ${command.toSeq}
| envVars: $envVars
| pythonIncludes: $pythonIncludes
| pythonExec: $pythonExec
| dataType: $stringDataType
""".stripMargin)
val dataType = sqlContext.parseDataType(stringDataType)
def builder(e: Seq[Expression]) =
PythonUDF(
name,
command,
envVars,
pythonIncludes,
pythonExec,
broadcastVars,
accumulator,
dataType,
e)
functionRegistry.registerFunction(name, builder)
}
// scalastyle:off
/* register 0-22 were generated by this script
(0 to 22).map { x =>
val types = (1 to x).foldRight("RT")((i, s) => {s"A$i, $s"})
val typeTags = (1 to x).map(i => s"A${i}: TypeTag").foldLeft("RT: TypeTag")(_ + ", " + _)
println(s"""
/**
* Register a Scala closure of ${x} arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[$typeTags](name: String, func: Function$x[$types]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}""")
}
(1 to 22).foreach { i =>
val extTypeArgs = (1 to i).map(_ => "_").mkString(", ")
val anyTypeArgs = (1 to i).map(_ => "Any").mkString(", ")
val anyCast = s".asInstanceOf[UDF$i[$anyTypeArgs, Any]]"
val anyParams = (1 to i).map(_ => "_: Any").mkString(", ")
println(s"""
|/**
| * Register a user-defined function with ${i} arguments.
| */
|def register(name: String, f: UDF$i[$extTypeArgs, _], returnType: DataType) = {
| functionRegistry.registerFunction(
| name,
| (e: Seq[Expression]) => ScalaUdf(f$anyCast.call($anyParams), returnType, e))
|}""".stripMargin)
}
*/
/**
* Register a Scala closure of 0 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag](name: String, func: Function0[RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 1 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag](name: String, func: Function1[A1, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 2 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag](name: String, func: Function2[A1, A2, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 3 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag](name: String, func: Function3[A1, A2, A3, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 4 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag](name: String, func: Function4[A1, A2, A3, A4, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 5 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag](name: String, func: Function5[A1, A2, A3, A4, A5, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 6 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag](name: String, func: Function6[A1, A2, A3, A4, A5, A6, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 7 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag](name: String, func: Function7[A1, A2, A3, A4, A5, A6, A7, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 8 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag](name: String, func: Function8[A1, A2, A3, A4, A5, A6, A7, A8, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 9 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag](name: String, func: Function9[A1, A2, A3, A4, A5, A6, A7, A8, A9, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 10 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag](name: String, func: Function10[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 11 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag](name: String, func: Function11[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 12 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag](name: String, func: Function12[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 13 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag, A13: TypeTag](name: String, func: Function13[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 14 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag, A13: TypeTag, A14: TypeTag](name: String, func: Function14[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 15 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag, A13: TypeTag, A14: TypeTag, A15: TypeTag](name: String, func: Function15[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 16 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag, A13: TypeTag, A14: TypeTag, A15: TypeTag, A16: TypeTag](name: String, func: Function16[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 17 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag, A13: TypeTag, A14: TypeTag, A15: TypeTag, A16: TypeTag, A17: TypeTag](name: String, func: Function17[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 18 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag, A13: TypeTag, A14: TypeTag, A15: TypeTag, A16: TypeTag, A17: TypeTag, A18: TypeTag](name: String, func: Function18[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 19 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag, A13: TypeTag, A14: TypeTag, A15: TypeTag, A16: TypeTag, A17: TypeTag, A18: TypeTag, A19: TypeTag](name: String, func: Function19[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 20 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag, A13: TypeTag, A14: TypeTag, A15: TypeTag, A16: TypeTag, A17: TypeTag, A18: TypeTag, A19: TypeTag, A20: TypeTag](name: String, func: Function20[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 21 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag, A13: TypeTag, A14: TypeTag, A15: TypeTag, A16: TypeTag, A17: TypeTag, A18: TypeTag, A19: TypeTag, A20: TypeTag, A21: TypeTag](name: String, func: Function21[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, A21, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
/**
* Register a Scala closure of 22 arguments as user-defined function (UDF).
* @tparam RT return type of UDF.
*/
def register[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag, A11: TypeTag, A12: TypeTag, A13: TypeTag, A14: TypeTag, A15: TypeTag, A16: TypeTag, A17: TypeTag, A18: TypeTag, A19: TypeTag, A20: TypeTag, A21: TypeTag, A22: TypeTag](name: String, func: Function22[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, A21, A22, RT]): UserDefinedFunction = {
val dataType = ScalaReflection.schemaFor[RT].dataType
def builder(e: Seq[Expression]) = ScalaUdf(func, dataType, e)
functionRegistry.registerFunction(name, builder)
UserDefinedFunction(func, dataType)
}
//////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////
/**
* Register a user-defined function with 1 arguments.
*/
def register(name: String, f: UDF1[_, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF1[Any, Any]].call(_: Any), returnType, e))
}
/**
* Register a user-defined function with 2 arguments.
*/
def register(name: String, f: UDF2[_, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF2[Any, Any, Any]].call(_: Any, _: Any), returnType, e))
}
/**
* Register a user-defined function with 3 arguments.
*/
def register(name: String, f: UDF3[_, _, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF3[Any, Any, Any, Any]].call(_: Any, _: Any, _: Any), returnType, e))
}
/**
* Register a user-defined function with 4 arguments.
*/
def register(name: String, f: UDF4[_, _, _, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF4[Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any), returnType, e))
}
/**
* Register a user-defined function with 5 arguments.
*/
def register(name: String, f: UDF5[_, _, _, _, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF5[Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any), returnType, e))
}
/**
* Register a user-defined function with 6 arguments.
*/
def register(name: String, f: UDF6[_, _, _, _, _, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF6[Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, e))
}
/**
* Register a user-defined function with 7 arguments.
*/
def register(name: String, f: UDF7[_, _, _, _, _, _, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF7[Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, e))
}
/**
* Register a user-defined function with 8 arguments.
*/
def register(name: String, f: UDF8[_, _, _, _, _, _, _, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF8[Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, e))
}
/**
* Register a user-defined function with 9 arguments.
*/
def register(name: String, f: UDF9[_, _, _, _, _, _, _, _, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF9[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, e))
}
/**
* Register a user-defined function with 10 arguments.
*/
def register(name: String, f: UDF10[_, _, _, _, _, _, _, _, _, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF10[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, e))
}
/**
* Register a user-defined function with 11 arguments.
*/
def register(name: String, f: UDF11[_, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF11[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, e))
}
/**
* Register a user-defined function with 12 arguments.
*/
def register(name: String, f: UDF12[_, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF12[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, e))
}
/**
* Register a user-defined function with 13 arguments.
*/
def register(name: String, f: UDF13[_, _, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF13[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, e))
}
/**
* Register a user-defined function with 14 arguments.
*/
def register(name: String, f: UDF14[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF14[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, e))
}
/**
* Register a user-defined function with 15 arguments.
*/
def register(name: String, f: UDF15[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF15[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, e))
}
/**
* Register a user-defined function with 16 arguments.
*/
def register(name: String, f: UDF16[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF16[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, e))
}
/**
* Register a user-defined function with 17 arguments.
*/
def register(name: String, f: UDF17[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF17[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, e))
}
/**
* Register a user-defined function with 18 arguments.
*/
def register(name: String, f: UDF18[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF18[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, e))
}
/**
* Register a user-defined function with 19 arguments.
*/
def register(name: String, f: UDF19[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF19[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, e))
}
/**
* Register a user-defined function with 20 arguments.
*/
def register(name: String, f: UDF20[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF20[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, e))
}
/**
* Register a user-defined function with 21 arguments.
*/
def register(name: String, f: UDF21[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF21[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, e))
}
/**
* Register a user-defined function with 22 arguments.
*/
def register(name: String, f: UDF22[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], returnType: DataType) = {
functionRegistry.registerFunction(
name,
(e: Seq[Expression]) => ScalaUdf(f.asInstanceOf[UDF22[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any), returnType, e))
}
// scalastyle:on
}
|
trueyao/spark-lever
|
sql/core/src/main/scala/org/apache/spark/sql/UDFRegistration.scala
|
Scala
|
apache-2.0
| 28,934 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.exchange
import java.util.UUID
import java.util.concurrent._
import scala.concurrent.{ExecutionContext, Promise}
import scala.concurrent.duration.NANOSECONDS
import scala.util.control.NonFatal
import org.apache.spark.{broadcast, SparkException}
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.sql.catalyst.plans.logical.Statistics
import org.apache.spark.sql.catalyst.plans.physical.{BroadcastMode, BroadcastPartitioning, Partitioning}
import org.apache.spark.sql.execution.{SparkPlan, SQLExecution}
import org.apache.spark.sql.execution.joins.HashedRelation
import org.apache.spark.sql.execution.metric.SQLMetrics
import org.apache.spark.sql.internal.{SQLConf, StaticSQLConf}
import org.apache.spark.unsafe.map.BytesToBytesMap
import org.apache.spark.util.{SparkFatalException, ThreadUtils}
/**
* Common trait for all broadcast exchange implementations to facilitate pattern matching.
*/
trait BroadcastExchangeLike extends Exchange {
/**
* The broadcast job group ID
*/
def runId: UUID = UUID.randomUUID
/**
* The asynchronous job that prepares the broadcast relation.
*/
def relationFuture: Future[broadcast.Broadcast[Any]]
/**
* For registering callbacks on `relationFuture`.
* Note that calling this method may not start the execution of broadcast job.
*/
def completionFuture: scala.concurrent.Future[broadcast.Broadcast[Any]]
/**
* Returns the runtime statistics after broadcast materialization.
*/
def runtimeStatistics: Statistics
}
/**
* A [[BroadcastExchangeExec]] collects, transforms and finally broadcasts the result of
* a transformed SparkPlan.
*/
case class BroadcastExchangeExec(
mode: BroadcastMode,
child: SparkPlan) extends BroadcastExchangeLike {
import BroadcastExchangeExec._
override val runId: UUID = UUID.randomUUID
override lazy val metrics = Map(
"dataSize" -> SQLMetrics.createSizeMetric(sparkContext, "data size"),
"collectTime" -> SQLMetrics.createTimingMetric(sparkContext, "time to collect"),
"buildTime" -> SQLMetrics.createTimingMetric(sparkContext, "time to build"),
"broadcastTime" -> SQLMetrics.createTimingMetric(sparkContext, "time to broadcast"))
override def outputPartitioning: Partitioning = BroadcastPartitioning(mode)
override def doCanonicalize(): SparkPlan = {
BroadcastExchangeExec(mode.canonicalized, child.canonicalized)
}
override def runtimeStatistics: Statistics = {
val dataSize = metrics("dataSize").value
Statistics(dataSize)
}
@transient
private lazy val promise = Promise[broadcast.Broadcast[Any]]()
@transient
override lazy val completionFuture: scala.concurrent.Future[broadcast.Broadcast[Any]] =
promise.future
@transient
private val timeout: Long = SQLConf.get.broadcastTimeout
@transient
override lazy val relationFuture: Future[broadcast.Broadcast[Any]] = {
SQLExecution.withThreadLocalCaptured[broadcast.Broadcast[Any]](
sqlContext.sparkSession, BroadcastExchangeExec.executionContext) {
try {
// Setup a job group here so later it may get cancelled by groupId if necessary.
sparkContext.setJobGroup(runId.toString, s"broadcast exchange (runId $runId)",
interruptOnCancel = true)
val beforeCollect = System.nanoTime()
// Use executeCollect/executeCollectIterator to avoid conversion to Scala types
val (numRows, input) = child.executeCollectIterator()
if (numRows >= MAX_BROADCAST_TABLE_ROWS) {
throw new SparkException(
s"Cannot broadcast the table over $MAX_BROADCAST_TABLE_ROWS rows: $numRows rows")
}
val beforeBuild = System.nanoTime()
longMetric("collectTime") += NANOSECONDS.toMillis(beforeBuild - beforeCollect)
// Construct the relation.
val relation = mode.transform(input, Some(numRows))
val dataSize = relation match {
case map: HashedRelation =>
map.estimatedSize
case arr: Array[InternalRow] =>
arr.map(_.asInstanceOf[UnsafeRow].getSizeInBytes.toLong).sum
case _ =>
throw new SparkException("[BUG] BroadcastMode.transform returned unexpected " +
s"type: ${relation.getClass.getName}")
}
longMetric("dataSize") += dataSize
if (dataSize >= MAX_BROADCAST_TABLE_BYTES) {
throw new SparkException(
s"Cannot broadcast the table that is larger than 8GB: ${dataSize >> 30} GB")
}
val beforeBroadcast = System.nanoTime()
longMetric("buildTime") += NANOSECONDS.toMillis(beforeBroadcast - beforeBuild)
// Broadcast the relation
val broadcasted = sparkContext.broadcast(relation)
longMetric("broadcastTime") += NANOSECONDS.toMillis(
System.nanoTime() - beforeBroadcast)
val executionId = sparkContext.getLocalProperty(SQLExecution.EXECUTION_ID_KEY)
SQLMetrics.postDriverMetricUpdates(sparkContext, executionId, metrics.values.toSeq)
promise.trySuccess(broadcasted)
broadcasted
} catch {
// SPARK-24294: To bypass scala bug: https://github.com/scala/bug/issues/9554, we throw
// SparkFatalException, which is a subclass of Exception. ThreadUtils.awaitResult
// will catch this exception and re-throw the wrapped fatal throwable.
case oe: OutOfMemoryError =>
val ex = new SparkFatalException(
new OutOfMemoryError("Not enough memory to build and broadcast the table to all " +
"worker nodes. As a workaround, you can either disable broadcast by setting " +
s"${SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key} to -1 or increase the spark " +
s"driver memory by setting ${SparkLauncher.DRIVER_MEMORY} to a higher value.")
.initCause(oe.getCause))
promise.tryFailure(ex)
throw ex
case e if !NonFatal(e) =>
val ex = new SparkFatalException(e)
promise.tryFailure(ex)
throw ex
case e: Throwable =>
promise.tryFailure(e)
throw e
}
}
}
override protected def doPrepare(): Unit = {
// Materialize the future.
relationFuture
}
override protected def doExecute(): RDD[InternalRow] = {
throw new UnsupportedOperationException(
"BroadcastExchange does not support the execute() code path.")
}
override protected[sql] def doExecuteBroadcast[T](): broadcast.Broadcast[T] = {
try {
relationFuture.get(timeout, TimeUnit.SECONDS).asInstanceOf[broadcast.Broadcast[T]]
} catch {
case ex: TimeoutException =>
logError(s"Could not execute broadcast in $timeout secs.", ex)
if (!relationFuture.isDone) {
sparkContext.cancelJobGroup(runId.toString)
relationFuture.cancel(true)
}
throw new SparkException(s"Could not execute broadcast in $timeout secs. " +
s"You can increase the timeout for broadcasts via ${SQLConf.BROADCAST_TIMEOUT.key} or " +
s"disable broadcast join by setting ${SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key} to -1",
ex)
}
}
}
object BroadcastExchangeExec {
// Since the maximum number of keys that BytesToBytesMap supports is 1 << 29,
// and only 70% of the slots can be used before growing in HashedRelation,
// here the limitation should not be over 341 million.
val MAX_BROADCAST_TABLE_ROWS = (BytesToBytesMap.MAX_CAPACITY / 1.5).toLong
val MAX_BROADCAST_TABLE_BYTES = 8L << 30
private[execution] val executionContext = ExecutionContext.fromExecutorService(
ThreadUtils.newDaemonCachedThreadPool("broadcast-exchange",
SQLConf.get.getConf(StaticSQLConf.BROADCAST_EXCHANGE_MAX_THREAD_THRESHOLD)))
}
|
dbtsai/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/BroadcastExchangeExec.scala
|
Scala
|
apache-2.0
| 8,985 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.calcite
import java.util.Properties
import org.apache.calcite.config.{CalciteConnectionConfig, CalciteConnectionConfigImpl, CalciteConnectionProperty}
import org.apache.calcite.plan.RelOptRule
import org.apache.calcite.sql.SqlOperatorTable
import org.apache.calcite.sql.parser.SqlParser
import org.apache.calcite.sql.util.ChainedSqlOperatorTable
import org.apache.calcite.sql2rel.SqlToRelConverter
import org.apache.calcite.tools.{RuleSet, RuleSets}
import org.apache.flink.annotation.Internal
import org.apache.flink.table.api.PlannerConfig
import org.apache.flink.util.Preconditions
import scala.collection.JavaConverters._
/**
* Builder for creating a Calcite configuration.
*/
@Internal
class CalciteConfigBuilder {
/**
* Defines the normalization rule set. Normalization rules are dedicated for rewriting
* predicated logical plan before volcano optimization.
*/
private var replaceNormRules: Boolean = false
private var normRuleSets: List[RuleSet] = Nil
/**
* Defines the logical optimization rule set.
*/
private var replaceLogicalOptRules: Boolean = false
private var logicalOptRuleSets: List[RuleSet] = Nil
/**
* Defines the physical optimization rule set.
*/
private var replacePhysicalOptRules: Boolean = false
private var physicalOptRuleSets: List[RuleSet] = Nil
/**
* Defines the decoration rule set. Decoration rules are dedicated for rewriting predicated
* logical plan after volcano optimization.
*/
private var replaceDecoRules: Boolean = false
private var decoRuleSets: List[RuleSet] = Nil
/**
* Defines the SQL operator tables.
*/
private var replaceOperatorTable: Boolean = false
private var operatorTables: List[SqlOperatorTable] = Nil
/**
* Defines a SQL parser configuration.
*/
private var replaceSqlParserConfig: Option[SqlParser.Config] = None
/**
* Defines a configuration for SqlToRelConverter.
*/
private var replaceSqlToRelConverterConfig: Option[SqlToRelConverter.Config] = None
/**
* Replaces the built-in normalization rule set with the given rule set.
*/
def replaceNormRuleSet(replaceRuleSet: RuleSet): CalciteConfigBuilder = {
Preconditions.checkNotNull(replaceRuleSet)
normRuleSets = List(replaceRuleSet)
replaceNormRules = true
this
}
/**
* Appends the given normalization rule set to the built-in rule set.
*/
def addNormRuleSet(addedRuleSet: RuleSet): CalciteConfigBuilder = {
Preconditions.checkNotNull(addedRuleSet)
normRuleSets = addedRuleSet :: normRuleSets
this
}
/**
* Replaces the built-in optimization rule set with the given rule set.
*/
def replaceLogicalOptRuleSet(replaceRuleSet: RuleSet): CalciteConfigBuilder = {
Preconditions.checkNotNull(replaceRuleSet)
logicalOptRuleSets = List(replaceRuleSet)
replaceLogicalOptRules = true
this
}
/**
* Appends the given optimization rule set to the built-in rule set.
*/
def addLogicalOptRuleSet(addedRuleSet: RuleSet): CalciteConfigBuilder = {
Preconditions.checkNotNull(addedRuleSet)
logicalOptRuleSets = addedRuleSet :: logicalOptRuleSets
this
}
/**
* Replaces the built-in optimization rule set with the given rule set.
*/
def replacePhysicalOptRuleSet(replaceRuleSet: RuleSet): CalciteConfigBuilder = {
Preconditions.checkNotNull(replaceRuleSet)
physicalOptRuleSets = List(replaceRuleSet)
replacePhysicalOptRules = true
this
}
/**
* Appends the given optimization rule set to the built-in rule set.
*/
def addPhysicalOptRuleSet(addedRuleSet: RuleSet): CalciteConfigBuilder = {
Preconditions.checkNotNull(addedRuleSet)
physicalOptRuleSets = addedRuleSet :: physicalOptRuleSets
this
}
/**
* Replaces the built-in decoration rule set with the given rule set.
*
* The decoration rules are applied after the cost-based optimization phase.
* The decoration phase allows to rewrite the optimized plan and is not cost-based.
*
*/
def replaceDecoRuleSet(replaceRuleSet: RuleSet): CalciteConfigBuilder = {
Preconditions.checkNotNull(replaceRuleSet)
decoRuleSets = List(replaceRuleSet)
replaceDecoRules = true
this
}
/**
* Appends the given decoration rule set to the built-in rule set.
*
* The decoration rules are applied after the cost-based optimization phase.
* The decoration phase allows to rewrite the optimized plan and is not cost-based.
*/
def addDecoRuleSet(addedRuleSet: RuleSet): CalciteConfigBuilder = {
Preconditions.checkNotNull(addedRuleSet)
decoRuleSets = addedRuleSet :: decoRuleSets
this
}
/**
* Replaces the built-in SQL operator table with the given table.
*/
def replaceSqlOperatorTable(replaceSqlOperatorTable: SqlOperatorTable): CalciteConfigBuilder = {
Preconditions.checkNotNull(replaceSqlOperatorTable)
operatorTables = List(replaceSqlOperatorTable)
replaceOperatorTable = true
this
}
/**
* Appends the given table to the built-in SQL operator table.
*/
def addSqlOperatorTable(addedSqlOperatorTable: SqlOperatorTable): CalciteConfigBuilder = {
Preconditions.checkNotNull(addedSqlOperatorTable)
this.operatorTables = addedSqlOperatorTable :: this.operatorTables
this
}
/**
* Replaces the built-in SQL parser configuration with the given configuration.
*/
def replaceSqlParserConfig(sqlParserConfig: SqlParser.Config): CalciteConfigBuilder = {
Preconditions.checkNotNull(sqlParserConfig)
replaceSqlParserConfig = Some(sqlParserConfig)
this
}
/**
* Replaces the built-in SqlToRelConverter configuration with the given configuration.
*/
def replaceSqlToRelConverterConfig(config: SqlToRelConverter.Config)
: CalciteConfigBuilder = {
Preconditions.checkNotNull(config)
replaceSqlToRelConverterConfig = Some(config)
this
}
/**
* Convert the [[RuleSet]] List to [[Option]] type
*/
private def getRuleSet(inputRuleSet: List[RuleSet]): Option[RuleSet] = {
inputRuleSet match {
case Nil => None
case h :: Nil => Some(h)
case _ =>
// concat rule sets
val concatRules =
inputRuleSet.foldLeft(Nil: Iterable[RelOptRule])((c, r) => r.asScala ++ c)
Some(RuleSets.ofList(concatRules.asJava))
}
}
/**
* Builds a new [[CalciteConfig]].
*/
def build(): CalciteConfig = new CalciteConfig(
getRuleSet(normRuleSets),
replaceNormRules,
getRuleSet(logicalOptRuleSets),
replaceLogicalOptRules,
getRuleSet(physicalOptRuleSets),
replacePhysicalOptRules,
getRuleSet(decoRuleSets),
replaceDecoRules,
operatorTables match {
case Nil => None
case h :: Nil => Some(h)
case _ =>
// chain operator tables
Some(operatorTables.reduce((x, y) => ChainedSqlOperatorTable.of(x, y)))
},
this.replaceOperatorTable,
replaceSqlParserConfig,
replaceSqlToRelConverterConfig)
}
/**
* Calcite configuration for defining a custom Calcite configuration for Table and SQL API.
*/
@Internal
class CalciteConfig(
/** A custom normalization rule set. */
val normRuleSet: Option[RuleSet],
/** Whether this configuration replaces the built-in normalization rule set. */
val replacesNormRuleSet: Boolean,
/** A custom logical optimization rule set. */
val logicalOptRuleSet: Option[RuleSet],
/** Whether this configuration replaces the built-in logical optimization rule set. */
val replacesLogicalOptRuleSet: Boolean,
/** A custom physical optimization rule set. */
val physicalOptRuleSet: Option[RuleSet],
/** Whether this configuration replaces the built-in physical optimization rule set. */
val replacesPhysicalOptRuleSet: Boolean,
/** A custom decoration rule set. */
val decoRuleSet: Option[RuleSet],
/** Whether this configuration replaces the built-in decoration rule set. */
val replacesDecoRuleSet: Boolean,
/** A custom SQL operator table. */
val sqlOperatorTable: Option[SqlOperatorTable],
/** Whether this configuration replaces the built-in SQL operator table. */
val replacesSqlOperatorTable: Boolean,
/** A custom SQL parser configuration. */
val sqlParserConfig: Option[SqlParser.Config],
/** A custom configuration for SqlToRelConverter. */
val sqlToRelConverterConfig: Option[SqlToRelConverter.Config]) extends PlannerConfig
object CalciteConfig {
val DEFAULT: CalciteConfig = createBuilder().build()
/**
* Creates a new builder for constructing a [[CalciteConfig]].
*/
def createBuilder(): CalciteConfigBuilder = {
new CalciteConfigBuilder
}
def connectionConfig(parserConfig : SqlParser.Config): CalciteConnectionConfig = {
val prop = new Properties()
prop.setProperty(CalciteConnectionProperty.CASE_SENSITIVE.camelName,
String.valueOf(parserConfig.caseSensitive))
new CalciteConnectionConfigImpl(prop)
}
}
|
fhueske/flink
|
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/calcite/CalciteConfig.scala
|
Scala
|
apache-2.0
| 9,790 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.structure
import io.gatling.core.action.builder.ActionBuilder
trait StructureSupport extends StructureBuilder[ChainBuilder] {
private[core] def newInstance(actionBuilders: List[ActionBuilder]) = new ChainBuilder(actionBuilders)
private[core] def actionBuilders: List[ActionBuilder] = Nil
}
|
GabrielPlassard/gatling
|
gatling-core/src/main/scala/io/gatling/core/structure/StructureSupport.scala
|
Scala
|
apache-2.0
| 940 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package scalaguide.common.build
package object controllers {
type AssetsBuilder = _root_.controllers.AssetsBuilder
type Assets = _root_.controllers.Assets
}
|
benmccann/playframework
|
documentation/manual/working/commonGuide/build/code/scalaguide/common/build/controllers/SubProjectAssets.scala
|
Scala
|
apache-2.0
| 237 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.logical
import org.apache.flink.table.api.TableException
import org.apache.flink.table.planner.calcite.FlinkContext
import org.apache.flink.table.planner.plan.nodes.logical.{FlinkLogicalCalc, FlinkLogicalOverAggregate, FlinkLogicalRank}
import org.apache.flink.table.planner.plan.utils.RankUtil
import org.apache.flink.table.runtime.operators.rank.{ConstantRankRange, ConstantRankRangeWithoutEnd, RankType}
import org.apache.calcite.plan.RelOptRule.{any, operand}
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall, RelOptUtil}
import org.apache.calcite.rel.`type`.RelDataTypeField
import org.apache.calcite.rex.{RexInputRef, RexProgramBuilder, RexUtil}
import org.apache.calcite.sql.fun.SqlStdOperatorTable
import org.apache.calcite.sql.{SqlKind, SqlRankFunction}
import scala.collection.JavaConversions._
/**
* Planner rule that matches a [[FlinkLogicalCalc]] on a [[FlinkLogicalOverAggregate]],
* and converts them into a [[FlinkLogicalRank]].
*/
abstract class FlinkLogicalRankRuleBase
extends RelOptRule(
operand(classOf[FlinkLogicalCalc],
operand(classOf[FlinkLogicalOverAggregate], any()))) {
override def onMatch(call: RelOptRuleCall): Unit = {
val calc: FlinkLogicalCalc = call.rel(0)
val window: FlinkLogicalOverAggregate = call.rel(1)
val group = window.groups.get(0)
val rankFun = group.aggCalls.get(0).getOperator.asInstanceOf[SqlRankFunction]
// the rank function is the last field of LogicalWindow
val rankFieldIndex = window.getRowType.getFieldCount - 1
val condition = calc.getProgram.getCondition
val predicate = calc.getProgram.expandLocalRef(condition)
val config = calc.getCluster.getPlanner.getContext.unwrap(classOf[FlinkContext]).getTableConfig
val (rankRange, remainingPreds) = RankUtil.extractRankRange(
predicate,
rankFieldIndex,
calc.getCluster.getRexBuilder,
config)
require(rankRange.isDefined)
val cluster = window.getCluster
val rexBuilder = cluster.getRexBuilder
val calcProgram = calc.getProgram
val exprList = calcProgram.getProjectList.map(calcProgram.expandLocalRef)
val inputFields = RelOptUtil.InputFinder.bits(exprList, null).toList
// TODO use the field name specified by user
// the field name may be dropped by `ProjectToWindowRule`. so use field name in calc
// if the calc output rank number directly, otherwise use field name in window now
val outputRankNumber = inputFields.contains(rankFieldIndex)
var rankNumberType: Option[RelDataTypeField] = None
if (outputRankNumber) {
exprList.zipWithIndex.foreach {
case (ref: RexInputRef, index) if ref.getIndex == rankFieldIndex =>
rankNumberType = Some(calc.getRowType.getFieldList.get(index))
case _ => // do nothing
}
}
if (rankNumberType.isEmpty) {
rankNumberType = Some(window.getRowType.getFieldList.get(rankFieldIndex))
}
require(rankNumberType.isDefined)
rankRange match {
case Some(crr: ConstantRankRange) if crr.getRankEnd <= 0 =>
throw new TableException(
s"Rank end should not less than zero, but now is ${crr.getRankEnd}")
case _ => // do nothing
}
val rankType = rankFun match {
case SqlStdOperatorTable.RANK => RankType.RANK
case SqlStdOperatorTable.ROW_NUMBER => RankType.ROW_NUMBER
case SqlStdOperatorTable.DENSE_RANK => RankType.DENSE_RANK
case _ => throw new TableException(s"Unsupported rank function: $rankFun")
}
val rank = new FlinkLogicalRank(
cluster,
window.getTraitSet,
window.getInput,
group.keys,
group.orderKeys,
rankType,
rankRange.get,
rankNumberType.get,
outputRankNumber)
val rankRowType = rank.getRowType
val newRel = if (RexUtil.isIdentity(exprList, rankRowType) && remainingPreds.isEmpty) {
// project is trivial and filter is empty, remove the Calc
rank
} else {
val programBuilder = RexProgramBuilder.create(
rexBuilder,
rankRowType,
calcProgram.getExprList,
calcProgram.getProjectList,
remainingPreds.orNull,
calc.getRowType,
true, // normalize
null) // simplify
calc.copy(calc.getTraitSet, rank, programBuilder.getProgram)
}
call.transformTo(newRel)
}
}
/**
* This rule handles [[SqlRankFunction]] and rank range with end.
*
* The following two example queries could be converted to Rank by this rule:
* 1. constant range (rn <= 2):
* {{{
* SELECT * FROM (
* SELECT a, b, ROW_NUMBER() OVER (PARTITION BY b ORDER BY a) rn FROM MyTable) t
* WHERE rn <= 2
* }}}
* 2. variable range (rk < a):
* {{{
* SELECT * FROM (
* SELECT a, b, RANK() OVER (PARTITION BY b ORDER BY c) rk FROM MyTable) t
* WHERE rk < a
* }}}
*/
class FlinkLogicalRankRuleForRangeEnd extends FlinkLogicalRankRuleBase {
override def matches(call: RelOptRuleCall): Boolean = {
val calc: FlinkLogicalCalc = call.rel(0)
val window: FlinkLogicalOverAggregate = call.rel(1)
if (window.groups.size > 1) {
// only accept one window
return false
}
val group = window.groups.get(0)
if (group.aggCalls.size > 1) {
// only accept one agg call
return false
}
val agg = group.aggCalls.get(0)
if (!agg.getOperator.isInstanceOf[SqlRankFunction]) {
// only accept SqlRankFunction for Rank
return false
}
if (group.lowerBound.isUnbounded && group.upperBound.isCurrentRow) {
val condition = calc.getProgram.getCondition
if (condition != null) {
val predicate = calc.getProgram.expandLocalRef(condition)
// the rank function is the last field of FlinkLogicalOverAggregate
val rankFieldIndex = window.getRowType.getFieldCount - 1
val config = calc
.getCluster
.getPlanner
.getContext
.unwrap(classOf[FlinkContext])
.getTableConfig
val (rankRange, remainingPreds) = RankUtil.extractRankRange(
predicate,
rankFieldIndex,
calc.getCluster.getRexBuilder,
config)
rankRange match {
case Some(_: ConstantRankRangeWithoutEnd) =>
throw new TableException(
"Rank end is not specified. Currently rank only support TopN, " +
"which means the rank end must be specified.")
case _ => // do nothing
}
// remaining predicate must not access rank field attributes
val remainingPredsAccessRank = remainingPreds.isDefined &&
RankUtil.accessesRankField(remainingPreds.get, rankFieldIndex)
rankRange.isDefined && !remainingPredsAccessRank
} else {
false
}
} else {
false
}
}
}
/**
* This rule only handles RANK function and constant rank range.
*
* The following example query could be converted to Rank by this rule:
* SELECT * FROM (
* SELECT a, b, RANK() OVER (PARTITION BY b ORDER BY a) rk FROM MyTable) t
* WHERE rk <= 2
*/
class FlinkLogicalRankRuleForConstantRange extends FlinkLogicalRankRuleBase {
override def matches(call: RelOptRuleCall): Boolean = {
val calc: FlinkLogicalCalc = call.rel(0)
val window: FlinkLogicalOverAggregate = call.rel(1)
if (window.groups.size > 1) {
// only accept one window
return false
}
val group = window.groups.get(0)
if (group.aggCalls.size > 1) {
// only accept one agg call
return false
}
val agg = group.aggCalls.get(0)
if (agg.getOperator.kind != SqlKind.RANK) {
// only accept RANK function
return false
}
if (group.lowerBound.isUnbounded && group.upperBound.isCurrentRow) {
val condition = calc.getProgram.getCondition
if (condition != null) {
val predicate = calc.getProgram.expandLocalRef(condition)
// the rank function is the last field of FlinkLogicalOverAggregate
val rankFieldIndex = window.getRowType.getFieldCount - 1
val config = calc
.getCluster
.getPlanner
.getContext
.unwrap(classOf[FlinkContext])
.getTableConfig
val (rankRange, remainingPreds) = RankUtil.extractRankRange(
predicate,
rankFieldIndex,
calc.getCluster.getRexBuilder,
config)
// remaining predicate must not access rank field attributes
val remainingPredsAccessRank = remainingPreds.isDefined &&
RankUtil.accessesRankField(remainingPreds.get, rankFieldIndex)
// only support constant rank range
rankRange.exists(_.isInstanceOf[ConstantRankRange]) && !remainingPredsAccessRank
} else {
false
}
} else {
false
}
}
}
object FlinkLogicalRankRule {
val INSTANCE = new FlinkLogicalRankRuleForRangeEnd
val CONSTANT_RANGE_INSTANCE = new FlinkLogicalRankRuleForConstantRange
}
|
lincoln-lil/flink
|
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/logical/FlinkLogicalRankRule.scala
|
Scala
|
apache-2.0
| 9,805 |
package spark.executor
import java.nio.ByteBuffer
import spark.TaskState.TaskState
/**
* A pluggable interface used by the Executor to send updates to the cluster scheduler.
*/
trait ExecutorBackend {
def statusUpdate(taskId: Long, state: TaskState, data: ByteBuffer)
}
|
ankurdave/arthur
|
core/src/main/scala/spark/executor/ExecutorBackend.scala
|
Scala
|
bsd-3-clause
| 276 |
package mesosphere.marathon
package integration.setup
import java.io.File
import java.lang.management.ManagementFactory
import java.net.{URLDecoder, URLEncoder}
import java.nio.charset.Charset
import java.nio.file.Files
import java.util.UUID
import java.util.concurrent.ConcurrentLinkedQueue
import akka.Done
import akka.actor.{ActorSystem, Cancellable, Scheduler}
import akka.http.scaladsl.Http
import akka.http.scaladsl.client.RequestBuilding.Get
import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
import akka.stream.Materializer
import akka.stream.scaladsl.Sink
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.fasterxml.jackson.module.scala.experimental.ScalaObjectMapper
import com.mesosphere.utils.{PortAllocator, ProcessOutputToLogStream}
import com.mesosphere.utils.http.RestResult
import com.mesosphere.utils.mesos.{MesosClusterTest, MesosFacade, MesosTest}
import com.mesosphere.utils.zookeeper.ZookeeperServerTest
import com.typesafe.scalalogging.{Logger, StrictLogging}
import com.mesosphere.usi.async.{Retry, Timeout}
import mesosphere.marathon.Protos.Constraint
import mesosphere.marathon.core.pod.{HostNetwork, MesosContainer, PodDefinition}
import mesosphere.marathon.integration.facades._
import mesosphere.marathon.raml.{
App,
AppCheck,
AppHealthCheck,
AppHostVolume,
AppPersistentVolume,
AppResidency,
AppVolume,
Container,
EngineType,
Network,
NetworkMode,
PersistentVolumeInfo,
PortDefinition,
ReadMode,
UnreachableDisabled,
UpgradeStrategy
}
import mesosphere.marathon.state.{AbsolutePathId, PathId, PersistentVolume, VolumeMount}
import mesosphere.marathon.test.MarathonTestHelper
import mesosphere.marathon.util.Lock
import mesosphere.{AkkaUnitTestLike, WaitTestSupport}
import org.apache.commons.io.FileUtils
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.exceptions.TestFailedDueToTimeoutException
import org.scalatest.time.{Milliseconds, Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, Suite}
import play.api.libs.json.{JsObject, Json}
import scala.annotation.tailrec
import scala.async.Async.{async, await}
import scala.collection.{JavaConverters, mutable}
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.sys.process.Process
import scala.util.Try
import scala.util.control.NonFatal
trait BaseMarathon extends AutoCloseable with StrictLogging with ScalaFutures {
val suiteName: String
val masterUrl: String
val zkUrl: String
val conf: Map[String, String] = Map.empty
implicit val system: ActorSystem
implicit val mat: Materializer
implicit val ctx: ExecutionContext
implicit val scheduler: Scheduler
lazy val uuid = UUID.randomUUID.toString
lazy val httpPort = PortAllocator.ephemeralPort()
lazy val url = conf.get("https_port").fold(s"http://localhost:$httpPort")(httpsPort => s"https://localhost:$httpsPort")
lazy val client = new MarathonFacade(url, PathId.root)
val workDir = {
val f = Files.createTempDirectory(s"marathon-$httpPort").toFile
f.deleteOnExit()
f
}
private def write(dir: File, fileName: String, content: String): String = {
val file = File.createTempFile(fileName, "", dir)
file.deleteOnExit()
FileUtils.write(file, content, Charset.defaultCharset)
file.setReadable(true)
file.getAbsolutePath
}
val secretPath = write(workDir, fileName = "marathon-secret", content = "secret1")
val mesosRole = conf.getOrElse("mesos_role", BaseMarathon.defaultRole)
val config = Map(
"master" -> masterUrl,
"mesos_authentication_principal" -> "principal",
"mesos_role" -> mesosRole,
"http_port" -> httpPort.toString,
"zk" -> zkUrl,
"zk_timeout" -> 20.seconds.toMillis.toString,
"zk_connection_timeout" -> 20.seconds.toMillis.toString,
"zk_session_timeout" -> 20.seconds.toMillis.toString,
"mesos_authentication_secret_file" -> s"$secretPath",
"access_control_allow_origin" -> "*",
"reconciliation_initial_delay" -> 5.minutes.toMillis.toString,
"min_revive_offers_interval" -> "1000",
"hostname" -> "localhost",
"logging_level" -> "debug",
"offer_matching_timeout" -> 10.seconds.toMillis.toString // see https://github.com/mesosphere/marathon/issues/4920
) ++ conf
val args = config.iterator.flatMap {
case (k, v) =>
if (v.nonEmpty) {
Seq(s"--$k", v)
} else {
Seq(s"--$k")
}
}.toSeq
@volatile var marathonProcess = Option.empty[Process]
val processBuilder: scala.sys.process.ProcessBuilder
def create(): Process = {
marathonProcess.getOrElse {
val process =
processBuilder.run(ProcessOutputToLogStream(s"mesosphere.marathon.integration.process.$suiteName-LocalMarathon-$httpPort"))
marathonProcess = Some(process)
process
}
}
def start(): Future[Done] = {
create()
val port = conf.get("http_port").orElse(conf.get("https_port")).map(_.toInt).getOrElse(httpPort)
val future = Retry(
s"Waiting for Marathon on $port",
maxAttempts = Int.MaxValue,
minDelay = 1.milli,
maxDelay = 5.seconds,
maxDuration = 4.minutes
) {
async {
val result = await(Http().singleRequest(Get(s"http://localhost:$port/v2/leader")))
result.discardEntityBytes() // forget about the body
if (result.status.isSuccess()) { // linter:ignore //async/await
Done
} else {
throw new Exception(s"Marathon on port=$port hasn't started yet. Giving up waiting..")
}
}
}
future
}
def isRunning(): Boolean =
activePids.nonEmpty
def exitValue(): Option[Int] = marathonProcess.map(_.exitValue())
def activePids: Seq[String] = {
val PIDRE = """^\\s*(\\d+)\\s+\\s*(.*)$""".r
Process("jps -lv").!!.split("\\n").iterator.collect {
case PIDRE(pid, jvmArgs) if jvmArgs.contains(uuid) => pid
}.toSeq
}
def stop(): Future[Done] = {
marathonProcess
.fold(Future.successful(Done)) { p =>
p.destroy()
Timeout.blocking(30.seconds, Some("Marathon")) { p.exitValue(); Done }.recover {
case NonFatal(e) =>
logger.warn(s"Could not shutdown Marathon $suiteName in time", e)
if (activePids.nonEmpty) {
Process(s"kill -9 ${activePids.mkString(" ")}").!
}
Done
}
}
.andThen {
case _ =>
marathonProcess = Option.empty[Process]
}
}
def restart(): Future[Done] = {
logger.info(s"Restarting Marathon on $httpPort")
async {
await(stop())
val x = await(start())
logger.info(s"Restarted Marathon on $httpPort")
x
}
}
override def close(): Unit = {
stop().futureValue(timeout(35.seconds), interval(1.seconds))
Try(FileUtils.deleteDirectory(workDir))
}
// lower the memory pressure by limiting threads.
val akkaJvmArgs = Seq(
"-Dakka.actor.default-dispatcher.fork-join-executor.parallelism-min=2",
"-Dakka.actor.default-dispatcher.fork-join-executor.factor=1",
"-Dakka.actor.default-dispatcher.fork-join-executor.parallelism-max=4",
"-Dscala.concurrent.context.minThreads=2",
"-Dscala.concurrent.context.maxThreads=32"
)
}
object BaseMarathon {
final val defaultRole = "foo"
}
/**
* Runs a marathon server for the given test suite
* @param suiteName The test suite that owns this marathon
* @param masterUrl The mesos master url
* @param zkUrl The ZK url
* @param conf any particular configuration
* @param mainClass The main class
*/
case class LocalMarathon(
suiteName: String,
masterUrl: String,
zkUrl: String,
override val conf: Map[String, String] = Map.empty,
val mainClass: String = "mesosphere.marathon.Main"
)(implicit val system: ActorSystem, val mat: Materializer, val ctx: ExecutionContext, val scheduler: Scheduler)
extends BaseMarathon {
// it'd be great to be able to execute in memory, but we can't due to GuiceFilter using a static :(
override val processBuilder = {
val java = sys.props.get("java.home").fold("java")(_ + "/bin/java")
val cp = sys.props.getOrElse("java.class.path", "target/classes")
// Get JVM arguments, such as -javaagent:some.jar
val runtimeMxBean = ManagementFactory.getRuntimeMXBean
val runtimeArguments = JavaConverters
.collectionAsScalaIterable(runtimeMxBean.getInputArguments)
.filterNot(_.contains("debugger-agent"))
.filterNot(_.startsWith("-javaagent"))
.filterNot(_.startsWith("-agentlib"))
.toSeq
val cmd = Seq(java, "-Xmx1024m", "-Xms256m", "-XX:+UseConcMarkSweepGC", "-XX:ConcGCThreads=2") ++
runtimeArguments ++ akkaJvmArgs ++
Seq(s"-DmarathonUUID=$uuid -DtestSuite=$suiteName", "-classpath", cp, "-client", mainClass) ++ args
Process(cmd, workDir, sys.env.toSeq: _*)
}
override def activePids: Seq[String] = {
val PIDRE = """^\\s*(\\d+)\\s+(\\S*)\\s*(.*)$""".r
Process("jps -lv").!!.split("\\n").iterator.collect {
case PIDRE(pid, main, jvmArgs) if main.contains(mainClass) && jvmArgs.contains(uuid) => pid
}.toSeq
}
}
trait HealthCheckEndpoint extends StrictLogging with ScalaFutures {
protected val healthChecks = Lock(mutable.ListBuffer.empty[IntegrationHealthCheck])
val registeredReadinessChecks = Lock(mutable.ListBuffer.empty[IntegrationReadinessCheck])
implicit val system: ActorSystem
implicit val mat: Materializer
/**
* Note! This is declared as lazy in order to prevent eager evaluation of values on which it depends
* We initialize it during the before hook and wait for Marathon to respond.
*/
protected[setup] lazy val healthEndpoint = {
val route = {
import akka.http.scaladsl.server.Directives._
val mapper = new ObjectMapper() with ScalaObjectMapper
mapper.registerModule(DefaultScalaModule)
get {
path(Segment / Segment / "health") { (uriEncodedAppId, versionId) =>
import PathId._
val appId = URLDecoder.decode(uriEncodedAppId, "UTF-8").toAbsolutePath
def instance = healthChecks(_.find { c => c.appId == appId && c.versionId == versionId })
val state = instance.fold(true)(_.healthy)
logger.info(s"Received health check request: app=$appId, version=$versionId reply=$state")
if (state) {
complete(HttpResponse(status = StatusCodes.OK))
} else {
complete(HttpResponse(status = StatusCodes.InternalServerError))
}
} ~ path(Segment / Segment / Segment / "ready") { (uriEncodedAppId, versionId, taskId) =>
import PathId._
val appId = URLDecoder.decode(uriEncodedAppId, "UTF-8").toAbsolutePath
// Find a fitting registred readiness check. If the check has no task id set we ignore it.
def check: Option[IntegrationReadinessCheck] =
registeredReadinessChecks(_.find { c =>
c.appId == appId && c.versionId == versionId && c.taskId.fold(true)(_ == taskId)
})
// An app is not ready by default to avoid race conditions.
val isReady = check.fold(false)(_.call)
logger.info(s"Received readiness check request: app=$appId, version=$versionId taskId=$taskId reply=$isReady")
if (isReady) {
complete(HttpResponse(status = StatusCodes.OK))
} else {
complete(HttpResponse(status = StatusCodes.InternalServerError))
}
} ~ path(Remaining) { path =>
require(false, s"$path was unmatched!")
complete(HttpResponse(status = StatusCodes.InternalServerError))
}
}
}
val port = PortAllocator.ephemeralPort()
logger.info(s"Starting health check endpoint on port $port.")
val server = Http().bindAndHandle(route, "0.0.0.0", port).futureValue
logger.info(s"Listening for health events on $port")
server
}
/**
* Add an integration health check to internal health checks. The integration health check is used to control the
* health check replies for our app mock.
*
* @param appId The app id of the app mock
* @param versionId The version of the app mock
* @param state The initial health status of the app mock
* @return The IntegrationHealthCheck object which is used to control the replies.
*/
def registerAppProxyHealthCheck(appId: AbsolutePathId, versionId: String, state: Boolean): IntegrationHealthCheck = {
val check = new IntegrationHealthCheck(appId, versionId, state)
healthChecks { checks =>
checks.filter(c => c.appId == appId && c.versionId == versionId).foreach(checks -= _)
checks += check
}
check
}
/**
* Adds an integration readiness check to internal readiness checks. The behaviour is similar to integration health
* checks.
*
* @param appId The app id of the app mock
* @param versionId The version of the app mock
* @param taskId Optional task id to identify the task of the app mock.
* @return The IntegrationReadinessCheck object which is used to control replies.
*/
def registerProxyReadinessCheck(appId: AbsolutePathId, versionId: String, taskId: Option[String] = None): IntegrationReadinessCheck = {
val check = new IntegrationReadinessCheck(appId, versionId, taskId)
registeredReadinessChecks { checks =>
checks.filter(c => c.appId == appId && c.versionId == versionId && c.taskId == taskId).foreach(checks -= _)
checks += check
}
check
}
}
trait MarathonAppFixtures {
val testBasePath: AbsolutePathId
val defaultRole: String = BaseMarathon.defaultRole
implicit class PathIdTestHelper(path: String) {
def toRootTestPath: AbsolutePathId = testBasePath.append(path).canonicalPath()
def toTestPath: AbsolutePathId = testBasePath.append(path)
}
val healthCheckPort: Int
/**
* Constructs the proper health proxy endpoint argument for the Python app mock.
*
* @param appId The app id whose health is checked
* @param versionId The version of the app
* @return URL to health check endpoint
*/
def healthEndpointFor(appId: PathId, versionId: String): String = {
val encodedAppId = URLEncoder.encode(appId.toString, "UTF-8")
s"http://$$HOST:$healthCheckPort/$encodedAppId/$versionId"
}
def appMockCmd(appId: PathId, versionId: String, port: String = "$PORT0"): String = {
val projectDir = sys.props.getOrElse("user.dir", ".")
val appMock: File = MarathonTestHelper.resourcePath("python/app_mock.py")
if (!appMock.exists()) {
throw new IllegalStateException("Failed to locate app_mock.py (" + appMock.getAbsolutePath + ")")
}
s"""echo APP PROXY $$MESOS_TASK_ID RUNNING; ${appMock.getAbsolutePath} """ +
s"""${port} $appId $versionId ${healthEndpointFor(appId, versionId)}"""
}
def appProxyHealthCheck(
gracePeriod: FiniteDuration = 1.seconds,
interval: FiniteDuration = 1.second,
maxConsecutiveFailures: Int = Int.MaxValue,
portIndex: Option[Int] = Some(0)
): AppHealthCheck =
raml.AppHealthCheck(
gracePeriodSeconds = gracePeriod.toSeconds.toInt,
intervalSeconds = interval.toSeconds.toInt,
maxConsecutiveFailures = maxConsecutiveFailures,
portIndex = portIndex,
protocol = raml.AppHealthCheckProtocol.Http,
path = Some("/health")
)
def appProxy(
appId: PathId,
versionId: String,
instances: Int,
healthCheck: Option[raml.AppHealthCheck] = Some(appProxyHealthCheck()),
dependencies: Set[PathId] = Set.empty,
gpus: Int = 0,
role: Option[String] = None,
check: Option[AppCheck] = None
): App = {
val cmd = appMockCmd(appId, versionId)
App(
id = appId.toString,
cmd = Some(cmd),
executor = "//cmd",
instances = instances,
cpus = 0.01,
mem = 32.0,
gpus = gpus,
healthChecks = healthCheck.toSet,
dependencies = dependencies.map(_.toString),
role = role,
check = check
)
}
def residentApp(
id: PathId,
containerPath: String = "persistent-volume",
cmd: String = "sleep 1000",
instances: Int = 1,
backoffDuration: FiniteDuration = 1.hour,
portDefinitions: Seq[PortDefinition] = Seq.empty, /* prevent problems by randomized port assignment */
constraints: Set[Seq[String]] = Set.empty,
role: Option[String] = None
): App = {
val cpus: Double = 0.001
val mem: Double = 1.0
val disk: Double = 1.0
val persistentVolumeSize = 2L
val persistentVolume: AppVolume = AppPersistentVolume(
containerPath = containerPath,
persistent = PersistentVolumeInfo(size = persistentVolumeSize),
mode = ReadMode.Rw
)
val app = App(
id.toString,
instances = instances,
residency = Some(AppResidency()),
constraints = constraints,
container = Some(
Container(
`type` = EngineType.Mesos,
volumes = Seq(persistentVolume)
)
),
cmd = Some(cmd),
// cpus, mem and disk are really small because otherwise we'll soon run out of reservable resources
cpus = cpus,
mem = mem,
disk = disk,
portDefinitions = Some(portDefinitions),
backoffSeconds = backoffDuration.toSeconds.toInt,
upgradeStrategy = Some(UpgradeStrategy(minimumHealthCapacity = 0.5, maximumOverCapacity = 0.0)),
unreachableStrategy = Some(UnreachableDisabled.DefaultValue),
role = role
)
app
}
def dockerAppProxy(
appId: PathId,
versionId: String,
instances: Int,
healthCheck: Option[AppHealthCheck] = Some(appProxyHealthCheck()),
dependencies: Set[PathId] = Set.empty
): App = {
val projectDir = sys.props.getOrElse("user.dir", ".")
val containerDir = "/opt/marathon"
val cmd = Some(
"""echo APP PROXY $$MESOS_TASK_ID RUNNING; /opt/marathon/python/app_mock.py """ +
s"""$$PORT0 $appId $versionId ${healthEndpointFor(appId, versionId)}"""
)
App(
id = appId.toString,
cmd = cmd,
container = Some(
raml.Container(
`type` = raml.EngineType.Docker,
docker = Some(
raml.DockerContainer(
image = "python:3.4.6-alpine"
)
),
volumes = collection.immutable.Seq(
AppHostVolume(hostPath = s"$projectDir/src/test/resources/python", containerPath = s"$containerDir/python", mode = ReadMode.Ro)
)
)
),
instances = instances,
cpus = 0.5,
mem = 128,
healthChecks = healthCheck.toSet,
dependencies = dependencies.map(_.toString),
networks = Seq(Network(mode = NetworkMode.Host))
)
}
def simplePod(podId: String, constraints: Set[Constraint] = Set.empty, instances: Int = 1, role: String = defaultRole): PodDefinition =
PodDefinition(
id = testBasePath / s"$podId",
role = role,
containers = Seq(
MesosContainer(
name = "task1",
exec = Some(raml.MesosExec(raml.ShellCommand("sleep 1000"))),
resources = raml.Resources(cpus = 0.1, mem = 32.0)
)
),
networks = Seq(HostNetwork),
instances = instances,
constraints = constraints
)
def residentPod(id: String, mountPath: String = "persistent-volume", cmd: String = "sleep 1000", instances: Int = 1): PodDefinition = {
val persistentVolumeSize = 2L
val volumeInfo = state.PersistentVolumeInfo(size = persistentVolumeSize)
val volumes = Seq(PersistentVolume(name = Some("pst"), persistent = volumeInfo))
val volumeMounts = Seq(VolumeMount(volumeName = Some("pst"), mountPath = mountPath, readOnly = false))
val pod = PodDefinition(
id = testBasePath / id,
role = "foo",
containers = Seq(
MesosContainer(
name = "task1",
exec = Some(raml.MesosExec(raml.ShellCommand(cmd))),
resources = raml.Resources(cpus = 0.1, mem = 32.0),
volumeMounts = volumeMounts
)
),
networks = Seq(HostNetwork),
instances = instances,
constraints = Set.empty,
volumes = volumes,
unreachableStrategy = state.UnreachableDisabled,
upgradeStrategy = state.UpgradeStrategy(0.0, 0.0)
)
pod
}
}
/**
* Base trait for tests that need a marathon
*/
trait MarathonTest extends HealthCheckEndpoint with MarathonAppFixtures with ScalaFutures with Eventually {
import MarathonFacade._
protected def logger: Logger
def marathonUrl: String
def marathon: MarathonFacade
def leadingMarathon: Future[BaseMarathon]
def mesosFacade: MesosFacade
def suiteName: String
implicit val system: ActorSystem
implicit val mat: Materializer
implicit val ctx: ExecutionContext
implicit val scheduler: Scheduler
lazy val healthCheckPort = healthEndpoint.localAddress.getPort
/* There is a small window between Jetty hanging up the event stream, and Jetty not accepting and
* responding to new requests. In the tests, under heavy load, retrying within 15 milliseconds is enough
* to hit this window.
*
* 10 times the interval would probably suffice. To be on the safe side we are making it 5 seconds.
*/
val sseStreamReconnectionInterval = 5.seconds
case class CallbackEvent(eventType: String, info: Map[String, Any])
object CallbackEvent {
def apply(event: ITEvent): CallbackEvent = CallbackEvent(event.eventType, event.info)
}
implicit class CallbackEventToStatusUpdateEvent(val event: CallbackEvent) {
def taskStatus: String = event.info.get("taskStatus").map(_.toString).getOrElse("")
def message: String = event.info("message").toString
def id: String = event.info("id").toString
def running: Boolean = taskStatus == "TASK_RUNNING"
def finished: Boolean = taskStatus == "TASK_FINISHED"
def failed: Boolean = taskStatus == "TASK_FAILED"
}
object StatusUpdateEvent {
def unapply(event: CallbackEvent): Option[CallbackEvent] = {
if (event.eventType == "status_update_event") Some(event)
else None
}
}
protected val events = new ConcurrentLinkedQueue[ITSSEEvent]()
def waitForTasks(appId: AbsolutePathId, num: Int, maxWait: FiniteDuration = patienceConfig.timeout.toMillis.millis)(implicit
facade: MarathonFacade = marathon
): List[ITEnrichedTask] = {
eventually(timeout(Span(maxWait.toMillis, Milliseconds))) {
val tasks = Try(facade.tasks(appId)).map(_.value).getOrElse(Nil).filter(_.launched)
logger.info(s"${tasks.size}/$num tasks launched for $appId")
require(tasks.size == num, s"Waiting for $num tasks to be launched")
tasks
}
}
// We shouldn't eat exceptions in cleanUp() methods: it's a source of hard to find bugs if
// we just move on to the next test, that expects a "clean state". We should fail loud and
// proud here and find out why the clean-up fails.
def cleanUp(): Unit = {
logger.info(">>> Starting to CLEAN UP...")
events.clear()
// Wait for a clean slate in Marathon, if there is a running deployment or a runSpec exists
logger.info("Clean Marathon State")
//do not fail here, since the require statements will ensure a correct setup and fail otherwise
Try(waitForDeployment(eventually(marathon.deleteGroup(testBasePath, force = true))))
waitForCleanMesos()
val apps = marathon.listAppsInBaseGroup
require(apps.value.isEmpty, s"apps weren't empty: ${apps.entityPrettyJsonString}")
val pods = marathon.listPodsInBaseGroup
require(pods.value.isEmpty, s"pods weren't empty: ${pods.entityPrettyJsonString}")
val groups = marathon.listGroupIdsInBaseGroup
require(groups.value.isEmpty, s"groups weren't empty: ${groups.entityPrettyJsonString}")
events.clear()
healthChecks(_.clear())
logger.info("... CLEAN UP finished <<<")
}
def waitForCleanMesos(): Unit = {
val cleanUpPatienceConfig = WaitTestSupport.PatienceConfig(timeout = Span(50, Seconds), interval = Span(1, Seconds))
WaitTestSupport.waitUntil("clean slate in Mesos") {
val mesosState = mesosFacade.state.value
val occupiedAgents = mesosState.agents.filter { agent => agent.usedResources.nonEmpty || agent.reservedResourcesByRole.nonEmpty }
occupiedAgents.foreach { agent =>
import com.mesosphere.utils.mesos.MesosFormats._
val usedResources: String = Json.prettyPrint(Json.toJson(agent.usedResources))
val reservedResources: String = Json.prettyPrint(Json.toJson(agent.reservedResourcesByRole))
logger.info(
s"""Waiting for blank slate Mesos...\\n "used_resources": "$usedResources"\\n"reserved_resources": "$reservedResources""""
)
}
if (occupiedAgents.nonEmpty) {
val tasks = mesosState.frameworks.flatMap(_.tasks)
logger.info(s"Remaining tasks: $tasks")
}
occupiedAgents.isEmpty
}(cleanUpPatienceConfig)
}
def waitForHealthCheck(check: IntegrationHealthCheck, maxWait: FiniteDuration = patienceConfig.timeout.toMillis.millis) = {
WaitTestSupport.waitUntil("Health check to get queried", maxWait) { check.pinged.get }
}
def waitForDeploymentId(deploymentId: String, maxWait: FiniteDuration = patienceConfig.timeout.toMillis.millis): CallbackEvent = {
waitForEventWith("deployment_success", _.id == deploymentId, s"event deployment_success (id: $deploymentId) to arrive", maxWait)
}
def waitForStatusUpdates(kinds: String*): Seq[CallbackEvent] =
kinds.map { kind =>
logger.info(s"Wait for status update event with kind: $kind")
waitForEventWith("status_update_event", _.taskStatus == kind, s"event status_update_event (${kinds.mkString(",")}) to arrive")
}.to(Seq)
def waitForEvent(kind: String, maxWait: FiniteDuration = patienceConfig.timeout.toMillis.millis): CallbackEvent =
waitForEventWith(kind, _ => true, s"event $kind to arrive", maxWait)
def waitForEventWith(
kind: String,
fn: CallbackEvent => Boolean,
description: String,
maxWait: FiniteDuration = patienceConfig.timeout.toMillis.millis
): CallbackEvent = {
waitForEventMatching(description, maxWait) { event =>
event.eventType == kind && fn(event)
}
}
/**
* Consumes the next event from the events queue within deadline. Does not throw. Returns None if unable to return an
* event by that time.
*
* @param deadline The time after which to stop attempting to get an event and return None
*/
private def nextEvent(deadline: Deadline): Option[ITSSEEvent] =
try {
eventually(timeout(Span(deadline.timeLeft.toMillis, Milliseconds))) {
val r = Option(events.poll)
if (r.isEmpty)
throw new NoSuchElementException
r
}
} catch {
case _: NoSuchElementException =>
None
case _: TestFailedDueToTimeoutException =>
None
}
/**
* Method waits for events and calls their callbacks independently of the events order. It receives a
* map of EventId -> Callback e.g.:
* Map("deployment_failed" -> _.id == deploymentId, "deployment_successful" -> _.id == rollbackId)),
* checks every event for it's existence in the map and if found, calls it's callback method. If successful, the entry
* is removed from the map. Returns if the map is empty.
*/
def waitForEventsWith(
description: String,
eventsMap: Map[String, CallbackEvent => Boolean],
maxWait: FiniteDuration = patienceConfig.timeout.toMillis.millis
) = {
val waitingFor = mutable.Map(eventsMap.toSeq: _*)
waitForEventMatching(description, maxWait) { event =>
if (waitingFor.get(event.eventType).fold(false)(fn => fn(event))) {
waitingFor -= event.eventType
}
waitingFor.isEmpty
}
}
/**
* Method waits for ANY (and only one) of the given events. It receives a map of EventId -> Callback e.g.:
* Map("deployment_failed" -> _.id == deploymentId, "deployment_successful" -> _.id == rollbackId)),
* and checks every incoming event for it's existence in the map and if found, calls it's callback method.
* Returns if event found and callback returns true.
*/
def waitForAnyEventWith(
description: String,
eventsMap: Map[String, CallbackEvent => Boolean],
maxWait: FiniteDuration = patienceConfig.timeout.toMillis.millis
) = {
val waitingForAny = mutable.Map(eventsMap.toSeq: _*)
waitForEventMatching(description, maxWait) { event =>
waitingForAny.get(event.eventType).fold(false)(fn => fn(event))
}
}
def waitForEventMatching(description: String, maxWait: FiniteDuration = patienceConfig.timeout.toMillis.millis)(
fn: CallbackEvent => Boolean
): CallbackEvent = {
val deadline = maxWait.fromNow
@tailrec
def iter(): CallbackEvent = {
nextEvent(deadline) match {
case Some(ITConnected) =>
throw new MarathonTest.UnexpectedConnect
case Some(event: ITEvent) =>
val cbEvent = CallbackEvent(event)
if (fn(cbEvent)) {
cbEvent
} else {
logger.info(s"Event $event did not match criteria skipping to next event")
iter()
}
case None =>
throw new RuntimeException(s"No events matched <$description>")
}
}
iter()
}
/**
* Blocks until a single connected event is consumed. Discards any events up to that point.
*
* Not reasoning about SSE connection state will lead to flaky tests. If a master is killed, you should wait for the
* SSE stream to reconnect before doing anything else, or you could miss events.
*/
def waitForSSEConnect(maxWait: FiniteDuration = patienceConfig.timeout.toMillis.millis): Unit = {
@tailrec
val deadline = maxWait.fromNow
def iter(): Unit = {
nextEvent(deadline) match {
case Some(event: ITEvent) =>
logger.info(s"Event ${event} was not a connected event; skipping")
iter()
case Some(ITConnected) =>
logger.info("ITConnected event consumed")
case None =>
throw new RuntimeException("No connected events")
}
}
iter()
}
/**
* Wait for the events of the given kinds (=types).
*/
def waitForEvents(kinds: String*)(maxWait: FiniteDuration = patienceConfig.timeout.toMillis.millis): Map[String, Seq[CallbackEvent]] = {
val deadline = maxWait.fromNow
/** Receive the events for the given kinds (duplicates allowed) in any order. */
val receivedEventsForKinds: Seq[CallbackEvent] = {
var eventsToWaitFor = kinds
val receivedEvents = Vector.newBuilder[CallbackEvent]
while (eventsToWaitFor.nonEmpty) {
val event = waitForEventMatching(s"event $eventsToWaitFor to arrive", deadline.timeLeft) { event =>
eventsToWaitFor.contains(event.eventType)
}
receivedEvents += event
// Remove received event kind. Only remove one element for duplicates.
val kindIndex = eventsToWaitFor.indexWhere(_ == event.eventType)
assert(kindIndex >= 0)
eventsToWaitFor = eventsToWaitFor.patch(kindIndex, Nil, 1)
}
receivedEvents.result()
}
receivedEventsForKinds.groupBy(_.eventType)
}
def waitForDeployment(change: RestResult[_], maxWait: FiniteDuration = patienceConfig.timeout.toMillis.millis): CallbackEvent = {
require(change.success, s"Deployment request has not been successful. httpCode=${change.code} body=${change.entityString}")
val deploymentId = change.deploymentId.getOrElse(throw new IllegalArgumentException("No deployment id found in Http Header"))
waitForDeploymentId(deploymentId, maxWait)
}
def waitForAppOfferReject(appId: PathId, offerRejectReason: String): Unit = {
def queueResult = marathon.launchQueue()
def jsQueueResult = queueResult.entityJson
def queuedRunspecs = (jsQueueResult \\ "queue").as[Seq[JsObject]]
def jsonApp = queuedRunspecs.find { spec => (spec \\ "app" \\ "id").as[String] == appId.toString }.get
def unfulfilledConstraintRejectSummary =
(jsonApp \\ "processedOffersSummary" \\ "rejectSummaryLastOffers")
.as[Seq[JsObject]]
.find { e => (e \\ "reason").as[String] == offerRejectReason }
.get
eventually {
logger.info("jsApp:" + jsonApp.toString())
assert((unfulfilledConstraintRejectSummary \\ "declined").as[Int] >= 1)
}
}
def teardown(): Unit = {
Try {
val frameworkId = marathon.info.entityJson.as[JsObject].value("frameworkId").as[String]
mesosFacade.teardown(frameworkId)
eventually(timeout(1.minutes), interval(2.seconds)) { assert(mesosFacade.completedFrameworkIds().value.contains(frameworkId)) }
}
Try(healthEndpoint.unbind().futureValue)
}
/**
* Connects repeatedly to the Marathon SSE endpoint until cancelled.
* Yields each event in order.
*/
def startEventSubscriber(): Cancellable = {
@volatile var cancelled = false
def iter(): Unit = {
import akka.stream.scaladsl.Source
logger.info("SSEStream: Connecting")
Source
.fromFuture(leadingMarathon)
.mapAsync(1) { leader =>
async {
logger.info(s"SSEStream: Acquiring connection to ${leader.url}")
val stream = await(leader.client.events())
logger.info(s"SSEStream: Connection acquired to ${leader.url}")
/* A potentially impossible edge case exists in which we query the leader, and then before we get a connection
* to that instance, it restarts and is no longer a leader.
*
* By checking the leader again once obtaining a connection to the SSE event stream, we have conclusive proof
* that we are consuming from the current leader, and we keep our connected events as deterministic as
* possible. */
val leaderAfterConnection = await(leadingMarathon)
logger.info(s"SSEStream: ${leader.url} is the leader")
if (leader != leaderAfterConnection) {
stream.runWith(Sink.cancelled)
throw new RuntimeException("Leader status changed since first connecting to stream")
} else {
stream
}
}
}
.flatMapConcat { stream =>
// We prepend the ITConnected event here in order to avoid emitting an ITConnected event on failed connections
stream.prepend(Source.single(ITConnected))
}
.runForeach { e: ITSSEEvent =>
e match {
case ITConnected =>
logger.info(s"SSEStream: Connected")
case event: ITEvent =>
logger.info(s"SSEStream: Received callback event: ${event.eventType} with props ${event.info}")
}
events.offer(e)
}
.onComplete {
case result =>
if (!cancelled) {
logger.info(s"SSEStream: Leader event stream was closed reason: ${result}")
logger.info("Reconnecting")
events.clear()
scheduler.scheduleOnce(sseStreamReconnectionInterval) { iter() }
}
}
}
iter()
new Cancellable {
override def cancel(): Boolean = {
cancelled = true
true
}
override def isCancelled: Boolean = cancelled
}
}
}
object MarathonTest extends StrictLogging {
class UnexpectedConnect
extends Exception(
"Received an unexpected SSE event stream Connection event. This is " +
"considered an exception because not thinking about re-connection events properly can lead to race conditions in " +
"the tests. You should call waitForSSEConnect() after killing a Marathon leader to ensure no events are dropped."
)
}
/**
* Fixture that can be used for a single test case.
*/
trait MarathonFixture extends AkkaUnitTestLike with MesosClusterTest with ZookeeperServerTest {
protected def logger: Logger
def withMarathon[T](suiteName: String, marathonArgs: Map[String, String] = Map.empty)(f: (LocalMarathon, MarathonTest) => T): T = {
val marathonServer = LocalMarathon(
suiteName = suiteName,
masterUrl = mesosMasterZkUrl,
zkUrl = s"zk://${zkserver.connectUrl}/marathon-$suiteName",
conf = marathonArgs
)
marathonServer.start().futureValue
val marathonTest = new MarathonTest {
override protected val logger: Logger = MarathonFixture.this.logger
override def marathonUrl: String = s"http://localhost:${marathonServer.httpPort}"
override def marathon: MarathonFacade = marathonServer.client
override def mesosFacade: MesosFacade = MarathonFixture.this.mesosFacade
override val testBasePath: AbsolutePathId = AbsolutePathId("/")
override implicit val system: ActorSystem = MarathonFixture.this.system
override implicit val mat: Materializer = MarathonFixture.this.mat
override implicit val ctx: ExecutionContext = MarathonFixture.this.ctx
override implicit val scheduler: Scheduler = MarathonFixture.this.scheduler
override val suiteName: String = MarathonFixture.this.suiteName
override implicit def patienceConfig: PatienceConfig =
PatienceConfig(MarathonFixture.this.patienceConfig.timeout, MarathonFixture.this.patienceConfig.interval)
override def leadingMarathon = Future.successful(marathonServer)
}
val sseStream = marathonTest.startEventSubscriber()
try {
marathonTest.healthEndpoint
marathonTest.waitForSSEConnect()
f(marathonServer, marathonTest)
} finally {
sseStream.cancel()
marathonTest.teardown()
marathonServer.stop()
}
}
}
object MarathonFixture extends MarathonFixture
/**
* base trait that spins up/tears down a marathon and has all of the original tooling from
* SingleMarathonIntegrationTest.
*/
trait MarathonSuite extends Suite with StrictLogging with ScalaFutures with BeforeAndAfterAll with Eventually with MarathonTest {
abstract override def afterAll(): Unit = {
teardown()
super.afterAll()
}
}
/**
* Base trait that starts a local marathon but doesn't have mesos/zookeeper yet
*/
trait LocalMarathonTest extends MarathonTest with ScalaFutures with AkkaUnitTestLike with MesosTest with ZookeeperServerTest {
def marathonArgs: Map[String, String] = Map.empty
lazy val marathonServer =
LocalMarathon(suiteName = suiteName, masterUrl = mesosMasterZkUrl, zkUrl = s"zk://${zkserver.connectUrl}/marathon", conf = marathonArgs)
lazy val marathonUrl = s"http://localhost:${marathonServer.httpPort}"
val testBasePath: AbsolutePathId = AbsolutePathId("/")
lazy val marathon = marathonServer.client
/**
* Return the current leading Marathon
* Expected to retry for a significant period of time until succeeds
*/
override def leadingMarathon: Future[LocalMarathon] =
Future.successful(marathonServer)
@volatile private var sseStream: Option[Cancellable] = None
abstract override def beforeAll(): Unit = {
super.beforeAll()
marathonServer.start().futureValue
sseStream = Some(startEventSubscriber())
waitForSSEConnect()
}
abstract override def afterAll(): Unit = {
sseStream.foreach(_.cancel)
teardown()
marathonServer.close()
super.afterAll()
}
}
/**
* trait that has marathon, zk, and a mesos ready to go
*/
trait EmbeddedMarathonTest extends Suite with StrictLogging with ZookeeperServerTest with MesosClusterTest with LocalMarathonTest {
/* disable failover timeout to assist with cleanup ops; terminated marathons are immediately removed from mesos's
* list of frameworks
*
* Until https://issues.apache.org/jira/browse/MESOS-8171 is resolved, we cannot set this value to 0.
*
* As soon as using Mesos offer constraints is switched on by default,
* the `*offer_constraints*` flags can be removed. Until that moment, offer
* constraints have to be switched on here (and not in the default options in
* `LocalMarathon`), because there are tests that use `LocalMarathon`
* for starting older versions of Marathon which do not support this flag.
*/
override def marathonArgs: Map[String, String] = Map(
"failover_timeout" -> "1",
"mesos_offer_constraints" -> "",
"min_mesos_offer_constraints_update_interval" -> "500"
)
}
/**
* Trait that has a Marathon cluster, zk, and Mesos via mesos-local ready to go.
*
* It provides multiple Marathon instances. This allows e.g. leadership rotation.
*/
trait MarathonClusterTest extends Suite with StrictLogging with ZookeeperServerTest with MesosClusterTest with LocalMarathonTest {
val numAdditionalMarathons = 2
lazy val additionalMarathons = 0.until(numAdditionalMarathons).map { _ =>
LocalMarathon(suiteName = suiteName, masterUrl = mesosMasterZkUrl, zkUrl = s"zk://${zkserver.connectUrl}/marathon", conf = marathonArgs)
}
lazy val marathonFacades = marathon +: additionalMarathons.map(_.client)
lazy val allMarathonServers = marathonServer +: additionalMarathons
override def leadingMarathon: Future[LocalMarathon] = {
val leader = Retry("querying leader", maxAttempts = 50, maxDelay = 1.second, maxDuration = patienceConfig.timeout) {
Future.firstCompletedOf(marathonFacades.map(_.leaderAsync()))
}
leader.map { leader =>
allMarathonServers.find { _.httpPort == leader.value.port }.head
}
}
override def beforeAll(): Unit = {
super.beforeAll()
Future.sequence(additionalMarathons.map(_.start())).futureValue
}
override def afterAll(): Unit = {
Try(additionalMarathons.foreach(_.close()))
super.afterAll()
}
override def cleanUp(): Unit = {
Future.sequence(marathonServer.start() +: additionalMarathons.map(_.start())).futureValue
super.cleanUp()
}
}
|
mesosphere/marathon
|
tests/integration/src/test/scala/mesosphere/marathon/integration/setup/MarathonTest.scala
|
Scala
|
apache-2.0
| 41,955 |
package stronghold.strings
/**
* problem description: http://rosalind.info/problems/revc/
*/
object ComputingGcContent {
object SampleData {
val sample: List[String] =
List(
">Rosalind_6404",
"CCTGCGGAAGATCGGCACTAGAATAGCCAGAACCGTTTCTCTGAGGCTTCCGGCCTTCCC",
"TCCCACTAATAATTCTGAGG",
">Rosalind_5959",
"CCATCGGTAGCGCATCCTTAGTCCAATTAAGTCCCTATCCAGGCGCTCCGCCGAAGGTCT",
"ATATCCATTTGTCAGCAGACACGC",
">Rosalind_0808",
"CCACCCTCGTGGTATGGCTAGGCATTCAGGAACCGGAGAACGCTTCAGACCAGCCCGGAC",
"TGGGAACCTGCGGGCAGTAGGTGGAAT"
)
}
import scala.annotation.tailrec
import SampleData.sample
import utils.UtilityFunctions.{Fasta, readInputData, readFastaSequences}
import utils.{Dna, Cytosine, Guanine}
val inputFileName: String = "/stronghold/datasets/rosalind_gc.txt"
def getData(isPractice: Boolean): List[Fasta] = {
val data: List[String] = if (isPractice) sample else readInputData(inputFileName)
readFastaSequences(data, isLineSeparated = false)
}
def calcGcPercentage(sequence: Fasta): Double = {
val dna: Dna = Dna(sequence.string)
val gcContent: Int = dna.sequence.count(nucleotide => nucleotide == Cytosine || nucleotide == Guanine)
gcContent.toDouble / dna.length
}
def findDnaWithHighestGcPercentage(sequences: List[Fasta]): (Fasta, Double) = {
@tailrec
def loop(xs: List[Fasta], acc: (Fasta, Double)): (Fasta, Double) = xs match {
case Nil => acc
case sequence :: xss =>
val currentGcPercentage: Double = calcGcPercentage(sequence)
if (currentGcPercentage > acc._2) loop(xss, (sequence, currentGcPercentage))
else loop(xss, acc)
}
loop(sequences.tail, (sequences.head, calcGcPercentage(sequences.head)))
}
def main(args: Array[String]): Unit = {
val sequences: List[Fasta] = getData(isPractice = true)
val (result, gcContent): (Fasta, Double) = findDnaWithHighestGcPercentage(sequences)
println(result.name)
println(100 * gcContent)
}
}
|
ghostrider77/Bioinformatics
|
Bioinformatics/src/main/scala-2.11/stronghold/strings/ComputingGcContent.scala
|
Scala
|
mit
| 2,045 |
/*
* Copyright (c) 2015 Robert Conrad - All Rights Reserved.
* Unauthorized copying of this file, via any medium is strictly prohibited.
* This file is proprietary and confidential.
* Last modified by rconrad, 1/3/15 7:09 PM
*/
package base.socket.message
import base.socket.json.JsonFormats
import io.netty.channel.Channel
import org.json4s.native.Serialization
import org.json4s.{ Formats, JValue }
/**
* Base message class, all messages sent throughout the system must inherit from this originally
*/
sealed trait Message
/**
* Base class for all messages that have a command identifier
*/
sealed abstract class CommandMessage extends Message {
val cmd: Command.Cmd
}
/**
* Base class for all messages that originate with the server
* communications server->client
*/
sealed abstract class ServerMessage extends CommandMessage { val cmd: ServerCommand[_ <: ServerMessage] }
abstract class UserServerMessage extends ServerMessage { val cmd: UserServerCommand[_ <: UserServerMessage] }
/**
* Base class for all messages that originate with the client
* communications client->server
*/
sealed abstract class ClientMessage extends CommandMessage { val cmd: ClientCommand[_ <: ClientMessage] }
abstract class UserClientMessage extends ClientMessage { val cmd: UserClientCommand[_ <: UserClientMessage] }
/**
* Base class for all server->server control messages
* communications server->server
*/
sealed abstract class ControlMessage extends CommandMessage { val cmd: ControlCommand[_ <: ControlMessage] }
abstract class TestControlMessage extends ControlMessage { val cmd: TestControlCommand[_ <: TestControlMessage] }
/**
* Base wrapper for all messages that originate with the client,
* associating them with the client's SessionContext
*/
abstract class ActionMessage extends Message {
val channel: Channel
val msg: ClientMessage
}
/**
* Messages that interact with the user session
*/
trait UserSessionMessage extends Message
/**
* Control commands for rooms
*/
trait RoomControlMessage extends Message
/**
* Some messages require custom formats which are defined on their companion objects
*/
trait Formatted {
implicit val formats: Formats
}
trait Extractable[T] extends Formatted {
def extract(json: JValue): T
// unfortunately we can't do the extract here since it needs a manifest and 'T' doesn't have one
}
trait Writable extends Formatted {
def write(msg: CommandMessage) = {
Serialization.write(msg)
}
}
object Message {
implicit val formats = JsonFormats.defaultWithCommands
def write(msg: CommandMessage) = msg match {
case msg: JsonServerMessage => msg.write()
case _ => Serialization.write(msg)
}
}
|
robconrad/base-api
|
project-socket/src/main/scala/base/socket/message/Message.scala
|
Scala
|
mit
| 2,715 |
package org.jetbrains.plugins.scala.failed.typeInference
import org.jetbrains.plugins.scala.PerfCycleTests
import org.jetbrains.plugins.scala.lang.typeInference.TypeInferenceTestBase
import org.junit.experimental.categories.Category
/**
* Created by Anton Yalyshev on 17/05/16.
*/
@Category(Array(classOf[PerfCycleTests]))
class CaseClassTypeInferenceTest extends TypeInferenceTestBase {
override protected def shouldPass: Boolean = false
def testSCL10292(): Unit = {
doTest(
s"""
|case class Foo(a: Int)
|Foo.getClass.getMethods.find(${START}x => x.getName == "apply"$END)
|//(Nothing) => Boolean
""".stripMargin)
}
def testSCL11159a(): Unit = {
doTest(
s"""import java.util.concurrent.atomic.AtomicReference
|
|object UnaryOps {
|
| case class Test(value: Int = 0)
| val atomic: AtomicReference[Test] = new AtomicReference(Test())
| atomic.getAndUpdate(${START}(t: Test) => t.copy(value = 2)$END)
|}
|//UnaryOperator[UnaryOps.Test]
""".stripMargin)
}
def testSCL11159b(): Unit = {
doTest(
s"""import java.util.concurrent.atomic.AtomicReference
|
|object UnaryOps {
|
| case class Test(value: Int = 0)
| val atomic: AtomicReference[Test] = new AtomicReference(Test())
| atomic.getAndUpdate(${START}_.copy(value = 1)$END)
|}
|//UnaryOperator[UnaryOps.Test]
""".stripMargin)
}
}
|
jastice/intellij-scala
|
scala/scala-impl/test/org/jetbrains/plugins/scala/failed/typeInference/CaseClassTypeInferenceTest.scala
|
Scala
|
apache-2.0
| 1,528 |
package com.twitter.finatra.http.filters
import com.twitter.finagle.Service
import com.twitter.finagle.httpx.{Request, Response}
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finatra.http.internal.exceptions.ExceptionManager
import com.twitter.util.Memoize
import javax.inject.{Inject, Singleton}
@Singleton
@deprecated("Use ExceptionMapperFilter with com.twitter.finatra.httpx.filters.StatsFilter[Request]", "")
class ExceptionBarrierFilter @Inject()(
statsReceiver: StatsReceiver,
exceptionManager: ExceptionManager)
extends ExceptionMappingFilter[Request](exceptionManager) {
private val responseCodeStatsReceiver = statsReceiver.scope("server/response/status")
/* Public */
override def apply(request: Request, service: Service[Request, Response]) = {
super.apply(request, service) onSuccess { response =>
statusCodeCounter(response.statusCode).incr()
}
}
/* Private */
private val statusCodeCounter = Memoize { statusCode: Int =>
responseCodeStatsReceiver.counter(statusCode.toString)
}
}
|
deanh/finatra
|
http/src/main/scala/com/twitter/finatra/http/filters/ExceptionBarrierFilter.scala
|
Scala
|
apache-2.0
| 1,059 |
package org.sisioh.aws4s.eb.model
import com.amazonaws.services.elasticbeanstalk.model.RestartAppServerRequest
import org.sisioh.aws4s.PimpedType
object RestartAppServerRequestFactory {
def create(): RestartAppServerRequest = new RestartAppServerRequest()
}
class RichRestartAppServerRequest(val underlying: RestartAppServerRequest)
extends AnyVal with PimpedType[RestartAppServerRequest] {
def environmentIdOpt: Option[String] = Option(underlying.getEnvironmentId)
def environmentIdOpt_=(value: Option[String]): Unit = underlying.setEnvironmentId(value.orNull)
def withEnvironmentIdOpt(value: Option[String]): RestartAppServerRequest =
underlying.withEnvironmentId(value.orNull)
// ---
def environmentNameOpt: Option[String] = Option(underlying.getEnvironmentName)
def environmentNameOpt_=(value: Option[String]): Unit =
underlying.setEnvironmentName(value.orNull)
def withEnvironmentNameOpt(value: Option[String]): RestartAppServerRequest =
underlying.withEnvironmentName(value.orNull)
}
|
everpeace/aws4s
|
aws4s-eb/src/main/scala/org/sisioh/aws4s/eb/model/RichRestartAppServerRequest.scala
|
Scala
|
mit
| 1,035 |
package ch.descabato.core.model
import java.io.{File, IOException}
import java.math.{BigDecimal => JBigDecimal}
import java.nio.file.attribute._
import java.nio.file.{Files, LinkOption, Path}
import java.security.Principal
import java.util
import java.util.regex.Pattern
import ch.descabato.core.util.JacksonAnnotations.JsonIgnore
import ch.descabato.core.util._
import ch.descabato.remote.RemoteOptions
import ch.descabato.utils._
import ch.descabato.{CompressionMode, HashAlgorithm}
import org.bouncycastle.crypto.Digest
import scala.collection.JavaConverters._
class FileAttributes extends util.HashMap[String, Any] with Utils {
def lastModifiedTime = get(FileAttributes.lastModified)
def hasBeenModified(file: File): Boolean = {
val fromMap = lastModifiedTime
val lastMod: Long = {
Files.getAttribute(file.toPath(), FileAttributes.lastModified, LinkOption.NOFOLLOW_LINKS) match {
case x: FileTime => x.toMillis()
case _ => l.warn("Did not find filetime for " + file); 0L
}
}
val out = Option(fromMap) match {
case Some(l: Long) => lastMod != l
case Some(l: Int) => lastMod != l.toLong
case Some(ft: String) =>
val l = ft.toLong; lastMod != l
case None => true
case x => println("Was modified: " + lastMod + " vs " + x + " " + x.getClass + " "); true
}
out
}
}
case class MetadataOptions(saveMetadata: Boolean = false)
object FileAttributes extends Utils {
val posixGroup = "posix:group"
val posixPermissions = "posix:permissions"
val owner = "owner"
val lastModified = "lastModifiedTime"
val creationTime = "creationTime"
private def readAttributes[T <: BasicFileAttributes](path: Path)(implicit m: Manifest[T]) =
Files.readAttributes[T](path, m.runtimeClass.asInstanceOf[Class[T]], LinkOption.NOFOLLOW_LINKS)
def apply(path: Path): FileAttributes = {
val out = new FileAttributes()
def add(attr: String, o: Any) = o match {
case ft: FileTime => out.put(attr, ft.toMillis())
case x: Boolean => out.put(attr, x)
case x: String => out.put(attr, x)
case p: Principal => out.put(attr, p.getName())
}
readBasicAttributes(path, add _)
readPosixAttributes(add _, path)
readDosAttributes(add _, path)
out
}
private def readBasicAttributes(path: Path, add: (String, Any) => Any) = {
val attrs = readAttributes[BasicFileAttributes](path)
val keys = List(lastModified, creationTime)
keys.foreach { k =>
val m = attrs.getClass().getMethod(k)
m.setAccessible(true)
add(k, m.invoke(attrs))
}
}
def readPosixAttributes(add: (String, Object) => Any, path: Path) = {
try {
val posix = readAttributes[PosixFileAttributes](path)
if (posix != null) {
add(owner, posix.owner())
add(posixGroup, posix.group())
add(posixPermissions, PosixFilePermissions.toString(posix.permissions()))
}
} catch {
case _: UnsupportedOperationException => // ignore, not a posix system
}
}
private def readDosAttributes(add: (String, Any) => Any, path: Path) = {
try {
val dos = readAttributes[DosFileAttributes](path)
if (dos.isReadOnly) {
add("dos:readonly", true)
}
if (dos.isHidden) {
add("dos:hidden", true)
}
// if (dos.isArchive) {
// add("dos:archive", true)
// }
// if (dos.isSystem) {
// add("dos:system", true)
// }
} catch {
case _: UnsupportedOperationException => // ignore, not a dos system
}
}
def restore(attrs: FileAttributes, file: File) {
val path = file.toPath()
def lookupService = file.toPath().getFileSystem().getUserPrincipalLookupService()
lazy val posix = Files.getFileAttributeView(path, classOf[PosixFileAttributeView])
for ((name, o) <- attrs.asScala) {
setAttribute(file, path, lookupService, posix, name, o)
}
val value = attrs.lastModifiedTime
if (value != null) {
setAttribute(file, path, lookupService, posix, FileAttributes.lastModified, value)
}
}
private def setAttribute(file: File, path: Path, lookupService: UserPrincipalLookupService, posix: => PosixFileAttributeView, name: String, o: Any) = {
try {
val toSet: Option[Any] = (name, o) match {
case (key, time) if key.endsWith("Time") => Some(FileTime.fromMillis(o.toString.toLong))
case (s, group) if s == posixGroup =>
val g = lookupService.lookupPrincipalByGroupName(group.toString)
posix.setGroup(g)
None
case (s, group) if s == owner =>
val g = lookupService.lookupPrincipalByName(group.toString)
posix.setOwner(g)
None
case (s, perms) if s == posixPermissions =>
val p = PosixFilePermissions.fromString(perms.toString)
posix.setPermissions(p)
None
case (key, value) if key.startsWith("dos:") =>
Some(value)
}
toSet.foreach { s =>
Files.setAttribute(file.toPath(), name, s, LinkOption.NOFOLLOW_LINKS)
}
} catch {
case e: IOException if Files.isSymbolicLink(path) => // Ignore, seems normal on linux
case e: IOException => l.warn(s"Failed to restore attribute $name for file $file")
}
}
}
trait UpdatePart {
def size: Long
def path: String
@JsonIgnore def name: String = pathParts.last
@JsonIgnore def pathParts: Array[String] = path.split("[\\\\/]")
}
case class FileDeleted(path: String) extends UpdatePart {
def size = 0L
}
object FileDeleted {
def apply(x: BackupPart): FileDeleted = x match {
case FolderDescription(path, _) => new FileDeleted(path)
case FileDescription(path, _, _) => new FileDeleted(path)
}
}
case class FileDescription(path: String, size: Long, attrs: FileAttributes) extends BackupPart {
def this(file: File) = {
this(file.getAbsolutePath, file.length(), FileAttributes(file.toPath))
}
@JsonIgnore def isFolder = false
}
object FolderDescription {
def apply(dir: File): FolderDescription = {
if (!dir.isDirectory) throw new IllegalArgumentException(s"Must be a directory, $dir is a file")
FolderDescription(dir.toString(), FileAttributes(dir.toPath))
}
}
case class FolderDescription(path: String, attrs: FileAttributes) extends BackupPart {
@JsonIgnore val size = 0L
@JsonIgnore def isFolder = true
}
trait BackupPart extends UpdatePart {
@JsonIgnore def isFolder: Boolean
def attrs: FileAttributes
def size: Long
def applyAttrsTo(f: File) {
FileAttributes.restore(attrs, f)
}
}
case class SymbolicLink(path: String, linkTarget: String, attrs: FileAttributes) extends BackupPart {
def size = 0L
def isFolder = false
}
// Domain classes
case class Size(bytes: Long) {
override def toString: String = Utils.readableFileSize(bytes)
}
object Size {
val knownTypes: Set[Class[_]] = Set(classOf[Size])
val patt: Pattern = Pattern.compile("([\\d.]+)[\\s]*([GMK]?B)", Pattern.CASE_INSENSITIVE)
def apply(size: String): Size = {
var out: Long = -1
val matcher = patt.matcher(size)
val map = List(("GB", 3), ("MB", 2), ("KB", 1), ("B", 0)).toMap
if (matcher.find()) {
val number = matcher.group(1)
val pow = map.get(matcher.group(2).toUpperCase()).get
var bytes = new BigDecimal(new JBigDecimal(number))
bytes = bytes.*(BigDecimal.valueOf(1024).pow(pow))
out = bytes.longValue()
}
new Size(out)
}
}
|
Stivo/DeScaBaTo
|
core/src/main/scala/ch/descabato/core/model/Models.scala
|
Scala
|
gpl-3.0
| 7,508 |
/**
* Copyright (c) 2010, Stefan Langer
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Element34 nor the names of its contributors may
* be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS ROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* Base class for classpathentry filters.
*/
abstract class Filter(pattern: String) {
def mkString = "=\\"" + pattern + "\\""
}
/**
* Defines a include pattern for a classpath entry. <br />
* e.g.: <code><classpathentry kind="src" path="src/" including="*.scala" /></code>
*/
case class IncludeFilter(pattern: String) extends Filter(pattern) {
override def mkString = " including" + super.mkString
}
/**
* Defines a exclude pattern for a classpath entry. <br />
* e.g.: <code><classpathentry kind="src" path="src/" excluding="*.scala" /></code>
*/
case class ExcludeFilter(pattern: String) extends Filter(pattern) {
override def mkString = " excluding" + super.mkString
}
/**
* Combines <code>IncludeFilter</code> and <code>ExcludeFilter</code> for use in a <code>ClasspathEntry</code>
*/
case class FilterChain(inc: Option[IncludeFilter], ex: Option[ExcludeFilter]) {
/**
* Generates the actual markup for a classpathentry
*/
def mkString: String = {
def getStrOrEmpty[A <: Filter](opt: Option[A]) = {
opt.map(_.mkString).getOrElse("")
}
getStrOrEmpty[IncludeFilter](inc) + getStrOrEmpty[ExcludeFilter](ex)
}
}
/**
* Companion object for <code>FilterChain</code> to provide convenience method for their creation.
*/
object FilterChain {
def apply(inc: IncludeFilter, ex: ExcludeFilter) = new FilterChain(Some(inc), Some(ex))
def apply(inc: IncludeFilter) = new FilterChain(Some(inc), None)
def apply(ex: ExcludeFilter) = new FilterChain(None, Some(ex))
}
/**
* Special type designating an empty <code>FilterChain</code>
*/
object EmptyFilter extends FilterChain(None, None)
|
emarsys/dyson
|
project/build/Filter.scala
|
Scala
|
gpl-3.0
| 3,220 |
package sGeneticAlgorithm.ga
import akka.actor._
object GA {
case class Unique[T]( val any: Any, val uuid: String = java.util.UUID.randomUUID.toString) extends Serializable
case object Start
type Genome[T, I <: Iterable[T]] = I
type Population[T, I <: Iterable[T]] = Vector[Genome[T, I]]
type Species[T, I <: Iterable[T]] = Vector[Population[T, I]]
type EvaluatedSpecies[T, I <: Iterable[T], F] = Vector[EvaluatedPopulation[T, I, F]]
type EvaluatedPopulation[T, I <: Iterable[T], F] = Vector[EvaluatedGenome[T, I, F]]
type Ecosystem = Vector[Species[_, _ <: Iterable[_]]]
type EvaluatedEcosystem = Vector[EvaluatedSpecies[_, _ <: Iterable[_], _]]
class EvaluatedGenome[T, I <: Iterable[T], F: Ordering](val genome: Genome[T, I], val fitness: F)
case class PopArchive[T, I <: Iterable[T], F: Ordering](pop: Population[T, I], archiveOption: Option[EvaluatedPopulation[T,I,F]] = None)
case class EvaluatedPopArchive[T, I <: Iterable[T],F: Ordering](evaluatedPop: EvaluatedPopulation[T, I, F], archiveOption: Option[EvaluatedPopulation[T,I,F]])
case class SpeciesArchive[T, I <: Iterable[T], F: Ordering](popArchives: Vector[PopArchive[T, I, F]]) {
def populations: Species[T, I] = {
popArchives.map(_.pop)
}
def archives: Option[Vector[EvaluatedPopulation[T, I, F]]] = {
val options: Vector[Option[EvaluatedPopulation[T, I, F]]] = popArchives.map(_.archiveOption)
options.isEmpty match {
case true => None
case false => Some(options.map(_.get))
}
}
}
case class EvaluatedSpeciesArchive[T,I <: Iterable[T], F: Ordering](evaluatedPopArchives: Vector[EvaluatedPopArchive[T, I, F]]) {
def this(evSpecies: EvaluatedSpecies[T, I, F], archives: Option[Vector[EvaluatedPopulation[T, I, F]]]) = {
this((for (i <- 0 to evSpecies.size - 1) yield EvaluatedPopArchive(evSpecies(i), archives.map(_(i)))).toVector)
}
def populations: EvaluatedSpecies[T, I, F] = {
evaluatedPopArchives.map(_.evaluatedPop)
}
def archives: Option[Vector[EvaluatedPopulation[T, I, F]]] = {
val options: Vector[Option[EvaluatedPopulation[T, I, F]]] = evaluatedPopArchives.map(_.archiveOption)
options.isEmpty match {
case true => None
case false => Some(options.map(_.get))
}
}
}
case class EcosystemArchive(speciesArchives: Vector[SpeciesArchive[_, _ <: Iterable[_], _]])
case class EvaluatedEcosysteemArchive(evaluatedSpeciesArchives: Vector[EvaluatedSpeciesArchive[_, _ <: Iterable[_], _]])
trait GenomeInitializer[T, I <: Iterable[T]] {
def initialize: Population[T, I]
}
trait Mutator[T, I <: Iterable[T]] {
def mutate(genome: Genome[T, I]): Genome[T, I]
}
trait Crossover[T, I <: Iterable[T]] {
def crossover(mom: Genome[T, I], dad: Genome[T, I]): Population[T, I]
}
abstract class Evaluator[T, I <: Iterable[T], F: Ordering] {
def evaluate(speciesVector: Vector[Species[T, I]]): Vector[EvaluatedSpecies[T, I, F]]
}
abstract class Selector[T, I <: Iterable[T], F: Ordering] extends MultiSelector[T, I, F]{
def selectFrom(evaluated: EvaluatedPopulation[T, I, F], archiveOption: Option[EvaluatedPopulation[T, I, F]]): EvaluatedGenome[T, I, F]
private def selectNext(n: Int, evaluated: EvaluatedPopulation[T, I, F], selections: EvaluatedPopulation[T, I, F], archiveOption: Option[EvaluatedPopulation[T, I, F]]): EvaluatedPopulation[T, I, F] = {
selections.size == n match {
case true => selections
case false =>
val selected = selectFrom(evaluated, archiveOption)
selectNext(n, evaluated.diff(Vector(selected)), selections.+:(selected), archiveOption)
}
}
def selectMultiFrom(evaluated: EvaluatedPopulation[T, I, F], archiveOption: Option[EvaluatedPopulation[T, I, F]], numToSelect: Int): EvaluatedPopulation[T, I, F] = {
selectNext(numToSelect, evaluated, Vector[EvaluatedGenome[T, I, F]](), archiveOption)
}
}
abstract class MultiSelector[T, I <: Iterable[T], F: Ordering] {
def selectMultiFrom(evaluated: EvaluatedPopulation[T, I, F], archiveOption: Option[EvaluatedPopulation[T, I, F]], n: Int): EvaluatedPopulation[T, I, F]
}
abstract class Migrater[T, I <: Iterable[T], F: Ordering] {
def migrate(evaluatedSpecies: EvaluatedSpecies[T, I, F]): EvaluatedSpecies[T, I, F]
}
abstract class Evolver[T, I <: Iterable[T], F: Ordering] {
def evolve(evaluated: EvaluatedPopulation[T, I, F], archiveOption: Option[EvaluatedPopulation[T, I, F]]): PopArchive[T, I, F]
}
abstract class ArchiveUpdater[T, I <: Iterable[T], F: Ordering] {
def updateArchive(oldArchive: EvaluatedPopulation[T, I, F], newPopulation: EvaluatedPopulation[T, I, F]): EvaluatedPopulation[T, I, F]
}
abstract class StopCondition[T, I <: Iterable[T], F: Ordering] {
def checkStop(thisGeneration: Vector[SpeciesArchive[T, I, F]], nextGeneration: Vector[SpeciesArchive[T, I, F]]): Boolean
}
}
import GA._
class GAException(message: String) extends Exception(message)
class GA[T, I <: Iterable[T], F: Ordering](val evaluatorActor: ActorRef,
val dataActor: ActorRef,
val migrater: Migrater[T, I, F],
val evolver: Evolver[T, I, F],
firstGeneration: SpeciesArchive[T, I, F],
numGenerations: Int) extends Actor {
var generation: Int = 0
def migrate(evaluatedSpecies: EvaluatedSpecies[T, I, F]) = migrater.migrate(evaluatedSpecies)
def evolvePopulation(evaluated: EvaluatedPopulation[T, I, F], archiveOption: Option[EvaluatedPopulation[T, I, F]]): PopArchive[T, I, F] = {
evolver.evolve(evaluated, archiveOption)
}
def receive = {
case esa: EvaluatedSpeciesArchive[T, I, F] =>
// log the species evaluation to the data actor
dataActor ! esa
// continue to evolve if we have not reached the prescribed number of generations
generation = generation + 1
if (generation < numGenerations) {
val nextGeneration: SpeciesArchive[T, I, F] = evolveGeneration(esa)
evaluatorActor ! nextGeneration
}
case Start =>
evaluatorActor ! firstGeneration
}
def evolveGeneration(evaluatedSpeciesArchive: EvaluatedSpeciesArchive[T,I,F]): SpeciesArchive[T, I, F] = {
val migratedSpecies: EvaluatedSpecies[T, I, F] = migrate(evaluatedSpeciesArchive.populations)
val archiveVectorOption = evaluatedSpeciesArchive.archives
val speciesArchive: SpeciesArchive[T,I,F] = SpeciesArchive((for (j <- 0 to migratedSpecies.size - 1) yield {
val evaluatedPopulationn: EvaluatedPopulation[T,I,F] = migratedSpecies(j)
val archive: Option[EvaluatedPopulation[T,I,F]] = archiveVectorOption.map {av => av(j)}
evolvePopulation(evaluatedPopulationn, archive)
}).toVector)
speciesArchive
}
}
|
rkewley/sGeneticAlgorithm
|
src/main/scala/sGeneticAlgorithm/ga/GA.scala
|
Scala
|
apache-2.0
| 6,964 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import java.util.UUID
import scala.collection.JavaConverters._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
import org.apache.spark.internal.io.FileCommitProtocol
import org.apache.spark.sql.{AnalysisException, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, DateTimeUtils}
import org.apache.spark.sql.connector.write.{BatchWrite, LogicalWriteInfo, Write}
import org.apache.spark.sql.execution.datasources.{BasicWriteJobStatsTracker, DataSource, OutputWriterFactory, WriteJobDescription}
import org.apache.spark.sql.execution.metric.SQLMetric
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{DataType, StructType}
import org.apache.spark.sql.util.SchemaUtils
import org.apache.spark.util.SerializableConfiguration
trait FileWrite extends Write {
def paths: Seq[String]
def formatName: String
def supportsDataType: DataType => Boolean
def info: LogicalWriteInfo
private val schema = info.schema()
private val queryId = info.queryId()
private val options = info.options()
override def description(): String = formatName
override def toBatch: BatchWrite = {
val sparkSession = SparkSession.active
validateInputs(sparkSession.sessionState.conf.caseSensitiveAnalysis)
val path = new Path(paths.head)
val caseSensitiveMap = options.asCaseSensitiveMap.asScala.toMap
// Hadoop Configurations are case sensitive.
val hadoopConf = sparkSession.sessionState.newHadoopConfWithOptions(caseSensitiveMap)
val job = getJobInstance(hadoopConf, path)
val committer = FileCommitProtocol.instantiate(
sparkSession.sessionState.conf.fileCommitProtocolClass,
jobId = java.util.UUID.randomUUID().toString,
outputPath = paths.head)
lazy val description =
createWriteJobDescription(sparkSession, hadoopConf, job, paths.head, options.asScala.toMap)
committer.setupJob(job)
new FileBatchWrite(job, description, committer)
}
/**
* Prepares a write job and returns an [[OutputWriterFactory]]. Client side job preparation can
* be put here. For example, user defined output committer can be configured here
* by setting the output committer class in the conf of spark.sql.sources.outputCommitterClass.
*/
def prepareWrite(
sqlConf: SQLConf,
job: Job,
options: Map[String, String],
dataSchema: StructType): OutputWriterFactory
private def validateInputs(caseSensitiveAnalysis: Boolean): Unit = {
assert(schema != null, "Missing input data schema")
assert(queryId != null, "Missing query ID")
if (paths.length != 1) {
throw new IllegalArgumentException("Expected exactly one path to be specified, but " +
s"got: ${paths.mkString(", ")}")
}
val pathName = paths.head
SchemaUtils.checkColumnNameDuplication(schema.fields.map(_.name),
s"when inserting into $pathName", caseSensitiveAnalysis)
DataSource.validateSchema(schema)
schema.foreach { field =>
if (!supportsDataType(field.dataType)) {
throw new AnalysisException(
s"$formatName data source does not support ${field.dataType.catalogString} data type.")
}
}
}
private def getJobInstance(hadoopConf: Configuration, path: Path): Job = {
val job = Job.getInstance(hadoopConf)
job.setOutputKeyClass(classOf[Void])
job.setOutputValueClass(classOf[InternalRow])
FileOutputFormat.setOutputPath(job, path)
job
}
private def createWriteJobDescription(
sparkSession: SparkSession,
hadoopConf: Configuration,
job: Job,
pathName: String,
options: Map[String, String]): WriteJobDescription = {
val caseInsensitiveOptions = CaseInsensitiveMap(options)
// Note: prepareWrite has side effect. It sets "job".
val outputWriterFactory =
prepareWrite(sparkSession.sessionState.conf, job, caseInsensitiveOptions, schema)
val allColumns = schema.toAttributes
val metrics: Map[String, SQLMetric] = BasicWriteJobStatsTracker.metrics
val serializableHadoopConf = new SerializableConfiguration(hadoopConf)
val statsTracker = new BasicWriteJobStatsTracker(serializableHadoopConf, metrics)
// TODO: after partitioning is supported in V2:
// 1. filter out partition columns in `dataColumns`.
// 2. Don't use Seq.empty for `partitionColumns`.
new WriteJobDescription(
uuid = UUID.randomUUID().toString,
serializableHadoopConf = new SerializableConfiguration(job.getConfiguration),
outputWriterFactory = outputWriterFactory,
allColumns = allColumns,
dataColumns = allColumns,
partitionColumns = Seq.empty,
bucketIdExpression = None,
path = pathName,
customPartitionLocations = Map.empty,
maxRecordsPerFile = caseInsensitiveOptions.get("maxRecordsPerFile").map(_.toLong)
.getOrElse(sparkSession.sessionState.conf.maxRecordsPerFile),
timeZoneId = caseInsensitiveOptions.get(DateTimeUtils.TIMEZONE_OPTION)
.getOrElse(sparkSession.sessionState.conf.sessionLocalTimeZone),
statsTrackers = Seq(statsTracker)
)
}
}
|
maropu/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileWrite.scala
|
Scala
|
apache-2.0
| 6,144 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc
/** A class representing command line info for scalac */
class CompilerCommand(arguments: List[String], val settings: Settings) {
def this(arguments: List[String], error: String => Unit) = this(arguments, new Settings(error))
def this(arguments: List[String], settings: Settings, error: String => Unit) = this(arguments, settings withErrorFn error)
type Setting = Settings#Setting
private val processArgumentsResult =
if (shouldProcessArguments) processArguments
else (true, Nil)
def ok = processArgumentsResult._1
def files = processArgumentsResult._2
/** The name of the command. */
def cmdName = "scalac"
/** A descriptive alias for version and help messages. */
def cmdDesc = "compiler"
private def explainAdvanced = """
|-- Note --
|Boolean settings generally are false unless set: -Xdev -Xcheck-init:true -Xprompt:false
|Multi-valued settings are comma-separated: -Xlint:infer-any,unused,-missing-interpolator
|Phases are a list of names, ids, or ranges of ids: -Vprint:parser,typer,5-10 -Ylog:-4
|Use _ to enable all: -language:_ -Vprint:_
|
""".stripMargin.trim
def shortUsage = "Usage: %s <options> <source files>" format cmdName
/** Creates a help message for a subset of options based on cond */
def optionsMessage(cond: Setting => Boolean): String = {
val iswarning = cond(settings.warnUnused) // sordid check for if we're building -W warning help, to include lint and unused
val baseList = settings.visibleSettings.filter(cond).toList.sortBy(_.name)
val (deprecateds, theRest) = baseList.partition(_.isDeprecated)
def columnOneWidth(s: Setting): Int =
if (iswarning && (s == settings.lint || s == settings.warnUnused))
s.asInstanceOf[settings.MultiChoiceSetting[_]].choices.map(c => s"${s.name}:$c".length).max
else
s.helpSyntax.length
val width = baseList.map(columnOneWidth).max
val columnOneFormat = s"%-${width}s"
def format(s: String) = columnOneFormat.format(s)
def layout(c1: String, c2: String) = s"${format(c1)} ${c2}"
def helpStr(s: Setting) = {
val str = layout(s.helpSyntax, s.helpDescription)
val suffix = s.deprecationMessage match {
case Some(msg) => "\\n" + format("") + " deprecated: " + msg
case _ => ""
}
str + suffix
}
def appendDescriptions(sb: StringBuilder, msg: String, xs: List[Setting]): Unit =
if (!xs.isEmpty) {
val ss = xs.flatMap { s =>
if (iswarning && (s == settings.lint || s == settings.warnUnused)) {
val mcs = s.asInstanceOf[settings.MultiChoiceSetting[_]]
mcs.choices.map(c => s"${s.name}:$c").zipAll(mcs.descriptions, "", "").map {
case (c, d) => layout(c, d)
}
} else
List(helpStr(s))
}
sb.append(msg)
for (each <- ss) sb.append(" ").append(each).append("\\n")
}
val sb = new StringBuilder()
appendDescriptions(sb, "", theRest)
appendDescriptions(sb, "\\nDeprecated settings:\\n", deprecateds)
sb.toString
}
def createUsageMsg(label: String, explain: Boolean = true)(cond: Setting => Boolean): String = {
val explained = if (explain) s"\\n$explainAdvanced" else ""
s"$shortUsage\\n\\n$label options:\\n${optionsMessage(cond)}${explained}\\n"
}
/** Messages explaining usage and options */
def usageMsg = createUsageMsg("Standard", explain = false)(_.isStandard)
def vusageMsg = createUsageMsg("Verbose")(_.isVerbose)
def wusageMsg = createUsageMsg("Warnings")(_.isWarning)
def xusageMsg = createUsageMsg("Available advanced")(_.isAdvanced)
def yusageMsg = createUsageMsg("Available private")(_.isPrivate)
/** For info settings, compiler should just print a message and quit. */
def shouldStopWithInfo = settings.isInfo
def getInfoMessage(global: Global): String = {
import settings._
if (version) Properties.versionFor(cmdDesc)
else if (help) usageMsg + global.pluginOptionsHelp
else if (Vhelp) vusageMsg
else if (Whelp) wusageMsg
else if (Xhelp) xusageMsg
else if (Yhelp) yusageMsg
else if (showPlugins) global.pluginDescriptions
else if (showPhases) global.phaseDescriptions + (
if (settings.isDebug) "\\n" + global.phaseFlagDescriptions else ""
)
else if (genPhaseGraph.isSetByUser) {
val components = global.phaseNames // global.phaseDescriptors // one initializes
s"Phase graph of ${components.size} components output to ${genPhaseGraph.value}*.dot."
}
else allSettings.valuesIterator.filter(_.isHelping).map(_.help).mkString("\\n\\n")
}
/** Expands all arguments starting with @ to the contents of the file named like each argument. */
def expandArg(arg: String): List[String] = {
import java.nio.file.{Files, Paths}
import scala.jdk.CollectionConverters._
def stripComment(s: String) = s.takeWhile(_ != '#')
val file = Paths.get(arg stripPrefix "@")
if (!Files.exists(file))
throw new java.io.FileNotFoundException(s"argument file $file could not be found")
settings.splitParams(Files.readAllLines(file).asScala.map(stripComment).mkString(" "))
}
// override this if you don't want arguments processed here
def shouldProcessArguments: Boolean = true
def processArguments: (Boolean, List[String]) = {
// expand out @filename to the contents of that filename
val expandedArguments = arguments flatMap {
case x if x startsWith "@" => expandArg(x)
case x => List(x)
}
settings.processArguments(expandedArguments, processAll = true)
}
}
|
scala/scala
|
src/compiler/scala/tools/nsc/CompilerCommand.scala
|
Scala
|
apache-2.0
| 6,028 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fixtures
import play.api.libs.json.Json
import router.definition._
import uk.gov.hmrc.auth.core.ConfidenceLevel
object SelfAssessmentApiDefinitionFixture {
private val scopeKey = "test-scope"
private val scopeName = "test scope name"
private val scopeDescription = "test scope description"
private val confidenceLevel: ConfidenceLevel = ConfidenceLevel.L200
private val apiName = "test api name"
private val apiDescription = "test api description"
private val apiContext = "test api context"
private val accessType = "test type"
val apiVersion_1 = APIVersion(
version = "1.0",
status = APIStatus.ALPHA,
endpointsEnabled = true
)
val selfAssessmentApiDefinition = Definition(
scopes = Seq(
Scope(
key = scopeKey,
name = scopeName,
description = scopeDescription,
confidenceLevel
)
),
api = APIDefinition(
name = apiName,
description = apiDescription,
context = apiContext,
versions = Seq(apiVersion_1),
requiresTrust = None
)
)
val selfAssessmentApiDefinitionJson = {
Json.obj(
"scopes" -> Json.arr(Json.obj(
"key" -> scopeKey,
"name" -> scopeName,
"description" -> scopeDescription
)),
"api" -> Json.obj(
"name" -> apiName,
"description" -> apiDescription,
"context" -> apiContext,
"versions" -> Json.arr(Json.obj(
"version" -> "1.0",
"access" -> Json.obj(
"type" -> accessType),
"status" -> "ALPHA",
"endpointsEnabled" -> true
))
)
)
}
}
|
hmrc/self-assessment-api
|
test/fixtures/SelfAssessmentApiDefinitionFixture.scala
|
Scala
|
apache-2.0
| 2,238 |
/*
* Copyright 2013 Carnegie Mellon University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.cmu.lti.suim
import java.io.ByteArrayInputStream
import java.io.ByteArrayOutputStream
import java.io.DataInput
import java.io.DataOutput
import java.io.Externalizable
import java.io.ObjectInput
import java.io.ObjectOutput
import org.apache.hadoop.io.Writable
import org.apache.uima.cas.CAS
import org.apache.uima.cas.impl.Serialization
import org.apache.uima.fit.factory.JCasFactory
object SCAS {
def read(in: DataInput) = {
val scas = new SCAS();
scas.readFields(in);
scas
}
}
class SCAS(val cas: CAS) extends Externalizable with Writable {
def this() {
this(JCasFactory.createJCas().getCas())
}
override def readExternal(in: ObjectInput) {
readFields(in)
}
override def writeExternal(out: ObjectOutput) {
write(out)
}
def jcas = cas.getJCas()
override def write(out: DataOutput) {
val baos = new ByteArrayOutputStream();
Serialization.serializeWithCompression(cas, baos)
out.writeInt(baos.size)
out.write(baos.toByteArray)
}
override def readFields(in: DataInput) {
val size = in.readInt();
val bytes = new Array[Byte](size)
in.readFully(bytes);
val bais = new ByteArrayInputStream(bytes)
Serialization.deserializeCAS(cas, bais);
}
}
|
oaqa/suim
|
suim-scala/src/main/scala/edu/cmu/lti/suim/SCAS.scala
|
Scala
|
apache-2.0
| 1,880 |
/*
Copyright 2013, 2014 NICTA
This file is part of t3as (Text Analysis As A Service).
t3as is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
t3as is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with t3as. If not, see <http://www.gnu.org/licenses/>.
*/
package org.t3as.patClas.service
import scala.collection.JavaConversions.asScalaBuffer
import org.scalatest.{FlatSpec, Matchers}
import org.slf4j.LoggerFactory
import org.t3as.patClas.common.CPCUtil.ClassificationItem
import org.t3as.patClas.common.TreeNode
import org.t3as.patClas.common.db.CPCdb
import org.t3as.patClas.common.search.RAMIndex
import java.io.File
import org.t3as.patClas.common.search.Suggest
import org.apache.lucene.search.suggest.Lookup
import org.t3as.patClas.api.Suggestions
class TestPatClasService extends FlatSpec with Matchers {
val log = LoggerFactory.getLogger(getClass)
"CPCService" should "retrieve ancestorsAndSelf" in {
val l8 = TreeNode(ClassificationItem(None, -1, false, true, false, "2013-01-01", 8, "B29C31/002", "title8", "notes8"), Seq())
val l7 = TreeNode(ClassificationItem(None, -1, false, true, false, "2013-01-01", 7, "B29C31/00", "title7", "notes7"), Seq(l8))
val l6 = TreeNode(ClassificationItem(None, -1, false, true, false, "2013-01-01", 6, "B29C31/00", "title6", "notes6"), Seq(l7))
val l5 = TreeNode(ClassificationItem(None, -1, false, false, false, "2013-01-01", 5, "B29C", "title5", "notes5"), Seq(l6))
// initialize singleton used by CPCService
PatClasService.testInit(new PatClasService {
override def indexDir(prop: String) = RAMIndex.makeTestIndex
override def mkCombinedSuggest(indexDir: File) = (key: String, num: Int) => Suggestions(Nil, Nil)
} )
val svc = PatClasService.service
import svc.{cpcDb, database}
import cpcDb.profile.simple._
val srv = new CPCService
{
val hits = srv.search("Symbol:B29C3*")
log.debug(s"hits = $hits")
hits.size should be(3) // matches 2 x "B29C31/00", 1 x "B29C31/002", 0 x "B29C"
}
database withSession { implicit session =>
// Create the table(s), indices etc.
cpcDb.cpcs.ddl.create
// an item for top level ClassificationItems to refer to as their "parent"; forceInsert overrides the autoInc id, may not work on all databases
val id = cpcDb.cpcs forceInsert ClassificationItem(Some(CPCdb.topLevel), CPCdb.topLevel, false, false, false, "2013-01-01", 0, "parent", "universal ancestor", "no notes")
cpcDb.insertTree(l5, CPCdb.topLevel)
srv.ancestorsAndSelf("B29C31/00", "xml") zip Seq(l5, l6, l7) foreach {
case (desc, n) => {
desc.symbol should be(n.value.symbol)
desc.level should be(n.value.level)
desc.classTitle should be(n.value.classTitle)
desc.notesAndWarnings should be(n.value.notesAndWarnings)
}
}
}
PatClasService.close
}
}
|
NICTA/t3as-pat-clas
|
pat-clas-service/src/test/scala/org/t3as/patClas/service/TestPatClasService.scala
|
Scala
|
gpl-3.0
| 3,392 |
package shapeless
import scala.collection.immutable.ListMap
import scala.reflect.macros.whitebox
trait OpenImplicitMacros {
val c: whitebox.Context
import c.universe._
def openImplicitTpe: Option[Type] =
c.openImplicits.headOption.map(_.pt)
def openImplicitTpeParam: Option[Type] =
openImplicitTpe.map {
case TypeRef(_, _, List(tpe)) =>
tpe.dealias
case other =>
c.abort(c.enclosingPosition, s"Bad materialization: $other")
}
def secondOpenImplicitTpe: Option[Type] =
c.openImplicits match {
case (List(_, second, _ @ _*)) =>
Some(second.pt)
case _ => None
}
}
class LazyMacros(val c: whitebox.Context) extends CaseClassMacros with OpenImplicitMacros {
import c.universe._
import c.internal._
import decorators._
def mkLazyImpl[I](implicit iTag: WeakTypeTag[I]): Tree =
mkImpl[I](
(tree, actualType) => q"_root_.shapeless.Lazy.apply[$actualType]($tree)",
q"null.asInstanceOf[_root_.shapeless.Lazy[_root_.scala.Nothing]]"
)
def mkStrictImpl[I](implicit iTag: WeakTypeTag[I]): Tree =
mkImpl[I](
(tree, actualType) => q"_root_.shapeless.Strict.apply[$actualType]($tree)",
q"null.asInstanceOf[_root_.shapeless.Strict[_root_.scala.Nothing]]"
)
def mkImpl[I](mkInst: (Tree, Type) => Tree, nullInst: => Tree)(implicit iTag: WeakTypeTag[I]): Tree = {
openImplicitTpeParam match {
case Some(tpe) => LazyMacros.deriveInstance(this)(tpe, mkInst)
case None =>
val tpe = iTag.tpe.dealias
if (tpe.typeSymbol.isParameter)
nullInst
else
LazyMacros.deriveInstance(this)(tpe, mkInst)
}
}
def setAnnotation(msg: String): Unit = {
val tree0 =
c.typecheck(
q"""
new _root_.scala.annotation.implicitNotFound("dummy")
""",
silent = false
)
class SubstMessage extends Transformer {
val global = c.universe.asInstanceOf[scala.tools.nsc.Global]
override def transform(tree: Tree): Tree = {
super.transform {
tree match {
case Literal(Constant("dummy")) => Literal(Constant(msg))
case t => t
}
}
}
}
val tree = new SubstMessage().transform(tree0)
symbolOf[Lazy[Any]].setAnnotations(Annotation(tree))
}
def resetAnnotation: Unit =
setAnnotation("could not find Lazy implicit value of type ${T}")
trait LazyDefinitions {
case class Instance(
instTpe: Type,
name: TermName,
symbol: Symbol,
inst: Option[Tree],
actualTpe: Type,
dependsOn: List[Type]
) {
def ident = Ident(symbol)
}
object Instance {
def apply(instTpe: Type) = {
val nme = TermName(c.freshName("inst"))
val sym = c.internal.setInfo(c.internal.newTermSymbol(NoSymbol, nme), instTpe)
new Instance(instTpe, nme, sym, None, instTpe, Nil)
}
}
class TypeWrapper(val tpe: Type) {
override def equals(other: Any): Boolean =
other match {
case TypeWrapper(tpe0) => tpe =:= tpe0
case _ => false
}
override def toString = tpe.toString
}
object TypeWrapper {
def apply(tpe: Type) = new TypeWrapper(tpe)
def unapply(tw: TypeWrapper): Option[Type] = Some(tw.tpe)
}
}
class DerivationContext extends LazyDefinitions {
object State {
val empty = State("", ListMap.empty, Nil, Nil)
private var current = Option.empty[State]
def resolveInstance(state: State)(tpe: Type): Option[(State, Tree)] = {
val former = State.current
State.current = Some(state)
val (state0, tree) =
try {
val tree = c.inferImplicitValue(tpe, silent = true)
if(tree.isEmpty) {
tpe.typeSymbol.annotations.
find(_.tree.tpe =:= typeOf[_root_.scala.annotation.implicitNotFound]).foreach { _ =>
setAnnotation(implicitNotFoundMessage(c)(tpe))
}
}
(State.current.get, tree)
} finally {
State.current = former
}
if (tree == EmptyTree) None
else Some((state0, tree))
}
def deriveInstance(instTpe0: Type, root: Boolean, mkInst: (Tree, Type) => Tree): Tree = {
if (root) {
assert(current.isEmpty)
val open = c.openImplicits
val name = if (open.length > 1) open(1).sym.name.toTermName.toString else "lazy"
current = Some(empty.copy(name = "anon$"+name))
}
derive(current.get)(instTpe0) match {
case Right((state, inst)) =>
val (tree, actualType) = if (root) mkInstances(state)(instTpe0) else (inst.ident, inst.actualTpe)
current = if (root) None else Some(state)
mkInst(tree, actualType)
case Left(err) =>
abort(err)
}
}
}
case class State(
name: String,
dict: ListMap[TypeWrapper, Instance],
open: List[Instance],
/** Types whose derivation must fail no matter what */
prevent: List[TypeWrapper]
) {
def addDependency(tpe: Type): State = {
import scala.::
val open0 = open match {
case Nil => Nil
case h :: t => h.copy(dependsOn = if (h.instTpe =:= tpe || h.dependsOn.exists(_ =:= tpe)) h.dependsOn else tpe :: h.dependsOn) :: t
}
copy(open = open0)
}
private def update(inst: Instance): State =
copy(dict = dict.updated(TypeWrapper(inst.instTpe), inst))
def openInst(tpe: Type): (State, Instance) = {
val inst = Instance(tpe)
val state0 = addDependency(tpe)
(state0.copy(open = inst :: state0.open).update(inst), inst)
}
def closeInst(tpe: Type, tree: Tree, actualTpe: Type): (State, Instance) = {
assert(open.nonEmpty)
assert(open.head.instTpe =:= tpe)
val instance = open.head
val sym = c.internal.setInfo(instance.symbol, actualTpe)
val instance0 = instance.copy(inst = Some(tree), actualTpe = actualTpe, symbol = sym)
(copy(open = open.tail).update(instance0), instance0)
}
def lookup(instTpe: Type): Either[State, (State, Instance)] =
dict.get(TypeWrapper(instTpe)) match {
case Some(i) => Right((addDependency(instTpe), i))
case None => Left(openInst(instTpe)._1)
}
def dependsOn(tpe: Type): List[Instance] = {
import scala.::
def helper(tpes: List[List[Type]], acc: List[Instance]): List[Instance] =
tpes match {
case Nil => acc
case Nil :: t =>
helper(t, acc)
case (h :: t0) :: t =>
if (acc.exists(_.instTpe =:= h))
helper(t0 :: t, acc)
else {
val inst = dict(TypeWrapper(h))
helper(inst.dependsOn :: t0 :: t, inst :: acc)
}
}
helper(List(List(tpe)), Nil)
}
}
def stripRefinements(tpe: Type): Option[Type] =
tpe match {
case RefinedType(parents, decls) => Some(parents.head)
case _ => None
}
def resolve(state: State)(inst: Instance): Option[(State, Instance)] =
resolve0(state)(inst.instTpe)
.filter{case (_, tree, _) => !tree.equalsStructure(inst.ident) }
.map {case (state0, extInst, actualTpe) =>
state0.closeInst(inst.instTpe, extInst, actualTpe)
}
def resolve0(state: State)(tpe: Type): Option[(State, Tree, Type)] = {
val extInstOpt =
State.resolveInstance(state)(tpe)
.orElse(
stripRefinements(tpe).flatMap(State.resolveInstance(state))
)
extInstOpt.map {case (state0, extInst) =>
(state0, extInst, extInst.tpe.finalResultType)
}
}
def derive(state: State)(tpe: Type): Either[String, (State, Instance)] =
state.lookup(tpe).swap.flatMap { state0 =>
val inst = state0.dict(TypeWrapper(tpe))
resolve(state0)(inst).toLeft(s"Unable to derive $tpe")
}.swap
// Workaround for https://issues.scala-lang.org/browse/SI-5465
class StripUnApplyNodes extends Transformer {
val global = c.universe.asInstanceOf[scala.tools.nsc.Global]
import global.nme
override def transform(tree: Tree): Tree = {
super.transform {
tree match {
case UnApply(Apply(Select(qual, nme.unapply | nme.unapplySeq), List(Ident(nme.SELECTOR_DUMMY))), args) =>
Apply(transform(qual), transformTrees(args))
case UnApply(Apply(TypeApply(Select(qual, nme.unapply | nme.unapplySeq), _), List(Ident(nme.SELECTOR_DUMMY))), args) =>
Apply(transform(qual), transformTrees(args))
case t => t
}
}
}
}
def mkInstances(state: State)(primaryTpe: Type): (Tree, Type) = {
val instances = state.dict.values.toList
val (from, to) = instances.map { d => (d.symbol, NoSymbol) }.unzip
def clean(inst: Tree) = {
val cleanInst = c.untypecheck(c.internal.substituteSymbols(inst, from, to))
new StripUnApplyNodes().transform(cleanInst)
}
if (instances.length == 1) {
val instance = instances.head
import instance._
inst match {
case Some(inst) =>
val cleanInst = clean(inst)
(q"$cleanInst.asInstanceOf[$actualTpe]", actualTpe)
case None =>
abort(s"Uninitialized $instTpe lazy implicit")
}
} else {
val instTrees =
instances.map { instance =>
import instance._
inst match {
case Some(inst) =>
val cleanInst = clean(inst)
q"""lazy val $name: $actualTpe = $cleanInst.asInstanceOf[$actualTpe]"""
case None =>
abort(s"Uninitialized $instTpe lazy implicit")
}
}
val primaryInstance = (state.lookup(primaryTpe): @unchecked) match {
case Right((_, pi)) => pi
}
val primaryNme = primaryInstance.name
val clsName = TypeName(c.freshName(state.name))
val tree =
q"""
final class $clsName extends _root_.scala.Serializable {
..$instTrees
}
(new $clsName).$primaryNme
"""
val actualType = primaryInstance.actualTpe
(tree, actualType)
}
}
}
}
object LazyMacros {
def dcRef(lm: LazyMacros): Option[LazyMacros#DerivationContext] = {
// N.B. openMacros/enclosingMacros annoyingly include macros which are not enclosing this macro at all,
// but simply happen to be expanding further up on the same compiler stack (and the compiler stack doesn't
// necessarily correspond to a single path through the AST - it can jump to other trees during typing), so
// we need to stop once the position of the open macros no longer matches ours
lm.c.openMacros.takeWhile(_.enclosingPosition == lm.c.enclosingPosition)
// use the first enclosing DerivationContext we find (if any)
.find(c => c.internal.attachments(c.macroApplication).contains[lm.DerivationContext])
.flatMap(c => c.internal.attachments(c.macroApplication).get[lm.DerivationContext])
}
def deriveInstance(lm: LazyMacros)(tpe: lm.c.Type, mkInst: (lm.c.Tree, lm.c.Type) => lm.c.Tree): lm.c.Tree = {
val (dc, root) =
dcRef(lm) match {
case None =>
lm.resetAnnotation
val dc = new lm.DerivationContext
lm.c.internal.updateAttachment(lm.c.macroApplication, dc)
(dc, true)
case Some(dc) =>
(dc.asInstanceOf[lm.DerivationContext], false)
}
if (root)
// Sometimes corrupted, and slows things too
lm.c.universe.asInstanceOf[scala.tools.nsc.Global].analyzer.resetImplicits()
try {
dc.State.deriveInstance(tpe, root, mkInst)
} finally {
if(root) {
lm.c.internal.removeAttachment[lm.DerivationContext](lm.c.macroApplication)
}
}
}
}
|
isaka/shapeless
|
core/src/main/scala_2.13-/shapeless/LazyMacros.scala
|
Scala
|
apache-2.0
| 12,126 |
package enumeratum.values
import argonaut._
import Argonaut._
/** Created by alonsodomin on 14/10/2016.
*/
object Argonauter {
def encoder[ValueType: EncodeJson, EntryType <: ValueEnumEntry[ValueType]](
enum: ValueEnum[ValueType, EntryType]
): EncodeJson[EntryType] = {
val encodeValue = implicitly[EncodeJson[ValueType]]
EncodeJson { entry =>
encodeValue(entry.value)
}
}
def decoder[ValueType: DecodeJson, EntryType <: ValueEnumEntry[ValueType]](
enum: ValueEnum[ValueType, EntryType]
): DecodeJson[EntryType] = {
val decodeValue = implicitly[DecodeJson[ValueType]]
DecodeJson { cursor =>
decodeValue(cursor).flatMap { value =>
enum.withValueOpt(value) match {
case Some(entry) => okResult(entry)
case _ => failResult(s"$value is not a member of enum $enum", cursor.history)
}
}
}
}
}
|
lloydmeta/enumeratum
|
enumeratum-argonaut/src/main/scala/enumeratum/values/Argonauter.scala
|
Scala
|
mit
| 906 |
package org.jetbrains.plugins.scala
package codeInspection
package unusedInspections
import com.intellij.codeInsight.FileModificationService
import com.intellij.codeInsight.daemon.QuickFixBundle
import com.intellij.codeInsight.intention.{HighPriorityAction, IntentionAction, LowPriorityAction}
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.project.Project
import com.intellij.psi.PsiFile
import com.intellij.util.FileContentUtil
import org.jetbrains.plugins.scala.editor.importOptimizer.ScalaImportOptimizer
import org.jetbrains.plugins.scala.extensions.PsiFileExt
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.settings.ScalaApplicationSettings
import scala.jdk.CollectionConverters._
sealed abstract class ScalaOptimizeImportsFixBase extends IntentionAction {
override final def startInWriteAction: Boolean = true
override def isAvailable(project: Project, editor: Editor, file: PsiFile): Boolean =
file.getManager.isInProject(file) && file.hasScalaPsi
/**
* We can't just select ScalaImportOptimizer because of Play2 templates
*
* @param file Any parallel psi file
*/
override def invoke(project: Project, editor: Editor, file: PsiFile): Unit = for {
scalaFile <- file.findAnyScalaFile
if FileModificationService.getInstance.prepareFileForWrite(scalaFile)
optimizer <- ScalaImportOptimizer.findOptimizerFor(scalaFile)
runner = optimizer.processFile(scalaFile)
} runner.run()
override final def getFamilyName: String = getText
}
final class ScalaOptimizeImportsFix extends ScalaOptimizeImportsFixBase with HighPriorityAction {
override def getText: String = QuickFixBundle.message("optimize.imports.fix")
}
final class ScalaEnableOptimizeImportsOnTheFlyFix extends ScalaOptimizeImportsFixBase {
override def getText: String = QuickFixBundle.message("enable.optimize.imports.on.the.fly")
override def isAvailable(project: Project, editor: Editor, file: PsiFile): Boolean =
!ScalaApplicationSettings.getInstance().OPTIMIZE_IMPORTS_ON_THE_FLY &&
super.isAvailable(project, editor, file)
override def invoke(project: Project, editor: Editor, file: PsiFile): Unit = {
ScalaApplicationSettings.getInstance().OPTIMIZE_IMPORTS_ON_THE_FLY = true
super.invoke(project, editor, file)
}
}
final class MarkImportAsAlwaysUsed(importText: String) extends IntentionAction with LowPriorityAction {
override def getText: String = ScalaInspectionBundle.message("mark.import.as.always.used.in.this.project")
override def startInWriteAction: Boolean = true
override def isAvailable(project: Project, editor: Editor, file: PsiFile): Boolean = {
importText.contains(".") && !ScalaCodeStyleSettings.getInstance(project).isAlwaysUsedImport(importText)
}
override def invoke(project: Project, editor: Editor, file: PsiFile): Unit = {
val settings = ScalaCodeStyleSettings.getInstance(project)
settings.setAlwaysUsedImports((settings.getAlwaysUsedImports ++ Array(importText)).sorted)
FileContentUtil.reparseFiles(project, Seq(file.getVirtualFile).asJava, true)
}
override def getFamilyName: String = ScalaInspectionBundle.message("mark.import.as.always.used.in.this.project")
}
|
JetBrains/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/codeInspection/unusedInspections/ScalaOptimizeImportsFix.scala
|
Scala
|
apache-2.0
| 3,270 |
package play.boilerplate.generators
import org.scalatest.{FlatSpec, Matchers}
import play.boilerplate.generators.security.{Play2AuthSecurityProvider, SecurityProvider}
import play.boilerplate.parser.backend.swagger.SwaggerBackend
import treehugger.forest
class ClientCodeGeneratorTest extends FlatSpec with Matchers with PrintSyntaxString {
"Full support" should "Parse petStore.v1.yaml" in {
val schema = SwaggerBackend.parseSchema("petStore.v1.yaml").get
val security = new Play2AuthSecurityProvider("User", "AuthConfig", "session") {
override def parseAuthority(scopes: Seq[SecurityProvider.SecurityScope]): Seq[forest.Tree] = Nil
}
val ctx = GeneratorContext.initial(DefaultGeneratorSettings(
"petStore.v1.yaml",
"test",
Nil,
injectionProvider = injection.ScaldiInjectionProvider,
securityProviders = List(security),
useTraceId = true,
traceIdHeader = Some("X-TraceID")
))
val gen = new ClientCodeGenerator().generate(schema)(ctx)
printCodeFile(gen)
true should be (true)
}
it should "Parse petStore.v2.yaml" in {
val schema = SwaggerBackend.parseSchema("petStore.v2.yaml").get
val ctx = GeneratorContext.initial(DefaultGeneratorSettings("petStore.v2.yaml", "test", Nil, injectionProvider = injection.GuiceInjectionProvider))
val gen = new ClientCodeGenerator().generate(schema)(ctx)
printCodeFile(gen)
true should be (true)
}
}
|
Romastyi/sbt-play-boilerplate
|
sbt-plugin/lib/src/test/scala/play/boilerplate/generators/ClientCodeGeneratorTest.scala
|
Scala
|
apache-2.0
| 1,451 |
package net.categoricaldata.category
import org.scalatest.FlatSpec
import org.scalatest.matchers.ShouldMatchers
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import scala.math._
import net.categoricaldata.ontology._
import net.categoricaldata.examples.Examples
import net.categoricaldata.util.CustomMatchers
/*
* This should always compile when checked in.
*/
@RunWith(classOf[JUnitRunner])
class NaturalTransformationDevTest extends FlatSpec with ShouldMatchers with CustomMatchers {
// NOTE to use the DSL, you need this line:
import net.categoricaldata.dsl.Sentences._
//TODO (Scott) These are important tests. Can you make them compile?
// Knowing that unit and counit work correctly will validate a lot of code, I think.
"leftUnit" should "be an isomorphism for translations that are isomorphisms" in {
val F = Examples.ReverseGraph
// Apologies for this circumlocution. The compiler can't deduce by itself that F.target and GraphDataset120114 are both 'Graph'.
val X = F.source.internalize(GraphDataset120114)
F.leftUnit(X) should be('isomorphism_?)
}
"leftCounit" should "be an isomorphism for translations that are isomorphisms" in {
val F = Examples.ReverseGraph
val X = F.target.internalize(GraphDataset120114)
// F.leftCounit(X) should be('isomorphism_?)
}
"rightUnit" should "be an isomorphism for translations that are isomorphisms" in {
val F = Examples.ReverseGraph
val X = F.target.internalize(GraphDataset120114)
F.rightUnit(X) should be('isomorphism_?)
}
"rightCounit" should "be an isomorphism for translations that are isomorphisms" in {
val F = Examples.ReverseGraph
val X = F.source.internalize(GraphDataset120114)
F.rightCounit(X) should be('isomorphism_?)
}
"leftUnit" should "be an isomorphism for translations that are equivalences" in {
val C = Examples.IndiscreteCategory(3)
val F = Ontologies.morphismToTerminalObject(C)
val X = F.source.internalize(Set120121)
F.leftUnit(X) should be('isomorphism_?)
}
"leftCounit" should "be an isomorphism for translations that are equivalences" in {
val C = Examples.IndiscreteCategory(3)
val F = Ontologies.morphismToTerminalObject(C)
val X = F.target.internalize(Indiscrete3Dataset120113)
// F.leftCounit(X) should be('isomorphism_?)
}
"rightUnit" should "be an isomorphism for translations that are equivalences" in {
val C = Examples.IndiscreteCategory(3)
val F = Ontologies.morphismToTerminalObject(C)
val X = F.target.internalize(Indiscrete3Dataset120113)
F.rightUnit(X) should be('isomorphism_?)
}
"rightCounit" should "be an isomorphism for translations that are equivalences" in {
val C = Examples.IndiscreteCategory(3)
val F = Ontologies.morphismToTerminalObject(C)
val X = F.source.internalize(Set120121)
F.rightCounit(X) should be('isomorphism_?)
}
"rightUnit" should "be an isomorphism for fully faithful transformations" in {
val F = Examples.Skip(3, 2)
val X = F.target.internalize(Chain3Dataset120114)
F.rightUnit(X) should be('isomorphism_?)
}
"leftCounit" should "be an isomorphism for fully faithful transformations" in {
val F = Examples.Skip(3, 2)
val X = F.target.internalize(Chain3Dataset120114)
// F.leftCounit(X) should be('isomorphism_?)
}
"leftCounit" should "be an injection for functor from Graph to Chain1" in {
val F = Examples.GraphToFunction
val X = F.target.internalize(GraphDataset120114)
// F.leftCounit(X) should be('injection_?)
}
"leftUnit" should "be a surjection for functor from Graph to Chain1" in {
val F = Examples.GraphToFunction
val X = F.source.internalize(DavidsFunkyFunction)
F.leftUnit(X) should be('surjection_?)
}
"leftUnit" should "be an isomorphism for epi-like functors" in {
val F = Examples.GraphToFunction
val X = F.source.internalize(DavidsFunkyFunction)
F.leftUnit(X) should be('isomorphism_?)
}
"rightCounit" should "be an isomorphism for epi-like functors" in {
val F = Examples.GraphToFunction
val X = F.source.internalize(DavidsFunkyFunction)
F.rightCounit(X) should be('isomorphism_?)
}
val GraphDataset120114 = Dataset(source = Examples.Graph,
onObjects = Map(
"an edge" -> List("f", "g", "h", "i", "j"),
"a vertex" -> List("A", "B", "C", "D")),
onMorphisms = Map(
("an edge" --- "has as source" --> "a vertex") -> Map(
"f" -> "B",
"g" -> "B",
"h" -> "C",
"i" -> "C",
"j" -> "C"),
("an edge" --- "has as target" --> "a vertex") -> Map(
"f" -> "A",
"g" -> "A",
"h" -> "B",
"i" -> "A",
"j" -> "C")))
val Indiscrete3Dataset120113 = Dataset(
source = Examples.IndiscreteCategory(3),
onObjects = Map(
"V1" -> List("a1", "b1", "b2", "c1", "c2", "c3"),
"V2" -> List("A1", "B1", "B2", "C1", "C2", "C3"),
"V3" -> List("u", "v", "w", "x", "y", "z")),
onMorphisms = Map(
("V1" --- "E11" --> "V1") -> Map(
"a1" -> "a1",
"b1" -> "b1",
"b2" -> "b2",
"c1" -> "c1",
"c2" -> "c2",
"c3" -> "c3"),
("V1" --- "E12" --> "V2") -> Map(
"a1" -> "A1",
"b1" -> "B1",
"b2" -> "B2",
"c1" -> "C1",
"c2" -> "C2",
"c3" -> "C3"),
("V1" --- "E13" --> "V3") -> Map(
"a1" -> "u",
"b1" -> "v",
"b2" -> "w",
"c1" -> "x",
"c2" -> "y",
"c3" -> "z"),
("V2" --- "E21" --> "V1") -> Map(
"A1" -> "a1",
"B1" -> "b1",
"B2" -> "b2",
"C1" -> "c1",
"C2" -> "c2",
"C3" -> "c3"),
("V2" --- "E22" --> "V2") -> Map(
"A1" -> "A1",
"B1" -> "B1",
"B2" -> "B2",
"C1" -> "C1",
"C2" -> "C2",
"C3" -> "C3"),
("V2" --- "E23" --> "V3") -> Map(
"A1" -> "u",
"B1" -> "v",
"B2" -> "w",
"C1" -> "x",
"C2" -> "y",
"C3" -> "z"),
("V3" --- "E31" --> "V1") -> Map(
"u" -> "a1",
"v" -> "b1",
"w" -> "b2",
"x" -> "c1",
"y" -> "c2",
"z" -> "c3"),
("V3" --- "E32" --> "V2") -> Map(
"u" -> "A1",
"v" -> "B1",
"w" -> "B2",
"x" -> "C1",
"y" -> "C2",
"z" -> "C3"),
("V3" --- "E33" --> "V3") -> Map(
"u" -> "u",
"v" -> "v",
"w" -> "w",
"x" -> "x",
"y" -> "y",
"z" -> "z")))
val Chain3Dataset120114 = Dataset(source = Examples.Chain(3),
onObjects = Map(
"V0" -> List(), //Deliberately left as empty list, representing empty-set.
"V1" -> List("1a", "1b", "1c", "1d", "1e"),
"V2" -> List("2f", "2g", "2h"),
"V3" -> List("3i", "3j", "3k", "3l")),
onMorphisms = Map(
("V0" --- "E01" --> "V1") -> Map[String, String](), //Deliberately left empty.
("V1" --- "E12" --> "V2") -> Map(
"1a" -> "2f",
"1b" -> "2g",
"1c" -> "2g",
"1d" -> "2h",
"1e" -> "2h"),
("V2" --- "E23" --> "V3") -> Map(
"2f" -> "3i",
"2g" -> "3k",
"2h" -> "3l")))
val DavidsFunkyFunction = Dataset(source = Examples.Chain(1),
onObjects = Map(
"V0" -> List("David", "Scott", "UC Berkeley", "MIT"),
"V1" -> List("1978", "Scott's birthyear", "1868", "1861")),
onMorphisms = Map(
"V0" --- "E01" --> "V1" -> Map(
"David" -> "1978",
"Scott" -> "Scott's birthyear",
"UC Berkeley" -> "1868",
"MIT" -> "1861")))
val Set120121 = Dataset(source = Examples.Chain(0),
onObjects = Map ("V0" -> List("a","b","c","d")),
onMorphisms = Map()
)
}
|
JasonGross/categoricaldata
|
src/test/scala/net/categoricaldata/category/NaturalTransformationDevTest.scala
|
Scala
|
mit
| 7,785 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network
import scala.reflect.ClassTag
import org.apache.spark.TaskContext
import org.apache.spark.network.buffer.ManagedBuffer
import org.apache.spark.network.client.StreamCallbackWithID
import org.apache.spark.storage.{BlockId, ShuffleBlockId, StorageLevel}
private[spark]
trait BlockDataManager {
/**
* Interface to get host-local shuffle block data. Throws an exception if the block cannot be
* found or cannot be read successfully.
*/
def getHostLocalShuffleData(blockId: BlockId, dirs: Array[String]): ManagedBuffer
/**
* Interface to get local block data. Throws an exception if the block cannot be found or
* cannot be read successfully.
*/
def getLocalBlockData(blockId: BlockId): ManagedBuffer
/**
* Put the block locally, using the given storage level.
*
* Returns true if the block was stored and false if the put operation failed or the block
* already existed.
*/
def putBlockData(
blockId: BlockId,
data: ManagedBuffer,
level: StorageLevel,
classTag: ClassTag[_]): Boolean
/**
* Put the given block that will be received as a stream.
*
* When this method is called, the block data itself is not available -- it will be passed to the
* returned StreamCallbackWithID.
*/
def putBlockDataAsStream(
blockId: BlockId,
level: StorageLevel,
classTag: ClassTag[_]): StreamCallbackWithID
/**
* Release locks acquired by [[putBlockData()]] and [[getLocalBlockData()]].
*/
def releaseLock(blockId: BlockId, taskContext: Option[TaskContext]): Unit
}
|
goldmedal/spark
|
core/src/main/scala/org/apache/spark/network/BlockDataManager.scala
|
Scala
|
apache-2.0
| 2,403 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.geotools.tools.export
import com.beust.jcommander.Parameters
import org.geotools.data.DataStore
import org.locationtech.geomesa.geotools.tools.GeoToolsDataStoreCommand
import org.locationtech.geomesa.geotools.tools.GeoToolsDataStoreCommand.GeoToolsDataStoreParams
import org.locationtech.geomesa.geotools.tools.export.GeoToolsPlaybackCommand.GeoToolsPlaybackParams
import org.locationtech.geomesa.tools.export.PlaybackCommand
import org.locationtech.geomesa.tools.export.PlaybackCommand.PlaybackParams
class GeoToolsPlaybackCommand extends PlaybackCommand[DataStore] with GeoToolsDataStoreCommand {
override val params: GeoToolsPlaybackParams = new GeoToolsPlaybackParams
}
object GeoToolsPlaybackCommand {
@Parameters(commandDescription = "Playback features from a data store, based on the feature date")
class GeoToolsPlaybackParams extends PlaybackParams with GeoToolsDataStoreParams
}
|
elahrvivaz/geomesa
|
geomesa-gt/geomesa-gt-tools/src/main/scala/org/locationtech/geomesa/geotools/tools/export/GeoToolsPlaybackCommand.scala
|
Scala
|
apache-2.0
| 1,396 |
package io.swagger.client.model
import io.swagger.client.core.ApiModel
import org.joda.time.DateTime
case class ValueObject (
/* Timestamp for the measurement event in epoch time (unixtime) */
timestamp: Long,
/* Measurement value */
value: Double,
/* Optional note to include with the measurement */
note: Option[String])
extends ApiModel
|
QuantiModo/QuantiModo-SDK-Akka-Scala
|
src/main/scala/io/swagger/client/model/ValueObject.scala
|
Scala
|
gpl-2.0
| 360 |
package com.socrata.datacoordinator.common.soql.csvreps
import com.socrata.datacoordinator.truth.csv.CsvColumnRep
import com.socrata.soql.types.{SoQLNull, SoQLValue, SoQLID, SoQLType}
object IDRep extends CsvColumnRep[SoQLType, SoQLValue] {
val size = 1
val representedType = SoQLID
def decode(row: IndexedSeq[String], indices: IndexedSeq[Int]): Option[SoQLValue] = {
assert(indices.size == size)
val x = row(indices(0))
if(x.isEmpty) {
Some(SoQLNull)
} else {
try {
Some(SoQLID(row(indices(0)).toLong))
} catch {
case _: NumberFormatException => None
}
}
}
}
|
socrata-platform/data-coordinator
|
coordinatorlib/src/main/scala/com/socrata/datacoordinator/common/soql/csvreps/IDRep.scala
|
Scala
|
apache-2.0
| 632 |
package sio.regions
import cats.data.ReaderT
import sio.core.{IORef, IO, MonadIO}
import sio.core.syntax.st._
/**
* Duplicate a handle in the parent region.
*/
trait Dup[H[_[_]]] {
def dup[F[_], CS, PS](h: H[RegionT[CS, RegionT[PS, F, ?], ?]])
(implicit F: MonadIO[F]): RegionT[CS, RegionT[PS, F, ?], H[RegionT[PS, F, ?]]]
}
sealed abstract class DupInstances {
import Dup._
implicit val finalizerHandleDup: Dup[FinalizerHandle] =
new Dup[FinalizerHandle] {
type H[F[_]] = FinalizerHandle[F]
type HR[S, F[_]] = H[RegionT[S, F, ?]]
def H[F[_]](finalizer: RefCountedFinalizer): H[F] =
FinalizerHandle[F](finalizer: RefCountedFinalizer)
/**
* Takes a handle in region `R` and moves it to some other region
*/
def copy[S, F[_], R[_]](h: H[R])(implicit F: MonadIO[F]): RegionT[S, F, HR[S, F]] =
RegionT.liftIO(hs =>
h.finalizer.refCount.modify(_ + 1) *>
hs.modify(h.finalizer :: _) *>
IO.pure(H[RegionT[S, F, ?]](h.finalizer)))
def dup[F[_], CS, PS](h: H[RegionT[CS, RegionT[PS, F, ?], ?]])
(implicit F: MonadIO[F]): RegionT[CS, RegionT[PS, F, ?], HR[PS, F]] =
RegionT[CS, RegionT[PS, F, ?], HR[PS, F]](
hs => copy[PS, F, RegionT[CS, RegionT[PS, F, ?], ?]](h))
}
}
object Dup extends DupInstances {
/**Duplicates a handle to its parent region. */
def dup[H[_[_]], F[_], CS, PS](h: H[RegionT[CS, RegionT[PS, F, ?], ?]])(implicit H: Dup[H], PP: MonadIO[F]):
RegionT[CS, RegionT[PS, F, ?], H[RegionT[PS, F, ?]]] = H.dup(h)
}
|
alexknvl/sio
|
regions/src/main/scala/sio/regions/Dup.scala
|
Scala
|
mit
| 1,576 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js sbt plugin **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.jsenv.rhino
import org.scalajs.jsenv._
import org.scalajs.jsenv.Utils.OptDeadline
import org.scalajs.core.tools.sem.Semantics
import org.scalajs.core.tools.io._
import org.scalajs.core.tools.classpath._
import org.scalajs.core.tools.logging._
import scala.annotation.tailrec
import scala.io.Source
import scala.collection.mutable
import scala.concurrent.{Future, Promise, Await, TimeoutException}
import scala.concurrent.duration._
import scala.reflect.ClassTag
import org.mozilla.javascript._
final class RhinoJSEnv private (
semantics: Semantics,
withDOM: Boolean,
val sourceMap: Boolean
) extends ComJSEnv {
import RhinoJSEnv._
def this(semantics: Semantics = Semantics.Defaults, withDOM: Boolean = false) =
this(semantics, withDOM, sourceMap = true)
def withSourceMap(sourceMap: Boolean): RhinoJSEnv =
new RhinoJSEnv(semantics, withDOM, sourceMap)
/** Executes code in an environment where the Scala.js library is set up to
* load its classes lazily.
*
* Other .js scripts in the inputs are executed eagerly before the provided
* `code` is called.
*/
override def jsRunner(classpath: CompleteClasspath, code: VirtualJSFile,
logger: Logger, console: JSConsole): JSRunner = {
new Runner(classpath, code, logger, console)
}
private class Runner(classpath: CompleteClasspath, code: VirtualJSFile,
logger: Logger, console: JSConsole) extends JSRunner {
def run(): Unit = internalRunJS(classpath, code, logger, console, None)
}
override def asyncRunner(classpath: CompleteClasspath, code: VirtualJSFile,
logger: Logger, console: JSConsole): AsyncJSRunner = {
new AsyncRunner(classpath, code, logger, console)
}
private class AsyncRunner(classpath: CompleteClasspath, code: VirtualJSFile,
logger: Logger, console: JSConsole) extends AsyncJSRunner {
private[this] val promise = Promise[Unit]
private[this] val thread = new Thread {
override def run(): Unit = {
try {
internalRunJS(classpath, code, logger, console, optChannel)
promise.success(())
} catch {
case t: Throwable =>
promise.failure(t)
}
}
}
def future: Future[Unit] = promise.future
def start(): Future[Unit] = {
thread.start()
future
}
def stop(): Unit = thread.interrupt()
protected def optChannel(): Option[Channel] = None
}
override def comRunner(classpath: CompleteClasspath, code: VirtualJSFile,
logger: Logger, console: JSConsole): ComJSRunner = {
new ComRunner(classpath, code, logger, console)
}
private class ComRunner(classpath: CompleteClasspath, code: VirtualJSFile,
logger: Logger, console: JSConsole)
extends AsyncRunner(classpath, code, logger, console) with ComJSRunner {
private[this] val channel = new Channel
override protected def optChannel(): Option[Channel] = Some(channel)
def send(msg: String): Unit = channel.sendToJS(msg)
def receive(timeout: Duration): String = {
try {
channel.recvJVM(timeout)
} catch {
case _: ChannelClosedException =>
throw new ComJSEnv.ComClosedException
}
}
def close(): Unit = channel.closeJVM()
}
private def internalRunJS(classpath: CompleteClasspath, code: VirtualJSFile,
logger: Logger, console: JSConsole, optChannel: Option[Channel]): Unit = {
val context = Context.enter()
try {
val scope = context.initStandardObjects()
if (withDOM)
setupDOM(context, scope)
disableLiveConnect(context, scope)
setupConsole(context, scope, console)
val taskQ = setupSetTimeout(context, scope)
// Optionally setup scalaJSCom
var recvCallback: Option[String => Unit] = None
for (channel <- optChannel) {
setupCom(context, scope, channel,
setCallback = cb => recvCallback = Some(cb),
clrCallback = () => recvCallback = None)
}
try {
loadClasspath(context, scope, classpath)
// Actually run the code
context.evaluateFile(scope, code)
// Start the event loop
for (channel <- optChannel) {
comEventLoop(taskQ, channel,
() => recvCallback.get, () => recvCallback.isDefined)
}
// Channel is closed. Fall back to basic event loop
basicEventLoop(taskQ)
} catch {
case e: RhinoException =>
// Trace here, since we want to be in the context to trace.
logger.trace(e)
sys.error(s"Exception while running JS code: ${e.getMessage}")
}
} finally {
// Ensure the channel is closed to release JVM side
optChannel.foreach(_.closeJS())
Context.exit()
}
}
private def setupDOM(context: Context, scope: Scriptable): Unit = {
// Fetch env.rhino.js from webjar
val name = "env.rhino.js"
val path = "/META-INF/resources/webjars/envjs/1.2/" + name
val resource = getClass.getResource(path)
assert(resource != null, s"need $name as resource")
// Rhino can't optimize envjs
context.setOptimizationLevel(-1)
// Don't print envjs header
scope.addFunction("print", args => ())
// Pipe file to Rhino
val reader = Source.fromURL(resource).bufferedReader
context.evaluateReader(scope, reader, name, 1, null);
// No need to actually define print here: It is captured by envjs to
// implement console.log, which we'll override in the next statement
}
/** Make sure Rhino does not do its magic for JVM top-level packages (#364) */
private def disableLiveConnect(context: Context, scope: Scriptable): Unit = {
val PackagesObject =
ScriptableObject.getProperty(scope, "Packages").asInstanceOf[Scriptable]
val topLevelPackageIds = ScriptableObject.getPropertyIds(PackagesObject)
for (id <- topLevelPackageIds) (id: Any) match {
case name: String => ScriptableObject.deleteProperty(scope, name)
case index: Int => ScriptableObject.deleteProperty(scope, index)
case _ => // should not happen, I think, but with Rhino you never know
}
}
private def setupConsole(context: Context, scope: Scriptable,
console: JSConsole): Unit = {
// Setup console.log
val jsconsole = context.newObject(scope)
jsconsole.addFunction("log", _.foreach(console.log _))
ScriptableObject.putProperty(scope, "console", jsconsole)
}
private def setupSetTimeout(context: Context,
scope: Scriptable): TaskQueue = {
val ordering = Ordering.by[TimedTask, Deadline](_.deadline).reverse
val taskQ = mutable.PriorityQueue.empty(ordering)
def ensure[T: ClassTag](v: AnyRef, errMsg: String) = v match {
case v: T => v
case _ => sys.error(errMsg)
}
scope.addFunction("setTimeout", args => {
val cb = ensure[Function](args(0),
"First argument to setTimeout must be a function")
val deadline = Context.toNumber(args(1)).toInt.millis.fromNow
val task = new TimeoutTask(deadline, () =>
cb.call(context, scope, scope, args.slice(2, args.length)))
taskQ += task
task
})
scope.addFunction("setInterval", args => {
val cb = ensure[Function](args(0),
"First argument to setInterval must be a function")
val interval = Context.toNumber(args(1)).toInt.millis
val firstDeadline = interval.fromNow
val task = new IntervalTask(firstDeadline, interval, () =>
cb.call(context, scope, scope, args.slice(2, args.length)))
taskQ += task
task
})
scope.addFunction("clearTimeout", args => {
val task = ensure[TimeoutTask](args(0), "First argument to " +
"clearTimeout must be a value returned by setTimeout")
task.cancel()
})
scope.addFunction("clearInterval", args => {
val task = ensure[IntervalTask](args(0), "First argument to " +
"clearInterval must be a value returned by setInterval")
task.cancel()
})
taskQ
}
private def setupCom(context: Context, scope: Scriptable, channel: Channel,
setCallback: (String => Unit) => Unit, clrCallback: () => Unit): Unit = {
val comObj = context.newObject(scope)
comObj.addFunction("send", s =>
channel.sendToJVM(Context.toString(s(0))))
comObj.addFunction("init", s => s(0) match {
case f: Function =>
val cb: String => Unit =
msg => f.call(context, scope, scope, Array(msg))
setCallback(cb)
case _ =>
sys.error("First argument to init must be a function")
})
comObj.addFunction("close", _ => {
// Tell JVM side we won't send anything
channel.closeJS()
// Internally register that we're done
clrCallback()
})
ScriptableObject.putProperty(scope, "scalajsCom", comObj)
}
/** Loads the classpath. Either through lazy loading or by simply inserting */
private def loadClasspath(context: Context, scope: Scriptable,
classpath: CompleteClasspath): Unit = classpath match {
case cp: IRClasspath =>
// Setup lazy loading classpath and source mapper
val optLoader = if (cp.scalaJSIR.nonEmpty) {
val loader = new ScalaJSCoreLib(semantics, cp)
// Setup sourceMapper
if (sourceMap) {
val scalaJSenv = context.newObject(scope)
scalaJSenv.addFunction("sourceMapper", args => {
val trace = Context.toObject(args(0), scope)
loader.mapStackTrace(trace, context, scope)
})
ScriptableObject.putProperty(scope, "__ScalaJSEnv", scalaJSenv)
}
Some(loader)
} else {
None
}
// Load JS libraries
cp.jsLibs.foreach(dep => context.evaluateFile(scope, dep.lib))
optLoader.foreach(_.insertInto(context, scope))
case cp =>
cp.allCode.foreach(context.evaluateFile(scope, _))
}
private def basicEventLoop(taskQ: TaskQueue): Unit =
eventLoopImpl(taskQ, sleepWait, () => true)
private def comEventLoop(taskQ: TaskQueue, channel: Channel,
callback: () => String => Unit, isOpen: () => Boolean): Unit = {
if (!isOpen())
// The channel has not been opened yet. Wait for opening.
eventLoopImpl(taskQ, sleepWait, () => !isOpen())
// Once we reach this point, we either:
// - Are done
// - The channel is open
// Guard call to `callback`
if (isOpen()) {
val cb = callback()
try {
@tailrec
def loop(): Unit = {
val loopResult = eventLoopImpl(taskQ, channel.recvJS _, isOpen)
loopResult match {
case Some(msg) =>
cb(msg)
loop()
case None if isOpen() =>
assert(taskQ.isEmpty)
cb(channel.recvJS())
loop()
case None =>
// No tasks left, channel closed
}
}
loop()
} catch {
case _: ChannelClosedException =>
// the JVM side closed the connection
}
}
}
/** Run an event loop on [[taskQ]] using [[waitFct]] to wait
*
* If [[waitFct]] returns a Some, this method returns this value immediately
* If [[waitFct]] returns a None, we assume a sufficient amount has been
* waited for the Deadline to pass. The event loop then runs the task.
*
* Each iteration, [[continue]] is queried, whether to continue the loop.
*
* @returns A Some returned by [[waitFct]] or None if [[continue]] has
* returned false, or there are no more tasks (i.e. [[taskQ]] is empty)
* @throws InterruptedException if the thread was interrupted
*/
private def eventLoopImpl[T](taskQ: TaskQueue,
waitFct: Deadline => Option[T], continue: () => Boolean): Option[T] = {
@tailrec
def loop(): Option[T] = {
if (Thread.interrupted())
throw new InterruptedException()
if (taskQ.isEmpty || !continue()) None
else {
val task = taskQ.head
if (task.canceled) {
taskQ.dequeue()
loop()
} else {
waitFct(task.deadline) match {
case result @ Some(_) => result
case None =>
// The time has actually expired
val task = taskQ.dequeue()
// Perform task
task.task()
if (task.reschedule())
taskQ += task
loop()
}
}
}
}
loop()
}
private val sleepWait = { (deadline: Deadline) =>
val timeLeft = deadline.timeLeft.toMillis
if (timeLeft > 0)
Thread.sleep(timeLeft)
None
}
}
object RhinoJSEnv {
/** Communication channel between the Rhino thread and the rest of the JVM */
private class Channel {
private[this] var _closedJS = false
private[this] var _closedJVM = false
private[this] val js2jvm = mutable.Queue.empty[String]
private[this] val jvm2js = mutable.Queue.empty[String]
def sendToJS(msg: String): Unit = synchronized {
ensureOpen(_closedJVM)
jvm2js.enqueue(msg)
notifyAll()
}
def sendToJVM(msg: String): Unit = synchronized {
ensureOpen(_closedJS)
js2jvm.enqueue(msg)
notifyAll()
}
def recvJVM(timeout: Duration): String = synchronized {
val deadline = OptDeadline(timeout)
while (js2jvm.isEmpty && ensureOpen(_closedJS) && !deadline.isOverdue)
wait(deadline.millisLeft)
if (js2jvm.isEmpty)
throw new TimeoutException("Timeout expired")
js2jvm.dequeue()
}
def recvJS(): String = synchronized {
while (jvm2js.isEmpty && ensureOpen(_closedJVM))
wait()
jvm2js.dequeue()
}
def recvJS(deadline: Deadline): Option[String] = synchronized {
var expired = false
while (jvm2js.isEmpty && !expired && ensureOpen(_closedJVM)) {
val timeLeft = deadline.timeLeft.toMillis
if (timeLeft > 0)
wait(timeLeft)
else
expired = true
}
if (expired) None
else Some(jvm2js.dequeue())
}
def closeJS(): Unit = synchronized {
_closedJS = true
notifyAll()
}
def closeJVM(): Unit = synchronized {
_closedJVM = true
notifyAll()
}
/** Throws if the channel is closed and returns true */
private def ensureOpen(closed: Boolean): Boolean = {
if (closed)
throw new ChannelClosedException
true
}
}
private class ChannelClosedException extends Exception
private abstract class TimedTask(val task: () => Unit) {
private[this] var _canceled: Boolean = false
def deadline: Deadline
def reschedule(): Boolean
def canceled: Boolean = _canceled
def cancel(): Unit = _canceled = true
}
private final class TimeoutTask(val deadline: Deadline,
task: () => Unit) extends TimedTask(task) {
def reschedule(): Boolean = false
override def toString(): String =
s"TimeoutTask($deadline, canceled = $canceled)"
}
private final class IntervalTask(firstDeadline: Deadline,
interval: FiniteDuration, task: () => Unit) extends TimedTask(task) {
private[this] var _deadline = firstDeadline
def deadline: Deadline = _deadline
def reschedule(): Boolean = {
_deadline += interval
!canceled
}
override def toString(): String =
s"IntervalTask($deadline, interval = $interval, canceled = $canceled)"
}
private type TaskQueue = mutable.PriorityQueue[TimedTask]
}
|
matthughes/scala-js
|
js-envs/src/main/scala/org/scalajs/jsenv/rhino/RhinoJSEnv.scala
|
Scala
|
bsd-3-clause
| 16,080 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.testing.interface
import scala.scalajs.js
import scala.scalajs.js.annotation._
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import org.scalajs.testing.common.RPCCore
/** JS RPC Core. Uses `scalajsCom`. */
private[interface] final object JSRPC extends RPCCore {
Com.init(handleMessage _)
override protected def send(msg: String): Unit = Com.send(msg)
@js.native
@JSGlobal("scalajsCom")
private object Com extends js.Object {
def init(onReceive: js.Function1[String, Unit]): Unit = js.native
def send(msg: String): Unit = js.native
// We support close, but do not use it. The JS side just terminates.
// def close(): Unit = js.native
}
}
|
nicolasstucki/scala-js
|
test-interface/src/main/scala/org/scalajs/testing/interface/JSRPC.scala
|
Scala
|
apache-2.0
| 1,012 |
def &&(p: Prop) = Prop {
(n,rng) => run(n,rng) match {
case Right((a,n)) => p.run(n,rng).right.map { case (s,m) => (s,n+m) }
case l => l
}
}
def ||(p: Prop) = Prop {
(n,rng) => run(n,rng) match {
case Left(msg) => p.tag(msg).run(n,rng)
case r => r
}
}
/* This is rather simplistic - in the event of failure, we simply prepend
* the given message on a newline in front of the existing message.
*/
def tag(msg: String) = Prop {
(n,rng) => run(n,rng) match {
case Left(e) => Left(msg + "\\n" + e)
}
}
|
galarragas/FpInScala
|
answerkey/testing/12.answer.scala
|
Scala
|
mit
| 534 |
package playground.models
case class Password(private val password: String) {
def neverLog[A](f: String => A): A = f(password)
// intended to be final so you can never log a Password by inadvertance but by passing by neverLog
final override def toString() = "[PROTECTED]"
}
|
ybr/playground
|
src/main/scala/playground/models/Password.scala
|
Scala
|
mit
| 282 |
package com.outr.arango.api
import com.outr.arango.api.model._
import io.youi.client.HttpClient
import io.youi.http.HttpMethod
import io.youi.net._
import io.circe.Json
import scala.concurrent.{ExecutionContext, Future}
object APIIndexskiplist {
def post(client: HttpClient, collectionName: String, body: PostAPIIndexSkiplist)(implicit ec: ExecutionContext): Future[Json] = client
.method(HttpMethod.Post)
.path(path"/_api/index", append = true)
.params("collection" -> collectionName.toString)
.restful[PostAPIIndexSkiplist, Json](body)
}
|
outr/arangodb-scala
|
api/src/main/scala/com/outr/arango/api/APIIndexskiplist.scala
|
Scala
|
mit
| 566 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt.internal
import sbt.BasicCommandStrings.{ ClearOnFailure, FailureWall }
import sbt.Watched.ContinuousEventMonitor
import sbt.internal.io.{ EventMonitor, WatchState }
import sbt.internal.nio.{ FileEventMonitor, FileTreeRepository, WatchLogger }
import sbt.{ State, Watched }
import scala.annotation.tailrec
import scala.concurrent.duration._
import scala.util.control.NonFatal
private[sbt] object LegacyWatched {
@deprecated("Replaced by Watched.command", "1.3.0")
def executeContinuously(watched: Watched, s: State, next: String, repeat: String): State = {
@tailrec def shouldTerminate: Boolean =
(System.in.available > 0) && (watched.terminateWatch(System.in.read()) || shouldTerminate)
val log = s.log
s get ContinuousEventMonitor match {
case None =>
val watchState = WatchState.empty(watched.watchService(), watched.watchSources(s))
// This is the first iteration, so run the task and create a new EventMonitor
val logger: WatchLogger = (a: Any) => log.debug(a.toString)
val repo = FileTreeRepository.legacy(logger, watched.watchService())
val fileEventMonitor = FileEventMonitor.antiEntropy(
repo,
watched.antiEntropy,
logger,
watched.antiEntropy,
10.minutes
)
val monitor = new EventMonitor {
override def awaitEvent(): Boolean = fileEventMonitor.poll(2.millis).nonEmpty
override def state(): WatchState = watchState
override def close(): Unit = watchState.close()
}
(ClearOnFailure :: next :: FailureWall :: repeat :: s)
.put(ContinuousEventMonitor, monitor: EventMonitor)
case Some(eventMonitor) =>
Watched.printIfDefined(watched watchingMessage eventMonitor.state())
@tailrec def impl(): State = {
val triggered = try eventMonitor.awaitEvent()
catch {
case NonFatal(e) =>
log.error(
"Error occurred obtaining files to watch. Terminating continuous execution..."
)
s.handleError(e)
false
}
if (triggered) {
Watched.printIfDefined(watched triggeredMessage eventMonitor.state())
ClearOnFailure :: next :: FailureWall :: repeat :: s
} else if (shouldTerminate) {
while (System.in.available() > 0) System.in.read()
eventMonitor.close()
s.remove(ContinuousEventMonitor)
} else {
impl()
}
}
impl()
}
}
}
package io {
@deprecated("No longer used", "1.3.0")
private[sbt] trait EventMonitor extends AutoCloseable {
/** Block indefinitely until the monitor receives a file event or the user stops the watch. */
def awaitEvent(): Boolean
/** A snapshot of the WatchState that includes the number of build triggers and watch sources. */
def state(): WatchState
}
}
|
xuwei-k/xsbt
|
main-command/src/main/scala/sbt/internal/LegacyWatched.scala
|
Scala
|
apache-2.0
| 3,109 |
package org.opencommercesearch.api.models
/*
* Licensed to OpenCommerceSearch under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. OpenCommerceSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Date
import org.opencommercesearch.api.models.Availability._
import play.api.libs.json.Json
import reactivemongo.bson.{BSONDocument, BSONDocumentReader, BSONDocumentWriter}
case class Availability(
var status: Option[String] = None,
var stockLevel: Option[Int] = None,
var backorderLevel: Option[Int] = None,
var date: Option[Date] = None) {
require((status, stockLevel) match {
case (Some(s), Some(l)) => s match {
case OutOfStock | Backorderable | PermanentlyOutOfStock => l == 0
case InStock => l > 0
case _ => false
}
case (Some(s), _) => s match {
case InStock | OutOfStock | PermanentlyOutOfStock | Backorderable | Preorderable => true
case _ => false
}
case _ => true
})
require(backorderLevel match {
case Some(l) => l >= InfiniteStock
case None => true
})
}
object Availability {
val InStock = "InStock"
val OutOfStock = "OutOfStock"
val PermanentlyOutOfStock = "PermanentlyOutOfStock"
val Backorderable = "Backorderable"
val Preorderable = "Preorderable"
val InfiniteStock = -1
implicit val readsAvailability = Json.reads[Availability]
implicit val writesAvailability = Json.writes[Availability]
implicit object AvailabilityWriter extends BSONDocumentWriter[Availability] {
import org.opencommercesearch.bson.BSONFormats._
def write(availability: Availability): BSONDocument = BSONDocument(
"status" -> availability.status,
"stockLevel" -> availability.stockLevel,
"backorderLevel" -> availability.backorderLevel,
"date" -> availability.date
)
}
implicit object AvailabilityReader extends BSONDocumentReader[Availability] {
import org.opencommercesearch.bson.BSONFormats._
def read(doc: BSONDocument): Availability = Availability(
doc.getAs[String]("status"),
doc.getAs[Int]("stockLevel"),
doc.getAs[Int]("backorderLevel"),
doc.getAs[Date]("date")
)
}
}
|
madickson/opencommercesearch
|
opencommercesearch-api/app/org/opencommercesearch/api/models/Availability.scala
|
Scala
|
apache-2.0
| 2,787 |
package io.hnfmr.chapter4
import cats.syntax.either._
object EitherTut extends App {
// Either in Scala 2.12 is right-biased
val either0 = Right(123)
val either1 = either0.flatMap(x => Right(x + 1))
val either2 = either0.flatMap(x => Right(x + 2))
val either3 = "DIV0".asLeft[Int]
val d = for {
a <- either1
b <- either2
c <- either3 // fail-fast error handling
} yield a + b + c
println(d)
}
|
hnfmr/advanced-scala
|
src/main/scala/io/hnfmr/chapter4/EitherTut.scala
|
Scala
|
mit
| 425 |
object TypedPlaceholder {
def foo[T](x: T => Int, y: T) = x(y)
foo(/*start*/(_: String).length/*end*/, "")
}
//(String) => Int
|
ilinum/intellij-scala
|
testdata/typeInference/expected/placeholder/TypedPlaceholder.scala
|
Scala
|
apache-2.0
| 131 |
package emailvalidator
import emailvalidator.lexer.{Token, TokenReader}
import emailvalidator.parser.EmailParser
import scala.util.parsing.input.Reader
sealed trait ValidationResult {
def warnings: Option[List[Warning]] = None
def isSuccess: Boolean
def isFailure: Boolean
}
sealed case class Warning (msg:String, explanation:String)
case class Success(override val warnings: Option[List[Warning]] = None) extends ValidationResult {
override def isFailure = false
override def isSuccess = true
}
case class Failure(msg:String) extends ValidationResult {
override def isFailure = true
override def isSuccess = false
}
object EmailValidator {
def validate(email:String): Either[Failure,Success] = result(new TokenReader(email))
def validate(tokenReader: Reader[Token]): Either[Failure,Success] = result(tokenReader)
private def result(tokenReader: Reader[Token]): Either[Failure, Success] = {
val parsingResult = EmailParser.parse(tokenReader)
if (parsingResult.successful) Right(Success())
else Left(Failure(parsingResult.toString))
}
}
|
egulias/EmailValidator4Scala
|
src/main/scala/emailvalidator/EmailValidator.scala
|
Scala
|
mit
| 1,080 |
package controllers.admin
import com.google.inject.Inject
import controllers.{routes => normalroutes}
import model.Room
import model.json.ResultMessage
import play.Logger
import play.api.Environment
import play.api.i18n.MessagesApi
import play.api.libs.json.{JsError, JsSuccess, Json}
import services.{LaboratoryService, RoomService, UserService, state}
import scala.concurrent.{ExecutionContext, Future}
/**
* @author Camilo Sampedro <[email protected]>
*/
class RoomController @Inject()(roomService: RoomService, laboratoryService: LaboratoryService, val messagesApi: MessagesApi)(implicit userService: UserService, executionContext: ExecutionContext, environment: Environment) extends ControllerWithAuthRequired {
def add = AuthRequiredAction { implicit request =>
implicit val username = Some(loggedIn.username)
Logger.debug("Adding room... ")
request.body.asJson match {
case Some(json) => json.validate[Room] match {
case JsSuccess(room, _) => roomService.add(room).map {
case state.ActionCompleted => Ok(Json.toJson(new ResultMessage("Room added")))
case _ => BadRequest(Json.toJson(new ResultMessage("Could not add that room")))
}
case JsError(errors) => Future.successful(BadRequest(Json.toJson(ResultMessage.wrongJsonFormat(errors))))
}
case _ => Future.successful(BadRequest(Json.toJson(ResultMessage.inputWasNotAJson)))
}
}
def update = AuthRequiredAction { implicit request =>
implicit val username = Some(loggedIn.username)
Logger.debug("Updating room... ")
request.body.asJson match {
case Some(json) => json.validate[Room] match {
case JsSuccess(room, _) => roomService.update(room).map {
case state.ActionCompleted => Ok(Json.toJson(new ResultMessage("Room updated")))
case state.NotFound => NotFound(Json.toJson( ResultMessage("Room not found", Seq(("id", room.id.toString)))))
case _ => BadRequest(Json.toJson(new ResultMessage("Could not update that room")))
}
case JsError(errors) => Future.successful(BadRequest(Json.toJson(ResultMessage.wrongJsonFormat(errors))))
}
case _ => Future.successful(BadRequest(Json.toJson(ResultMessage.inputWasNotAJson)))
}
}
def delete(roomId: Long) = AuthRequiredAction { implicit request =>
roomService.delete(roomId).map {
case state.ActionCompleted => Redirect(normalroutes.HomeController.home())
case state.NotFound => NotFound
case _ => BadRequest
}
}
def blockUser(roomId: Long) = AuthRequiredAction { implicit request =>
// TODO: Processing Not Yet Implemented
val results = for {
roomResult <- roomService.get(roomId)
} yield roomResult
results.map { result: Option[Room] =>
if (result.isDefined)
Redirect(normalroutes.LaboratoryController.get(result.get.laboratoryID))
else
NotFound
}
}
}
|
ProjectAton/AtonLab
|
app/controllers/admin/RoomController.scala
|
Scala
|
gpl-3.0
| 2,935 |
package io.skysail.server.demo.resources.html
import play.twirl.api.Html
import html.main
import io.skysail.server.RepresentationModel
object BookmarksResource_Get extends _root_.play.twirl.api.BaseScalaTemplate[play.twirl.api.HtmlFormat.Appendable,_root_.play.twirl.api.Format[play.twirl.api.HtmlFormat.Appendable]](play.twirl.api.HtmlFormat) with _root_.play.twirl.api.Template1[RepresentationModel,play.twirl.api.HtmlFormat.Appendable] {
/*************************************
* Home page. *
* *
* @param msg The message to display *
*************************************/
def apply/*6.2*/(rep: RepresentationModel):play.twirl.api.HtmlFormat.Appendable = {
_display_ {
{
Seq[Any](format.raw/*6.28*/("""
"""),_display_(/*8.2*/main/*8.6*/ {_display_(Seq[Any](format.raw/*8.8*/("""
"""),format.raw/*10.1*/("""<br><br><br>
<div class="container">
<div class="starter-template">
<h1>Bookmarks</h1>
<p class="lead">all bookmarks:</p>
<table class="table table-sm">
<thead>
<tr>
<th>Title</th>
<th>Url</th>
<th>Actions</th>
</tr>
</thead>
<tbody>
"""),_display_(/*25.14*/for(p <- rep.rawData) yield /*25.35*/ {_display_(Seq[Any](format.raw/*25.37*/("""
"""),format.raw/*26.13*/("""<tr>
<th scope="row">"""),_display_(/*27.34*/p/*27.35*/.get("title")),format.raw/*27.48*/("""</th>
<td><a href='"""),_display_(/*28.31*/p/*28.32*/.get("url")),format.raw/*28.43*/("""'>"""),_display_(/*28.46*/p/*28.47*/.get("url")),format.raw/*28.58*/("""</a></td>
<td>
<a href='"""),_display_(/*30.31*/rep/*30.34*/.linkFor("io.skysail.server.demo.resources.BookmarkResource", p.get("id"))),format.raw/*30.108*/("""'>[show]</a>
<a href='"""),_display_(/*31.31*/rep/*31.34*/.linkFor("io.skysail.server.demo.resources.PutBookmarkResource", p.get("id"))),format.raw/*31.111*/("""'>[update]</a>
</td>
</tr>
""")))}),format.raw/*34.14*/("""
"""),format.raw/*36.13*/("""</tbody>
</table>
<a href='"""),_display_(/*40.19*/rep/*40.22*/.linkFor("io.skysail.server.demo.resources.PostBookmarkResource",None)),format.raw/*40.92*/("""'>Create New Bookmark</a>
<hr>
<a href="/doc/v1/index.html" target="_docs">Doc</a>
</div>
</div>
""")))}))
}
}
}
def render(rep:RepresentationModel): play.twirl.api.HtmlFormat.Appendable = apply(rep)
def f:((RepresentationModel) => play.twirl.api.HtmlFormat.Appendable) = (rep) => apply(rep)
def ref: this.type = this
}
/*
-- GENERATED --
DATE: Fri Dec 29 11:25:03 CET 2017
SOURCE: C:/git/skysail-server/skysail.server.demo/./src/io/skysail/server/demo/resources/BookmarksResource_Get.scala.html
HASH: 8f27c18bdab7fb779d6be559a6780518d6787dce
MATRIX: 657->193|778->219|806->222|817->226|855->228|884->230|1295->614|1332->635|1372->637|1413->650|1478->688|1488->689|1522->702|1585->738|1595->739|1627->750|1657->753|1667->754|1699->765|1787->826|1799->829|1895->903|1965->946|1977->949|2076->1026|2175->1094|2217->1108|2290->1154|2302->1157|2393->1227
LINES: 15->6|20->6|22->8|22->8|22->8|24->10|39->25|39->25|39->25|40->26|41->27|41->27|41->27|42->28|42->28|42->28|42->28|42->28|42->28|44->30|44->30|44->30|45->31|45->31|45->31|48->34|50->36|54->40|54->40|54->40
-- GENERATED --
*/
|
evandor/skysail-server
|
skysail.server.demo/src/io/skysail/server/demo/resources/html/BookmarksResource_Get.template.scala
|
Scala
|
apache-2.0
| 3,689 |
package skinny.filter
import skinny.micro.contrib.{ FlashMapSupport, CSRFTokenSupport }
import skinny.micro.contrib.csrf.CSRFTokenGenerator
import skinny.micro.contrib.flash.FlashMap
import scala.language.implicitConversions
import skinny.controller.feature._
import skinny.micro.context.SkinnyContext
import skinny.session._
import FlashMapSupport._
import java.util.Locale
object SkinnySessionFilter {
val ATTR_SKINNY_SESSION_IN_REQUEST_SCOPE = classOf[SkinnySessionFilter].getCanonicalName + "_SkinnySessionWrapper"
}
/**
* Enables replacing Servlet session with Skinny's session shared among several Servlet apps.
*
* Mounting skinny.session.SkinnySessionInitializer on the top of ScalatraBootstrap.scala is required.
*
* {{{
* ctx.mount(classOf[SkinnySessionInitializer], "/\\*")
* }}}
*/
trait SkinnySessionFilter extends SkinnyFilter {
self: FlashFeature with CSRFProtectionFeature with LocaleFeature =>
import SkinnySessionFilter._
// --------------------------------------
// SkinnySession by using Skinny beforeAction/afterAction
beforeAction()(initializeSkinnySession)
afterAction()(saveCurrentSkinnySession)
/**
* Replace this when you use other backend.
*/
protected def initializeSkinnySession: SkinnyHttpSession = {
// SkinnyHttpSession's factory method doesn't support several backend implementation.
// Of course, pull requests are always welcome.
SkinnyHttpSession.getOrCreate(request)
}
protected def saveCurrentSkinnySession(): Unit = {
try {
getFromRequestScope[SkinnyHttpSession](ATTR_SKINNY_SESSION_IN_REQUEST_SCOPE).foreach { sessionWrapper =>
sessionWrapper.save()
}
} catch {
case scala.util.control.NonFatal(e) =>
logger.warn(s"Failed to save skinny session because ${e.getMessage}", e)
}
}
// --------------------------------------
// Accessing SkinnySession
def skinnySession(implicit ctx: SkinnyContext): SkinnyHttpSession = {
getFromRequestScope[SkinnyHttpSession](ATTR_SKINNY_SESSION_IN_REQUEST_SCOPE)(ctx).getOrElse {
initializeSkinnySession
}
}
def skinnySession[A](key: String)(implicit ctx: SkinnyContext): Option[A] = skinnySession(ctx).getAs[A](key)
def skinnySession[A](key: Symbol)(implicit ctx: SkinnyContext): Option[A] = skinnySession[A](key.name)(ctx)
// --------------------------------------
// override FlashMapSupport
// NOTICE: This API doesn't support Future ops
override def flashMapSetSession(f: FlashMap)(implicit ctx: SkinnyContext): Unit = {
try {
skinnySession(ctx).setAttribute(SessionKey, f)
} catch {
case scala.util.control.NonFatal(e) => logger.debug(s"Failed to set flashMap to skinny session because ${e.getMessage}")
}
}
// --------------------------------------
// override CsrfTokenSupport
override protected def isForged: Boolean = {
if (skinnySession.getAttribute(csrfKey).isEmpty) {
prepareCsrfToken()
}
!request.requestMethod.isSafe &&
skinnySession.getAttribute(csrfKey) != params.get(csrfKey) &&
!CSRFTokenSupport.HeaderNames.map(request.headers.get).contains(skinnySession.getAttribute(csrfKey))
}
override protected def prepareCsrfToken() = {
skinnySession.getAttributeOrElseUpdate(csrfKey, CSRFTokenGenerator())
}
// --------------------------------------
// override SessionLocaleFeature
override def setCurrentLocale(locale: String)(implicit ctx: SkinnyContext): Unit = {
skinnySession(ctx).setAttribute(sessionLocaleKey, locale)
}
override def currentLocale(implicit ctx: SkinnyContext): Option[Locale] = {
skinnySession(ctx)
.getAttribute(sessionLocaleKey)
.map(l => new Locale(l.toString))
.orElse(defaultLocale)
}
}
|
Kuchitama/skinny-framework
|
framework/src/main/scala/skinny/filter/SkinnySessionFilter.scala
|
Scala
|
mit
| 3,765 |
package com.cyrusinnovation.computation.util
/*
* Copyright 2014 Cyrus Innovation, LLC. Licensed under Apache license 2.0.
*/
import org.slf4j.Logger
/**
* logging is done via slf4j
*
* Keeping Logging implementation separate from
* the one used in the scripting engine
*
*/
trait Log {
def debug(msg: String)
def info(msg: String)
def warn(msg: String)
def error(msg: String)
def error(msg: String, e: Throwable)
}
case class ComputationEngineLog(logger: Logger) extends Log {
def debug(msg: String) = if (logger.isDebugEnabled) logger.debug(msg)
def info(msg: String) = if (logger.isInfoEnabled) logger.info(msg)
def warn(msg: String) = logger.warn(msg)
def error(msg: String) = logger.error(msg)
def error(msg: String, e: Throwable) = logger.error(msg, e)
}
|
psfblair/computation-engine
|
core/src/main/scala/com/cyrusinnovation/computation/util/Logger.scala
|
Scala
|
apache-2.0
| 793 |
package com.meekrab.rsseater
import scalaj.http._
import scala.xml._
import sys.process._
import java.io._
/*
* Based trait for RSS, Atom, and Podcast feeds
*/
trait Feed
/*
* Singleton factory that creates new instances of RSS and Atom feeds
*/
object Feeder {
/*
* Contains RSS Feed elements
* @param feed: xml feed
*/
class RSSFeed(feed: scala.xml.NodeSeq) extends Feed {
private val channel = feed \\ "channel"
val title = (channel \\ "title").text
val link = (channel \\ "link").text
val category = (channel \\ "category").text
val desc = (channel \\ "description").text
val image = Image(channel \\ "image")
val items: List[Item] =
(for(item <- (channel \\ "item")) yield { Item(item) }).toList
/*
* Writes XML feed to file for local storage
* @param path: Path to file location
*/
def store(path: String) = {
val writer = new PrintWriter(new File(path))
writer.write(feed.toString)
writer.close
}
/*
* Contains image elements
* @param image: Image node
*/
case class Image(image: scala.xml.NodeSeq) {
val title = (image \\ "title").text
val url = (image \\ "url").text
val link = (image \\ "link").text
val desc = (image \\ "description").text
val width = (image \\ "width").text
val height = (image \\ "height").text
}
/*
* Contains item elements
* @param item: Item node
*/
case class Item(item: scala.xml.NodeSeq) {
val title = (item \\ "title").text
val link = (item \\ "link").text
val desc = (item \\ "description").text
val thumbs =
(for(thumb <- (item \\ "thumbnail"))
yield { Thumb((thumb \\ "@url").text,
(thumb \\ "@width").text,
(thumb \\ "@height").text)
}).toList
val pubDate = (item \\ "pubDate").text
val category = (item \\ "category").text
}
/*
* Contains thumbnail elements
* @param url: URL of thumbnail
* @param width: Width of thumbnail
* @param height: Height of thumbnail
*/
case class Thumb(url: String, width: String, height: String)
}
/*
* Contains Atom Feed elements
* @param feed: xml feed
*/
class AtomFeed(feed: scala.xml.NodeSeq) extends Feed {
val title = (feed \\ "title").text
val link = (feed \\ "link" \\ "@href").text
val id = (feed \\ "id").text
val desc = (feed \\ "description").text
val entries: List[Entry] =
(for(entry <- (feed \\ "entry")) yield { Entry(entry) }).toList
/*
* Writes XML feed to file for local storage
* @param path: Path to file location
*/
def store(path: String) = {
val writer = new PrintWriter(new File(path))
writer.write(feed.toString)
writer.close
}
/*
* Contains entry elements
* @param item: Item Node
*/
case class Entry(item: scala.xml.NodeSeq) {
val title = (item \\ "title").text
val id = (item \\ "id").text
val link = (item \\ "link" \\ "@href").text
val updated = (item \\ "updated").text
val summary = (item \\ "summary").text
}
}
/*
* Creates a new instance of RSSFeed (either from a local file or a feed)
* @param path: Directory path or URL
* @param kind: Flag for feed/local
* @return new RSSFeed instance
*/
def createRSS(path: String, kind: String = "feed"): RSSFeed = kind match {
case "feed" => new RSSFeed(Http(path).option(HttpOptions.connTimeout(100000000))
.option(HttpOptions.readTimeout(500000000)).asXml)
case "local" => new RSSFeed(XML.load(path))
case _ => throw new RuntimeException("bad kind")
}
/*
* Creates a new instance of AtomFeed (either from a local file or a feed)
* @param path: Directory path or URL
* @param kind: Flag for feed/local
* @return new AtomFeed instance
*/
def createAtom(path: String, kind: String = "feed"): AtomFeed = kind match {
case "feed" => new AtomFeed(Http(path).option(HttpOptions.connTimeout(100000000))
.option(HttpOptions.readTimeout(500000000)).asXml)
case "local" => new AtomFeed(XML.load(path))
case _ => throw new RuntimeException("bad kind")
}
private def matcher(feed: scala.xml.NodeSeq) = feed match {
case Seq(<rss>{x @ _*}</rss>) => new RSSFeed(feed)
case Seq(<feed>{x @ _*}</feed>) => new AtomFeed(feed)
case _ => throw new RuntimeException("bad format")
}
}
|
meekrabR6R/rsseater-scala
|
src/main/scala/Reader.scala
|
Scala
|
mit
| 4,552 |
/*
* Copyright (C) 2014 HMPerson1 <[email protected]> and nathanfei123
*
* This file is part of AOCM.
*
* AOCM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package net.adorableoctocm.graphics
import java.awt.Color
import java.awt.geom.AffineTransform
import java.awt.image.BufferedImage
import scala.swing.{ Component, Graphics2D }
import net.adorableoctocm.State
/**
* Renders the game onto the screen.
*/
class Renderer extends Component {
override def paint(g: Graphics2D): Unit = {
repaint()
g.drawImage(frame, 0, 0, null)
g.setFont(font.deriveFont(20.0f))
g.drawString("NOT YET IMPLEMENTED", (size.width - 234) / 2.0f, 30.107422f)
}
private lazy val frame = new BufferedImage(size.width, size.height, BufferedImage.TYPE_INT_ARGB)
def onUpdate(s: State): Unit = {
// TODO: To be implemented
val g = frame.createGraphics()
g.setBackground(Color.GRAY)
g.clearRect(0, 0, frame.getWidth, frame.getHeight)
g.setPaint(Color.RED)
g.setFont(font.deriveFont(15.0f))
g.drawString(s"(${s.posx}, ${s.posy})", 20, 20)
g.drawString(s"(${s.velx}, ${s.vely})", 20, 40)
val af = AffineTransform.getScaleInstance(1, -1)
af.translate(0, -frame.getHeight + 1)
g.transform(af)
g.setPaint(Color.WHITE)
g.fillOval(s.posx, s.posy, 16, 32)
s.blocks.zipWithIndex.foreach {
case (seq, x) => seq.zipWithIndex.foreach {
case (exists, y) => {
if (exists) g.drawRect(x * 16, y * 16, 16, 16)
}
}
}
}
}
|
HMPerson1/adorable-octo-computing-machine
|
src/net/adorableoctocm/graphics/Renderer.scala
|
Scala
|
gpl-3.0
| 2,107 |
package io.github.mandar2812.dynaml.utils
import breeze.linalg._
import breeze.stats.distributions.{Gaussian, LogNormal}
import io.github.mandar2812.dynaml.algebra.PartitionedMatrixOps._
import io.github.mandar2812.dynaml.algebra.{PartitionedMatrix, btrace}
import io.github.mandar2812.dynaml.analysis.implicits._
import io.github.mandar2812.dynaml.utils
import org.scalatest.{FlatSpec, Matchers}
import spire.algebra._
import spire.implicits._
class UtilsSpec extends FlatSpec with Matchers {
"log1pexp" should " compute correctly" in assert(utils.log1pExp(0d) == math.log(2))
"diagonal function" should " obtain the diagonals of matrices correctly" in {
val m = DenseMatrix.eye[Double](2)
val errMat = utils.diagonal(m) - m
assert(trace(errMat.t*errMat) == 0.0)
val blocks = Stream.tabulate(2, 2)((i, j) =>
if(i == j) ((i.toLong, j.toLong), m)
else ((i.toLong, j.toLong), DenseMatrix.zeros[Double](2, 2))
).flatten
val pm = PartitionedMatrix(blocks, numrows = 4L, numcols = 4L)
val errMat2 = utils.diagonal(pm) - pm
assert(btrace(errMat2.t*errMat2) == 0.0)
}
"CSV Reader" should " return an non-empty iterator over csv lines" in
assert(utils.getCSVReader("data/delve.csv", ',').iterator.hasNext)
"Non empty text files " should "be readable" in assert(utils.textFileToStream("data/delve.csv").nonEmpty)
"Sumamry/Order statistics" should " compute correctly" in {
val data = List(DenseVector(0d), DenseVector(1d))
val (m, s) = utils.getStats(data)
assert(m(0) == 0.5 && s(0) == 0.5)
val (m1, s1) = utils.getStatsMult(data)
assert(m1(0) == 0.5 && s1(0, 0) == 0.5)
val (min, max) = utils.getMinMax(data)
assert(min(0) == 0d && max(0) == 1d)
}
"Quick select" should " find the kth smallest element in a collection" in
assert(utils.quickselect(Stream(2d, 3d, 4d, 1d), 2) == 2d)
"Median implementation" should " compute correctly" in
assert(utils.median(Stream(4d, 3d, 2d, 5d, 1d)) == 3d)
"Prior Map distributions" should " be generated from Map objects of continuous distributions" in {
val p = Map("a" -> Gaussian(0d, 1d), "b" -> LogNormal(0d, 1d))
assert(utils.getPriorMapDistr(p).draw().keys.toSeq == Seq("a", "b"))
}
"Chebyshev, Legendre, Hermite, Harmonic & factorial functions" should " compute" in {
assert(utils.hermite(0, 100d) == 1d && utils.hermite(1, 13.45) == 13.45 && utils.hermite(2, 2) == 3d)
assert(
utils.chebyshev(0, 100d, 1) == 1d &&
utils.chebyshev(1, 13.45, 1) == 13.45 &&
utils.chebyshev(2, 2, 2) == 15d)
assert(utils.legendre(0, 100d) == 1d && utils.legendre(1, 13.45) == 13.45 && utils.legendre(2, 2) == 5.5)
assert(utils.H(2.5) == 1.5)
assert(utils.factorial(5) == 120)
}
"Numeric ranges " should " be left inclusive only" in
assert(utils.range[Double](0d, 1d, 2) == Stream(0d, 0.5))
"Haar DWT matrix " should "compute correctly" in {
assert(utils.haarMatrix(2) == DenseMatrix((1d, 1d), (1d, -1d)))
assert(
utils.haarMatrix(4) ==
DenseMatrix(
(1d, 1d, 1d, 1d),
(1d, 1d, -1d, -1d),
(1d, -1d, 0d, 0d),
(0d, 0d, 1d, -1d))
)
}
"Product Fields" should " extend from their components" in {
val f = Field[Double]
val pf = utils.productField(f, f)
val (x, y) = ((1d, 2d), (1d, 2d))
assert(
pf.plus(x, y) == (2d, 4d) &&
pf.minus(x, y) == (0d, 0d) &&
pf.times(x, y) == (1d, 4d) &&
pf.div(x, y) == (1d, 1d) &&
pf.negate(x) == (-1d, -2d) &&
pf.zero == (f.zero, f.zero) &&
pf.one == (f.one, f.one) &&
pf.emod(x, y) == (0d, 0d) &&
pf.equot(x, y) == (1d, 1d) &&
pf.gcd(x, y) == (1d, 1d) &&
pf.lcm(x, y) == (1d, 4d))
}
"String replace function" should " work as expected" in
assert(utils.replace("abab")("blah")("abab blah abab") == "blah blah blah")
"isSquareMatrix and isSymmetricMatrix methods" should " yield correct results" in {
val res1 = try {
utils.isSquareMatrix(DenseMatrix.zeros[Double](2, 3))
None
} catch {
case m: MatrixNotSquareException => Some(m)
}
val res2 = try {
utils.isSymmetricMatrix(DenseMatrix((1d, 3d), (2d, 4d)))
None
} catch {
case m: MatrixNotSymmetricException => Some(m)
}
assert(res1.isDefined)
assert(res2.isDefined)
}
}
|
transcendent-ai-labs/DynaML
|
dynaml-core/src/test/scala/io/github/mandar2812/dynaml/utils/UtilsSpec.scala
|
Scala
|
apache-2.0
| 4,440 |
package pages.disposal_of_vehicle
import uk.gov.dvla.vehicles.presentation.common.helpers.webbrowser.{Page, WebDriverFactory}
object TermsAndConditionsPage extends Page {
final val address = buildAppUrl("tandc")
final override val title: String = "Terms and Conditions"
final val titleCy: String = "Amodau a Thelerau"
override lazy val url = WebDriverFactory.testUrl + address.substring(1)
}
|
dvla/vehicles-online
|
test/pages/disposal_of_vehicle/TermsAndConditionsPage.scala
|
Scala
|
mit
| 403 |
package org.yamlidea.psi
import com.intellij.psi.tree.IElementType
import org.yamlidea.YamlLanguage
import org.jetbrains.annotations.{NotNull, NonNls}
class YamlTokenType(@NotNull @NonNls debugName: String)
extends IElementType(debugName, YamlLanguage) {
override
def toString = "YamlTokenType." + super.toString
}
|
jameslan/yaml-idea
|
src/main/scala/org/yamlidea/psi/YamlTokenType.scala
|
Scala
|
apache-2.0
| 333 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.io.{File, PrintWriter}
import scala.reflect.ClassTag
import scala.util.matching.Regex
import org.apache.hadoop.hive.common.StatsSetupConst
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.{CatalogStatistics, HiveTableRelation}
import org.apache.spark.sql.catalyst.plans.logical.ColumnStat
import org.apache.spark.sql.catalyst.util.StringUtils
import org.apache.spark.sql.execution.command.DDLUtils
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.execution.joins._
import org.apache.spark.sql.hive.HiveExternalCatalog._
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleton {
private def dropMetadata(schema: StructType): StructType = {
val newFields = schema.fields.map { f =>
StructField(f.name, f.dataType, f.nullable, Metadata.empty)
}
StructType(newFields)
}
test("Hive serde tables should fallback to HDFS for size estimation") {
withSQLConf(SQLConf.ENABLE_FALL_BACK_TO_HDFS_FOR_STATS.key -> "true") {
withTable("csv_table") {
withTempDir { tempDir =>
// EXTERNAL OpenCSVSerde table pointing to LOCATION
val file1 = new File(tempDir + "/data1")
val writer1 = new PrintWriter(file1)
writer1.write("1,2")
writer1.close()
val file2 = new File(tempDir + "/data2")
val writer2 = new PrintWriter(file2)
writer2.write("1,2")
writer2.close()
sql(
s"""
|CREATE EXTERNAL TABLE csv_table(page_id INT, impressions INT)
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde'
|WITH SERDEPROPERTIES (
|\\"separatorChar\\" = \\",\\",
|\\"quoteChar\\" = \\"\\\\\\"\\",
|\\"escapeChar\\" = \\"\\\\\\\\\\")
|LOCATION '${tempDir.toURI}'""".stripMargin)
val relation = spark.table("csv_table").queryExecution.analyzed.children.head
.asInstanceOf[HiveTableRelation]
val properties = relation.tableMeta.ignoredProperties
assert(properties("totalSize").toLong <= 0, "external table totalSize must be <= 0")
assert(properties("rawDataSize").toLong <= 0, "external table rawDataSize must be <= 0")
val sizeInBytes = relation.stats.sizeInBytes
assert(sizeInBytes === BigInt(file1.length() + file2.length()))
}
}
}
}
test("analyze Hive serde tables") {
def queryTotalSize(tableName: String): BigInt =
spark.table(tableName).queryExecution.analyzed.stats.sizeInBytes
// Non-partitioned table
val nonPartTable = "non_part_table"
withTable(nonPartTable) {
sql(s"CREATE TABLE $nonPartTable (key STRING, value STRING)")
sql(s"INSERT INTO TABLE $nonPartTable SELECT * FROM src")
sql(s"INSERT INTO TABLE $nonPartTable SELECT * FROM src")
sql(s"ANALYZE TABLE $nonPartTable COMPUTE STATISTICS noscan")
assert(queryTotalSize(nonPartTable) === BigInt(11624))
}
// Partitioned table
val partTable = "part_table"
withTable(partTable) {
sql(s"CREATE TABLE $partTable (key STRING, value STRING) PARTITIONED BY (ds STRING)")
sql(s"INSERT INTO TABLE $partTable PARTITION (ds='2010-01-01') SELECT * FROM src")
sql(s"INSERT INTO TABLE $partTable PARTITION (ds='2010-01-02') SELECT * FROM src")
sql(s"INSERT INTO TABLE $partTable PARTITION (ds='2010-01-03') SELECT * FROM src")
assert(queryTotalSize(partTable) === spark.sessionState.conf.defaultSizeInBytes)
sql(s"ANALYZE TABLE $partTable COMPUTE STATISTICS noscan")
assert(queryTotalSize(partTable) === BigInt(17436))
}
// Try to analyze a temp table
withView("tempTable") {
sql("""SELECT * FROM src""").createOrReplaceTempView("tempTable")
intercept[AnalysisException] {
sql("ANALYZE TABLE tempTable COMPUTE STATISTICS")
}
}
}
test("analyze non hive compatible datasource tables") {
val table = "parquet_tab"
withTable(table) {
sql(
s"""
|CREATE TABLE $table (a int, b int)
|USING parquet
|OPTIONS (skipHiveMetadata true)
""".stripMargin)
// Verify that the schema stored in catalog is a dummy one used for
// data source tables. The actual schema is stored in table properties.
val rawSchema = dropMetadata(hiveClient.getTable("default", table).schema)
val expectedRawSchema = new StructType()
.add("col", "array<string>")
assert(rawSchema == expectedRawSchema)
val actualSchema = spark.sharedState.externalCatalog.getTable("default", table).schema
val expectedActualSchema = new StructType()
.add("a", "int")
.add("b", "int")
assert(actualSchema == expectedActualSchema)
sql(s"INSERT INTO $table VALUES (1, 1)")
sql(s"INSERT INTO $table VALUES (2, 1)")
sql(s"ANALYZE TABLE $table COMPUTE STATISTICS FOR COLUMNS a, b")
val fetchedStats0 =
checkTableStats(table, hasSizeInBytes = true, expectedRowCounts = Some(2))
assert(fetchedStats0.get.colStats == Map(
"a" -> ColumnStat(2, Some(1), Some(2), 0, 4, 4),
"b" -> ColumnStat(1, Some(1), Some(1), 0, 4, 4)))
}
}
test("Analyze hive serde tables when schema is not same as schema in table properties") {
val table = "hive_serde"
withTable(table) {
sql(s"CREATE TABLE $table (C1 INT, C2 STRING, C3 DOUBLE)")
// Verify that the table schema stored in hive catalog is
// different than the schema stored in table properties.
val rawSchema = dropMetadata(hiveClient.getTable("default", table).schema)
val expectedRawSchema = new StructType()
.add("c1", "int")
.add("c2", "string")
.add("c3", "double")
assert(rawSchema == expectedRawSchema)
val actualSchema = spark.sharedState.externalCatalog.getTable("default", table).schema
val expectedActualSchema = new StructType()
.add("C1", "int")
.add("C2", "string")
.add("C3", "double")
assert(actualSchema == expectedActualSchema)
sql(s"INSERT INTO TABLE $table SELECT 1, 'a', 10.0")
sql(s"ANALYZE TABLE $table COMPUTE STATISTICS FOR COLUMNS C1")
val fetchedStats1 =
checkTableStats(table, hasSizeInBytes = true, expectedRowCounts = Some(1)).get
assert(fetchedStats1.colStats == Map(
"C1" -> ColumnStat(distinctCount = 1, min = Some(1), max = Some(1), nullCount = 0,
avgLen = 4, maxLen = 4)))
}
}
test("SPARK-21079 - analyze table with location different than that of individual partitions") {
val tableName = "analyzeTable_part"
withTable(tableName) {
withTempPath { path =>
sql(s"CREATE TABLE $tableName (key STRING, value STRING) PARTITIONED BY (ds STRING)")
val partitionDates = List("2010-01-01", "2010-01-02", "2010-01-03")
partitionDates.foreach { ds =>
sql(s"INSERT INTO TABLE $tableName PARTITION (ds='$ds') SELECT * FROM src")
}
sql(s"ALTER TABLE $tableName SET LOCATION '$path'")
sql(s"ANALYZE TABLE $tableName COMPUTE STATISTICS noscan")
assert(getCatalogStatistics(tableName).sizeInBytes === BigInt(17436))
}
}
}
test("SPARK-21079 - analyze partitioned table with only a subset of partitions visible") {
val sourceTableName = "analyzeTable_part"
val tableName = "analyzeTable_part_vis"
withTable(sourceTableName, tableName) {
withTempPath { path =>
// Create a table with 3 partitions all located under a single top-level directory 'path'
sql(
s"""
|CREATE TABLE $sourceTableName (key STRING, value STRING)
|PARTITIONED BY (ds STRING)
|LOCATION '$path'
""".stripMargin)
val partitionDates = List("2010-01-01", "2010-01-02", "2010-01-03")
partitionDates.foreach { ds =>
sql(
s"""
|INSERT INTO TABLE $sourceTableName PARTITION (ds='$ds')
|SELECT * FROM src
""".stripMargin)
}
// Create another table referring to the same location
sql(
s"""
|CREATE TABLE $tableName (key STRING, value STRING)
|PARTITIONED BY (ds STRING)
|LOCATION '$path'
""".stripMargin)
// Register only one of the partitions found on disk
val ds = partitionDates.head
sql(s"ALTER TABLE $tableName ADD PARTITION (ds='$ds')")
// Analyze original table - expect 3 partitions
sql(s"ANALYZE TABLE $sourceTableName COMPUTE STATISTICS noscan")
assert(getCatalogStatistics(sourceTableName).sizeInBytes === BigInt(3 * 5812))
// Analyze partial-copy table - expect only 1 partition
sql(s"ANALYZE TABLE $tableName COMPUTE STATISTICS noscan")
assert(getCatalogStatistics(tableName).sizeInBytes === BigInt(5812))
}
}
}
test("test table-level statistics for hive tables created in HiveExternalCatalog") {
val textTable = "textTable"
withTable(textTable) {
// Currently Spark's statistics are self-contained, we don't have statistics until we use
// the `ANALYZE TABLE` command.
sql(s"CREATE TABLE $textTable (key STRING, value STRING) STORED AS TEXTFILE")
checkTableStats(
textTable,
hasSizeInBytes = false,
expectedRowCounts = None)
sql(s"INSERT INTO TABLE $textTable SELECT * FROM src")
checkTableStats(
textTable,
hasSizeInBytes = true,
expectedRowCounts = None)
// noscan won't count the number of rows
sql(s"ANALYZE TABLE $textTable COMPUTE STATISTICS noscan")
val fetchedStats1 =
checkTableStats(textTable, hasSizeInBytes = true, expectedRowCounts = None)
// without noscan, we count the number of rows
sql(s"ANALYZE TABLE $textTable COMPUTE STATISTICS")
val fetchedStats2 =
checkTableStats(textTable, hasSizeInBytes = true, expectedRowCounts = Some(500))
assert(fetchedStats1.get.sizeInBytes == fetchedStats2.get.sizeInBytes)
}
}
test("keep existing row count in stats with noscan if table is not changed") {
val textTable = "textTable"
withTable(textTable) {
sql(s"CREATE TABLE $textTable (key STRING, value STRING)")
sql(s"INSERT INTO TABLE $textTable SELECT * FROM src")
sql(s"ANALYZE TABLE $textTable COMPUTE STATISTICS")
val fetchedStats1 =
checkTableStats(textTable, hasSizeInBytes = true, expectedRowCounts = Some(500))
sql(s"ANALYZE TABLE $textTable COMPUTE STATISTICS noscan")
// when the table is not changed, total size is the same, and the old row count is kept
val fetchedStats2 =
checkTableStats(textTable, hasSizeInBytes = true, expectedRowCounts = Some(500))
assert(fetchedStats1 == fetchedStats2)
}
}
test("keep existing column stats if table is not changed") {
val table = "update_col_stats_table"
withTable(table) {
sql(s"CREATE TABLE $table (c1 INT, c2 STRING, c3 DOUBLE)")
sql(s"ANALYZE TABLE $table COMPUTE STATISTICS FOR COLUMNS c1")
val fetchedStats0 =
checkTableStats(table, hasSizeInBytes = true, expectedRowCounts = Some(0))
assert(fetchedStats0.get.colStats == Map("c1" -> ColumnStat(0, None, None, 0, 4, 4)))
// Insert new data and analyze: have the latest column stats.
sql(s"INSERT INTO TABLE $table SELECT 1, 'a', 10.0")
sql(s"ANALYZE TABLE $table COMPUTE STATISTICS FOR COLUMNS c1")
val fetchedStats1 =
checkTableStats(table, hasSizeInBytes = true, expectedRowCounts = Some(1)).get
assert(fetchedStats1.colStats == Map(
"c1" -> ColumnStat(distinctCount = 1, min = Some(1), max = Some(1), nullCount = 0,
avgLen = 4, maxLen = 4)))
// Analyze another column: since the table is not changed, the precious column stats are kept.
sql(s"ANALYZE TABLE $table COMPUTE STATISTICS FOR COLUMNS c2")
val fetchedStats2 =
checkTableStats(table, hasSizeInBytes = true, expectedRowCounts = Some(1)).get
assert(fetchedStats2.colStats == Map(
"c1" -> ColumnStat(distinctCount = 1, min = Some(1), max = Some(1), nullCount = 0,
avgLen = 4, maxLen = 4),
"c2" -> ColumnStat(distinctCount = 1, min = None, max = None, nullCount = 0,
avgLen = 1, maxLen = 1)))
// Insert new data and analyze: stale column stats are removed and newly collected column
// stats are added.
sql(s"INSERT INTO TABLE $table SELECT 2, 'b', 20.0")
sql(s"ANALYZE TABLE $table COMPUTE STATISTICS FOR COLUMNS c1, c3")
val fetchedStats3 =
checkTableStats(table, hasSizeInBytes = true, expectedRowCounts = Some(2)).get
assert(fetchedStats3.colStats == Map(
"c1" -> ColumnStat(distinctCount = 2, min = Some(1), max = Some(2), nullCount = 0,
avgLen = 4, maxLen = 4),
"c3" -> ColumnStat(distinctCount = 2, min = Some(10.0), max = Some(20.0), nullCount = 0,
avgLen = 8, maxLen = 8)))
}
}
private def createNonPartitionedTable(
tabName: String,
analyzedBySpark: Boolean = true,
analyzedByHive: Boolean = true): Unit = {
sql(
s"""
|CREATE TABLE $tabName (key STRING, value STRING)
|STORED AS TEXTFILE
|TBLPROPERTIES ('prop1' = 'val1', 'prop2' = 'val2')
""".stripMargin)
sql(s"INSERT INTO TABLE $tabName SELECT * FROM src")
if (analyzedBySpark) sql(s"ANALYZE TABLE $tabName COMPUTE STATISTICS")
// This is to mimic the scenario in which Hive genrates statistics before we reading it
if (analyzedByHive) hiveClient.runSqlHive(s"ANALYZE TABLE $tabName COMPUTE STATISTICS")
val describeResult1 = hiveClient.runSqlHive(s"DESCRIBE FORMATTED $tabName")
val tableMetadata = getCatalogTable(tabName).properties
// statistics info is not contained in the metadata of the original table
assert(Seq(StatsSetupConst.COLUMN_STATS_ACCURATE,
StatsSetupConst.NUM_FILES,
StatsSetupConst.NUM_PARTITIONS,
StatsSetupConst.ROW_COUNT,
StatsSetupConst.RAW_DATA_SIZE,
StatsSetupConst.TOTAL_SIZE).forall(!tableMetadata.contains(_)))
if (analyzedByHive) {
assert(StringUtils.filterPattern(describeResult1, "*numRows\\\\s+500*").nonEmpty)
} else {
assert(StringUtils.filterPattern(describeResult1, "*numRows\\\\s+500*").isEmpty)
}
}
private def extractStatsPropValues(
descOutput: Seq[String],
propKey: String): Option[BigInt] = {
val str = descOutput
.filterNot(_.contains(STATISTICS_PREFIX))
.filter(_.contains(propKey))
if (str.isEmpty) {
None
} else {
assert(str.length == 1, "found more than one matches")
val pattern = new Regex(s"""$propKey\\\\s+(-?\\\\d+)""")
val pattern(value) = str.head.trim
Option(BigInt(value))
}
}
test("get statistics when not analyzed in Hive or Spark") {
val tabName = "tab1"
withTable(tabName) {
createNonPartitionedTable(tabName, analyzedByHive = false, analyzedBySpark = false)
checkTableStats(tabName, hasSizeInBytes = true, expectedRowCounts = None)
// ALTER TABLE SET TBLPROPERTIES invalidates some contents of Hive specific statistics
// This is triggered by the Hive alterTable API
val describeResult = hiveClient.runSqlHive(s"DESCRIBE FORMATTED $tabName")
val rawDataSize = extractStatsPropValues(describeResult, "rawDataSize")
val numRows = extractStatsPropValues(describeResult, "numRows")
val totalSize = extractStatsPropValues(describeResult, "totalSize")
assert(rawDataSize.isEmpty, "rawDataSize should not be shown without table analysis")
assert(numRows.isEmpty, "numRows should not be shown without table analysis")
assert(totalSize.isDefined && totalSize.get > 0, "totalSize is lost")
}
}
test("alter table rename after analyze table") {
Seq(true, false).foreach { analyzedBySpark =>
val oldName = "tab1"
val newName = "tab2"
withTable(oldName, newName) {
createNonPartitionedTable(oldName, analyzedByHive = true, analyzedBySpark = analyzedBySpark)
val fetchedStats1 = checkTableStats(
oldName, hasSizeInBytes = true, expectedRowCounts = Some(500))
sql(s"ALTER TABLE $oldName RENAME TO $newName")
val fetchedStats2 = checkTableStats(
newName, hasSizeInBytes = true, expectedRowCounts = Some(500))
assert(fetchedStats1 == fetchedStats2)
// ALTER TABLE RENAME does not affect the contents of Hive specific statistics
val describeResult = hiveClient.runSqlHive(s"DESCRIBE FORMATTED $newName")
val rawDataSize = extractStatsPropValues(describeResult, "rawDataSize")
val numRows = extractStatsPropValues(describeResult, "numRows")
val totalSize = extractStatsPropValues(describeResult, "totalSize")
assert(rawDataSize.isDefined && rawDataSize.get > 0, "rawDataSize is lost")
assert(numRows.isDefined && numRows.get == 500, "numRows is lost")
assert(totalSize.isDefined && totalSize.get > 0, "totalSize is lost")
}
}
}
test("alter table should not have the side effect to store statistics in Spark side") {
val table = "alter_table_side_effect"
withTable(table) {
sql(s"CREATE TABLE $table (i string, j string)")
sql(s"INSERT INTO TABLE $table SELECT 'a', 'b'")
val catalogTable1 = getCatalogTable(table)
val hiveSize1 = BigInt(catalogTable1.ignoredProperties(StatsSetupConst.TOTAL_SIZE))
sql(s"ALTER TABLE $table SET TBLPROPERTIES ('prop1' = 'a')")
sql(s"INSERT INTO TABLE $table SELECT 'c', 'd'")
val catalogTable2 = getCatalogTable(table)
val hiveSize2 = BigInt(catalogTable2.ignoredProperties(StatsSetupConst.TOTAL_SIZE))
// After insertion, Hive's stats should be changed.
assert(hiveSize2 > hiveSize1)
// We haven't generate stats in Spark, so we should still use Hive's stats here.
assert(catalogTable2.stats.get.sizeInBytes == hiveSize2)
}
}
private def testAlterTableProperties(tabName: String, alterTablePropCmd: String): Unit = {
Seq(true, false).foreach { analyzedBySpark =>
withTable(tabName) {
createNonPartitionedTable(tabName, analyzedByHive = true, analyzedBySpark = analyzedBySpark)
checkTableStats(tabName, hasSizeInBytes = true, expectedRowCounts = Some(500))
// Run ALTER TABLE command
sql(alterTablePropCmd)
val describeResult = hiveClient.runSqlHive(s"DESCRIBE FORMATTED $tabName")
val totalSize = extractStatsPropValues(describeResult, "totalSize")
assert(totalSize.isDefined && totalSize.get > 0, "totalSize is lost")
// ALTER TABLE SET/UNSET TBLPROPERTIES invalidates some Hive specific statistics, but not
// Spark specific statistics. This is triggered by the Hive alterTable API.
val numRows = extractStatsPropValues(describeResult, "numRows")
assert(numRows.isDefined && numRows.get == -1, "numRows is lost")
val rawDataSize = extractStatsPropValues(describeResult, "rawDataSize")
assert(rawDataSize.isDefined && rawDataSize.get == -1, "rawDataSize is lost")
if (analyzedBySpark) {
checkTableStats(tabName, hasSizeInBytes = true, expectedRowCounts = Some(500))
} else {
checkTableStats(tabName, hasSizeInBytes = true, expectedRowCounts = None)
}
}
}
}
test("alter table SET TBLPROPERTIES after analyze table") {
testAlterTableProperties("set_prop_table",
"ALTER TABLE set_prop_table SET TBLPROPERTIES ('foo' = 'a')")
}
test("alter table UNSET TBLPROPERTIES after analyze table") {
testAlterTableProperties("unset_prop_table",
"ALTER TABLE unset_prop_table UNSET TBLPROPERTIES ('prop1')")
}
/**
* To see if stats exist, we need to check spark's stats properties instead of catalog
* statistics, because hive would change stats in metastore and thus change catalog statistics.
*/
private def getStatsProperties(tableName: String): Map[String, String] = {
val hTable = hiveClient.getTable(spark.sessionState.catalog.getCurrentDatabase, tableName)
hTable.properties.filterKeys(_.startsWith(STATISTICS_PREFIX))
}
test("change stats after insert command for hive table") {
val table = s"change_stats_insert_hive_table"
Seq(false, true).foreach { autoUpdate =>
withSQLConf(SQLConf.AUTO_UPDATE_SIZE.key -> autoUpdate.toString) {
withTable(table) {
sql(s"CREATE TABLE $table (i int, j string)")
// analyze to get initial stats
sql(s"ANALYZE TABLE $table COMPUTE STATISTICS FOR COLUMNS i, j")
val fetched1 = checkTableStats(table, hasSizeInBytes = true, expectedRowCounts = Some(0))
assert(fetched1.get.sizeInBytes == 0)
assert(fetched1.get.colStats.size == 2)
// insert into command
sql(s"INSERT INTO TABLE $table SELECT 1, 'abc'")
if (autoUpdate) {
val fetched2 = checkTableStats(table, hasSizeInBytes = true, expectedRowCounts = None)
assert(fetched2.get.sizeInBytes > 0)
assert(fetched2.get.colStats.isEmpty)
val statsProp = getStatsProperties(table)
assert(statsProp(STATISTICS_TOTAL_SIZE).toLong == fetched2.get.sizeInBytes)
} else {
assert(getStatsProperties(table).isEmpty)
}
}
}
}
}
test("change stats after load data command") {
val table = "change_stats_load_table"
Seq(false, true).foreach { autoUpdate =>
withSQLConf(SQLConf.AUTO_UPDATE_SIZE.key -> autoUpdate.toString) {
withTable(table) {
sql(s"CREATE TABLE $table (i INT, j STRING) STORED AS PARQUET")
// analyze to get initial stats
sql(s"ANALYZE TABLE $table COMPUTE STATISTICS FOR COLUMNS i, j")
val fetched1 = checkTableStats(table, hasSizeInBytes = true, expectedRowCounts = Some(0))
assert(fetched1.get.sizeInBytes == 0)
assert(fetched1.get.colStats.size == 2)
withTempDir { loadPath =>
// load data command
val file = new File(loadPath + "/data")
val writer = new PrintWriter(file)
writer.write("2,xyz")
writer.close()
sql(s"LOAD DATA INPATH '${loadPath.toURI.toString}' INTO TABLE $table")
if (autoUpdate) {
val fetched2 = checkTableStats(table, hasSizeInBytes = true, expectedRowCounts = None)
assert(fetched2.get.sizeInBytes > 0)
assert(fetched2.get.colStats.isEmpty)
val statsProp = getStatsProperties(table)
assert(statsProp(STATISTICS_TOTAL_SIZE).toLong == fetched2.get.sizeInBytes)
} else {
assert(getStatsProperties(table).isEmpty)
}
}
}
}
}
}
test("change stats after add/drop partition command") {
val table = "change_stats_part_table"
Seq(false, true).foreach { autoUpdate =>
withSQLConf(SQLConf.AUTO_UPDATE_SIZE.key -> autoUpdate.toString) {
withTable(table) {
sql(s"CREATE TABLE $table (i INT, j STRING) PARTITIONED BY (ds STRING, hr STRING)")
// table has two partitions initially
for (ds <- Seq("2008-04-08"); hr <- Seq("11", "12")) {
sql(s"INSERT OVERWRITE TABLE $table PARTITION (ds='$ds',hr='$hr') SELECT 1, 'a'")
}
// analyze to get initial stats
sql(s"ANALYZE TABLE $table COMPUTE STATISTICS FOR COLUMNS i, j")
val fetched1 = checkTableStats(table, hasSizeInBytes = true, expectedRowCounts = Some(2))
assert(fetched1.get.sizeInBytes > 0)
assert(fetched1.get.colStats.size == 2)
withTempPaths(numPaths = 2) { case Seq(dir1, dir2) =>
val file1 = new File(dir1 + "/data")
val writer1 = new PrintWriter(file1)
writer1.write("1,a")
writer1.close()
val file2 = new File(dir2 + "/data")
val writer2 = new PrintWriter(file2)
writer2.write("1,a")
writer2.close()
// add partition command
sql(
s"""
|ALTER TABLE $table ADD
|PARTITION (ds='2008-04-09', hr='11') LOCATION '${dir1.toURI.toString}'
|PARTITION (ds='2008-04-09', hr='12') LOCATION '${dir2.toURI.toString}'
""".stripMargin)
if (autoUpdate) {
val fetched2 = checkTableStats(table, hasSizeInBytes = true, expectedRowCounts = None)
assert(fetched2.get.sizeInBytes > fetched1.get.sizeInBytes)
assert(fetched2.get.colStats.isEmpty)
val statsProp = getStatsProperties(table)
assert(statsProp(STATISTICS_TOTAL_SIZE).toLong == fetched2.get.sizeInBytes)
} else {
assert(getStatsProperties(table).isEmpty)
}
// now the table has four partitions, generate stats again
sql(s"ANALYZE TABLE $table COMPUTE STATISTICS FOR COLUMNS i, j")
val fetched3 = checkTableStats(
table, hasSizeInBytes = true, expectedRowCounts = Some(4))
assert(fetched3.get.sizeInBytes > 0)
assert(fetched3.get.colStats.size == 2)
// drop partition command
sql(s"ALTER TABLE $table DROP PARTITION (ds='2008-04-08'), PARTITION (hr='12')")
assert(spark.sessionState.catalog.listPartitions(TableIdentifier(table))
.map(_.spec).toSet == Set(Map("ds" -> "2008-04-09", "hr" -> "11")))
// only one partition left
if (autoUpdate) {
val fetched4 = checkTableStats(table, hasSizeInBytes = true, expectedRowCounts = None)
assert(fetched4.get.sizeInBytes < fetched1.get.sizeInBytes)
assert(fetched4.get.colStats.isEmpty)
val statsProp = getStatsProperties(table)
assert(statsProp(STATISTICS_TOTAL_SIZE).toLong == fetched4.get.sizeInBytes)
} else {
assert(getStatsProperties(table).isEmpty)
}
}
}
}
}
}
test("add/drop partitions - managed table") {
val catalog = spark.sessionState.catalog
val managedTable = "partitionedTable"
withTable(managedTable) {
sql(
s"""
|CREATE TABLE $managedTable (key INT, value STRING)
|PARTITIONED BY (ds STRING, hr STRING)
""".stripMargin)
for (ds <- Seq("2008-04-08", "2008-04-09"); hr <- Seq("11", "12")) {
sql(
s"""
|INSERT OVERWRITE TABLE $managedTable
|partition (ds='$ds',hr='$hr')
|SELECT 1, 'a'
""".stripMargin)
}
checkTableStats(
managedTable, hasSizeInBytes = false, expectedRowCounts = None)
sql(s"ANALYZE TABLE $managedTable COMPUTE STATISTICS")
val stats1 = checkTableStats(
managedTable, hasSizeInBytes = true, expectedRowCounts = Some(4))
sql(
s"""
|ALTER TABLE $managedTable DROP PARTITION (ds='2008-04-08'),
|PARTITION (hr='12')
""".stripMargin)
assert(catalog.listPartitions(TableIdentifier(managedTable)).map(_.spec).toSet ==
Set(Map("ds" -> "2008-04-09", "hr" -> "11")))
sql(s"ANALYZE TABLE $managedTable COMPUTE STATISTICS")
val stats2 = checkTableStats(
managedTable, hasSizeInBytes = true, expectedRowCounts = Some(1))
assert(stats1.get.sizeInBytes > stats2.get.sizeInBytes)
sql(s"ALTER TABLE $managedTable ADD PARTITION (ds='2008-04-08', hr='12')")
sql(s"ANALYZE TABLE $managedTable COMPUTE STATISTICS")
val stats4 = checkTableStats(
managedTable, hasSizeInBytes = true, expectedRowCounts = Some(1))
assert(stats1.get.sizeInBytes > stats4.get.sizeInBytes)
assert(stats4.get.sizeInBytes == stats2.get.sizeInBytes)
}
}
test("test statistics of LogicalRelation converted from Hive serde tables") {
val parquetTable = "parquetTable"
val orcTable = "orcTable"
withTable(parquetTable, orcTable) {
sql(s"CREATE TABLE $parquetTable (key STRING, value STRING) STORED AS PARQUET")
sql(s"CREATE TABLE $orcTable (key STRING, value STRING) STORED AS ORC")
sql(s"INSERT INTO TABLE $parquetTable SELECT * FROM src")
sql(s"INSERT INTO TABLE $orcTable SELECT * FROM src")
// the default value for `spark.sql.hive.convertMetastoreParquet` is true, here we just set it
// for robustness
withSQLConf(HiveUtils.CONVERT_METASTORE_PARQUET.key -> "true") {
checkTableStats(parquetTable, hasSizeInBytes = false, expectedRowCounts = None)
sql(s"ANALYZE TABLE $parquetTable COMPUTE STATISTICS")
checkTableStats(parquetTable, hasSizeInBytes = true, expectedRowCounts = Some(500))
}
withSQLConf(HiveUtils.CONVERT_METASTORE_ORC.key -> "true") {
// We still can get tableSize from Hive before Analyze
checkTableStats(orcTable, hasSizeInBytes = true, expectedRowCounts = None)
sql(s"ANALYZE TABLE $orcTable COMPUTE STATISTICS")
checkTableStats(orcTable, hasSizeInBytes = true, expectedRowCounts = Some(500))
}
}
}
test("verify serialized column stats after analyzing columns") {
import testImplicits._
val tableName = "column_stats_test2"
// (data.head.productArity - 1) because the last column does not support stats collection.
assert(stats.size == data.head.productArity - 1)
val df = data.toDF(stats.keys.toSeq :+ "carray" : _*)
withTable(tableName) {
df.write.saveAsTable(tableName)
// Collect statistics
sql(s"analyze table $tableName compute STATISTICS FOR COLUMNS " + stats.keys.mkString(", "))
// Validate statistics
val table = hiveClient.getTable("default", tableName)
val props = table.properties.filterKeys(_.startsWith("spark.sql.statistics.colStats"))
assert(props == Map(
"spark.sql.statistics.colStats.cbinary.avgLen" -> "3",
"spark.sql.statistics.colStats.cbinary.distinctCount" -> "2",
"spark.sql.statistics.colStats.cbinary.maxLen" -> "3",
"spark.sql.statistics.colStats.cbinary.nullCount" -> "1",
"spark.sql.statistics.colStats.cbinary.version" -> "1",
"spark.sql.statistics.colStats.cbool.avgLen" -> "1",
"spark.sql.statistics.colStats.cbool.distinctCount" -> "2",
"spark.sql.statistics.colStats.cbool.max" -> "true",
"spark.sql.statistics.colStats.cbool.maxLen" -> "1",
"spark.sql.statistics.colStats.cbool.min" -> "false",
"spark.sql.statistics.colStats.cbool.nullCount" -> "1",
"spark.sql.statistics.colStats.cbool.version" -> "1",
"spark.sql.statistics.colStats.cbyte.avgLen" -> "1",
"spark.sql.statistics.colStats.cbyte.distinctCount" -> "2",
"spark.sql.statistics.colStats.cbyte.max" -> "2",
"spark.sql.statistics.colStats.cbyte.maxLen" -> "1",
"spark.sql.statistics.colStats.cbyte.min" -> "1",
"spark.sql.statistics.colStats.cbyte.nullCount" -> "1",
"spark.sql.statistics.colStats.cbyte.version" -> "1",
"spark.sql.statistics.colStats.cdate.avgLen" -> "4",
"spark.sql.statistics.colStats.cdate.distinctCount" -> "2",
"spark.sql.statistics.colStats.cdate.max" -> "2016-05-09",
"spark.sql.statistics.colStats.cdate.maxLen" -> "4",
"spark.sql.statistics.colStats.cdate.min" -> "2016-05-08",
"spark.sql.statistics.colStats.cdate.nullCount" -> "1",
"spark.sql.statistics.colStats.cdate.version" -> "1",
"spark.sql.statistics.colStats.cdecimal.avgLen" -> "16",
"spark.sql.statistics.colStats.cdecimal.distinctCount" -> "2",
"spark.sql.statistics.colStats.cdecimal.max" -> "8.000000000000000000",
"spark.sql.statistics.colStats.cdecimal.maxLen" -> "16",
"spark.sql.statistics.colStats.cdecimal.min" -> "1.000000000000000000",
"spark.sql.statistics.colStats.cdecimal.nullCount" -> "1",
"spark.sql.statistics.colStats.cdecimal.version" -> "1",
"spark.sql.statistics.colStats.cdouble.avgLen" -> "8",
"spark.sql.statistics.colStats.cdouble.distinctCount" -> "2",
"spark.sql.statistics.colStats.cdouble.max" -> "6.0",
"spark.sql.statistics.colStats.cdouble.maxLen" -> "8",
"spark.sql.statistics.colStats.cdouble.min" -> "1.0",
"spark.sql.statistics.colStats.cdouble.nullCount" -> "1",
"spark.sql.statistics.colStats.cdouble.version" -> "1",
"spark.sql.statistics.colStats.cfloat.avgLen" -> "4",
"spark.sql.statistics.colStats.cfloat.distinctCount" -> "2",
"spark.sql.statistics.colStats.cfloat.max" -> "7.0",
"spark.sql.statistics.colStats.cfloat.maxLen" -> "4",
"spark.sql.statistics.colStats.cfloat.min" -> "1.0",
"spark.sql.statistics.colStats.cfloat.nullCount" -> "1",
"spark.sql.statistics.colStats.cfloat.version" -> "1",
"spark.sql.statistics.colStats.cint.avgLen" -> "4",
"spark.sql.statistics.colStats.cint.distinctCount" -> "2",
"spark.sql.statistics.colStats.cint.max" -> "4",
"spark.sql.statistics.colStats.cint.maxLen" -> "4",
"spark.sql.statistics.colStats.cint.min" -> "1",
"spark.sql.statistics.colStats.cint.nullCount" -> "1",
"spark.sql.statistics.colStats.cint.version" -> "1",
"spark.sql.statistics.colStats.clong.avgLen" -> "8",
"spark.sql.statistics.colStats.clong.distinctCount" -> "2",
"spark.sql.statistics.colStats.clong.max" -> "5",
"spark.sql.statistics.colStats.clong.maxLen" -> "8",
"spark.sql.statistics.colStats.clong.min" -> "1",
"spark.sql.statistics.colStats.clong.nullCount" -> "1",
"spark.sql.statistics.colStats.clong.version" -> "1",
"spark.sql.statistics.colStats.cshort.avgLen" -> "2",
"spark.sql.statistics.colStats.cshort.distinctCount" -> "2",
"spark.sql.statistics.colStats.cshort.max" -> "3",
"spark.sql.statistics.colStats.cshort.maxLen" -> "2",
"spark.sql.statistics.colStats.cshort.min" -> "1",
"spark.sql.statistics.colStats.cshort.nullCount" -> "1",
"spark.sql.statistics.colStats.cshort.version" -> "1",
"spark.sql.statistics.colStats.cstring.avgLen" -> "3",
"spark.sql.statistics.colStats.cstring.distinctCount" -> "2",
"spark.sql.statistics.colStats.cstring.maxLen" -> "3",
"spark.sql.statistics.colStats.cstring.nullCount" -> "1",
"spark.sql.statistics.colStats.cstring.version" -> "1",
"spark.sql.statistics.colStats.ctimestamp.avgLen" -> "8",
"spark.sql.statistics.colStats.ctimestamp.distinctCount" -> "2",
"spark.sql.statistics.colStats.ctimestamp.max" -> "2016-05-09 00:00:02.0",
"spark.sql.statistics.colStats.ctimestamp.maxLen" -> "8",
"spark.sql.statistics.colStats.ctimestamp.min" -> "2016-05-08 00:00:01.0",
"spark.sql.statistics.colStats.ctimestamp.nullCount" -> "1",
"spark.sql.statistics.colStats.ctimestamp.version" -> "1"
))
}
}
private def testUpdatingTableStats(tableDescription: String, createTableCmd: String): Unit = {
test("test table-level statistics for " + tableDescription) {
val parquetTable = "parquetTable"
withTable(parquetTable) {
sql(createTableCmd)
val catalogTable = getCatalogTable(parquetTable)
assert(DDLUtils.isDatasourceTable(catalogTable))
// Add a filter to avoid creating too many partitions
sql(s"INSERT INTO TABLE $parquetTable SELECT * FROM src WHERE key < 10")
checkTableStats(parquetTable, hasSizeInBytes = false, expectedRowCounts = None)
// noscan won't count the number of rows
sql(s"ANALYZE TABLE $parquetTable COMPUTE STATISTICS noscan")
val fetchedStats1 =
checkTableStats(parquetTable, hasSizeInBytes = true, expectedRowCounts = None)
sql(s"INSERT INTO TABLE $parquetTable SELECT * FROM src WHERE key < 10")
sql(s"ANALYZE TABLE $parquetTable COMPUTE STATISTICS noscan")
val fetchedStats2 =
checkTableStats(parquetTable, hasSizeInBytes = true, expectedRowCounts = None)
assert(fetchedStats2.get.sizeInBytes > fetchedStats1.get.sizeInBytes)
// without noscan, we count the number of rows
sql(s"ANALYZE TABLE $parquetTable COMPUTE STATISTICS")
val fetchedStats3 =
checkTableStats(parquetTable, hasSizeInBytes = true, expectedRowCounts = Some(20))
assert(fetchedStats3.get.sizeInBytes == fetchedStats2.get.sizeInBytes)
}
}
}
testUpdatingTableStats(
"data source table created in HiveExternalCatalog",
"CREATE TABLE parquetTable (key STRING, value STRING) USING PARQUET")
testUpdatingTableStats(
"partitioned data source table",
"CREATE TABLE parquetTable (key STRING, value STRING) USING PARQUET PARTITIONED BY (key)")
/** Used to test refreshing cached metadata once table stats are updated. */
private def getStatsBeforeAfterUpdate(isAnalyzeColumns: Boolean)
: (CatalogStatistics, CatalogStatistics) = {
val tableName = "tbl"
var statsBeforeUpdate: CatalogStatistics = null
var statsAfterUpdate: CatalogStatistics = null
withTable(tableName) {
val tableIndent = TableIdentifier(tableName, Some("default"))
val catalog = spark.sessionState.catalog.asInstanceOf[HiveSessionCatalog]
sql(s"CREATE TABLE $tableName (key int) USING PARQUET")
sql(s"INSERT INTO $tableName SELECT 1")
if (isAnalyzeColumns) {
sql(s"ANALYZE TABLE $tableName COMPUTE STATISTICS FOR COLUMNS key")
} else {
sql(s"ANALYZE TABLE $tableName COMPUTE STATISTICS")
}
// Table lookup will make the table cached.
spark.table(tableIndent)
statsBeforeUpdate = catalog.metastoreCatalog.getCachedDataSourceTable(tableIndent)
.asInstanceOf[LogicalRelation].catalogTable.get.stats.get
sql(s"INSERT INTO $tableName SELECT 2")
if (isAnalyzeColumns) {
sql(s"ANALYZE TABLE $tableName COMPUTE STATISTICS FOR COLUMNS key")
} else {
sql(s"ANALYZE TABLE $tableName COMPUTE STATISTICS")
}
spark.table(tableIndent)
statsAfterUpdate = catalog.metastoreCatalog.getCachedDataSourceTable(tableIndent)
.asInstanceOf[LogicalRelation].catalogTable.get.stats.get
}
(statsBeforeUpdate, statsAfterUpdate)
}
test("test refreshing table stats of cached data source table by `ANALYZE TABLE` statement") {
val (statsBeforeUpdate, statsAfterUpdate) = getStatsBeforeAfterUpdate(isAnalyzeColumns = false)
assert(statsBeforeUpdate.sizeInBytes > 0)
assert(statsBeforeUpdate.rowCount == Some(1))
assert(statsAfterUpdate.sizeInBytes > statsBeforeUpdate.sizeInBytes)
assert(statsAfterUpdate.rowCount == Some(2))
}
test("estimates the size of a test Hive serde tables") {
val df = sql("""SELECT * FROM src""")
val sizes = df.queryExecution.analyzed.collect {
case relation: HiveTableRelation => relation.stats.sizeInBytes
}
assert(sizes.size === 1, s"Size wrong for:\\n ${df.queryExecution}")
assert(sizes(0).equals(BigInt(5812)),
s"expected exact size 5812 for test table 'src', got: ${sizes(0)}")
}
test("auto converts to broadcast hash join, by size estimate of a relation") {
def mkTest(
before: () => Unit,
after: () => Unit,
query: String,
expectedAnswer: Seq[Row],
ct: ClassTag[_]): Unit = {
before()
var df = sql(query)
// Assert src has a size smaller than the threshold.
val sizes = df.queryExecution.analyzed.collect {
case r if ct.runtimeClass.isAssignableFrom(r.getClass) => r.stats.sizeInBytes
}
assert(sizes.size === 2 && sizes(0) <= spark.sessionState.conf.autoBroadcastJoinThreshold
&& sizes(1) <= spark.sessionState.conf.autoBroadcastJoinThreshold,
s"query should contain two relations, each of which has size smaller than autoConvertSize")
// Using `sparkPlan` because for relevant patterns in HashJoin to be
// matched, other strategies need to be applied.
var bhj = df.queryExecution.sparkPlan.collect { case j: BroadcastHashJoinExec => j }
assert(bhj.size === 1,
s"actual query plans do not contain broadcast join: ${df.queryExecution}")
checkAnswer(df, expectedAnswer) // check correctness of output
spark.sessionState.conf.settings.synchronized {
val tmp = spark.sessionState.conf.autoBroadcastJoinThreshold
sql(s"""SET ${SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key}=-1""")
df = sql(query)
bhj = df.queryExecution.sparkPlan.collect { case j: BroadcastHashJoinExec => j }
assert(bhj.isEmpty, "BroadcastHashJoin still planned even though it is switched off")
val shj = df.queryExecution.sparkPlan.collect { case j: SortMergeJoinExec => j }
assert(shj.size === 1,
"SortMergeJoin should be planned when BroadcastHashJoin is turned off")
sql(s"""SET ${SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key}=$tmp""")
}
after()
}
/** Tests for Hive serde tables */
val metastoreQuery = """SELECT * FROM src a JOIN src b ON a.key = 238 AND a.key = b.key"""
val metastoreAnswer = Seq.fill(4)(Row(238, "val_238", 238, "val_238"))
mkTest(
() => (),
() => (),
metastoreQuery,
metastoreAnswer,
implicitly[ClassTag[HiveTableRelation]]
)
}
test("auto converts to broadcast left semi join, by size estimate of a relation") {
val leftSemiJoinQuery =
"""SELECT * FROM src a
|left semi JOIN src b ON a.key=86 and a.key = b.key""".stripMargin
val answer = Row(86, "val_86")
var df = sql(leftSemiJoinQuery)
// Assert src has a size smaller than the threshold.
val sizes = df.queryExecution.analyzed.collect {
case relation: HiveTableRelation => relation.stats.sizeInBytes
}
assert(sizes.size === 2 && sizes(1) <= spark.sessionState.conf.autoBroadcastJoinThreshold
&& sizes(0) <= spark.sessionState.conf.autoBroadcastJoinThreshold,
s"query should contain two relations, each of which has size smaller than autoConvertSize")
// Using `sparkPlan` because for relevant patterns in HashJoin to be
// matched, other strategies need to be applied.
var bhj = df.queryExecution.sparkPlan.collect {
case j: BroadcastHashJoinExec => j
}
assert(bhj.size === 1,
s"actual query plans do not contain broadcast join: ${df.queryExecution}")
checkAnswer(df, answer) // check correctness of output
spark.sessionState.conf.settings.synchronized {
val tmp = spark.sessionState.conf.autoBroadcastJoinThreshold
sql(s"SET ${SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key}=-1")
df = sql(leftSemiJoinQuery)
bhj = df.queryExecution.sparkPlan.collect {
case j: BroadcastHashJoinExec => j
}
assert(bhj.isEmpty, "BroadcastHashJoin still planned even though it is switched off")
val shj = df.queryExecution.sparkPlan.collect {
case j: SortMergeJoinExec => j
}
assert(shj.size === 1,
"SortMergeJoinExec should be planned when BroadcastHashJoin is turned off")
sql(s"SET ${SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key}=$tmp")
}
}
}
|
UndeadBaneGitHub/spark
|
sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
|
Scala
|
apache-2.0
| 44,803 |
package io.scalac.slack
import io.scalac.slack.api.{ApiTestResponse, AuthTestResponse, RtmStartResponse}
import io.scalac.slack.models._
import org.joda.time.DateTime
import org.scalatest.{FunSuite, Matchers}
import spray.json._
/**
* Created on 27.01.15 22:39
*/
class UnmarshallerTest extends FunSuite with Matchers {
import io.scalac.slack.api.Unmarshallers._
val url = "https://testapp.slack.com/"
val team = "testteam"
val username = "testuser"
val teamId = "T03DN3GTN"
val userId = "U03DQKG34"
test("api.test empty response") {
val response = /*language=JSON*/ """{"ok":true}"""
val apiTestResponse = response.parseJson.convertTo[ApiTestResponse]
apiTestResponse shouldBe 'ok
apiTestResponse.args should be(None)
apiTestResponse.error should be(None)
}
test("api.test with param") {
val response = /*language=JSON*/ """{"ok":true,"args":{"name":"mario"}}"""
val apiTestResponse = response.parseJson.convertTo[ApiTestResponse]
apiTestResponse shouldBe 'ok
apiTestResponse.args should be(Some(Map("name" -> "mario")))
apiTestResponse.error should be(None)
}
test("api.test with error") {
val response = """{"ok":false,"error":"auth_error","args":{"error":"auth_error"}}"""
val apiTestResponse = response.parseJson.convertTo[ApiTestResponse]
apiTestResponse should not be 'ok
apiTestResponse.args should be(Some(Map("error" -> "auth_error")))
apiTestResponse.error should be(Some("auth_error"))
}
test("api.test with error and param") {
val response = """{"ok":false,"error":"auth_error","args":{"error":"auth_error","name":"mario"}}"""
val apiTestResponse = response.parseJson.convertTo[ApiTestResponse]
apiTestResponse should not be 'ok
apiTestResponse.args should be(Some(Map("error" -> "auth_error", "name" -> "mario")))
apiTestResponse.error should be(Some("auth_error"))
}
test("auth.test successful") {
val response = s"""{"ok":true,"url":"$url","team":"$team","user":"$username","team_id":"$teamId","user_id":"$userId"}"""
val authTestResponse = response.parseJson.convertTo[AuthTestResponse]
authTestResponse shouldBe 'ok
authTestResponse.error should be(None)
authTestResponse.url should equal(Some(url))
authTestResponse.team should equal(Some(team))
authTestResponse.user should equal(Some(username))
authTestResponse.user_id should equal(Some(userId))
authTestResponse.team_id should equal(Some(teamId))
}
test("auth.test failed") {
val response = """{"ok":false,"error":"not_authed"}"""
val authTestResponse = response.parseJson.convertTo[AuthTestResponse]
authTestResponse should not be 'ok
authTestResponse.error should be(Some("not_authed"))
authTestResponse.url should be(None)
authTestResponse.team should equal(None)
authTestResponse.user should equal(None)
authTestResponse.user_id should equal(None)
authTestResponse.team_id should equal(None)
}
test("rtm.start successful") {
/* language=JSON */
val response = """{"channels": [{
| "is_channel": true,
| "name": "general",
| "last_read": "1421772996.000005",
| "creator": "U03DN1GTQ",
| "purpose": {
| "value": "This channel is for team-wide communication and announcements. All team members are in this channel.",
| "creator": "",
| "last_set": 0
| },
| "is_member": true,
| "id": "C03DN1GUJ",
| "unread_count": 1,
| "members": ["U03DKUF05", "U03DKUMKH", "U03DKUTAZ", "U03DL3Q9M", "U03DN1GTQ", "U03DQKG14"],
| "is_general": true,
| "topic": {
| "value": "",
| "creator": "",
| "last_set": 0
| },
| "latest": {
| "subtype": "channel_join",
| "ts": "1421786647.000002",
| "text": "<@U03DQKG14|secretary> has joined the channel",
| "type": "message",
| "user": "U03DQKG14"
| },
| "is_archived": false,
| "created": 1421772055
| }, {
| "is_channel": true,
| "name": "random",
| "creator": "U03DN1GTQ",
| "is_member": false,
| "id": "C03DN1GUN",
| "is_general": false,
| "is_archived": false,
| "created": 1421772055
| }],
| "url": "wss://ms25.slack-msgs.com/websocket/_eQUaO1csLMyoe4p4rUgEIH/W/gEruHxke8x0TNSE0ltMOdO7bHsP_W9mOznr5U1DzWvW7qs6BZulFXKcg0X2giBxV8UaHtptGEK0_F_rUA=",
| "bots": [{
| "id": "B03DL3Q9K",
| "name": "bot",
| "deleted": false,
| "icons": {
| "image_48": "https://slack.global.ssl.fastly.net/26133/plugins/bot/assets/bot_48.png"
| }
| }, {
| "id": "B03DQKG0Y",
| "name": "bot",
| "deleted": false,
| "icons": {
| "image_48": "https://slack.global.ssl.fastly.net/26133/plugins/bot/assets/bot_48.png"
| }
| }],
| "users": [{
| "is_bot": false,
| "name": "benek",
| "tz_offset": 3600,
| "is_admin": false,
| "tz": "Europe/Amsterdam",
| "color": "4bbe2e",
| "is_owner": false,
| "has_files": false,
| "id": "U03DKUF05",
| "presence": "away",
| "profile": {
| "email": "[email protected]",
| "image_72": "https://secure.gravatar.com/avatar/3d6188e64eb0f7d1156d3bda95452901.jpg?s=72&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0000-72.png",
| "image_48": "https://secure.gravatar.com/avatar/3d6188e64eb0f7d1156d3bda95452901.jpg?s=48&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0000-48.png",
| "image_32": "https://secure.gravatar.com/avatar/3d6188e64eb0f7d1156d3bda95452901.jpg?s=32&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0000-32.png",
| "real_name_normalized": "",
| "real_name": "",
| "image_24": "https://secure.gravatar.com/avatar/3d6188e64eb0f7d1156d3bda95452901.jpg?s=24&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0000-24.png",
| "image_192": "https://secure.gravatar.com/avatar/3d6188e64eb0f7d1156d3bda95452901.jpg?s=192&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0000.png"
| },
| "tz_label": "Central European Time",
| "is_ultra_restricted": false,
| "status": null,
| "real_name": "",
| "is_restricted": false,
| "deleted": false,
| "is_primary_owner": false
| }, {
| "is_bot": true,
| "name": "iwan",
| "has_files": false,
| "id": "U03DL3Q9M",
| "presence": "away",
| "profile": {
| "image_original": "https://s3-us-west-2.amazonaws.com/slack-files2/avatars/2015-01-20/3462126459_e2907a3b77c466905e17_original.jpg",
| "image_72": "https://s3-us-west-2.amazonaws.com/slack-files2/avatars/2015-01-20/3462126459_e2907a3b77c466905e17_72.jpg",
| "image_48": "https://s3-us-west-2.amazonaws.com/slack-files2/avatars/2015-01-20/3462126459_e2907a3b77c466905e17_48.jpg",
| "bot_id": "B03DL3Q9K",
| "image_32": "https://s3-us-west-2.amazonaws.com/slack-files2/avatars/2015-01-20/3462126459_e2907a3b77c466905e17_32.jpg",
| "real_name_normalized": "",
| "real_name": "",
| "image_24": "https://s3-us-west-2.amazonaws.com/slack-files2/avatars/2015-01-20/3462126459_e2907a3b77c466905e17_24.jpg",
| "image_192": "https://s3-us-west-2.amazonaws.com/slack-files2/avatars/2015-01-20/3462126459_e2907a3b77c466905e17_192.jpg"
| },
| "deleted": true
| }, {
| "is_bot": false,
| "name": "marioosh",
| "tz_offset": 3600,
| "is_admin": true,
| "tz": "Europe/Amsterdam",
| "color": "9f69e7",
| "is_owner": true,
| "has_files": false,
| "id": "U03DN1GTQ",
| "presence": "active",
| "profile": {
| "email": "[email protected]",
| "image_72": "https://secure.gravatar.com/avatar/ab02a07bc137cb73708602cafcd897d4.jpg?s=72&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0020-72.png",
| "image_48": "https://secure.gravatar.com/avatar/ab02a07bc137cb73708602cafcd897d4.jpg?s=48&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0020-48.png",
| "image_32": "https://secure.gravatar.com/avatar/ab02a07bc137cb73708602cafcd897d4.jpg?s=32&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0020-32.png",
| "real_name_normalized": "",
| "real_name": "",
| "image_24": "https://secure.gravatar.com/avatar/ab02a07bc137cb73708602cafcd897d4.jpg?s=24&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0020-24.png",
| "image_192": "https://secure.gravatar.com/avatar/ab02a07bc137cb73708602cafcd897d4.jpg?s=192&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0020.png"
| },
| "tz_label": "Central European Time",
| "is_ultra_restricted": false,
| "status": null,
| "real_name": "",
| "is_restricted": false,
| "deleted": false,
| "is_primary_owner": true
| }, {
| "is_bot": true,
| "name": "secretary",
| "tz_offset": -28800,
| "is_admin": false,
| "tz": null,
| "color": "e96699",
| "is_owner": false,
| "has_files": false,
| "id": "U03DQKG14",
| "presence": "away",
| "profile": {
| "first_name": "IVAN",
| "image_original": "https://s3-us-west-2.amazonaws.com/slack-files2/avatars/2015-01-20/3466670008_0a4adf28d0f251ad032e_original.jpg",
| "image_72": "https://s3-us-west-2.amazonaws.com/slack-files2/avatars/2015-01-20/3466670008_0a4adf28d0f251ad032e_48.jpg",
| "image_48": "https://s3-us-west-2.amazonaws.com/slack-files2/avatars/2015-01-20/3466670008_0a4adf28d0f251ad032e_48.jpg",
| "bot_id": "B03DQKG0Y",
| "image_32": "https://s3-us-west-2.amazonaws.com/slack-files2/avatars/2015-01-20/3466670008_0a4adf28d0f251ad032e_32.jpg",
| "real_name_normalized": "IVAN DEPLOYER",
| "last_name": "DEPLOYER",
| "real_name": "IVAN DEPLOYER",
| "image_24": "https://s3-us-west-2.amazonaws.com/slack-files2/avatars/2015-01-20/3466670008_0a4adf28d0f251ad032e_24.jpg",
| "title": "KEEP CHANNEL TIDY",
| "image_192": "https://s3-us-west-2.amazonaws.com/slack-files2/avatars/2015-01-20/3466670008_0a4adf28d0f251ad032e_48.jpg"
| },
| "tz_label": "Pacific Standard Time",
| "is_ultra_restricted": false,
| "status": null,
| "real_name": "IVAN DEPLOYER",
| "is_restricted": false,
| "deleted": false,
| "is_primary_owner": false
| }, {
| "is_bot": false,
| "name": "stefek",
| "tz_offset": 3600,
| "is_admin": false,
| "tz": "Europe/Amsterdam",
| "color": "3c989f",
| "is_owner": false,
| "has_files": false,
| "id": "U03DKUTAZ",
| "presence": "away",
| "profile": {
| "email": "[email protected]",
| "image_72": "https://secure.gravatar.com/avatar/a4551e4b7d330e59acf4bdda79ac8b21.jpg?s=72&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0002-72.png",
| "image_48": "https://secure.gravatar.com/avatar/a4551e4b7d330e59acf4bdda79ac8b21.jpg?s=48&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F20655%2Fimg%2Favatars%2Fava_0002-48.png",
| "image_32": "https://secure.gravatar.com/avatar/a4551e4b7d330e59acf4bdda79ac8b21.jpg?s=32&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0002-32.png",
| "real_name_normalized": "",
| "real_name": "",
| "image_24": "https://secure.gravatar.com/avatar/a4551e4b7d330e59acf4bdda79ac8b21.jpg?s=24&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0002-24.png",
| "image_192": "https://secure.gravatar.com/avatar/a4551e4b7d330e59acf4bdda79ac8b21.jpg?s=192&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0002.png"
| },
| "tz_label": "Central European Time",
| "is_ultra_restricted": false,
| "status": null,
| "real_name": "",
| "is_restricted": false,
| "deleted": false,
| "is_primary_owner": false
| }, {
| "is_bot": false,
| "name": "ziuta",
| "tz_offset": 3600,
| "is_admin": false,
| "tz": "Europe/Amsterdam",
| "color": "e7392d",
| "is_owner": false,
| "has_files": false,
| "id": "U03DKUMKH",
| "presence": "away",
| "profile": {
| "email": "[email protected]",
| "image_72": "https://secure.gravatar.com/avatar/92b61c6a2a1efea6208c7faf3ffabea4.jpg?s=72&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0016-72.png",
| "image_48": "https://secure.gravatar.com/avatar/92b61c6a2a1efea6208c7faf3ffabea4.jpg?s=48&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0016-48.png",
| "image_32": "https://secure.gravatar.com/avatar/92b61c6a2a1efea6208c7faf3ffabea4.jpg?s=32&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0016-32.png",
| "real_name_normalized": "",
| "real_name": "",
| "image_24": "https://secure.gravatar.com/avatar/92b61c6a2a1efea6208c7faf3ffabea4.jpg?s=24&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0016-24.png",
| "image_192": "https://secure.gravatar.com/avatar/92b61c6a2a1efea6208c7faf3ffabea4.jpg?s=192&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0016.png"
| },
| "tz_label": "Central European Time",
| "is_ultra_restricted": false,
| "status": null,
| "real_name": "",
| "is_restricted": false,
| "deleted": false,
| "is_primary_owner": false
| }, {
| "is_bot": false,
| "name": "slackbot",
| "tz_offset": -28800,
| "is_admin": false,
| "tz": null,
| "color": "757575",
| "is_owner": false,
| "id": "USLACKBOT",
| "presence": "active",
| "profile": {
| "first_name": "Slack",
| "email": null,
| "image_72": "https://slack-assets2.s3-us-west-2.amazonaws.com/10068/img/slackbot_72.png",
| "image_48": "https://slack-assets2.s3-us-west-2.amazonaws.com/10068/img/slackbot_48.png",
| "image_32": "https://slack-assets2.s3-us-west-2.amazonaws.com/10068/img/slackbot_32.png",
| "real_name_normalized": "Slack Bot",
| "last_name": "Bot",
| "real_name": "Slack Bot",
| "image_24": "https://slack-assets2.s3-us-west-2.amazonaws.com/10068/img/slackbot_24.png",
| "image_192": "https://slack-assets2.s3-us-west-2.amazonaws.com/10068/img/slackbot_192.png"
| },
| "tz_label": "Pacific Standard Time",
| "is_ultra_restricted": false,
| "status": null,
| "real_name": "Slack Bot",
| "is_restricted": false,
| "deleted": false,
| "is_primary_owner": false
| }],
| "latest_event_ts": "1422397894.000000",
| "self": {
| "name": "secretary",
| "id": "U03DQKG14",
| "manual_presence": "active",
| "prefs": {
| "has_invited": false,
| "no_created_overlays": false,
| "seen_team_menu_tip_card": false,
| "webapp_spellcheck": true,
| "expand_snippets": false,
| "color_names_in_list": true,
| "no_joined_overlays": false,
| "sidebar_behavior": "",
| "email_alerts": "instant",
| "seen_ssb_prompt": false,
| "has_uploaded": false,
| "show_member_presence": true,
| "email_misc": true,
| "time24": false,
| "never_channels": "",
| "push_dm_alert": true,
| "user_colors": "",
| "expand_inline_imgs": true,
| "last_snippet_type": "",
| "emoji_mode": "default",
| "collapsible_by_click": true,
| "load_lato_2": false,
| "mac_speak_speed": 250,
| "ss_emojis": true,
| "no_macssb1_banner": false,
| "highlight_words": "",
| "seen_welcome_2": false,
| "mute_sounds": false,
| "muted_channels": "",
| "seen_member_invite_reminder": false,
| "posts_formatting_guide": true,
| "seen_channels_tip_card": false,
| "sidebar_theme_custom_values": "",
| "at_channel_suppressed_channels": "",
| "f_key_search": false,
| "tz": null,
| "no_text_in_notifications": false,
| "has_created_channel": false,
| "seen_message_input_tip_card": false,
| "arrow_history": false,
| "email_alerts_sleep_until": 0,
| "seen_flexpane_tip_card": false,
| "mac_speak_voice": "com.apple.speech.synthesis.voice.Alex",
| "tab_ui_return_selects": true,
| "privacy_policy_seen": true,
| "mark_msgs_read_immediately": true,
| "push_sound": "b2.mp3",
| "comma_key_prefs": false,
| "collapsible": false,
| "mac_ssb_bullet": true,
| "k_key_omnibox": true,
| "dropbox_enabled": false,
| "growls_enabled": true,
| "welcome_message_hidden": false,
| "all_channels_loud": true,
| "email_weekly": true,
| "seen_search_input_tip_card": false,
| "seen_channel_menu_tip_card": false,
| "search_only_my_channels": false,
| "loud_channels_set": "",
| "search_exclude_channels": "",
| "seen_user_menu_tip_card": false,
| "win_ssb_bullet": true,
| "push_loud_channels_set": "",
| "push_loud_channels": "",
| "enter_is_special_in_tbt": false,
| "full_text_extracts": false,
| "fuzzy_matching": false,
| "push_idle_wait": 2,
| "obey_inline_img_limit": true,
| "seen_domain_invite_reminder": false,
| "sidebar_theme": "default",
| "expand_internal_inline_imgs": true,
| "push_mention_alert": true,
| "new_msg_snd": "knock_brush.mp3",
| "search_exclude_bots": false,
| "convert_emoticons": true,
| "start_scroll_at_oldest": true,
| "fuller_timestamps": false,
| "pagekeys_handled": true,
| "require_at": false,
| "push_mention_channels": "",
| "ls_disabled": false,
| "autoplay_chat_sounds": true,
| "mac_ssb_bounce": "",
| "graphic_emoticons": false,
| "snippet_editor_wrap_long_lines": false,
| "display_real_names_override": 0,
| "push_everything": true,
| "last_seen_at_channel_warning": 0,
| "messages_theme": "default",
| "show_typing": true,
| "speak_growls": false,
| "push_at_channel_suppressed_channels": "",
| "loud_channels": "",
| "search_sort": "timestamp",
| "prompted_for_email_disabling": false,
| "expand_non_media_attachments": true
| },
| "created": 1421786646
| },
| "groups": [],
| "cache_version": "v3-dog",
| "ims": [{
| "last_read": "0000000000.000000",
| "is_open": true,
| "id": "D03DQKG18",
| "unread_count": 0,
| "is_im": true,
| "latest": null,
| "user": "USLACKBOT",
| "created": 1421786647
| }, {
| "last_read": "0000000000.000000",
| "is_open": true,
| "id": "D03DQKG24",
| "unread_count": 0,
| "is_im": true,
| "latest": null,
| "user": "U03DKUF05",
| "created": 1421786647
| }, {
| "last_read": "0000000000.000000",
| "is_open": true,
| "id": "D03DQKG1L",
| "unread_count": 0,
| "is_im": true,
| "latest": null,
| "user": "U03DKUMKH",
| "created": 1421786647
| }, {
| "last_read": "0000000000.000000",
| "is_open": true,
| "id": "D03DQKG1U",
| "unread_count": 0,
| "is_im": true,
| "latest": null,
| "user": "U03DKUTAZ",
| "created": 1421786647
| }, {
| "last_read": "0000000000.000000",
| "is_open": true,
| "id": "D03DQKG1C",
| "unread_count": 0,
| "is_im": true,
| "latest": null,
| "user": "U03DN1GTQ",
| "created": 1421786647
| }],
| "team": {
| "name": "fivedots",
| "domain": "5dots",
| "icon": {
| "image_132": "https://slack.global.ssl.fastly.net/28461/img/avatars-teams/ava_0018-132.png",
| "image_68": "https://slack.global.ssl.fastly.net/28461/img/avatars-teams/ava_0018-68.png",
| "image_88": "https://slack.global.ssl.fastly.net/28461/img/avatars-teams/ava_0018-88.png",
| "image_102": "https://slack.global.ssl.fastly.net/28461/img/avatars-teams/ava_0018-102.png",
| "image_44": "https://slack.global.ssl.fastly.net/28461/img/avatars-teams/ava_0018-44.png",
| "image_34": "https://slack.global.ssl.fastly.net/28461/img/avatars-teams/ava_0018-34.png",
| "image_default": true
| },
| "over_storage_limit": false,
| "email_domain": "5dots.pl",
| "id": "T03DN1GTN",
| "prefs": {
| "who_can_kick_channels": "admin",
| "warn_before_at_channel": "always",
| "who_can_archive_channels": "regular",
| "dm_retention_type": 0,
| "retention_type": 0,
| "default_channels": ["C03DN1GUJ", "C03DN1GUN"],
| "who_can_post_general": "ra",
| "who_can_at_everyone": "regular",
| "who_can_at_channel": "ra",
| "allow_message_deletion": true,
| "require_at_for_mention": 0,
| "display_real_names": false,
| "who_can_create_channels": "regular",
| "compliance_export_start": 0,
| "who_can_create_groups": "ra",
| "dm_retention_duration": 0,
| "who_can_kick_groups": "regular",
| "retention_duration": 0,
| "msg_edit_window_mins": -1,
| "hide_referers": true,
| "group_retention_duration": 0,
| "group_retention_type": 0
| },
| "msg_edit_window_mins": -1
| },
| "ok": true
|}""".stripMargin
val rtmResponse = response.parseJson.convertTo[RtmStartResponse]
rtmResponse shouldBe 'ok
rtmResponse.url should equal("wss://ms25.slack-msgs.com/websocket/_eQUaO1csLMyoe4p4rUgEIH/W/gEruHxke8x0TNSE0ltMOdO7bHsP_W9mOznr5U1DzWvW7qs6BZulFXKcg0X2giBxV8UaHtptGEK0_F_rUA=")
rtmResponse.users shouldBe 'nonEmpty
rtmResponse.channels shouldBe 'nonEmpty
rtmResponse.users.size should equal(7)
rtmResponse.channels.size should equal(2)
rtmResponse.self.id should equal("U03DQKG14")
rtmResponse.self.name should equal("secretary")
rtmResponse.ims.size should equal(5)
}
test("long channel unmarshall") {
/* language=JSON */
val channelString = """{
| "is_channel": true,
| "name": "general",
| "last_read": "1421772996.000005",
| "creator": "U03DN1GTQ",
| "purpose": {
| "value": "This channel is for team-wide communication and announcements. All team members are in this channel.",
| "creator": "",
| "last_set": 0
| },
| "is_member": true,
| "id": "C03DN1GUJ",
| "unread_count": 1,
| "members": ["U03DKUF05", "U03DKUMKH", "U03DKUTAZ", "U03DL3Q9M", "U03DN1GTQ", "U03DQKG14"],
| "is_general": true,
| "topic": {
| "value": "",
| "creator": "",
| "last_set": 0
| },
| "latest": {
| "subtype": "channel_join",
| "ts": "1421786647.000002",
| "text": "<@U03DQKG14|secretary> has joined the channel",
| "type": "message",
| "user": "U03DQKG14"
| },
| "is_archived": false,
| "created": 1421772055
| }""".stripMargin
val channel = channelString.parseJson.convertTo[Channel]
channel shouldBe 'isChannel
channel shouldBe 'isMember
channel.name should equal("general")
channel.creator should equal("U03DN1GTQ")
channel.id should equal("C03DN1GUJ")
channel shouldBe 'isGeneral
channel should not be 'isArchived
channel.created should equal(new DateTime(1421772055000l))
channel.purpose should be(Some(ChannelInfo("This channel is for team-wide communication and announcements. All team members are in this channel.", "", 0)))
channel.topic should be(Some(ChannelInfo("", "", 0)))
channel.unreadCount should be(Some(1))
channel.lastRead should be(Some(new DateTime(1421772996000l)))
channel.members should be(Some(List("U03DKUF05", "U03DKUMKH", "U03DKUTAZ", "U03DL3Q9M", "U03DN1GTQ", "U03DQKG14")))
}
test("short channel unmarshall") {
/* language=JSON */
val channelString = """{
| "is_channel": true,
| "name": "random",
| "creator": "U03DN1GTQ",
| "is_member": false,
| "id": "C03DN1GUN",
| "is_general": false,
| "is_archived": false,
| "created": 1421772055
| }""".stripMargin
val channel = channelString.parseJson.convertTo[Channel]
channel shouldBe 'isChannel
channel should not be 'isMember
channel.name should equal("random")
channel.creator should equal("U03DN1GTQ")
channel.id should equal("C03DN1GUN")
channel should not be 'isGeneral
channel should not be 'isArchived
channel.created should equal(new DateTime(1421772055000L))
channel.purpose should be(None)
channel.topic should be(None)
channel.unreadCount should be(None)
channel.lastRead should be(None)
channel.members should be(None)
}
test("Channel topic") {
val topicString = """{
| "value": "",
| "creator": "",
| "last_set": 0
| }""".stripMargin
val topic = topicString.parseJson.convertTo[ChannelInfo]
topic.value should equal("")
topic.creator should equal("")
topic.last_set should equal(0)
}
test("channel purpose") {
val purposeString = """{
| "value": "This channel is for team-wide communication and announcements. All team members are in this channel.",
| "creator": "",
| "last_set": 0
| }""".stripMargin
val purpose = purposeString.parseJson.convertTo[ChannelInfo]
purpose.value should equal("This channel is for team-wide communication and announcements. All team members are in this channel.")
purpose.creator should equal("")
purpose.last_set should equal(0)
}
test("user object") {
/* language=JSON */
val userString = """{
| "is_bot": false,
| "name": "benek",
| "tz_offset": 3600,
| "is_admin": false,
| "tz": "Europe/Amsterdam",
| "color": "4bbe2e",
| "is_owner": false,
| "has_files": false,
| "id": "U03DKUF05",
| "presence": "away",
| "profile": {
| "email": "[email protected]",
| "image_72": "https://secure.gravatar.com/avatar/3d6188e64eb0f7d1156d3bda95452901.jpg?s=72&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0000-72.png",
| "image_48": "https://secure.gravatar.com/avatar/3d6188e64eb0f7d1156d3bda95452901.jpg?s=48&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0000-48.png",
| "image_32": "https://secure.gravatar.com/avatar/3d6188e64eb0f7d1156d3bda95452901.jpg?s=32&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0000-32.png",
| "real_name_normalized": "",
| "real_name": "",
| "image_24": "https://secure.gravatar.com/avatar/3d6188e64eb0f7d1156d3bda95452901.jpg?s=24&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0000-24.png",
| "image_192": "https://secure.gravatar.com/avatar/3d6188e64eb0f7d1156d3bda95452901.jpg?s=192&d=https%3A%2F%2Fslack.global.ssl.fastly.net%2F8390%2Fimg%2Favatars%2Fava_0000.png"
| },
| "tz_label": "Central European Time",
| "is_ultra_restricted": false,
| "status": null,
| "real_name": "",
| "is_restricted": false,
| "deleted": false,
| "is_primary_owner": false
| }""".stripMargin
val user = userString.parseJson.convertTo[SlackUser]
user.isBot should equal(Some(false))
user.name should equal("benek")
user.id should equal("U03DKUF05")
user should not be 'deleted
user.isAdmin should equal(Some(false))
user.isOwner should equal(Some(false))
user.isPrimaryOwner should equal(Some(false))
user.isRestricted should equal(Some(false))
user.isUltraRestricted should equal(Some(false))
user.hasFiles should equal(Some(false))
user.presence should equal(Away)
}
test("IM object") {
/*language=JSON*/
val imString = """{
| "last_read": "0000000000.000000",
| "is_open": true,
| "id": "D03DQKG1C",
| "unread_count": 0,
| "is_im": true,
| "latest": null,
| "user": "U03DN1GTQ",
| "created": 1421786647
| }""".stripMargin
val im = imString.parseJson.convertTo[DirectChannel]
im.id should equal("D03DQKG1C")
im.userId should equal("U03DN1GTQ")
}
}
|
Cheers-Dev/scala-slack-bot-core
|
src/test/scala/io/scalac/slack/UnmarshallerTest.scala
|
Scala
|
mit
| 38,839 |
package io.pathfinder.models
import java.util
import javax.persistence.CascadeType._
import javax.persistence._
import com.avaje.ebean.Model
import com.avaje.ebean.Model.{Finder, Find}
import play.api.libs.json.{Json, Format}
import scala.collection.mutable
import scala.collection.JavaConverters.asScalaBufferConverter
object Application {
val finder: Find[String, Application] =
new Finder[String, Application](classOf[Application])
val format: Format[Application] = Json.format[Application]
def apply(id: String, name: String): Application = {
val app = new Application
app.id = id
app
}
def unapply(p: Application): Option[(String, String)] =
Some((p.id, p.name))
}
@Entity
class Application extends Model {
@Id
@Column(updatable = false)
var id: String = null
@Column
var name: String = null
@ManyToOne
var customer: Customer = null
@OneToMany(mappedBy = "application", cascade = Array(ALL))
var capacityParametersList: util.List[CapacityParameter] = new util.ArrayList[CapacityParameter]()
@OneToMany(mappedBy = "application", cascade = Array(ALL))
var objectiveParametersList: util.List[ObjectiveParameter] = new util.ArrayList[ObjectiveParameter]()
@ManyToOne
var objectiveFunction: ObjectiveFunction = null
@Column
var key: Array[Byte] = null
@Column
var auth_url: String = null
def cluster: Cluster = {
Cluster.finder.byId(id)
}
def capacityParameters: mutable.Buffer[CapacityParameter] = capacityParametersList.asScala
def objectiveParameters: mutable.Buffer[ObjectiveParameter] = objectiveParametersList.asScala
}
|
CSSE497/pathfinder-server
|
app/io/pathfinder/models/Application.scala
|
Scala
|
mit
| 1,695 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.audit
import java.nio.charset.StandardCharsets
import java.util.Map.Entry
import org.apache.accumulo.core.data.{Key, Mutation, Value}
import org.apache.hadoop.io.Text
import org.locationtech.geomesa.index.audit.QueryEvent
/**
* Maps query stats to accumulo
*/
object AccumuloQueryEventTransform extends AccumuloEventTransform[QueryEvent] {
private [audit] val CQ_USER = new Text("user")
private [audit] val CQ_FILTER = new Text("queryFilter")
private [audit] val CQ_HINTS = new Text("queryHints")
private [audit] val CQ_PLANTIME = new Text("timePlanning")
private [audit] val CQ_SCANTIME = new Text("timeScanning")
private [audit] val CQ_TIME = new Text("timeTotal")
private [audit] val CQ_HITS = new Text("hits")
private [audit] val CQ_DELETED = new Text("deleted")
override def toMutation(event: QueryEvent): Mutation = {
val mutation = createMutation(event)
val cf = createRandomColumnFamily
mutation.put(cf, CQ_USER, new Value(event.user.getBytes(StandardCharsets.UTF_8)))
mutation.put(cf, CQ_FILTER, new Value(event.filter.getBytes(StandardCharsets.UTF_8)))
mutation.put(cf, CQ_HINTS, new Value(event.hints.getBytes(StandardCharsets.UTF_8)))
mutation.put(cf, CQ_PLANTIME, new Value(s"${event.planTime}ms".getBytes(StandardCharsets.UTF_8)))
mutation.put(cf, CQ_SCANTIME, new Value(s"${event.scanTime}ms".getBytes(StandardCharsets.UTF_8)))
mutation.put(cf, CQ_TIME, new Value(s"${event.scanTime + event.planTime}ms".getBytes(StandardCharsets.UTF_8)))
mutation.put(cf, CQ_HITS, new Value(event.hits.toString.getBytes(StandardCharsets.UTF_8)))
mutation.put(cf, CQ_DELETED, new Value(event.deleted.toString.getBytes(StandardCharsets.UTF_8)))
mutation
}
override def toEvent(entries: Iterable[Entry[Key, Value]]): QueryEvent = {
if (entries.isEmpty) {
return null
}
val (featureName, date) = typeNameAndDate(entries.head.getKey)
val values = collection.mutable.Map.empty[Text, Any]
entries.foreach { e =>
e.getKey.getColumnQualifier match {
case CQ_USER => values.put(CQ_USER, e.getValue.toString)
case CQ_FILTER => values.put(CQ_FILTER, e.getValue.toString)
case CQ_HINTS => values.put(CQ_HINTS, e.getValue.toString)
case CQ_PLANTIME => values.put(CQ_PLANTIME, e.getValue.toString.stripSuffix("ms").toLong)
case CQ_SCANTIME => values.put(CQ_SCANTIME, e.getValue.toString.stripSuffix("ms").toLong)
case CQ_HITS => values.put(CQ_HITS, e.getValue.toString.toLong)
case CQ_DELETED => values.put(CQ_DELETED, e.getValue.toString.toBoolean)
case CQ_TIME => // time is an aggregate, doesn't need to map back to anything
case _ => logger.warn(s"Unmapped entry in query stat: ${e.getKey.getColumnQualifier.toString}")
}
}
val user = values.getOrElse(CQ_USER, "unknown").asInstanceOf[String]
val queryHints = values.getOrElse(CQ_HINTS, "").asInstanceOf[String]
val queryFilter = values.getOrElse(CQ_FILTER, "").asInstanceOf[String]
val planTime = values.getOrElse(CQ_PLANTIME, 0L).asInstanceOf[Long]
val scanTime = values.getOrElse(CQ_SCANTIME, 0L).asInstanceOf[Long]
val hits = values.getOrElse(CQ_HITS, 0L).asInstanceOf[Long]
val deleted = values.getOrElse(CQ_DELETED, false).asInstanceOf[Boolean]
QueryEvent(AccumuloAuditService.StoreType, featureName, date, user, queryFilter, queryHints, planTime, scanTime, hits, deleted)
}
}
|
ddseapy/geomesa
|
geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/audit/AccumuloQueryEventTransform.scala
|
Scala
|
apache-2.0
| 4,018 |
package org.json4s
import org.specs2.mutable.Specification
class FormatsSpec extends Specification {
"Formats" should {
"be a Serializable" in {
val f = new Formats {
def dateFormat: DateFormat = ???
}
f.isInstanceOf[Serializable] must beTrue
}
}
"DefaultFormats" should {
"be a Serializable" in {
DefaultFormats.isInstanceOf[Serializable] must beTrue
}
}
}
|
dacr/json4s
|
tests/src/test/scala/org/json4s/FormatsSpec.scala
|
Scala
|
apache-2.0
| 418 |
/* Scala.js compiler
* Copyright 2013 LAMP/EPFL
* @author Sébastien Doeraene
*/
package org.scalajs.core.compiler
import scala.collection.mutable
import scala.tools.nsc._
import org.scalajs.core.ir
import ir.{Trees => js, Types => jstpe}
import util.ScopedVar
import ScopedVar.withScopedVars
/** Encoding of symbol names for JavaScript
*
* Some issues that this encoding solves:
* * Overloading: encode the full signature in the JS name
* * Same scope for fields and methods of a class
* * Global access to classes and modules (by their full name)
*
* @author Sébastien Doeraene
*/
trait JSEncoding extends SubComponent { self: GenJSCode =>
import global._
import jsAddons._
/** Outer separator string (between parameter types) */
final val OuterSep = "__"
/** Inner separator character (replace dots in full names) */
final val InnerSep = "_"
/** Name given to the local Scala.js environment variable */
final val ScalaJSEnvironmentName = "ScalaJS"
/** Name given to all exported stuff of a class for DCE */
final val dceExportName = "<exported>"
// Fresh local name generator ----------------------------------------------
private val usedLocalNames = new ScopedVar[mutable.Set[String]]
private val localSymbolNames = new ScopedVar[mutable.Map[Symbol, String]]
private val isReserved =
Set("arguments", "eval", ScalaJSEnvironmentName)
def withNewLocalNameScope[A](body: => A): A =
withScopedVars(
usedLocalNames := mutable.Set.empty,
localSymbolNames := mutable.Map.empty
)(body)
private def freshName(base: String = "x"): String = {
var suffix = 1
var longName = base
while (usedLocalNames(longName) || isReserved(longName)) {
suffix += 1
longName = base+"$"+suffix
}
usedLocalNames += longName
mangleJSName(longName)
}
def freshLocalIdent()(implicit pos: ir.Position): js.Ident =
js.Ident(freshName(), None)
def freshLocalIdent(base: String)(implicit pos: ir.Position): js.Ident =
js.Ident(freshName(base), Some(base))
private def localSymbolName(sym: Symbol): String =
localSymbolNames.getOrElseUpdate(sym, freshName(sym.name.toString))
// Encoding methods ----------------------------------------------------------
def encodeLabelSym(sym: Symbol)(implicit pos: Position): js.Ident = {
require(sym.isLabel, "encodeLabelSym called with non-label symbol: " + sym)
js.Ident(localSymbolName(sym), Some(sym.unexpandedName.decoded))
}
private lazy val allRefClasses: Set[Symbol] = {
import definitions._
(Set(ObjectRefClass, VolatileObjectRefClass) ++
refClass.values ++ volatileRefClass.values)
}
def encodeFieldSym(sym: Symbol)(implicit pos: Position): js.Ident = {
require(sym.owner.isClass && sym.isTerm && !sym.isMethod && !sym.isModule,
"encodeFieldSym called with non-field symbol: " + sym)
val name0 = encodeMemberNameInternal(sym)
val name =
if (name0.charAt(name0.length()-1) != ' ') name0
else name0.substring(0, name0.length()-1)
/* We have to special-case fields of Ref types (IntRef, ObjectRef, etc.)
* because they are emitted as private by our .scala source files, but
* they are considered public at use site since their symbols come from
* Java-emitted .class files.
*/
val idSuffix =
if (sym.isPrivate || allRefClasses.contains(sym.owner))
sym.owner.ancestors.count(!_.isInterface).toString
else
"f"
val encodedName = name + "$" + idSuffix
js.Ident(mangleJSName(encodedName), Some(sym.unexpandedName.decoded))
}
def encodeMethodSym(sym: Symbol, reflProxy: Boolean = false)
(implicit pos: Position): js.Ident = {
val (encodedName, paramsString) = encodeMethodNameInternal(sym, reflProxy)
js.Ident(encodedName + paramsString,
Some(sym.unexpandedName.decoded + paramsString))
}
def encodeMethodName(sym: Symbol, reflProxy: Boolean = false): String = {
val (encodedName, paramsString) = encodeMethodNameInternal(sym, reflProxy)
encodedName + paramsString
}
/** Encodes a method symbol of java.lang.String for use in RuntimeString.
*
* This basically means adding an initial parameter of type
* java.lang.String, which is the `this` parameter.
*/
def encodeRTStringMethodSym(sym: Symbol)(
implicit pos: Position): (Symbol, js.Ident) = {
require(sym.isMethod, "encodeMethodSym called with non-method symbol: " + sym)
require(sym.owner == definitions.StringClass)
require(!sym.isClassConstructor && !sym.isPrivate)
val (encodedName, paramsString) =
encodeMethodNameInternal(sym, inRTClass = true)
val methodIdent = js.Ident(encodedName + paramsString,
Some(sym.unexpandedName.decoded + paramsString))
(jsDefinitions.RuntimeStringModuleClass, methodIdent)
}
private def encodeMethodNameInternal(sym: Symbol,
reflProxy: Boolean = false,
inRTClass: Boolean = false): (String, String) = {
require(sym.isMethod, "encodeMethodSym called with non-method symbol: " + sym)
def name = encodeMemberNameInternal(sym)
val encodedName = {
if (sym.isClassConstructor)
"init" + InnerSep
else if (sym.isPrivate)
mangleJSName(name) + OuterSep + "p" +
sym.owner.ancestors.count(!_.isInterface).toString
else
mangleJSName(name)
}
val paramsString = makeParamsString(sym, reflProxy, inRTClass)
(encodedName, paramsString)
}
def encodeStaticMemberSym(sym: Symbol)(implicit pos: Position): js.Ident = {
require(sym.isStaticMember,
"encodeStaticMemberSym called with non-static symbol: " + sym)
js.Ident(
mangleJSName(encodeMemberNameInternal(sym)) +
makeParamsString(List(internalName(sym.tpe))),
Some(sym.unexpandedName.decoded))
}
def encodeLocalSym(sym: Symbol)(implicit pos: Position): js.Ident = {
/* The isValueParameter case is necessary to work around an internal bug
* of scalac: for some @varargs methods, the owner of some parameters is
* the enclosing class rather the method, so !sym.owner.isClass fails.
* Go figure ...
* See #1440
*/
require(sym.isValueParameter ||
(!sym.owner.isClass && sym.isTerm && !sym.isMethod && !sym.isModule),
"encodeLocalSym called with non-local symbol: " + sym)
js.Ident(localSymbolName(sym), Some(sym.unexpandedName.decoded))
}
def foreignIsImplClass(sym: Symbol): Boolean =
sym.isModuleClass && nme.isImplClassName(sym.name)
def encodeClassType(sym: Symbol): jstpe.Type = {
if (sym == definitions.ObjectClass) jstpe.AnyType
else if (isRawJSType(sym.toTypeConstructor)) jstpe.AnyType
else {
assert(sym != definitions.ArrayClass,
"encodeClassType() cannot be called with ArrayClass")
jstpe.ClassType(encodeClassFullName(sym))
}
}
def encodeClassFullNameIdent(sym: Symbol)(implicit pos: Position): js.Ident = {
js.Ident(encodeClassFullName(sym), Some(sym.fullName))
}
def encodeClassFullName(sym: Symbol): String = {
ir.Definitions.encodeClassName(
sym.fullName + (if (needsModuleClassSuffix(sym)) "$" else ""))
}
def needsModuleClassSuffix(sym: Symbol): Boolean =
sym.isModuleClass && !foreignIsImplClass(sym)
private def encodeMemberNameInternal(sym: Symbol): String =
sym.name.toString.replace("_", "$und")
// Encoding of method signatures
private def makeParamsString(sym: Symbol, reflProxy: Boolean,
inRTClass: Boolean): String = {
val tpe = sym.tpe
val paramTypeNames0 = tpe.params map (p => internalName(p.tpe))
val hasExplicitThisParameter =
inRTClass || isScalaJSDefinedJSClass(sym.owner)
val paramTypeNames =
if (!hasExplicitThisParameter) paramTypeNames0
else internalName(sym.owner.toTypeConstructor) :: paramTypeNames0
val paramAndResultTypeNames = {
if (sym.isClassConstructor)
paramTypeNames
else if (reflProxy)
paramTypeNames :+ ""
else
paramTypeNames :+ internalName(tpe.resultType)
}
makeParamsString(paramAndResultTypeNames)
}
private def makeParamsString(paramAndResultTypeNames: List[String]) =
paramAndResultTypeNames.mkString(OuterSep, OuterSep, "")
/** Computes the internal name for a type. */
private def internalName(tpe: Type): String = internalName(toTypeKind(tpe))
private def internalName(kind: TypeKind): String = kind match {
case VOID => "V"
case kind: ValueTypeKind => kind.primitiveCharCode.toString()
case NOTHING => ir.Definitions.RuntimeNothingClass
case NULL => ir.Definitions.RuntimeNullClass
case REFERENCE(cls) => encodeClassFullName(cls)
case ARRAY(elem) => "A"+internalName(elem)
}
/** mangles names that are illegal in JavaScript by prepending a $
* also mangles names that would collide with these mangled names
*/
private def mangleJSName(name: String) =
if (js.isKeyword(name) || name(0).isDigit || name(0) == '$')
"$" + name
else name
}
|
mdedetrich/scala-js
|
compiler/src/main/scala/org/scalajs/core/compiler/JSEncoding.scala
|
Scala
|
bsd-3-clause
| 9,150 |
import scala.tools.partest._
import java.io.File
object Test extends StoreReporterDirectTest {
def code = ???
def compileCode(code: String) = {
val classpath = pathOf(sys.props("partest.lib"), testOutput.path)
compileString(newCompiler("-cp", classpath))(code)
}
def library1 = """
package pack1
trait T
class U {
def t = new T {}
def one = 1
}
"""
def library2 = """
package pack2
object V {
def u = new pack1.U
}
"""
def app1 = """
package pack3
object Test {
pack2.V.u.one // okay
}
"""
def app2 = """
package pack3
object Test {
pack2.V.u.t // we have to fail if T.class is missing
}
"""
def show(): Unit = {
compileCode(library1)
val pack1 = new File(testOutput.path, "pack1")
val tClass = new File(pack1, "T.class")
assert(tClass.exists)
assert(tClass.delete())
// allowed to compile, no direct reference to `T`
compileCode(library2)
assert(filteredInfos.isEmpty, filteredInfos)
// allowed to compile, no direct reference to `T`
compileCode(app1)
assert(filteredInfos.isEmpty, filteredInfos)
// bad symbolic reference error expected (but no stack trace!)
compileCode(app2)
import scala.reflect.internal.util.Position
filteredInfos.map { report =>
print(if (report.severity == storeReporter.ERROR) "error: " else "")
println(Position.formatMessage(report.pos, report.msg, true))
}
}
}
|
scala/scala
|
test/files/run/t6440b.scala
|
Scala
|
apache-2.0
| 1,494 |
package org.bfn.ninetynineprobs
import org.scalatest._
class P65Spec extends UnitSpec {
// TODO
}
|
bfontaine/99Scala
|
src/test/scala/P65Spec.scala
|
Scala
|
mit
| 105 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.functions
import org.joda.time.format.DateTimeFormatter
import org.joda.time.format.DateTimeFormatterBuilder
object DateTimeFunctions {
private val PIVOT_YEAR = 2020
private val DATETIME_FORMATTER_CACHE = new ThreadLocalCache[String, DateTimeFormatter](64) {
protected override def getNewInstance(format: String): DateTimeFormatter
= createDateTimeFormatter(format)
}
def dateFormat(ts: Long, formatString: String): String = {
val formatter = DATETIME_FORMATTER_CACHE.get(formatString)
formatter.print(ts)
}
def createDateTimeFormatter(format: String): DateTimeFormatter = {
val builder = new DateTimeFormatterBuilder
var escaped = false
var i = 0
while (i < format.length) {
val character = format.charAt(i)
i = i + 1
if (escaped) {
character match {
// %a Abbreviated weekday name (Sun..Sat)
case 'a' => builder.appendDayOfWeekShortText
// %b Abbreviated month name (Jan..Dec)
case 'b' => builder.appendMonthOfYearShortText
// %c Month, numeric (0..12)
case 'c' => builder.appendMonthOfYear(1)
// %d Day of the month, numeric (00..31)
case 'd' => builder.appendDayOfMonth(2)
// %e Day of the month, numeric (0..31)
case 'e' => builder.appendDayOfMonth(1)
// %f Microseconds (000000..999999)
case 'f' => builder.appendFractionOfSecond(6, 9)
// %H Hour (00..23)
case 'H' => builder.appendHourOfDay(2)
case 'h' | 'I' => // %h Hour (01..12)
builder.appendClockhourOfHalfday(2)
// %i Minutes, numeric (00..59)
case 'i' => builder.appendMinuteOfHour(2)
// %j Day of year (001..366)
case 'j' => builder.appendDayOfYear(3)
// %k Hour (0..23)
case 'k' => builder.appendHourOfDay(1)
// %l Hour (1..12)
case 'l' => builder.appendClockhourOfHalfday(1)
// %M Month name (January..December)
case 'M' => builder.appendMonthOfYearText
// %m Month, numeric (00..12)
case 'm' => builder.appendMonthOfYear(2)
// %p AM or PM
case 'p' => builder.appendHalfdayOfDayText
// %r Time, 12-hour (hh:mm:ss followed by AM or PM)
case 'r' => builder.appendClockhourOfHalfday(2).appendLiteral(':').
appendMinuteOfHour(2).appendLiteral(':').appendSecondOfMinute(2).
appendLiteral(' ').appendHalfdayOfDayText
// %S Seconds (00..59)
case 'S' | 's' => builder.appendSecondOfMinute(2)
// %T Time, 24-hour (hh:mm:ss)
case 'T' => builder.appendHourOfDay(2).appendLiteral(':').
appendMinuteOfHour(2).appendLiteral(':').appendSecondOfMinute(2)
// %v Week (01..53), where Monday is the first day of the week; used with %x
case 'v' => builder.appendWeekOfWeekyear(2)
// %x Year for the week, where Monday is the first day of the week, numeric,
// four digits; used with %v
case 'x' => builder.appendWeekyear(4, 4)
// %W Weekday name (Sunday..Saturday)
case 'W' => builder.appendDayOfWeekText
// %Y Year, numeric, four digits
case 'Y' => builder.appendYear(4, 4)
// %y Year, numeric (two digits)
case 'y' => builder.appendTwoDigitYear(PIVOT_YEAR)
// %w Day of the week (0=Sunday..6=Saturday)
// %U Week (00..53), where Sunday is the first day of the week
// %u Week (00..53), where Monday is the first day of the week
// %V Week (01..53), where Sunday is the first day of the week; used with %X
// %X Year for the week where Sunday is the first day of the
// week, numeric, four digits; used with %V
// %D Day of the month with English suffix (0th, 1st, 2nd, 3rd, ...)
case 'w' | 'U' | 'u' | 'V' | 'X' | 'D' =>
throw new UnsupportedOperationException(
s"%%$character not supported in date format string")
// %% A literal "%" character
case '%' => builder.appendLiteral('%')
// %<x> The literal character represented by <x>
case _ => builder.appendLiteral(character)
}
escaped = false
}
else if (character == '%') { escaped = true }
else { builder.appendLiteral(character) }
}
builder.toFormatter
}
}
|
ueshin/apache-flink
|
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/runtime/functions/DateTimeFunctions.scala
|
Scala
|
apache-2.0
| 5,278 |
package org.randi3.model
object ActionType extends Enumeration{
val LOGIN, LOGIN_FAILED, LOGOUT, CREATE, UPDATE, DELETE, RANDOMIZE, ADD_RESPONSE = Value
}
|
dschrimpf/randi3-core
|
src/main/scala/org/randi3/model/ActionType.scala
|
Scala
|
gpl-3.0
| 160 |
package io.taig.android.graphic
import android.graphics.Color._
case class Color(color: Int) extends AnyVal {
def darken(amount: Float) = {
val hsv = new Array[Float](3)
colorToHSV(color, hsv)
hsv(2) *= 1 - amount
Color(HSVToColor(hsv))
}
}
object Color {
val Black = Color(BLACK)
val Blue = Color(BLUE)
val Cyan = Color(CYAN)
val DarkGray = Color(DKGRAY)
val Gray = Color(GRAY)
val Green = Color(GREEN)
val LightGray = Color(LTGRAY)
val Magenta = Color(MAGENTA)
val Transparent = Color(TRANSPARENT)
val Red = Color(RED)
val White = Color(WHITE)
val Yellow = Color(YELLOW)
def parse(color: String): Color = Color(parseColor(color))
}
|
Taig/Toolbelt
|
graphic/src/main/scala/io/taig/android/graphic/Color.scala
|
Scala
|
mit
| 695 |
package com.twitter.finagle.redis.integration
import com.twitter.finagle.redis.naggati.RedisClientTest
import com.twitter.finagle.redis.protocol.BitOp
import com.twitter.finagle.redis.tags.{RedisTest, ClientTest}
import com.twitter.util.Await
import com.twitter.finagle.redis.util.{CBToString, StringToChannelBuffer}
import org.junit.Ignore
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@Ignore
@RunWith(classOf[JUnitRunner])
final class StringClientIntegrationSuite extends RedisClientTest {
test("Correctly perform the APPEND command", RedisTest, ClientTest) {
withRedisClient { client =>
Await.result(client.set(foo, bar))
assert(Await.result(client.append(foo, baz)) == 6)
}
}
test("Correctly perform the DECRBY command", RedisTest, ClientTest) {
withRedisClient { client =>
Await.result(client.set(foo, StringToChannelBuffer("21")))
assert(Await.result(client.decrBy(foo, 2)) == 19)
}
}
test("Correctly perform the GETRANGE command", RedisTest, ClientTest) {
withRedisClient { client =>
Await.result(client.set(foo, StringToChannelBuffer("boing")))
assert(CBToString(Await.result(client.getRange(foo, 0, 2)).get) == "boi")
}
}
test("Correctly perform the GET & SET commands", RedisTest, ClientTest) {
withRedisClient { client =>
assert(Await.result(client.get(foo)) == None)
Await.result(client.set(foo, bar))
assert(CBToString(Await.result(client.get(foo)).get) == "bar")
}
}
test("Correctly perform bit operations", RedisTest, ClientTest) {
withRedisClient { client =>
assert(Await.result(client.bitCount(foo)) == 0L)
assert(Await.result(client.getBit(foo, 0)) == 0L)
assert(Await.result(client.setBit(foo, 0, 1)) == 0L)
assert(Await.result(client.getBit(foo, 0)) == 1L)
assert(Await.result(client.setBit(foo, 0, 0)) == 1L)
assert(Await.result(client.setBit(foo, 2, 1)) == 0L)
assert(Await.result(client.setBit(foo, 3, 1)) == 0L)
assert(Await.result(client.setBit(foo, 8, 1)) == 0L)
assert(Await.result(client.bitCount(foo)) == 3L)
assert(Await.result(client.bitCount(foo, Some(0), Some(0))) == 2L)
assert(Await.result(client.setBit(foo, 8, 0)) == 1L)
assert(Await.result(client.setBit(bar, 0, 1)) == 0L)
assert(Await.result(client.setBit(bar, 3, 1)) == 0L)
assert(Await.result(client.bitOp(BitOp.And, baz, Seq(foo, bar))) == 2L)
assert(Await.result(client.bitCount(baz)) == 1L)
assert(Await.result(client.getBit(baz, 0)) == 0L)
assert(Await.result(client.getBit(baz, 3)) == 1L)
assert(Await.result(client.bitOp(BitOp.Or, baz, Seq(foo, bar))) == 2L)
assert(Await.result(client.bitCount(baz)) == 3L)
assert(Await.result(client.getBit(baz, 0)) == 1L)
assert(Await.result(client.getBit(baz, 1)) == 0L)
assert(Await.result(client.bitOp(BitOp.Xor, baz, Seq(foo, bar))) == 2L)
assert(Await.result(client.bitCount(baz)) == 2L)
assert(Await.result(client.getBit(baz, 0)) == 1L)
assert(Await.result(client.getBit(baz, 1)) == 0L)
assert(Await.result(client.bitOp(BitOp.Not, baz, Seq(foo))) == 2L)
assert(Await.result(client.bitCount(baz)) == 14)
assert(Await.result(client.getBit(baz, 0)) == 1)
assert(Await.result(client.getBit(baz, 2)) == 0)
assert(Await.result(client.getBit(baz, 4)) == 1)
}
}
test("Correctly perform getSet commands", RedisTest, ClientTest) {
withRedisClient { client =>
assert(Await.result(client.getSet(foo, bar)) == None)
assert(Await.result(client.get(foo)) == Some(bar))
assert(Await.result(client.getSet(foo, baz)) == Some(bar))
assert(Await.result(client.get(foo)) == Some(baz))
}
}
test("Correctly perform increment commands", RedisTest, ClientTest) {
withRedisClient { client =>
assert(Await.result(client.incr(foo)) == 1L)
assert(Await.result(client.incrBy(foo, 10L)) == 11L)
assert(Await.result(client.incrBy(bar, 10L)) == 10L)
assert(Await.result(client.incr(bar)) == 11L)
}
}
test("Correctly perform mGet, mSet, and mSetNx commands", RedisTest, ClientTest) {
withRedisClient { client =>
Await.result(client.mSet(Map(foo -> bar, bar -> baz)))
assert(Await.result(client.mGet(Seq(foo, bar, baz))) == Seq(Some(bar), Some(baz), None))
assert(Await.result(client.mSetNx(Map(foo -> bar, baz -> foo, boo -> moo))) == false)
assert(Await.result(client.mSetNx(Map(baz -> foo, boo -> moo))) == true)
assert(Await.result(client.mGet(Seq(baz, boo))) == Seq(Some(foo), Some(moo)))
}
}
test("Correctly perform set variations", RedisTest, ClientTest) {
withRedisClient { client =>
Await.result(client.pSetEx(foo, 10000L, bar))
assert(Await.result(client.get(foo)) == Some(bar))
assert(Await.result(client.ttl(foo)) forall (_ <= 10L))
Await.result(client.setEx(bar, 10L, foo))
assert(Await.result(client.get(bar)) == Some(foo))
assert(Await.result(client.ttl(bar)) forall (_ <= 10L))
assert(Await.result(client.setNx(baz, foo)) == true)
assert(Await.result(client.setNx(baz, bar)) == false)
assert(Await.result(client.setRange(baz, 1, baz)) == 4L)
assert(Await.result(client.get(baz)) == Some(StringToChannelBuffer("fbaz")))
}
}
test("Correctly perform new set syntax variations", RedisTest, ClientTest) {
withRedisClient { client =>
assert(Await.result(client.setExNx(foo, 10L, bar)) == true)
assert(Await.result(client.get(foo)) == Some(bar))
assert(Await.result(client.ttl(foo)) forall (_ <= 10L))
assert(Await.result(client.setExNx(foo, 10L, baz)) == false)
assert(Await.result(client.setPxNx(bar, 10000L, baz)) == true)
assert(Await.result(client.get(bar)) == Some(baz))
assert(Await.result(client.ttl(bar)) forall (_ <= 10L))
assert(Await.result(client.setPxNx(bar, 100L, bar)) == false)
assert(Await.result(client.setXx(baz, foo)) == false)
Await.result(client.set(baz, foo))
assert(Await.result(client.setXx(baz, bar)) == true)
assert(Await.result(client.get(baz)) == Some(bar))
assert(Await.result(client.setExXx(boo, 10L, foo)) == false)
Await.result(client.set(boo, foo))
assert(Await.result(client.setExXx(boo, 10L, bar)) == true)
assert(Await.result(client.get(boo)) == Some(bar))
assert(Await.result(client.ttl(boo)) forall (_ <= 10L))
assert(Await.result(client.setPxXx(moo, 10000L, foo)) == false)
Await.result(client.set(moo, foo))
assert(Await.result(client.setPxXx(moo, 10000L, bar)) == true)
assert(Await.result(client.get(moo)) == Some(bar))
assert(Await.result(client.ttl(moo)) forall (_ <= 10L))
Await.result(client.setPx(num, 10000L, foo))
assert(Await.result(client.get(num)) == Some(foo))
assert(Await.result(client.ttl(num)) forall (_ <= 10L))
}
}
test("Correctly perform strlen", RedisTest, ClientTest) {
withRedisClient { client =>
assert(Await.result(client.strlen(foo)) == 0L)
Await.result(client.set(foo, bar))
assert(Await.result(client.strlen(foo)) == 3L)
}
}
}
|
a-manumohan/finagle
|
finagle-redis/src/test/scala/com/twitter/finagle/redis/commands/string/StringClientIntegrationSuite.scala
|
Scala
|
apache-2.0
| 7,237 |
package tests
object Example {
def foo(o: AnyRef) {
o == null<caret>
}
}
|
JetBrains/intellij-scala
|
scala/scala-impl/testdata/postfixTemplate/tabCompletion/else.scala
|
Scala
|
apache-2.0
| 82 |
package services.crunch.deskrecs
import actors.acking.AckingReceiver.{Ack, StreamInitialized}
import akka.NotUsed
import akka.actor.{Actor, ActorRef, Props}
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.testkit.TestProbe
import controllers.ArrivalGenerator
import drt.shared.CrunchApi.DeskRecMinutes
import drt.shared.PaxTypes.EeaMachineReadable
import drt.shared.Queues.{EGate, EeaDesk, NonEeaDesk, Queue}
import drt.shared.SplitRatiosNs.SplitSources.ApiSplitsWithHistoricalEGateAndFTPercentages
import drt.shared.Terminals.{T1, Terminal}
import drt.shared._
import drt.shared.api.Arrival
import manifests.passengers.BestAvailableManifest
import manifests.queues.SplitsCalculator
import manifests.queues.SplitsCalculator.SplitsForArrival
import manifests.{ManifestLookupLike, UniqueArrivalKey}
import passengersplits.parsing.VoyageManifestParser.{PassengerInfoJson, VoyageManifest, VoyageManifests}
import queueus.{AdjustmentsNoop, B5JPlusTypeAllocator, PaxTypeQueueAllocation, TerminalQueueAllocator}
import services.crunch.VoyageManifestGenerator.{euIdCard, manifestForArrival, visa, xOfPaxType}
import services.crunch.desklimits.{PortDeskLimits, TerminalDeskLimitsLike}
import services.crunch.deskrecs.DynamicRunnableDeskRecs.{HistoricManifestsProvider, addManifests}
import services.crunch.deskrecs.OptimiserMocks.{MockSinkActor, mockFlightsProvider, mockHistoricManifestsProvider, mockLiveManifestsProvider}
import services.crunch.deskrecs.RunnableOptimisation.CrunchRequest
import services.crunch.{CrunchTestLike, TestDefaults, VoyageManifestGenerator}
import services.graphstages.{CrunchMocks, FlightFilter}
import services.{SDate, TryCrunch}
import scala.collection.immutable.Map
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
object OptimiserMocks {
class MockActor(somethingToReturn: List[Any]) extends Actor {
override def receive: Receive = {
case _ => sender() ! Source(somethingToReturn)
}
}
class MockSinkActor(probe: ActorRef) extends Actor {
override def receive: Receive = {
case StreamInitialized =>
sender() ! Ack
case somethingToReturn =>
probe ! somethingToReturn
sender() ! Ack
}
}
def mockFlightsProvider(arrivals: List[Arrival])
(implicit ec: ExecutionContext): CrunchRequest => Future[Source[List[Arrival], NotUsed]] =
_ => Future(Source(List(arrivals)))
def mockLiveManifestsProviderNoop(implicit ec: ExecutionContext): CrunchRequest => Future[Source[VoyageManifests, NotUsed]] = {
_ => Future(Source(List()))
}
def mockHistoricManifestsProviderNoop(implicit ec: ExecutionContext): HistoricManifestsProvider = {
_: Iterable[Arrival] => Future(Source(List()))
}
def mockLiveManifestsProvider(arrival: Arrival, maybePax: Option[List[PassengerInfoJson]])
(implicit ec: ExecutionContext): CrunchRequest => Future[Source[VoyageManifests, NotUsed]] = {
val manifests = maybePax match {
case Some(pax) => VoyageManifests(Set(manifestForArrival(arrival, pax)))
case None => VoyageManifests(Set())
}
_ => Future(Source(List(manifests)))
}
def mockHistoricManifestsProvider(arrivalsWithMaybePax: Map[Arrival, Option[List[PassengerInfoJson]]])
(implicit ec: ExecutionContext, mat: Materializer): HistoricManifestsProvider = {
OptimisationProviders.historicManifestsProvider(
PortCode("STN"),
MockManifestLookupService(arrivalsWithMaybePax.map { case (arrival, maybePax) =>
val key = UniqueArrivalKey(PortCode("STN"), arrival.Origin, arrival.VoyageNumber, SDate(arrival.Scheduled))
val maybeManifest = maybePax.map(pax => BestAvailableManifest.historic(VoyageManifestGenerator.manifestForArrival(arrival, pax)))
(key, maybeManifest)
})
)
}
}
import scala.concurrent.ExecutionContext.Implicits.global
case class MockManifestLookupService(bestAvailableManifests: Map[UniqueArrivalKey, Option[BestAvailableManifest]]) extends ManifestLookupLike {
override def maybeBestAvailableManifest(arrivalPort: PortCode,
departurePort: PortCode,
voyageNumber: VoyageNumber,
scheduled: SDateLike)
(implicit mat: Materializer): Future[(UniqueArrivalKey, Option[BestAvailableManifest])] = {
val key = UniqueArrivalKey(arrivalPort, departurePort, voyageNumber, scheduled)
val maybeManifest = bestAvailableManifests.get(key).flatten
Future((key, maybeManifest))
}
}
class RunnableDynamicDeskRecsSpec extends CrunchTestLike {
val airportConfig: AirportConfig = TestDefaults.airportConfigWithEgates
val maxDesksProvider: Map[Terminal, TerminalDeskLimitsLike] = PortDeskLimits.flexed(airportConfig)
val mockCrunch: TryCrunch = CrunchMocks.mockCrunch
val pcpPaxCalcFn: Arrival => Int = PcpUtils.bestPcpPaxEstimate
val ptqa: PaxTypeQueueAllocation = PaxTypeQueueAllocation(
B5JPlusTypeAllocator,
TerminalQueueAllocator(airportConfig.terminalPaxTypeQueueAllocation))
val splitsCalculator: SplitsCalculator = manifests.queues.SplitsCalculator(ptqa, airportConfig.terminalPaxSplits, AdjustmentsNoop)
val desksAndWaitsProvider: PortDesksAndWaitsProvider = PortDesksAndWaitsProvider(airportConfig, mockCrunch, FlightFilter.forPortConfig(airportConfig))
val mockSplitsSink: ActorRef = system.actorOf(Props(new MockSplitsSinkActor))
def setupGraphAndCheckQueuePax(arrival: Arrival,
livePax: Option[List[PassengerInfoJson]],
historicPax: Option[List[PassengerInfoJson]],
expectedQueuePax: Map[(Terminal, Queue), Int]): Any = {
val probe = TestProbe()
val request = CrunchRequest(SDate(arrival.Scheduled).toLocalDate, 0, 1440)
val sink = system.actorOf(Props(new MockSinkActor(probe.ref)))
val deskRecs = DynamicRunnableDeskRecs.crunchRequestsToQueueMinutes(
mockFlightsProvider(List(arrival)),
mockLiveManifestsProvider(arrival, livePax),
mockHistoricManifestsProvider(Map(arrival -> historicPax)),
splitsCalculator,
mockSplitsSink,
desksAndWaitsProvider.flightsToLoads,
desksAndWaitsProvider.loadsToDesks,
maxDesksProvider)
val (queue, _) = RunnableOptimisation.createGraph(sink, deskRecs).run()
queue.offer(request)
probe.fishForMessage(1 second) {
case DeskRecMinutes(drms) =>
val tqPax = drms
.groupBy(drm => (drm.terminal, drm.queue))
.map {
case (tq, minutes) => (tq, minutes.map(_.paxLoad).sum)
}
.collect {
case (tq, pax) if pax > 0 => (tq, pax)
}
tqPax === expectedQueuePax
}
}
"Given a flight and a mock splits calculator" >> {
val arrival = ArrivalGenerator.arrival(actPax = Option(100), origin = PortCode("JFK"))
val flights = Seq(ApiFlightWithSplits(arrival, Set()))
val splits = Splits(Set(ApiPaxTypeAndQueueCount(EeaMachineReadable, EeaDesk, 1.0, None, None)), ApiSplitsWithHistoricalEGateAndFTPercentages, None, Percentage)
val mockSplits: SplitsForArrival = (_, _) => splits
"When I have a manifest matching the arrival I should get the mock splits added to the arrival" >> {
val manifest = VoyageManifestGenerator.manifestForArrival(arrival, List(euIdCard))
val manifestsForArrival = manifestsByKey(manifest)
val withLiveManifests = addManifests(flights, manifestsForArrival, mockSplits)
withLiveManifests === Seq(ApiFlightWithSplits(arrival.copy(ApiPax = Option(1)), Set(splits)))
}
"When I have no manifests matching the arrival I should get no splits added to the arrival" >> {
val manifest = VoyageManifestGenerator.voyageManifest(portCode = PortCode("AAA"))
val manifestsForDifferentArrival = manifestsByKey(manifest)
val withLiveManifests = addManifests(flights, manifestsForDifferentArrival, mockSplits)
withLiveManifests === Seq(ApiFlightWithSplits(arrival, Set()))
}
}
def manifestsByKey(manifest: VoyageManifest): Map[ArrivalKey, VoyageManifest] =
List(manifest)
.map { vm => vm.maybeKey.map(k => (k, vm)) }
.collect { case Some(k) => k }
.toMap
"Given an arrival with 100 pax " >> {
val arrival = ArrivalGenerator.arrival("BA0001", actPax = Option(100), schDt = s"2021-06-01T12:00", origin = PortCode("JFK"), feedSources = Set(LiveFeedSource))
"When I provide no live and no historic manifests, terminal splits should be applied (50% desk, 50% egates)" >> {
val expected: Map[(Terminal, Queue), Int] = Map((T1, EGate) -> 50, (T1, EeaDesk) -> 50)
setupGraphAndCheckQueuePax(
arrival = arrival,
livePax = None,
historicPax = None,
expectedQueuePax = expected)
success
}
"When I provide only historic splits with an id card pax, all pax should arrive at the eea desk " >> {
val expected: Map[(Terminal, Queue), Int] = Map((T1, EeaDesk) -> 100)
setupGraphAndCheckQueuePax(
arrival = arrival,
livePax = None,
historicPax = Option(List(euIdCard)),
expectedQueuePax = expected)
success
}
"When I provide only live splits with an id card pax, all pax should arrive at the eea desk " >> {
val expected: Map[(Terminal, Queue), Int] = Map((T1, EeaDesk) -> 100)
setupGraphAndCheckQueuePax(
arrival = arrival,
livePax = Option(xOfPaxType(100, euIdCard)),
historicPax = None,
expectedQueuePax = expected)
success
}
"When I provide live (id card) and historic (visa) splits, all pax should arrive at the eea desk as per the live splits" >> {
val expected: Map[(Terminal, Queue), Int] = Map((T1, EeaDesk) -> 100)
setupGraphAndCheckQueuePax(
arrival = arrival,
livePax = Option(xOfPaxType(100, euIdCard)),
historicPax = Option(List(visa)),
expectedQueuePax = expected)
success
}
"When I provide live (visa) and historic (id card) splits, all pax should arrive at the non-eea desk as per the live splits" >> {
val expected: Map[(Terminal, Queue), Int] = Map((T1, NonEeaDesk) -> 100)
setupGraphAndCheckQueuePax(
arrival = arrival,
livePax = Option(xOfPaxType(100, visa)),
historicPax = Option(List(euIdCard)),
expectedQueuePax = expected)
success
}
}
}
|
UKHomeOffice/drt-scalajs-spa-exploration
|
server/src/test/scala/services/crunch/deskrecs/DynamicRunnableDeskRecsSpec.scala
|
Scala
|
apache-2.0
| 10,615 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import java.io.File
import java.util.Properties
import kafka.api.KAFKA_0_11_0_IV0
import kafka.api.{KAFKA_0_10_0_IV1, KAFKA_0_9_0}
import kafka.server.KafkaConfig
import kafka.server.checkpoints.OffsetCheckpointFile
import kafka.utils._
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.record._
import org.junit.Assert._
import org.junit._
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import org.junit.runners.Parameterized.Parameters
import scala.Seq
import scala.collection._
/**
* This is an integration test that tests the fully integrated log cleaner
*/
@RunWith(value = classOf[Parameterized])
class LogCleanerParameterizedIntegrationTest(compressionCodec: String) extends AbstractLogCleanerIntegrationTest {
val codec: CompressionType = CompressionType.forName(compressionCodec)
val time = new MockTime()
val topicPartitions = Array(new TopicPartition("log", 0), new TopicPartition("log", 1), new TopicPartition("log", 2))
@Test
def cleanerTest() {
val largeMessageKey = 20
val (largeMessageValue, largeMessageSet) = createLargeSingleMessageSet(largeMessageKey, RecordBatch.CURRENT_MAGIC_VALUE)
val maxMessageSize = largeMessageSet.sizeInBytes
cleaner = makeCleaner(partitions = topicPartitions, maxMessageSize = maxMessageSize)
val log = cleaner.logs.get(topicPartitions(0))
val appends = writeDups(numKeys = 100, numDups = 3, log = log, codec = codec)
val startSize = log.size
cleaner.startup()
val firstDirty = log.activeSegment.baseOffset
checkLastCleaned("log", 0, firstDirty)
val compactedSize = log.logSegments.map(_.size).sum
assertTrue(s"log should have been compacted: startSize=$startSize compactedSize=$compactedSize", startSize > compactedSize)
checkLogAfterAppendingDups(log, startSize, appends)
val appendInfo = log.appendAsLeader(largeMessageSet, leaderEpoch = 0)
val largeMessageOffset = appendInfo.firstOffset.get
val dups = writeDups(startKey = largeMessageKey + 1, numKeys = 100, numDups = 3, log = log, codec = codec)
val appends2 = appends ++ Seq((largeMessageKey, largeMessageValue, largeMessageOffset)) ++ dups
val firstDirty2 = log.activeSegment.baseOffset
checkLastCleaned("log", 0, firstDirty2)
checkLogAfterAppendingDups(log, startSize, appends2)
// simulate deleting a partition, by removing it from logs
// force a checkpoint
// and make sure its gone from checkpoint file
cleaner.logs.remove(topicPartitions(0))
cleaner.updateCheckpoints(logDir)
val checkpoints = new OffsetCheckpointFile(new File(logDir, cleaner.cleanerManager.offsetCheckpointFile)).read()
// we expect partition 0 to be gone
assertFalse(checkpoints.contains(topicPartitions(0)))
}
@Test
def testCleansCombinedCompactAndDeleteTopic(): Unit = {
val logProps = new Properties()
val retentionMs: Integer = 100000
logProps.put(LogConfig.RetentionMsProp, retentionMs: Integer)
logProps.put(LogConfig.CleanupPolicyProp, "compact,delete")
def runCleanerAndCheckCompacted(numKeys: Int): (Log, Seq[(Int, String, Long)]) = {
cleaner = makeCleaner(partitions = topicPartitions.take(1), propertyOverrides = logProps, backOffMs = 100L)
val log = cleaner.logs.get(topicPartitions(0))
val messages = writeDups(numKeys = numKeys, numDups = 3, log = log, codec = codec)
val startSize = log.size
log.onHighWatermarkIncremented(log.logEndOffset)
val firstDirty = log.activeSegment.baseOffset
cleaner.startup()
// should compact the log
checkLastCleaned("log", 0, firstDirty)
val compactedSize = log.logSegments.map(_.size).sum
assertTrue(s"log should have been compacted: startSize=$startSize compactedSize=$compactedSize", startSize > compactedSize)
(log, messages)
}
val (log, _) = runCleanerAndCheckCompacted(100)
// should delete old segments
log.logSegments.foreach(_.lastModified = time.milliseconds - (2 * retentionMs))
TestUtils.waitUntilTrue(() => log.numberOfSegments == 1, "There should only be 1 segment remaining", 10000L)
assertEquals(1, log.numberOfSegments)
cleaner.shutdown()
// run the cleaner again to make sure if there are no issues post deletion
val (log2, messages) = runCleanerAndCheckCompacted(20)
val read = readFromLog(log2)
assertEquals("Contents of the map shouldn't change", toMap(messages), toMap(read))
}
@Test
def testCleanerWithMessageFormatV0(): Unit = {
// zstd compression is not supported with older message formats
if (codec == CompressionType.ZSTD)
return
val largeMessageKey = 20
val (largeMessageValue, largeMessageSet) = createLargeSingleMessageSet(largeMessageKey, RecordBatch.MAGIC_VALUE_V0)
val maxMessageSize = codec match {
case CompressionType.NONE => largeMessageSet.sizeInBytes
case _ =>
// the broker assigns absolute offsets for message format 0 which potentially causes the compressed size to
// increase because the broker offsets are larger than the ones assigned by the client
// adding `5` to the message set size is good enough for this test: it covers the increased message size while
// still being less than the overhead introduced by the conversion from message format version 0 to 1
largeMessageSet.sizeInBytes + 5
}
cleaner = makeCleaner(partitions = topicPartitions, maxMessageSize = maxMessageSize)
val log = cleaner.logs.get(topicPartitions(0))
val props = logConfigProperties(maxMessageSize = maxMessageSize)
props.put(LogConfig.MessageFormatVersionProp, KAFKA_0_9_0.version)
log.config = new LogConfig(props)
val appends = writeDups(numKeys = 100, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V0)
val startSize = log.size
cleaner.startup()
val firstDirty = log.activeSegment.baseOffset
checkLastCleaned("log", 0, firstDirty)
val compactedSize = log.logSegments.map(_.size).sum
assertTrue(s"log should have been compacted: startSize=$startSize compactedSize=$compactedSize", startSize > compactedSize)
checkLogAfterAppendingDups(log, startSize, appends)
val appends2: Seq[(Int, String, Long)] = {
val dupsV0 = writeDups(numKeys = 40, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V0)
val appendInfo = log.appendAsLeader(largeMessageSet, leaderEpoch = 0)
val largeMessageOffset = appendInfo.firstOffset.get
// also add some messages with version 1 and version 2 to check that we handle mixed format versions correctly
props.put(LogConfig.MessageFormatVersionProp, KAFKA_0_11_0_IV0.version)
log.config = new LogConfig(props)
val dupsV1 = writeDups(startKey = 30, numKeys = 40, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V1)
val dupsV2 = writeDups(startKey = 15, numKeys = 5, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V2)
appends ++ dupsV0 ++ Seq((largeMessageKey, largeMessageValue, largeMessageOffset)) ++ dupsV1 ++ dupsV2
}
val firstDirty2 = log.activeSegment.baseOffset
checkLastCleaned("log", 0, firstDirty2)
checkLogAfterAppendingDups(log, startSize, appends2)
}
@Test
def testCleaningNestedMessagesWithMultipleVersions(): Unit = {
// zstd compression is not supported with older message formats
if (codec == CompressionType.ZSTD)
return
val maxMessageSize = 192
cleaner = makeCleaner(partitions = topicPartitions, maxMessageSize = maxMessageSize, segmentSize = 256)
val log = cleaner.logs.get(topicPartitions(0))
val props = logConfigProperties(maxMessageSize = maxMessageSize, segmentSize = 256)
props.put(LogConfig.MessageFormatVersionProp, KAFKA_0_9_0.version)
log.config = new LogConfig(props)
// with compression enabled, these messages will be written as a single message containing
// all of the individual messages
var appendsV0 = writeDupsSingleMessageSet(numKeys = 2, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V0)
appendsV0 ++= writeDupsSingleMessageSet(numKeys = 2, startKey = 3, numDups = 2, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V0)
props.put(LogConfig.MessageFormatVersionProp, KAFKA_0_10_0_IV1.version)
log.config = new LogConfig(props)
var appendsV1 = writeDupsSingleMessageSet(startKey = 4, numKeys = 2, numDups = 2, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V1)
appendsV1 ++= writeDupsSingleMessageSet(startKey = 4, numKeys = 2, numDups = 2, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V1)
appendsV1 ++= writeDupsSingleMessageSet(startKey = 6, numKeys = 2, numDups = 2, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V1)
val appends = appendsV0 ++ appendsV1
val startSize = log.size
cleaner.startup()
val firstDirty = log.activeSegment.baseOffset
assertTrue(firstDirty > appendsV0.size) // ensure we clean data from V0 and V1
checkLastCleaned("log", 0, firstDirty)
val compactedSize = log.logSegments.map(_.size).sum
assertTrue(s"log should have been compacted: startSize=$startSize compactedSize=$compactedSize", startSize > compactedSize)
checkLogAfterAppendingDups(log, startSize, appends)
}
@Test
def cleanerConfigUpdateTest() {
val largeMessageKey = 20
val (largeMessageValue, largeMessageSet) = createLargeSingleMessageSet(largeMessageKey, RecordBatch.CURRENT_MAGIC_VALUE)
val maxMessageSize = largeMessageSet.sizeInBytes
cleaner = makeCleaner(partitions = topicPartitions, backOffMs = 1, maxMessageSize = maxMessageSize,
cleanerIoBufferSize = Some(1))
val log = cleaner.logs.get(topicPartitions(0))
writeDups(numKeys = 100, numDups = 3, log = log, codec = codec)
val startSize = log.size
cleaner.startup()
assertEquals(1, cleaner.cleanerCount)
// Verify no cleaning with LogCleanerIoBufferSizeProp=1
val firstDirty = log.activeSegment.baseOffset
val topicPartition = new TopicPartition("log", 0)
cleaner.awaitCleaned(topicPartition, firstDirty, maxWaitMs = 10)
assertTrue("Should not have cleaned", cleaner.cleanerManager.allCleanerCheckpoints.isEmpty)
def kafkaConfigWithCleanerConfig(cleanerConfig: CleanerConfig): KafkaConfig = {
val props = TestUtils.createBrokerConfig(0, "localhost:2181")
props.put(KafkaConfig.LogCleanerThreadsProp, cleanerConfig.numThreads.toString)
props.put(KafkaConfig.LogCleanerDedupeBufferSizeProp, cleanerConfig.dedupeBufferSize.toString)
props.put(KafkaConfig.LogCleanerDedupeBufferLoadFactorProp, cleanerConfig.dedupeBufferLoadFactor.toString)
props.put(KafkaConfig.LogCleanerIoBufferSizeProp, cleanerConfig.ioBufferSize.toString)
props.put(KafkaConfig.MessageMaxBytesProp, cleanerConfig.maxMessageSize.toString)
props.put(KafkaConfig.LogCleanerBackoffMsProp, cleanerConfig.backOffMs.toString)
props.put(KafkaConfig.LogCleanerIoMaxBytesPerSecondProp, cleanerConfig.maxIoBytesPerSecond.toString)
KafkaConfig.fromProps(props)
}
// Verify cleaning done with larger LogCleanerIoBufferSizeProp
val oldConfig = kafkaConfigWithCleanerConfig(cleaner.currentConfig)
val newConfig = kafkaConfigWithCleanerConfig(CleanerConfig(numThreads = 2,
dedupeBufferSize = cleaner.currentConfig.dedupeBufferSize,
dedupeBufferLoadFactor = cleaner.currentConfig.dedupeBufferLoadFactor,
ioBufferSize = 100000,
maxMessageSize = cleaner.currentConfig.maxMessageSize,
maxIoBytesPerSecond = cleaner.currentConfig.maxIoBytesPerSecond,
backOffMs = cleaner.currentConfig.backOffMs))
cleaner.reconfigure(oldConfig, newConfig)
assertEquals(2, cleaner.cleanerCount)
checkLastCleaned("log", 0, firstDirty)
val compactedSize = log.logSegments.map(_.size).sum
assertTrue(s"log should have been compacted: startSize=$startSize compactedSize=$compactedSize", startSize > compactedSize)
}
private def checkLastCleaned(topic: String, partitionId: Int, firstDirty: Long) {
// wait until cleaning up to base_offset, note that cleaning happens only when "log dirty ratio" is higher than
// LogConfig.MinCleanableDirtyRatioProp
val topicPartition = new TopicPartition(topic, partitionId)
cleaner.awaitCleaned(topicPartition, firstDirty)
val lastCleaned = cleaner.cleanerManager.allCleanerCheckpoints(topicPartition)
assertTrue(s"log cleaner should have processed up to offset $firstDirty, but lastCleaned=$lastCleaned",
lastCleaned >= firstDirty)
}
private def checkLogAfterAppendingDups(log: Log, startSize: Long, appends: Seq[(Int, String, Long)]) {
val read = readFromLog(log)
assertEquals("Contents of the map shouldn't change", toMap(appends), toMap(read))
assertTrue(startSize > log.size)
}
private def toMap(messages: Iterable[(Int, String, Long)]): Map[Int, (String, Long)] = {
messages.map { case (key, value, offset) => key -> (value, offset) }.toMap
}
private def readFromLog(log: Log): Iterable[(Int, String, Long)] = {
import JavaConverters._
for (segment <- log.logSegments; deepLogEntry <- segment.log.records.asScala) yield {
val key = TestUtils.readString(deepLogEntry.key).toInt
val value = TestUtils.readString(deepLogEntry.value)
(key, value, deepLogEntry.offset)
}
}
private def writeDupsSingleMessageSet(numKeys: Int, numDups: Int, log: Log, codec: CompressionType,
startKey: Int = 0, magicValue: Byte): Seq[(Int, String, Long)] = {
val kvs = for (_ <- 0 until numDups; key <- startKey until (startKey + numKeys)) yield {
val payload = counter.toString
incCounter()
(key, payload)
}
val records = kvs.map { case (key, payload) =>
new SimpleRecord(key.toString.getBytes, payload.toString.getBytes)
}
val appendInfo = log.appendAsLeader(MemoryRecords.withRecords(magicValue, codec, records: _*), leaderEpoch = 0)
val offsets = appendInfo.firstOffset.get to appendInfo.lastOffset
kvs.zip(offsets).map { case (kv, offset) => (kv._1, kv._2, offset) }
}
}
object LogCleanerParameterizedIntegrationTest {
@Parameters
def parameters: java.util.Collection[Array[String]] = {
val list = new java.util.ArrayList[Array[String]]()
for (codec <- CompressionType.values)
list.add(Array(codec.name))
list
}
}
|
gf53520/kafka
|
core/src/test/scala/unit/kafka/log/LogCleanerParameterizedIntegrationTest.scala
|
Scala
|
apache-2.0
| 15,379 |
package com.twitter.gizzard.scheduler
import org.specs.Specification
import org.specs.mock.{ClassMocker, JMocker}
import com.twitter.json.Json
import com.twitter.gizzard.ConfiguredSpecification
class JsonJobParserSpec extends ConfiguredSpecification with JMocker with ClassMocker {
"JsonJobParser" should {
val attributes = Map("a" -> 1)
val jobMap = Map("Job" -> attributes)
val codec = mock[JsonCodec]
val job = mock[JsonJob]
val jobParser = new JsonJobParser {
def apply(json: Map[String, Any]) = {
job
}
}
"parse" in {
"simple job" in {
expect {
one(job).errorCount_=(0)
one(job).errorMessage_=(any[String])
}
jobParser.parse(attributes) mustEqual job
}
"nested job" in {
expect {
one(codec).inflate(jobMap) willReturn job
allowing(job).className willReturn "ugh"
allowing(job).toMap willReturn Map.empty[String, String]
}
val nestedAttributes = Map("tasks" -> List(jobMap))
new JsonNestedJobParser(codec).parse(nestedAttributes) mustEqual new JsonNestedJob(List(job))
}
"errors" in {
expect {
one(job).errorCount_=(23)
one(job).errorMessage_=("Good heavens!")
}
jobParser.parse(attributes ++ Map("error_count" -> 23, "error_message" -> "Good heavens!")) mustEqual job
}
}
"toJson" in {
val job = new JsonJob {
def toMap = attributes
override def className = "FakeJob"
def apply() { }
}
"JsonJob" in {
val json = job.toJson
json mustMatch "\\"FakeJob\\""
json mustMatch "\\"a\\":1"
json mustMatch "\\"error_count\\":0"
json mustMatch "\\"error_message\\":\\"\\\\(none\\\\)\\""
}
"JsonNestedJob" in {
val nestedJob = new JsonNestedJob(List(job))
val json = nestedJob.toJson
json mustMatch "\\"com.twitter.gizzard.scheduler.JsonNestedJob\\":\\\\{"
json mustMatch "\\"error_count\\":0"
json mustMatch "\\"error_message\\":\\"\\\\(none\\\\)\\""
json mustMatch "\\"tasks\\":\\\\[\\\\{\\"FakeJob\\":\\\\{\\"a\\":1\\\\}\\\\}\\\\]"
}
"errors" in {
job.errorCount = 23
job.errorMessage = "Good heavens!"
val json = job.toJson
json mustMatch "\\\\{\\"FakeJob\\":\\\\{"
json mustMatch "\\"a\\":1"
json mustMatch "\\"error_count\\":23"
json mustMatch "\\"error_message\\":\\"Good heavens!\\""
}
}
}
}
|
kangkot/gizzard
|
src/test/scala/com/twitter/gizzard/scheduler_new/JsonJobParserSpec.scala
|
Scala
|
apache-2.0
| 2,514 |
package com.github.j5ik2o.dddbase.memory
import com.github.j5ik2o.dddbase.{ AggregateMultiSoftDeletable, AggregateMultiWriter }
import monix.eval.Task
trait AggregateMultiSoftDeleteFeature extends AggregateMultiSoftDeletable[Task] with AggregateBaseReadFeature {
this: AggregateMultiWriter[Task] with AggregateSingleSoftDeleteFeature =>
override def softDeleteMulti(ids: Seq[IdType]): Task[Long] = dao.softDeleteMulti(ids.map(_.value.toString))
}
|
j5ik2o/scala-ddd-base-functional
|
nosql/memory/src/main/scala/com/github/j5ik2o/dddbase/memory/AggregateMultiSoftDeleteFeature.scala
|
Scala
|
mit
| 455 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.feats
import io.truthencode.ddo.support.requisite.RequiresCharacterLevel
/**
* Epic Feat that can be taken at level 21 or 24.
*/
protected[feats] trait EpicSkills extends Passive with RequiresCharacterLevel {
self: EpicFeat =>
override val requireCharacterLevel: Int = 21
}
|
adarro/ddo-calc
|
subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/feats/EpicSkills.scala
|
Scala
|
apache-2.0
| 987 |
package ghpages.examples
import ghpages.examples.util.SingleSide
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.prefix_<^._
import org.scalajs.dom
/**
* Example of using Touch events.
*
* TouchList is JavaScript collection, so it is converted to Scala IndexedSeq.
* Showing only top 10 events, so mobile phone will not crash.
* Preventing default events, so move and zoom events could also be tested
*/
object TouchExample {
def content = SingleSide.Content(source, TouchExampleApp())
val source =
"""
|// Recommended to test with real Touch screens or with Chrome "Emulate touch screen"
|
|/** Keeping history of events **/
|case class State(log: List[String] = List()) {
| def withEntry(name: String) = copy(log = name :: log)
|
| def limit(max: Int) = if (log.size > max) copy(log = log.init) else this
|}
|
|/** Saving touch event details to state */
|class Backend(val $: BackendScope[Unit, State]) {
| def debugEvent(e: ReactTouchEvent): Unit = preventDefault(e) { state =>
| state withEntry s"${e.nativeEvent.`type`}: ${formatTouches(e.changedTouches)}" limit 10
| }
|
| private def preventDefault(e: ReactTouchEvent)(transformer: State => State): Unit = {
| e.preventDefault()
| $.modState(transformer)
| }
|
| private def formatTouches(touches: dom.TouchList) =
| toSeq(touches).map(formatCoordinates).mkString(" | ")
|
| private def toSeq[A](list: dom.DOMList[A]) =
| for(i <- 0 to list.length - 1) yield list.item(i)
|
| private def formatCoordinates(touch: dom.Touch) =
| s"${touch.screenX}x${touch.screenY}: ${touch.radiusX}x${touch.radiusY}"
|}
|
|/** Rendering touch area and history of events */
|val TouchExampleApp = ReactComponentB[Unit]("TouchExample")
| .initialState(new State)
| .backend(new Backend(_))
| .render { (P, S, B) =>
|
| <.div(
| <.div(
| "Area to test touch events",
| ^.width := 200, // Basic style
| ^.height := 200,
| ^.border := "1px solid blue",
| ^.onTouchStart ==> B.debugEvent, // Forwarding touch events
| ^.onTouchMove ==> B.debugEvent,
| ^.onTouchEnd ==> B.debugEvent,
| ^.onTouchCancel ==> B.debugEvent
| ),
| <.ul ( // Rendering history of events
| S.log.map(
| <.li(_)
| )
| )
| )
|
|}.buildU
|""".stripMargin
/** Keeping history of events **/
case class State(log: List[String] = List()) {
def withEntry(name: String) = copy(log = name :: log)
def limit(max: Int) = if (log.size > max) copy(log = log.init) else this
}
/** Saving touch event details to state */
class Backend(val $: BackendScope[Unit, State]) {
def debugEvent(e: ReactTouchEvent): Unit = preventDefault(e) { state =>
state withEntry s"${e.nativeEvent.`type`}: ${formatTouches(e.changedTouches)}" limit 10
}
private def preventDefault(e: ReactTouchEvent)(transformer: State => State): Unit = {
e.preventDefault()
$.modState(transformer)
}
private def formatTouches(touches: dom.TouchList) =
toSeq(touches).map(formatCoordinates).mkString(" | ")
private def toSeq[A](list: dom.DOMList[A]) =
for(i <- 0 to list.length - 1) yield list.item(i)
private def formatCoordinates(touch: dom.Touch) =
s"${touch.screenX}x${touch.screenY}: ${touch.radiusX}x${touch.radiusY}"
}
/** Rendering touch area and history of events */
val TouchExampleApp = ReactComponentB[Unit]("TouchExample")
.initialState(new State)
.backend(new Backend(_))
.render { (P, S, B) =>
<.div(
<.div(
"Area to test touch events",
^.width := 200, // Basic style
^.height := 200,
^.border := "1px solid blue",
^.onTouchStart ==> B.debugEvent, // Forwarding touch events
^.onTouchMove ==> B.debugEvent,
^.onTouchEnd ==> B.debugEvent,
^.onTouchCancel ==> B.debugEvent
),
<.ul ( // Rendering history of events
S.log.map(
<.li(_)
)
)
)
}.buildU
}
|
gshakhn/scalajs-react
|
gh-pages/src/main/scala/ghpages/examples/TouchExample.scala
|
Scala
|
apache-2.0
| 4,423 |
/* The Computer Language Benchmarks Game
* http://shootout.alioth.debian.org/
* original contributed by Kenneth Jonsson
*/
import scala.actors.Actor
import scala.actors.Actor._
class Worker(size: Int) extends Actor {
private val bytesPerRow = (size + 7) >> 3
private val maxIterations = 50
private val limitSquared = 4.0
// Calculate all pixes for one row [-i..i], the real-part
// coordinate is constant throughout this method
private def calcRow(rowNum: Int): (Actor, Int, Array[Byte]) = {
var rowBitmap = new Array[Byte](bytesPerRow)
var column = 0
val ci = 2.0 * rowNum / size - 1.0
while (column < size) {
val cr = 2.0 * column / size - 1.5
var zr, tr, zi, ti = 0.0
var iterations = 0
do {
zi = 2 * zr * zi + ci
zr = tr - ti + cr
ti = zi * zi
tr = zr * zr
iterations += 1
} while (tr + ti <= limitSquared && iterations < maxIterations)
if (tr + ti <= limitSquared)
rowBitmap(column >> 3) = (rowBitmap(column >> 3)
| (0x80 >> (column & 7))).toByte
column += 1
}
return (self, rowNum, rowBitmap)
}
def act() {
while (true) {
receive {
case rowNum: Int =>
reply(calcRow(rowNum))
case "EXIT" =>
exit()
}
}
}
}
class MandelbrotCoordinator(size: Int) extends Actor {
private var nextRowNum = 0
private var rowsRemaining = size
private var bitmap = new Array[Array[Byte]](size)
private def calcNextRow(worker: Actor) {
if (nextRowNum == size)
// All rows has been dispatched, tell the worker to exit
worker ! "EXIT"
else {
worker ! nextRowNum
nextRowNum += 1
}
}
def act() {
for (i <- 1 to Runtime.getRuntime().availableProcessors()) {
val worker = new Worker(size)
// Keep two rows in flight per worker to avoid any worker
// idle time, probably not neccessary on a quad-core
// machine but might help at higher core count...
calcNextRow(worker)
calcNextRow(worker)
worker.start
}
while (true) {
receive {
case (sender: Actor, rowNum: Int, rowBitmap: Array[Byte]) =>
calcNextRow(sender)
bitmap(rowNum) = rowBitmap
rowsRemaining -= 1
if (rowsRemaining == 0) {
// The image is finished, write it to stdout and exit
println("P4\\n" + size + " " + size)
bitmap.foreach(row => System.out.write(row, 0, row.length))
exit()
}
}
}
}
}
object mandelbrot {
def main(args: Array[String]) {
val coordinator = new MandelbrotCoordinator(args(0).toInt)
coordinator.start
}
}
|
kragen/shootout
|
bench/mandelbrot/mandelbrot.scala-3.scala
|
Scala
|
bsd-3-clause
| 2,581 |
package lila.tournament
import org.joda.time.DateTime
import play.api.libs.iteratee._
import reactivemongo.bson._
import scala.concurrent.duration._
import lila.common.Maths
import lila.common.paginator.Paginator
import lila.db.BSON._
import lila.db.dsl._
import lila.db.paginator.Adapter
import lila.rating.PerfType
import lila.user.User
final class LeaderboardApi(
coll: Coll,
maxPerPage: Int) {
import LeaderboardApi._
import BSONHandlers._
def recentByUser(user: User, page: Int) = paginator(user, page, $doc("d" -> -1))
def bestByUser(user: User, page: Int) = paginator(user, page, $doc("w" -> 1))
def chart(user: User): Fu[ChartData] = {
import reactivemongo.bson._
import reactivemongo.api.collections.bson.BSONBatchCommands.AggregationFramework._
coll.aggregate(
Match($doc("u" -> user.id)),
List(GroupField("v")("nb" -> SumValue(1), "points" -> PushField("s"), "ratios" -> PushField("w")))
).map {
_.firstBatch map leaderboardAggregationResultBSONHandler.read
}.map { aggs =>
ChartData {
aggs.flatMap { agg =>
PerfType.byId get agg._id map {
_ -> ChartData.PerfResult(
nb = agg.nb,
points = ChartData.Ints(agg.points),
rank = ChartData.Ints(agg.ratios))
}
}.sortLike(PerfType.leaderboardable, _._1)
}
}
}
private def paginator(user: User, page: Int, sort: Bdoc): Fu[Paginator[TourEntry]] = Paginator(
adapter = new Adapter[Entry](
collection = coll,
selector = $doc("u" -> user.id),
projection = $empty,
sort = sort
) mapFutureList withTournaments,
currentPage = page,
maxPerPage = maxPerPage)
private def withTournaments(entries: Seq[Entry]): Fu[Seq[TourEntry]] =
TournamentRepo byIds entries.map(_.tourId) map { tours =>
entries.flatMap { entry =>
tours.find(_.id == entry.tourId).map { TourEntry(_, entry) }
}
}
}
object LeaderboardApi {
private val rankRatioMultiplier = 100 * 1000
case class TourEntry(tour: Tournament, entry: Entry)
case class Ratio(value: Double)
case class Entry(
id: String, // same as tournament player id
userId: String,
tourId: String,
nbGames: Int,
score: Int,
rank: Int,
rankRatio: Ratio, // ratio * rankRatioMultiplier. function of rank and tour.nbPlayers. less is better.
freq: Option[Schedule.Freq],
speed: Option[Schedule.Speed],
perf: PerfType,
date: DateTime)
case class ChartData(perfResults: List[(PerfType, ChartData.PerfResult)]) {
import ChartData._
lazy val allPerfResults: PerfResult = perfResults.map(_._2) match {
case head :: tail => tail.foldLeft(head) {
case (acc, res) => PerfResult(
nb = acc.nb + res.nb,
points = res.points ::: acc.points,
rank = res.rank ::: acc.rank)
}
case Nil => PerfResult(0, Ints(Nil), Ints(Nil))
}
}
object ChartData {
case class Ints(v: List[Int]) {
def mean = v.toNel map Maths.mean[Int]
def median = v.toNel map Maths.median[Int]
def sum = v.sum
def :::(i: Ints) = Ints(v ::: i.v)
}
case class PerfResult(nb: Int, points: Ints, rank: Ints) {
private def rankPercent(n: Double) = (n * 100 / rankRatioMultiplier).toInt
def rankPercentMean = rank.mean map rankPercent
def rankPercentMedian = rank.median map rankPercent
}
case class AggregationResult(_id: Int, nb: Int, points: List[Int], ratios: List[Int])
}
}
|
clarkerubber/lila
|
modules/tournament/src/main/LeaderboardApi.scala
|
Scala
|
agpl-3.0
| 3,568 |
package spinoco.protocol.http.header.value
import scodec.codecs._
import scodec.{Attempt, Codec, Err}
import spinoco.protocol.http.codec.helper._
sealed case class EntityTag(tag: String, weak: Boolean)
object EntityTag {
val codec: Codec[EntityTag] = {
val weakTag: Codec[EntityTag] = {
(asciiConstant("W/") ~> alwaysQuotedUtf8String).exmap(
s => Attempt.successful(EntityTag(s, weak = true))
, et => if (et.weak) Attempt.successful(et.tag) else Attempt.failure(Err("Weak tag is expected"))
)
}
choice(
weakTag
, alwaysQuotedUtf8String.xmap(s => EntityTag(s, weak = false), _.tag)
)
}
}
|
Spinoco/protocol
|
http/src/main/scala/spinoco/protocol/http/header/value/EntityTag.scala
|
Scala
|
mit
| 654 |
/*
* Copyright (c) 2013 Christos KK Loverdos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ckkloverdos.thrift3r
package codec
package primitiveref
import com.ckkloverdos.thrift3r.BinReprType
import com.ckkloverdos.thrift3r.protocol.Protocol
/**
*
* @author Christos KK Loverdos <[email protected]>
*/
case object IntRefCodec extends Codec[java.lang.Integer] with CodecToString {
final def binReprType = BinReprType.INT32
final def typeToken = typeTokenOfClass(IntRefClass)
@inline final def getValue(value: java.lang.Integer): Int = {
value match {
case null ⇒ 0
case _ ⇒ value.intValue()
}
}
final def encode(protocol: Protocol, value: java.lang.Integer) {
val intValue = getValue(value)
protocol.writeInt32(intValue)
}
final def decode(protocol: Protocol) = java.lang.Integer.valueOf(protocol.readInt32())
final def toDirectString(value: java.lang.Integer) = String.valueOf(getValue(value))
final def fromDirectString(value: String) = java.lang.Integer.valueOf(value)
}
|
loverdos/thrift3r
|
src/main/scala/com/ckkloverdos/thrift3r/codec/primitiveref/IntRefCodec.scala
|
Scala
|
apache-2.0
| 1,567 |
import sbt._
import Keys._
import org.scalatra.sbt._
import com.mojolly.scalate.ScalatePlugin._
import ScalateKeys._
object LunabotBuild extends Build {
val Organization = "com.lunatech"
val Name = "lunabot"
val Version = "0.1.0-SNAPSHOT"
val ScalaVersion = "2.11.7"
val ScalatraVersion = "2.3.0"
lazy val project = Project (
"lunabot",
file("."),
settings = ScalatraPlugin.scalatraSettings ++ scalateSettings ++ Seq(
organization := Organization,
name := Name,
version := Version,
scalaVersion := ScalaVersion,
resolvers += Classpaths.typesafeReleases,
resolvers += "Scalaz Bintray Repo" at "http://dl.bintray.com/scalaz/releases",
libraryDependencies ++= Seq(
"org.scalatra" %% "scalatra" % ScalatraVersion,
"org.scalatra" %% "scalatra-scalate" % ScalatraVersion,
"org.scalatra" %% "scalatra-specs2" % ScalatraVersion % "test",
"org.scalatra" %% "scalatra-json" % ScalatraVersion,
"org.json4s" %% "json4s-jackson" % "3.2.9",
"ch.qos.logback" % "logback-classic" % "1.1.2" % "runtime",
"org.eclipse.jetty" % "jetty-webapp" % "9.2.10.v20150310" % "container;compile",
"javax.servlet" % "javax.servlet-api" % "3.1.0" % "provided",
"net.databinder.dispatch" %% "dispatch-core" % "0.11.2",
"org.scalatest" % "scalatest_2.11" % "2.2.4" % "test",
"com.typesafe" % "config" % "1.2.1"
),
scalateTemplateConfig in Compile <<= (sourceDirectory in Compile){ base =>
Seq(
TemplateConfig(
base / "webapp" / "WEB-INF" / "templates",
Seq.empty, /* default imports should be added here */
Seq(
Binding("context", "_root_.org.scalatra.scalate.ScalatraRenderContext", importMembers = true, isImplicit = true)
), /* add extra bindings here */
Some("templates")
)
)
}
)
)
}
|
mariadroman/lunabot
|
project/build.scala
|
Scala
|
apache-2.0
| 1,949 |
package io.eels.component.jdbc
import java.sql.DriverManager
import org.scalatest.{Matchers, WordSpec}
import scala.util.Random
class RangePartitionStrategyTest extends WordSpec with Matchers {
Class.forName("org.h2.Driver")
private val conn = DriverManager.getConnection("jdbc:h2:mem:rangetest")
conn.createStatement().executeUpdate("create table bucket_test (a integer)")
for (k <- 0 until 20) {
conn.createStatement().executeUpdate(s"insert into bucket_test (a) values (${Random.nextInt(10000)})")
}
"BucketPartitionStrategy" should {
"generate evenly spaced ranges" in {
RangePartitionStrategy("a", 10, 2, 29).ranges shouldBe List((2, 4), (5, 7), (8, 10), (11, 13), (14, 16), (17, 19), (20, 22), (23, 25), (26, 27), (28, 29))
RangePartitionStrategy("a", 2, 2, 30).ranges shouldBe List((2, 16), (17, 30))
RangePartitionStrategy("a", 1, 4, 5).ranges shouldBe List((4, 5))
RangePartitionStrategy("a", 1, 4, 4).ranges shouldBe List((4, 4))
RangePartitionStrategy("a", 6, 1, 29).ranges shouldBe List((1, 5), (6, 10), (11, 15), (16, 20), (21, 25), (26, 29))
}
"return correct number of ranges" in {
JdbcSource(() => DriverManager.getConnection("jdbc:h2:mem:rangetest"), "select * from bucket_test")
.withPartitionStrategy(RangePartitionStrategy("a", 4, 0, 10000))
.parts().size shouldBe 4
}
"return full and non overlapping data" in {
JdbcSource(() => DriverManager.getConnection("jdbc:h2:mem:rangetest"), "select * from bucket_test")
.withPartitionStrategy(RangePartitionStrategy("a", 4, 0, 10000))
.toDataStream().collect.size shouldBe 20
}
}
}
|
eel-lib/eel
|
eel-core/src/test/scala/io/eels/component/jdbc/RangePartitionStrategyTest.scala
|
Scala
|
mit
| 1,668 |
/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package swave.docs
import org.scalatest.{FreeSpec, Matchers}
class DrainSpec extends FreeSpec with Matchers {
"the examples in the `drains` chapter should work as expected" - {
"examples" in {
//#examples
import scala.concurrent.Future
import swave.core._
implicit val env = StreamEnv()
// a drain, which produces the sum of all `Int` elements it receives
def sumDrain: Drain[Int, Future[Int]] =
Drain.fold(0)(_ + _)
Spout(1 to 100) // Spout[Int]
.to(sumDrain) // StreamGraph[Int]
.run() // StreamRun[Future[Int]
.result // Future[Int]
.value // Option[Try[Int]]
.get // Try[Int]
.get shouldEqual 5050
// same but shorter
Spout(1 to 100)
.drainTo(sumDrain) // shortcut for `.to(sumDrain).run().result`
.value.get.get shouldEqual 5050
//#examples
}
}
}
|
sirthias/swave
|
docs/src/test/scala/swave/docs/DrainSpec.scala
|
Scala
|
mpl-2.0
| 1,149 |
package org.bitcoins.core.util
import org.bitcoins.core.util.testprotocol._
import org.scalatest.{FlatSpec, MustMatchers}
import spray.json._
import scala.io.Source
/**
* Created by tom on 5/17/16.
*/
class Base58Test extends FlatSpec with MustMatchers {
"Base58" must "encode byte value of 0 to character of 1" in {
Base58.encode(0.toByte) must be("1")
}
it must "encode byte value of 22 to character P" in {
Base58.encode(22.toByte) must be("P")
}
it must "decode base58 character '1' to byte value of 0 then encode back to base58 char '1'" in {
val char = "1"
val decoded = Base58.decode(char)
Base58.encode(decoded) must be(char)
}
it must "decode character Z to byte value of 32" in {
Base58.decode("Z").head must be(32.toByte)
}
it must "encode tests in base58_encode_decode.json" in {
Base58.encode("") must be("")
Base58.encode("61") must be("2g")
Base58.encode("626262") must be("a3gV")
Base58.encode("636363") must be("aPEr")
Base58.encode("73696d706c792061206c6f6e6720737472696e67") must be(
"2cFupjhnEsSn59qHXstmK2ffpLv2")
Base58.encode("00eb15231dfceb60925886b67d065299925915aeb172c06647") must be(
"1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L")
Base58.encode("516b6fcd0f") must be("ABnLTmg")
Base58.encode("bf4f89001e670274dd") must be("3SEo3LWLoPntC")
Base58.encode("572e4794") must be("3EFU7m")
Base58.encode("ecac89cad93923c02321") must be("EJDM8drfXA6uyA")
Base58.encode("10c8511e") must be("Rt5zm")
Base58.encode("00000000000000000000") must be("1111111111")
}
it must "decode tests in base58_encode_decode.json" in {
def decodedBase58EncodeToHex(value: String): String =
BitcoinSUtil.encodeHex(Base58.decode(value))
decodedBase58EncodeToHex("2g") must be("61")
decodedBase58EncodeToHex("a3gV") must be("626262")
decodedBase58EncodeToHex("aPEr") must be("636363")
decodedBase58EncodeToHex("2cFupjhnEsSn59qHXstmK2ffpLv2") must be(
"73696d706c792061206c6f6e6720737472696e67")
decodedBase58EncodeToHex("1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L") must be(
"00eb15231dfceb60925886b67d065299925915aeb172c06647")
decodedBase58EncodeToHex("ABnLTmg") must be("516b6fcd0f")
decodedBase58EncodeToHex("3SEo3LWLoPntC") must be("bf4f89001e670274dd")
decodedBase58EncodeToHex("3EFU7m") must be("572e4794")
decodedBase58EncodeToHex("EJDM8drfXA6uyA") must be("ecac89cad93923c02321")
decodedBase58EncodeToHex("Rt5zm") must be("10c8511e")
decodedBase58EncodeToHex("1111111111") must be("00000000000000000000")
}
it must "read base58_keys_valid.json and validate each case" in {
import org.bitcoins.core.util.testprotocol.Base58ValidTestCaseProtocol._
val source =
Source.fromURL(this.getClass.getResource("/base58_keys_valid.json"))
val lines =
try source.getLines.filterNot(_.isEmpty).map(_.trim) mkString "\\n"
finally source.close()
val json = lines.parseJson
val testCases: Seq[Base58ValidTestCase] =
json.convertTo[Seq[Base58ValidTestCase]]
for {
testCase <- testCases
} yield {
//if testCase is an Address, it must have a valid base58 representation
if (testCase.addressOrWIFPrivKey.isLeft) {
Base58.isValid(testCase.addressOrWIFPrivKey.left.get.value) must be(
true)
} else {
Base58.isValid(testCase.addressOrWIFPrivKey.right.get) must be(true)
}
}
}
it must "read base58_keys_invalid.json and return each as an invalid base58 string" in {
import org.bitcoins.core.util.testprotocol.Base58InvalidTestCase
import org.bitcoins.core.util.testprotocol.Base58InvalidTestCaseProtocol._
val source =
Source.fromURL(this.getClass.getResource("/base58_keys_invalid.json"))
val lines =
try source.getLines.filterNot(_.isEmpty).map(_.trim) mkString "\\n"
finally source.close()
val json = lines.parseJson
val testCases: Seq[Base58InvalidTestCase] =
json.convertTo[Seq[Base58InvalidTestCase]]
for {
testCase <- testCases
} yield {
testCase must be(Base58InvalidTestCaseImpl(testCase.base58EncodedString))
Base58.isValid(testCase.base58EncodedString) must be(false)
}
}
it must "check validity of base58 string with illegal characters and fail" in {
Base58.isValid("3CMNFxN1oHBc4R1EpboAL5yzHGgE611Xol") must be(false)
}
it must "decodeCheck a string with a length less than 4 and fail" in {
Base58.decodeCheck("asf").isFailure must be(true)
}
it must "decodeCheck a valid string and succeed" in {
Base58.decodeCheck("3CMNFxN1oHBc4R1EpboAL5yzHGgE611Xou").isSuccess must be(
true)
}
}
|
bitcoin-s/bitcoin-s-core
|
core-test/src/test/scala/org/bitcoins/core/util/Base58Test.scala
|
Scala
|
mit
| 4,679 |
package com.github.kmizu.llwan
import com.github.kmizu.llwan.Ast.Choices
class GrammarAnalyzer(grammar: Ast.Grammar) {
type FirstSetTable = Map[Symbol, Set[String]]
private[this] val mapping: Map[Symbol, Choices] = grammar.rules.map{ rule => rule.name -> rule.body}.toMap
def calculateFirstSet: FirstSetTable = {
def first(e: Ast.Exp, visit: Set[Symbol]): Set[String] = e match {
case Ast.Choices(_, choices) =>
def firsts(seq: List[Ast.Prm]): Set[String] = seq match {
case hd::tl =>
val result = first(hd, visit)
result ++ (if(result.contains("")) firsts(tl) else Set.empty[String])
case Nil => Set.empty[String]
}
choices.foldLeft(Set.empty[String]){(set, choice) => set ++ firsts(choice)}
case [email protected](_, name) =>
if(visit.contains(name)) Set() else first(mapping(name), visit + name)
case Ast.Str(_, c) =>
Set(c)
case Ast.Emp(_) =>
Set("")
}
(mapping.map { case (ident, exp) => (ident) -> first(mapping(ident), Set(ident)) }: FirstSetTable)
}
def calculateFollowSet(grammar: Ast.Grammar) = ???
def calculateDirectorSet(grammar: Ast.Grammar) = ???
}
|
kmizu/llwan
|
src/main/scala/com/github/kmizu/llwan/GrammarAnalyzer.scala
|
Scala
|
mit
| 1,206 |
/**
* Copyright 2017 Interel
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package core3.security
import core3.database.containers.core.LocalUser
import play.api.libs.json.JsValue
/**
* User data container.
*
* @param userID the ID of the user
* @param permissions the permissions the user has
* @param profile user profile data
* @param sessionToken the session associated with the user
*/
class LocalAuthUserToken(val userID: String, val permissions: Vector[String], val profile: JsValue, val sessionToken: String) extends UserTokenBase {
}
object LocalAuthUserToken {
def apply(userID: String, permissions: Vector[String], profile: JsValue, sessionToken: String): LocalAuthUserToken = new LocalAuthUserToken(userID, permissions, profile, sessionToken)
def apply(user: LocalUser, sessionToken: String): LocalAuthUserToken = new LocalAuthUserToken(user.userID, user.permissions, user.metadata, sessionToken)
}
|
Interel-Group/core3
|
src/main/scala/core3/security/LocalAuthUserToken.scala
|
Scala
|
apache-2.0
| 1,472 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import java.{util => ju}
import org.apache.spark.SparkException
import org.apache.spark.annotation.Experimental
import org.apache.spark.ml.Model
import org.apache.spark.ml.attribute.NominalAttribute
import org.apache.spark.ml.param._
import org.apache.spark.ml.param.shared.{HasInputCol, HasOutputCol}
import org.apache.spark.ml.util.{Identifiable, SchemaUtils}
import org.apache.spark.sql._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.{DoubleType, StructField, StructType}
/**
* :: Experimental ::
* `Bucketizer` maps a column of continuous features to a column of feature buckets.
* `Bucketizer`将一列连续特征映射到一列特征桶
*/
@Experimental
final class Bucketizer(override val uid: String)
extends Model[Bucketizer] with HasInputCol with HasOutputCol {
def this() = this(Identifiable.randomUID("bucketizer"))
/**
* Parameter for mapping continuous features into buckets. With n+1 splits, there are n buckets.
* 用于将连续要素映射到存储桶的参数,对于n + 1个分裂,有n个桶
*
* A bucket defined by splits x,y holds values in the range [x,y) except the last bucket, which
* also includes y. Splits should be strictly increasing.
* 由拆分x,y定义的存储区包含除最后一个存储区之外的[x,y]范围内的值,
* 由拆分x,y定义的存储区保存除最后一个存储区之外的[x,y]范围内的值,
* Values at -inf, inf must be explicitly provided to cover all Double values;
* otherwise, values outside the splits specified will be treated as errors.
* 必须显式提供-inf,inf处的值以涵盖所有Double值;否则,指定的拆分之外的值将被视为错误。
* @group param
*/
val splits: DoubleArrayParam = new DoubleArrayParam(this, "splits",
"Split points for mapping continuous features into buckets. With n+1 splits, there are n " +
"buckets. A bucket defined by splits x,y holds values in the range [x,y) except the last " +
"bucket, which also includes y. The splits should be strictly increasing. " +
"Values at -inf, inf must be explicitly provided to cover all Double values; " +
"otherwise, values outside the splits specified will be treated as errors.",
Bucketizer.checkSplits)
/** @group getParam */
def getSplits: Array[Double] = $(splits)
/** @group setParam */
def setSplits(value: Array[Double]): this.type = set(splits, value)
/** @group setParam */
def setInputCol(value: String): this.type = set(inputCol, value)
/** @group setParam */
def setOutputCol(value: String): this.type = set(outputCol, value)
override def transform(dataset: DataFrame): DataFrame = {
transformSchema(dataset.schema)
val bucketizer = udf { feature: Double =>
Bucketizer.binarySearchForBuckets($(splits), feature)
}
val newCol = bucketizer(dataset($(inputCol)))
val newField = prepOutputField(dataset.schema)
dataset.withColumn($(outputCol), newCol.as($(outputCol), newField.metadata))
}
private def prepOutputField(schema: StructType): StructField = {
val buckets = $(splits).sliding(2).map(bucket => bucket.mkString(", ")).toArray
val attr = new NominalAttribute(name = Some($(outputCol)), isOrdinal = Some(true),
values = Some(buckets))
attr.toStructField()
}
override def transformSchema(schema: StructType): StructType = {
SchemaUtils.checkColumnType(schema, $(inputCol), DoubleType)
SchemaUtils.appendColumn(schema, prepOutputField(schema))
}
override def copy(extra: ParamMap): Bucketizer = {
defaultCopy[Bucketizer](extra).setParent(parent)
}
}
private[feature] object Bucketizer {
/** We require splits to be of length >= 3 and to be in strictly increasing order.
* 我们要求拆分长度> = 3并且严格按顺序递增
* */
def checkSplits(splits: Array[Double]): Boolean = {
if (splits.length < 3) {
false
} else {
var i = 0
val n = splits.length - 1
while (i < n) {
if (splits(i) >= splits(i + 1)) return false
i += 1
}
true
}
}
/**
* Binary searching in several buckets to place each data point.
* 在几个桶中进行二进制搜索以放置每个数据点
* @throws SparkException if a feature is < splits.head or > splits.last
*/
def binarySearchForBuckets(splits: Array[Double], feature: Double): Double = {
if (feature == splits.last) {
splits.length - 2
} else {
val idx = ju.Arrays.binarySearch(splits, feature)
if (idx >= 0) {
idx
} else {
val insertPos = -idx - 1
if (insertPos == 0 || insertPos == splits.length) {
throw new SparkException(s"Feature value $feature out of Bucketizer bounds" +
s" [${splits.head}, ${splits.last}]. Check your features, or loosen " +
s"the lower/upper bound constraints.")
} else {
insertPos - 1
}
}
}
}
}
|
tophua/spark1.52
|
mllib/src/main/scala/org/apache/spark/ml/feature/Bucketizer.scala
|
Scala
|
apache-2.0
| 5,823 |
package ru.dgolubets.neo4s.internal.iteratee
import play.api.libs.iteratee.{Input, Iteratee, Step}
import scala.concurrent.Future
/**
* Helper to feed iteratees.
* Iteratee.feed method can return unfinished iteratee when it's async.
* But this one returns only when that iteratee reported with Cont.
*/
class IterateeFeeder[In, Mat](private var iteratee: Iteratee[In, Mat]){
import scala.concurrent.ExecutionContext.Implicits.global
private object syncRoot
@volatile
var wantsMore: Boolean = true
/**
* Feeds iteratee a value and signals when it's finished.
* @param input
* @return
*/
def feed(input: Input[In]): Future[Boolean] = syncRoot.synchronized {
iteratee.fold {
case Step.Cont(next) =>
iteratee = next(input)
iteratee.pureFold {
case Step.Cont(_) =>
true
case Step.Done(_, _) =>
false
case Step.Error(err, _) =>
sys.error(err)
}
case Step.Done(r, left) =>
Future.successful(false)
case Step.Error(err, left) =>
sys.error(err)
}.map { more =>
wantsMore = more
more
}
}
def feed(input: In): Future[Boolean] = feed(Input.El(input))
def finish(): Future[Mat] = {
feed(Input.EOF).flatMap { _ =>
iteratee.run
}
}
}
|
DGolubets/neo4s
|
src/main/scala/ru/dgolubets/neo4s/internal/iteratee/IterateeFeeder.scala
|
Scala
|
apache-2.0
| 1,324 |
package edu.gemini.pit.ui.view.submit
import edu.gemini.model.p1.immutable._
import edu.gemini.model.p1.submit.SubmitDestination.{Exchange, Ngo}
import edu.gemini.pit.ui.robot.ProblemRobot._
import edu.gemini.model.p1.submit.{SubmitResult, SubmitDestination}
import edu.gemini.model.p1.submit.SubmitResult.{Offline, ServiceError, Failure}
import scalaz._
import Scalaz._
object SubmitStatus {
def forProposal(p:ProposalClass, ps:List[Problem]):SubmitStatus = p match {
case _ if ps.map(_.severity).exists(s => s == Severity.Error || s == Severity.Todo) => Incomplete
case _ if p.key.isEmpty => Ready
case q:QueueProposalClass => q.subs match {
case Left(ss) if ss.forall(_.response.isDefined) => Success
case Right(s) if s.response.isDefined => Success
case _ => Partial
}
case c:ClassicalProposalClass => c.subs match {
case Left(ss) if ss.forall(_.response.isDefined) => Success
case Right(s) if s.response.isDefined => Success
case _ => Partial
}
case e:ExchangeProposalClass if e.subs.forall(_.response.isDefined) => Success
case s:SpecialProposalClass if s.sub.response.isDefined => Success
case l:LargeProgramClass if l.sub.response.isDefined => Success
case f:FastTurnaroundProgramClass if f.sub.response.isDefined => Success
case _ => Partial
}
def destinationName(destination: Option[SubmitDestination]) = destination.map(d => d.destinationName).getOrElse("Unknown")
def semesterName(semester: Option[Semester]) = semester.map(s => s.display).getOrElse("Unknown")
def nonCompliantDestination(destination:Option[SubmitDestination], pc: Option[ProposalClass]) = (destination, pc).bisequence[Option, SubmitDestination, ProposalClass].collect {
case (Ngo(p), _: QueueProposalClass) => s"Queue proposals to ${destinationName(destination)}"
case (Ngo(p), _: ClassicalProposalClass) => s"Classical proposals to ${destinationName(destination)}"
case (Ngo(p), _: ExchangeProposalClass) => s"Exchange proposals to ${destinationName(destination)}"
case (Exchange(p), _) => s"Exchange proposals to ${destinationName(destination)}"
case (d:SubmitDestination, _) => s"${destinationName(destination)} proposals"
case _ => s"proposals to ${destinationName(destination)}" // fallback
}
def nonCompliantBackend(destination: Option[SubmitDestination], pc: Option[ProposalClass], year: String, semester: String, version: String) = s"PIT version $year$semester$version is required to submit ${~nonCompliantDestination(destination, pc)}"
def offlineBackend(destination: Option[SubmitDestination]) = s"There is a connection problem with the ${destinationName(destination)} backend server. Please check your network connection and/or try again later."
def genericError(destination: Option[SubmitDestination]) = s"The ${destinationName(destination)} backend server returned an unexpected result. Please try again later or submit a Helpdesk ticket at http://www.gemini.edu/sciops/helpdesk/."
def lpSubmissionClosed(destination: Option[SubmitDestination]) = s"""The ${destinationName(destination)} proposal server is currently closed. Please see the Gemini web pages for proposal submission dates. Note that Large & Long programs are only accepted for "B" semesters."""
def submissionClosed(destination: Option[SubmitDestination]) = s"The ${destinationName(destination)} proposal server is currently closed. Please see the Gemini web pages for proposal submission dates"
private val VersionMismatchRegex = """.* (\\d\\d\\d\\d)([A|B]) \\d{4}(\\.[\\d\\.]*).*""".r // Extract semester and version from the backend error message
def parseServiceError405(destination: Option[SubmitDestination], pc: Option[ProposalClass], message: String) = message match {
case VersionMismatchRegex(year, sem, version) => nonCompliantBackend(destination, pc, year, sem, version)
case m => m
}
def msg(results: Seq[SubmitResult]):Seq[String] = results.collect {
case ServiceError(destination, pc, code, message) if code == 405 => parseServiceError405(destination, pc, message)
case ServiceError(d @ Some(SubmitDestination.LargeProgram), _, code, message) if code == 401 => lpSubmissionClosed(d)
case ServiceError(destination, _, code, message) if code == 401 => submissionClosed(destination)
case ServiceError(destination, _, _, _) => genericError(destination)
case Offline(destination) => offlineBackend(destination)
case f: Failure => genericError(f.destination)
}
}
sealed trait SubmitStatus {
def title:String
def description:String
}
case object Incomplete extends SubmitStatus {
val title = "Incomplete"
val description =
"This proposal has problems or to-do tasks that must be corrected prior to submission (see the Problems view below)."
}
case object Ready extends SubmitStatus {
val title = "Ready"
val description =
"This proposal is ready for submission. Please double-check your PDF attachment and the generated cover material; " +
"once submitted, this proposal will be locked (but you will be able to open an editable copy)."
}
case object Partial extends SubmitStatus {
val title = "Partially Submitted"
val description =
"Submission failed for at least one partner. Please wait a while and try again later. This proposal is locked (but " +
"you can open an editable copy)."
}
case object Success extends SubmitStatus {
val title = "Successfully Received"
val description =
"The proposal has been successfully received, no confirmation emails will be sent. The proposal is locked but you can open an editable copy."
}
|
arturog8m/ocs
|
bundle/edu.gemini.pit/src/main/scala/edu/gemini/pit/ui/view/submit/SubmitStatus.scala
|
Scala
|
bsd-3-clause
| 6,311 |
/*
* MUSIT is a museum database to archive natural and cultural history data.
* Copyright (C) 2016 MUSIT Norway, part of www.uio.no (University of Oslo)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License,
* or any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package modules
import akka.actor.ActorSystem
import akka.stream.Materializer
import com.google.inject.{AbstractModule, Inject, Provider}
import net.codingwell.scalaguice.{ScalaModule, ScalaMultibinder}
import no.uio.musit.healthcheck.{HealthCheck, HealthCheckDao, ZabbixExecutor}
import no.uio.musit.service.BuildInfo
import play.api.{Configuration, Environment}
class HealthCheckModule extends AbstractModule with ScalaModule {
override def configure(): Unit = {
val healthChecks = ScalaMultibinder.newSetBinder[HealthCheck](binder)
healthChecks.addBinding.to[HealthCheckDao]
bind[ZabbixExecutor].toProvider(classOf[ZabbixExecutorProvider]).asEagerSingleton()
}
}
class ZabbixExecutorProvider @Inject()(
implicit environment: Environment,
configuration: Configuration,
healthChecks: Set[HealthCheck],
actorSystem: ActorSystem,
materializer: Materializer
) extends Provider[ZabbixExecutor] {
override def get() = ZabbixExecutor(
BuildInfo.name,
s"api/barcode/routes.HealthCheckController.healthCheck().url",
healthChecks,
environment.mode,
configuration
)
}
|
kpmeen/musit
|
service_barcode/app/modules/HealthCheckModule.scala
|
Scala
|
gpl-2.0
| 2,003 |
package demo
import org.scalajs.dom._
/*
* Copyright (C) 24/08/16 // [email protected]
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import org.scalajs.dom.Element
import scaladget.bootstrapnative.bsn._
import scaladget.tools._
import org.scalajs.dom.raw._
import scalatags.JsDom.all._
object LabelDemo {
val sc = sourcecode.Text {
import rx._
val hovered = Var("None")
val labelStyle: ModifierSeq = Seq(
marginTop := 20
)
def overAction(tag: String) = onmouseover := { () => hovered() = tag }
div(row)(
label("Default", label_default, overAction("default")).size4(labelStyle),
label("Primary", label_primary, overAction("primary")).size4(labelStyle),
label("Info", label_info, overAction("info")).size4(labelStyle),
label("Success", label_success, overAction("success")).size4(labelStyle),
label("Warning", label_warning, overAction("warning")).size5(labelStyle),
label("Danger", label_danger, overAction("danger")).size6(labelStyle),
Rx{
div(paddingTop := 15, s"Hovered: ${hovered()}")
}
).render
}
val elementDemo = new ElementDemo {
def title: String = "Label"
def code: String = sc.source
def element: Element = sc.value
}
}
|
mathieuleclaire/scaladget
|
demo/src/main/scala/demo/LabelDemo.scala
|
Scala
|
agpl-3.0
| 1,887 |
package unit.akka
import scala.collection.immutable
import scala.concurrent.duration.DurationInt
import scala.util.Random
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Matchers
import org.scalatest.WordSpecLike
import com.typesafe.config.ConfigFactory
import akka.actor.Actor
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.Props
import akka.actor.actorRef2Scala
import akka.testkit.DefaultTimeout
import akka.testkit.ImplicitSender
import akka.testkit.TestActors
import akka.testkit.TestKit
/**
* a Test to show some TestKit examples
*/
class TestKitUsageSpec extends TestKit(ActorSystem("TestKitUsageSpec", ConfigFactory.parseString(TestKitUsageSpec.config)))
with DefaultTimeout with ImplicitSender
with WordSpecLike with Matchers with BeforeAndAfterAll {
import TestKitUsageSpec._
val echoRef = system.actorOf(TestActors.echoActorProps)
val forwardRef = system.actorOf(Props(classOf[ForwardingActor], testActor))
val filterRef = system.actorOf(Props(classOf[FilteringActor], testActor))
val randomHead = Random.nextInt(6)
val randomTail = Random.nextInt(10)
val headList = immutable.Seq().padTo(randomHead, "0")
val tailList = immutable.Seq().padTo(randomTail, "1")
val seqRef =
system.actorOf(Props(classOf[SequencingActor], testActor, headList, tailList))
override def afterAll {
shutdown()
}
"An EchoActor" should {
"Respond with the same message it receives" in {
within(500 millis) {
echoRef ! "test"
expectMsg("test")
}
}
}
"A ForwardingActor" should {
"Forward a message it receives" in {
within(500 millis) {
forwardRef ! "test"
expectMsg("test")
}
}
}
"A FilteringActor" should {
"Filter all messages, except expected messagetypes it receives" in {
var messages = Seq[String]()
within(500 millis) {
filterRef ! "test"
expectMsg("test")
filterRef ! 1
expectNoMsg
filterRef ! "some"
filterRef ! "more"
filterRef ! 1
filterRef ! "text"
filterRef ! 1
receiveWhile(500 millis) {
case msg: String => messages = msg +: messages
}
}
messages.length should be(3)
messages.reverse should be(Seq("some", "more", "text"))
}
}
"A SequencingActor" should {
"receive an interesting message at some point " in {
within(500 millis) {
ignoreMsg {
case msg: String => msg != "something"
}
seqRef ! "something"
expectMsg("something")
ignoreMsg {
case msg: String => msg == "1"
}
expectNoMsg
ignoreNoMsg
}
}
}
}
object TestKitUsageSpec {
// Define your test specific configuration here
val config = """
akka {
loglevel = "WARNING"
}
"""
/**
* An Actor that forwards every message to a next Actor
*/
class ForwardingActor(next: ActorRef) extends Actor {
def receive = {
case msg => next ! msg
}
}
/**
* An Actor that only forwards certain messages to a next Actor
*/
class FilteringActor(next: ActorRef) extends Actor {
def receive = {
case msg: String => next ! msg
case _ => None
}
}
/**
* An actor that sends a sequence of messages with a random head list, an
* interesting value and a random tail list. The idea is that you would
* like to test that the interesting value is received and that you cant
* be bothered with the rest
*/
class SequencingActor(next: ActorRef, head: immutable.Seq[String],
tail: immutable.Seq[String]) extends Actor {
def receive = {
case msg => {
head foreach { next ! _ }
next ! msg
tail foreach { next ! _ }
}
}
}
}
|
nidkil/akka-downloader
|
src/test/scala/unit/akka/TestkitExample.scala
|
Scala
|
apache-2.0
| 3,807 |
package net.rrm.ehour.persistence.retry
import java.util.Random
import org.apache.log4j.Logger
import org.hibernate.HibernateException
object ExponentialBackoffRetryPolicy {
private final val MaxAttempts = 5
private final val Log = Logger.getLogger(ExponentialBackoffRetryPolicy.getClass)
def retry[T](op: => T, maxAttempts:Int = MaxAttempts) = {
def execOrWait(waitTimes: Seq[Int], lastException: Option[HibernateException]): T = {
if (waitTimes.nonEmpty) {
try {
op
} catch {
case e: HibernateException =>
val waitTime = waitTimes.head
val attempt = 1 + (maxAttempts - waitTimes.length)
Log.warn(s"Attempt $attempt: Failed to query, sleeping for $waitTime ms", e)
Thread.sleep(waitTime)
execOrWait(waitTimes.tail, Some(e))
}
} else {
lastException match {
case Some(e) =>
Log.warn("Giving up on query, max. attempts reached")
throw new RuntimeException(e)
case None => throw new RuntimeException("No waittimes found")
}
}
}
val random = new Random()
val waitTimes = for (i <- 1 to maxAttempts) yield { 1000 * Math.max(1, random.nextInt(1 << i)) }
execOrWait(waitTimes, None)
}
}
|
momogentoo/ehour
|
eHour-persistence/src/main/scala/net/rrm/ehour/persistence/retry/ExponentialBackoffRetryPolicy.scala
|
Scala
|
gpl-2.0
| 1,298 |
package com.eigengo.lift.exercise
import java.nio.ByteBuffer
import scodec.bits.{BitVector, ByteOrdering, ByteVector}
import scalaz.\/
/**
* The multi packet message follows a simple structure
*
* {{{
* header: UInt16 = 0xcab1 // + 2 B
* count: Byte = ... // + 3 B
* timestamp: UInt32 = ... // + 7 B
* ===
* size0: UInt16 = ... // + 9 B
* sloc0: Byte = ... // + 10 B
* data0: Array[Byte] = ...
* size1: UInt16 = ...
* sloc1: Byte = ...
* data1: Array[Byte] = ...
* ...
* sizen: UInt16 = ...
* slocn: Byte = ...
* datan: Array[Byte] = ...
* }}}
*/
object MultiPacketDecoder {
private val header = 0xcab1.toShort
def decodeShort(b0: Byte, b1: Byte): Int = {
ByteVector(b0, b1).toInt(signed = false, ordering = ByteOrdering.BigEndian)
}
def decodeUInt32(b0: Byte, b1: Byte, b2: Byte, b3: Byte): Long = {
ByteVector(b0, b1, b2, b3).toLong(signed = false, ordering = ByteOrdering.BigEndian)
}
def decodeSensorDataSourceLocation(sloc: Byte): String \/ SensorDataSourceLocation = sloc match {
case 0x01 ⇒ \/.right(SensorDataSourceLocationWrist)
case 0x02 ⇒ \/.right(SensorDataSourceLocationWaist)
case 0x03 ⇒ \/.right(SensorDataSourceLocationChest)
case 0x04 ⇒ \/.right(SensorDataSourceLocationFoot)
case 0x7f ⇒ \/.right(SensorDataSourceLocationAny)
case x ⇒ \/.left(s"Unknown sensor data source location $x")
}
def decode(input: ByteBuffer): String \/ MultiPacket = {
if (input.limit() < 10) \/.left("No viable input: size < 10.")
else {
val inputHeader = input.getShort
if (inputHeader != header) \/.left(s"Incorrect header. Expected $header, got $inputHeader.")
else {
val count = input.get()
val timestamp = decodeUInt32(input.get(), input.get(), input.get(), input.get())
if (count == 0) \/.left("No content.")
else {
val (h :: t) = (0 until count).toList.map { x ⇒
if (input.position() + 3 >= input.limit()) \/.left(s"Incomplete or truncated input. (Header of packet $x.)")
else {
val size = decodeShort(input.get, input.get)
if (input.position() + size >= input.limit()) \/.left(s"Incomplete or truncated input. ($size bytes payload of packet $x.)")
else {
val sloc = decodeSensorDataSourceLocation(input.get)
val buf = input.slice().limit(size).asInstanceOf[ByteBuffer]
input.position(input.position() + size)
sloc.map(sloc ⇒ PacketWithLocation(sloc, BitVector(buf)))
}
}
}
t.foldLeft(h.map(MultiPacket.single(timestamp)))((r, b) ⇒ r.flatMap(mp ⇒ b.map(mp.withNewPacket)))
}
}
}
}
}
|
lachatak/lift
|
server/exercise/src/main/scala/com/eigengo/lift/exercise/MultiPacketDecoder.scala
|
Scala
|
apache-2.0
| 2,851 |
package com.github.bhanafee.whereami
import scala.concurrent.duration._
import akka.actor._
import akka.pattern.ask
import spray.routing._
import spray.http.StatusCodes
import spray.httpx.SprayJsonSupport._
import spray.routing.RequestContext
import TrackerProtocol._
class RestInterface(val gis: ActorRef, val tracker: ActorRef, val apiTimeout: Duration) extends HttpServiceActor with RestApi {
def receive = runRoute(routes)
}
trait RestApi extends HttpService with ActorLogging { actor: Actor =>
import context.dispatcher
val gis: ActorRef
val tracker: ActorRef
val apiTimeout: Duration
val responder: RequestContext=>ActorRef = rc => context.actorOf(Props(classOf[Responder], rc, apiTimeout))
val getLatLon = get & parameters('latitude.as[Degrees], 'longitude.as[Degrees])
def routes: Route =
(path("tags") & getLatLon) { (lat, lon) => ctx =>
gis.tell(Tag(Point(lat, lon)), responder(ctx))
} ~
(path("nearest") & getLatLon) { (lat, lon) => ctx =>
gis.tell(FindNearest(Point(lat, lon)), responder(ctx))
} ~
(path("track" / PathElement / "checkin") & get) { (device) => ctx =>
tracker ! Checkin(device, new Timestamp())
ctx.complete(StatusCodes.OK)
}~
(path("track" / PathElement) & getLatLon) { (device, lat, lon) => ctx =>
tracker.tell(Report(device, Position(Point(lat, lon), new Timestamp())), responder(ctx))
} ~
(path("track" / PathElement) & entity(as[Position])) { (device: DeviceId, position: Position) => ctx =>
tracker.tell(Report(device, position), responder(ctx))
}
}
class Responder(requestContext: RequestContext, apiTimeout: Duration) extends Actor with ActorLogging {
context.setReceiveTimeout(apiTimeout)
def receive = {
case tags: Tagged =>
requestContext.complete(StatusCodes.OK, tags)
self ! PoisonPill
case nearest: Nearest =>
requestContext.complete(StatusCodes.OK, nearest)
self ! PoisonPill
case boundary: Boundary =>
requestContext.complete(StatusCodes.OK, boundary)
self ! PoisonPill
case ReceiveTimeout =>
log.warning("Timeout on responder")
requestContext.complete(StatusCodes.ServiceUnavailable)
self ! PoisonPill
}
}
|
bhanafee/whereami
|
app/src/main/scala/com/github/bhanafee/whereami/RestInterface.scala
|
Scala
|
apache-2.0
| 2,234 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.