code | repo_name | path | language | license | size
---|---|---|---|---|---
package puzzle2016.q
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import CoinJam._
/**
* Created by pascalabaziou on 16/03/2017.
*/
@RunWith(classOf[JUnitRunner])
class CoinJamTest extends FunSuite {
test("isPrime is ok") { // calculates only for odd numbers
assert(divisor(1) === 1)
assert(divisor(2) === 2)
assert(divisor(3) === 3)
assert(divisor(5) === 5)
assert(divisor(7) === 7)
assert(divisor(9) === 3)
assert(divisor(11) === 11)
assert(divisor(13) === 13)
assert(divisor(15) === 3)
assert(divisor(17) === 17)
assert(divisor(19) === 19)
assert(divisor(21) === 3)
}
test("Coin Jam is ok") {
listJam(6,3)
}
}
| javathought/CodeJam | src/test/scala/puzzle2016/q/CoinJamTest.scala | Scala | apache-2.0 | 748 |
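The test above exercises CoinJam.divisor without showing its implementation. As a rough illustration only (the CoinJam object itself is not part of this dump, and the name and signature used here are assumptions), a trial-division helper consistent with the assertions could look like this:

// Hedged sketch, not taken from the repository above: returns the smallest
// divisor greater than 1 for composite n, and n itself for primes (with
// divisor(1) == 1 and divisor(2) == 2, as the first assertions expect).
object DivisorSketch {
  def divisor(n: Int): Int =
    if (n < 4) n
    else (2 to math.sqrt(n.toDouble).toInt).find(n % _ == 0).getOrElse(n)
}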
package varys.framework.master
import akka.actor.ActorRef
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
import varys.util.BpsInfo
private[varys] class SlaveInfo(
val id: String,
val host: String,
val port: Int,
val actor: ActorRef,
val webUiPort: Int,
val commPort: Int,
val publicAddress: String) {
var state: SlaveState.Value = SlaveState.ALIVE
var lastHeartbeat = System.currentTimeMillis()
val rxBpsInfo = new BpsInfo()
val txBpsInfo = new BpsInfo()
def rxBps = rxBpsInfo.getBps
def txBps = txBpsInfo.getBps
def webUiAddress : String = {
"http://" + this.publicAddress + ":" + this.webUiPort
}
def setState(state: SlaveState.Value) = {
this.state = state
}
def updateNetworkStats(newRxBps: Double, newTxBps: Double) = {
rxBpsInfo.update(newRxBps)
txBpsInfo.update(newTxBps)
}
var numCoflows = 0
var coflowIds: Array[Int] = null
var sizes: Array[Long] = null
var flows: Array[Array[String]] = null
def updateCoflows(
coflowIds_ : Array[Int],
sizes_ : Array[Long],
flows_ : Array[Array[String]]) {
numCoflows = coflowIds_.size
coflowIds = coflowIds_
sizes = sizes_
flows = flows_
}
var lastSchedule: String = null
def sameAsLastSchedule(newCoflowOrder: String, newSchedule: HashSet[String]): Boolean = {
val ns = newCoflowOrder + " <> " + scala.util.Sorting.stableSort(newSchedule.toBuffer).mkString("|")
if (lastSchedule == null) {
lastSchedule = ns
return true
}
val retVal = (lastSchedule == ns)
lastSchedule = ns
retVal
}
override def toString: String = "SlaveInfo(" + id + "[" + host + ":" + port + "]:" + state + ")"
}
| coflow/aalo | core/src/main/scala/varys/framework/master/SlaveInfo.scala | Scala | apache-2.0 | 1,711 |
package chrome.webRequest.bindings
import scala.scalajs.js
@js.native
trait WebAuthenticationChallengeDetails extends WebResponseHeadersDetails {
/**
* The authentication scheme, e.g. Basic or Digest.
*/
val scheme: String = js.native
/**
* The authentication realm provided by the server, if there is one.
*/
val realm: js.UndefOr[String] = js.native
/**
* The server requesting authentication.
*/
val challenger: WebAuthChallenger = js.native
/**
* True for Proxy-Authenticate, false for WWW-Authenticate.
*/
val isProxy: Boolean = js.native
}
| lucidd/scala-js-chrome | bindings/src/main/scala/chrome/webRequest/bindings/WebAuthenticationChallengeDetails.scala | Scala | mit | 599 |
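The facade above only declares the shape of the authentication challenge details that chrome.webRequest hands to extensions. A minimal sketch of reading those fields from Scala.js follows; the helper name is hypothetical, and the details value is assumed to arrive from an onAuthRequired listener that is not part of the binding shown:

// Hedged sketch, not part of the binding file above: summarising a
// WebAuthenticationChallengeDetails instance received from the browser.
def describeChallenge(details: WebAuthenticationChallengeDetails): String = {
  val realm = details.realm.getOrElse("<no realm>")        // realm is optional
  val target = if (details.isProxy) "proxy" else "server"  // Proxy- vs WWW-Authenticate
  s"${details.scheme} challenge from the $target (realm: $realm)"
}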
package com.twitter.zipkin.aggregate
import java.util.Date
import com.twitter.algebird.{Moments, Monoid, Semigroup}
import com.twitter.scalding._
import com.twitter.util.Time
import com.twitter.zipkin.common.{Dependencies, DependencyLink, Service, Span}
final class ZipkinAggregateJob
(args: Args) extends Job(args)
{
val dateRange: DateRange = DateRange(new Date(0L), new Date)
@transient
val (extraConfig, spanSource) = SpanSourceProvider(args)
override def config = super.config ++ extraConfig
val allSpans = TypedPipe.from(spanSource)
.groupBy { span: Span => (span.id, span.traceId) }
.reduce { (s1, s2) => s1.mergeSpan(s2) }
.filter { case (key, span) => span.isValid }
val parentSpans = allSpans
.group
val childSpans = allSpans
.filter { case (key, span) => span.parentId.isDefined }
.map { case (key, span) => ((span.parentId.get, span.traceId), span)}
.group
val result = parentSpans.join(childSpans)
.map { case (_,(parent: Span,child: Span)) =>
val moments = child.duration.map { d => Moments(d.toDouble) }.getOrElse(Monoid.zero[Moments])
val dlink = DependencyLink(Service(parent.serviceName.get), Service(child.serviceName.get), moments)
((parent.serviceName.get, child.serviceName.get), dlink)
}
.group
.sum
.values
.map { dlink => Dependencies(Time.fromMilliseconds(dateRange.start.timestamp), Time.fromMilliseconds(dateRange.end.timestamp), Seq(dlink))}
.sum
result.write(spanSource)
}
object SpanSourceProvider {
def apply(args: Args) : (Map[AnyRef,AnyRef], Source with TypedSource[Span] with TypedSink[Dependencies]) = args.required("source") match {
case "cassandra" => {
(Map("hosts" -> args.required("hosts"), "port" -> args.getOrElse("port", "9160")), new cassandra.SpanSource)
}
case s:String => throw new ArgsException(s+" is not an implemented source.")
}
}
| cogitate/twitter-zipkin-uuid | zipkin-aggregate/src/main/scala/com/twitter/zipkin/aggregate/ZipkinAggregateJob.scala | Scala | apache-2.0 | 1,903 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.thriftserver
import java.io.{File, FilenameFilter}
import java.net.URL
import java.nio.charset.StandardCharsets
import java.sql.{Date, DriverManager, SQLException, Statement}
import java.util.{Locale, UUID}
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.concurrent.duration._
import scala.io.Source
import scala.util.{Random, Try}
import com.google.common.io.Files
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.apache.hive.jdbc.HiveDriver
import org.apache.hive.service.auth.PlainSaslHelper
import org.apache.hive.service.cli.{FetchOrientation, FetchType, GetInfoType, RowSet}
import org.apache.hive.service.cli.thrift.ThriftCLIServiceClient
import org.apache.thrift.protocol.TBinaryProtocol
import org.apache.thrift.transport.TSocket
import org.scalatest.BeforeAndAfterAll
import org.apache.spark.{SparkException, SparkFunSuite}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.hive.HiveUtils
import org.apache.spark.sql.hive.test.HiveTestJars
import org.apache.spark.sql.internal.StaticSQLConf.HIVE_THRIFT_SERVER_SINGLESESSION
import org.apache.spark.sql.test.ProcessTestUtils.ProcessOutputCapturer
import org.apache.spark.util.{ThreadUtils, Utils}
object TestData {
def getTestDataFilePath(name: String): URL = {
Thread.currentThread().getContextClassLoader.getResource(s"data/files/$name")
}
val smallKv = getTestDataFilePath("small_kv.txt")
val smallKvWithNull = getTestDataFilePath("small_kv_with_null.txt")
}
class HiveThriftBinaryServerSuite extends HiveThriftJdbcTest {
override def mode: ServerMode.Value = ServerMode.binary
private def withCLIServiceClient(f: ThriftCLIServiceClient => Unit): Unit = {
// Transport creation logic below mimics HiveConnection.createBinaryTransport
val rawTransport = new TSocket("localhost", serverPort)
val user = System.getProperty("user.name")
val transport = PlainSaslHelper.getPlainTransport(user, "anonymous", rawTransport)
val protocol = new TBinaryProtocol(transport)
val client = new ThriftCLIServiceClient(new ThriftserverShimUtils.Client(protocol))
transport.open()
try f(client) finally transport.close()
}
test("GetInfo Thrift API") {
withCLIServiceClient { client =>
val user = System.getProperty("user.name")
val sessionHandle = client.openSession(user, "")
assertResult("Spark SQL", "Wrong GetInfo(CLI_DBMS_NAME) result") {
client.getInfo(sessionHandle, GetInfoType.CLI_DBMS_NAME).getStringValue
}
assertResult("Spark SQL", "Wrong GetInfo(CLI_SERVER_NAME) result") {
client.getInfo(sessionHandle, GetInfoType.CLI_SERVER_NAME).getStringValue
}
assertResult(true, "Spark version shouldn't be \"Unknown\"") {
val version = client.getInfo(sessionHandle, GetInfoType.CLI_DBMS_VER).getStringValue
logInfo(s"Spark version: $version")
version != "Unknown"
}
}
}
test("SPARK-16563 ThriftCLIService FetchResults repeat fetching result") {
withCLIServiceClient { client =>
val user = System.getProperty("user.name")
val sessionHandle = client.openSession(user, "")
withJdbcStatement("test_16563") { statement =>
val queries = Seq(
"CREATE TABLE test_16563(key INT, val STRING) USING hive",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test_16563")
queries.foreach(statement.execute)
val confOverlay = new java.util.HashMap[java.lang.String, java.lang.String]
val operationHandle = client.executeStatement(
sessionHandle,
"SELECT * FROM test_16563",
confOverlay)
// Fetch result first time
assertResult(5, "Fetching result first time from next row") {
val rows_next = client.fetchResults(
operationHandle,
FetchOrientation.FETCH_NEXT,
1000,
FetchType.QUERY_OUTPUT)
rows_next.numRows()
}
// Fetch result second time from first row
assertResult(5, "Repeat fetching result from first row") {
val rows_first = client.fetchResults(
operationHandle,
FetchOrientation.FETCH_FIRST,
1000,
FetchType.QUERY_OUTPUT)
rows_first.numRows()
}
}
}
}
test("Support beeline --hiveconf and --hivevar") {
withJdbcStatement() { statement =>
executeTest(hiveConfList)
executeTest(hiveVarList)
def executeTest(hiveList: String): Unit = {
hiveList.split(";").foreach{ m =>
val kv = m.split("=")
val k = kv(0)
val v = kv(1)
val modValue = s"${v}_MOD_VALUE"
// select '${a}'; ---> avalue
val resultSet = statement.executeQuery(s"select '$${$k}'")
resultSet.next()
assert(resultSet.getString(1) === v)
statement.executeQuery(s"set $k=$modValue")
val modResultSet = statement.executeQuery(s"select '$${$k}'")
modResultSet.next()
assert(modResultSet.getString(1) === s"$modValue")
}
}
}
}
test("JDBC query execution") {
withJdbcStatement("test") { statement =>
val queries = Seq(
"SET spark.sql.shuffle.partitions=3",
"CREATE TABLE test(key INT, val STRING) USING hive",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test",
"CACHE TABLE test")
queries.foreach(statement.execute)
assertResult(5, "Row count mismatch") {
val resultSet = statement.executeQuery("SELECT COUNT(*) FROM test")
resultSet.next()
resultSet.getInt(1)
}
}
}
test("Checks Hive version") {
withJdbcStatement() { statement =>
val resultSet = statement.executeQuery("SET spark.sql.hive.version")
resultSet.next()
assert(resultSet.getString(1) === "spark.sql.hive.version")
assert(resultSet.getString(2) === HiveUtils.builtinHiveVersion)
}
}
test("SPARK-3004 regression: result set containing NULL") {
withJdbcStatement("test_null") { statement =>
val queries = Seq(
"CREATE TABLE test_null(key INT, val STRING) USING hive",
s"LOAD DATA LOCAL INPATH '${TestData.smallKvWithNull}' OVERWRITE INTO TABLE test_null")
queries.foreach(statement.execute)
val resultSet = statement.executeQuery("SELECT * FROM test_null WHERE key IS NULL")
(0 until 5).foreach { _ =>
resultSet.next()
assert(resultSet.getInt(1) === 0)
assert(resultSet.wasNull())
}
assert(!resultSet.next())
}
}
test("SPARK-4292 regression: result set iterator issue") {
withJdbcStatement("test_4292") { statement =>
val queries = Seq(
"CREATE TABLE test_4292(key INT, val STRING) USING hive",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test_4292")
queries.foreach(statement.execute)
val resultSet = statement.executeQuery("SELECT key FROM test_4292")
Seq(238, 86, 311, 27, 165).foreach { key =>
resultSet.next()
assert(resultSet.getInt(1) === key)
}
}
}
test("SPARK-4309 regression: Date type support") {
withJdbcStatement("test_date") { statement =>
val queries = Seq(
"CREATE TABLE test_date(key INT, value STRING) USING hive",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test_date")
queries.foreach(statement.execute)
assertResult(Date.valueOf("2011-01-01")) {
val resultSet = statement.executeQuery(
"SELECT CAST('2011-01-01' as date) FROM test_date LIMIT 1")
resultSet.next()
resultSet.getDate(1)
}
}
}
test("SPARK-4407 regression: Complex type support") {
withJdbcStatement("test_map") { statement =>
val queries = Seq(
"CREATE TABLE test_map(key INT, value STRING) USING hive",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test_map")
queries.foreach(statement.execute)
assertResult("""{238:"val_238"}""") {
val resultSet = statement.executeQuery("SELECT MAP(key, value) FROM test_map LIMIT 1")
resultSet.next()
resultSet.getString(1)
}
assertResult("""["238","val_238"]""") {
val resultSet = statement.executeQuery(
"SELECT ARRAY(CAST(key AS STRING), value) FROM test_map LIMIT 1")
resultSet.next()
resultSet.getString(1)
}
}
}
test("SPARK-12143 regression: Binary type support") {
withJdbcStatement("test_binary") { statement =>
val queries = Seq(
"CREATE TABLE test_binary(key INT, value STRING) USING hive",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test_binary")
queries.foreach(statement.execute)
val expected: Array[Byte] = "val_238".getBytes
assertResult(expected) {
val resultSet = statement.executeQuery(
"SELECT CAST(value as BINARY) FROM test_binary LIMIT 1")
resultSet.next()
resultSet.getObject(1)
}
}
}
test("test multiple session") {
import org.apache.spark.sql.internal.SQLConf
var defaultV1: String = null
var defaultV2: String = null
var data: ArrayBuffer[Int] = null
withMultipleConnectionJdbcStatement("test_map", "db1.test_map2")(
// create table
{ statement =>
val queries = Seq(
"CREATE TABLE test_map(key INT, value STRING) USING hive",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test_map",
"CACHE TABLE test_table AS SELECT key FROM test_map ORDER BY key DESC",
"CREATE DATABASE db1")
queries.foreach(statement.execute)
val plan = statement.executeQuery("explain select * from test_table")
plan.next()
plan.next()
assert(plan.getString(1).contains("Scan In-memory table `test_table`"))
val rs1 = statement.executeQuery("SELECT key FROM test_table ORDER BY KEY DESC")
val buf1 = new collection.mutable.ArrayBuffer[Int]()
while (rs1.next()) {
buf1 += rs1.getInt(1)
}
rs1.close()
val rs2 = statement.executeQuery("SELECT key FROM test_map ORDER BY KEY DESC")
val buf2 = new collection.mutable.ArrayBuffer[Int]()
while (rs2.next()) {
buf2 += rs2.getInt(1)
}
rs2.close()
assert(buf1 === buf2)
data = buf1
},
// first session, we get the default value of the session status
{ statement =>
val rs1 = statement.executeQuery(s"SET ${SQLConf.SHUFFLE_PARTITIONS.key}")
rs1.next()
defaultV1 = rs1.getString(1)
assert(defaultV1 != "200")
rs1.close()
val rs2 = statement.executeQuery("SET hive.cli.print.header")
rs2.next()
defaultV2 = rs2.getString(1)
assert(defaultV1 != "true")
rs2.close()
},
// second session, we update the session status
{ statement =>
val queries = Seq(
s"SET ${SQLConf.SHUFFLE_PARTITIONS.key}=291",
"SET hive.cli.print.header=true"
)
queries.map(statement.execute)
val rs1 = statement.executeQuery(s"SET ${SQLConf.SHUFFLE_PARTITIONS.key}")
rs1.next()
assert("spark.sql.shuffle.partitions" === rs1.getString(1))
assert("291" === rs1.getString(2))
rs1.close()
val rs2 = statement.executeQuery("SET hive.cli.print.header")
rs2.next()
assert("hive.cli.print.header" === rs2.getString(1))
assert("true" === rs2.getString(2))
rs2.close()
},
// third session, we get the latest session status, supposed to be the
// default value
{ statement =>
val rs1 = statement.executeQuery(s"SET ${SQLConf.SHUFFLE_PARTITIONS.key}")
rs1.next()
assert(defaultV1 === rs1.getString(1))
rs1.close()
val rs2 = statement.executeQuery("SET hive.cli.print.header")
rs2.next()
assert(defaultV2 === rs2.getString(1))
rs2.close()
},
// try to access the cached data in another session
{ statement =>
// Cached temporary table can't be accessed by other sessions
intercept[SQLException] {
statement.executeQuery("SELECT key FROM test_table ORDER BY KEY DESC")
}
val plan = statement.executeQuery("explain select key from test_map ORDER BY key DESC")
plan.next()
plan.next()
assert(plan.getString(1).contains("Scan In-memory table `test_table`"))
val rs = statement.executeQuery("SELECT key FROM test_map ORDER BY KEY DESC")
val buf = new collection.mutable.ArrayBuffer[Int]()
while (rs.next()) {
buf += rs.getInt(1)
}
rs.close()
assert(buf === data)
},
// switch another database
{ statement =>
statement.execute("USE db1")
// there is no test_map table in db1
intercept[SQLException] {
statement.executeQuery("SELECT key FROM test_map ORDER BY KEY DESC")
}
statement.execute("CREATE TABLE test_map2(key INT, value STRING)")
},
// access default database
{ statement =>
// current database should still be `default`
intercept[SQLException] {
statement.executeQuery("SELECT key FROM test_map2")
}
statement.execute("USE db1")
// access test_map2
statement.executeQuery("SELECT key from test_map2")
}
)
}
// This test often hangs and then times out, leaving the hanging processes.
// Let's ignore it and improve the test.
ignore("test jdbc cancel") {
withJdbcStatement("test_map") { statement =>
val queries = Seq(
"CREATE TABLE test_map(key INT, value STRING)",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test_map")
queries.foreach(statement.execute)
implicit val ec = ExecutionContext.fromExecutorService(
ThreadUtils.newDaemonSingleThreadExecutor("test-jdbc-cancel"))
try {
// Start a very-long-running query that will take hours to finish, then cancel it in order
// to demonstrate that cancellation works.
val f = Future {
statement.executeQuery(
"SELECT COUNT(*) FROM test_map " +
List.fill(10)("join test_map").mkString(" "))
}
// Note that this is slightly race-prone: if the cancel is issued before the statement
// begins executing then we'll fail with a timeout. As a result, this fixed delay is set
// slightly more conservatively than may be strictly necessary.
Thread.sleep(1000)
statement.cancel()
val e = intercept[SparkException] {
ThreadUtils.awaitResult(f, 3.minute)
}.getCause
assert(e.isInstanceOf[SQLException])
assert(e.getMessage.contains("cancelled"))
// Cancellation is a no-op if spark.sql.hive.thriftServer.async=false
statement.executeQuery("SET spark.sql.hive.thriftServer.async=false")
try {
val sf = Future {
statement.executeQuery(
"SELECT COUNT(*) FROM test_map " +
List.fill(4)("join test_map").mkString(" ")
)
}
// Similarly, this is also slightly race-prone on fast machines where the query above
// might race and complete before we issue the cancel.
Thread.sleep(1000)
statement.cancel()
val rs1 = ThreadUtils.awaitResult(sf, 3.minute)
rs1.next()
assert(rs1.getInt(1) === math.pow(5, 5))
rs1.close()
val rs2 = statement.executeQuery("SELECT COUNT(*) FROM test_map")
rs2.next()
assert(rs2.getInt(1) === 5)
rs2.close()
} finally {
statement.executeQuery("SET spark.sql.hive.thriftServer.async=true")
}
} finally {
ec.shutdownNow()
}
}
}
test("test add jar") {
withMultipleConnectionJdbcStatement("smallKV", "addJar")(
{
statement =>
val jarFile = HiveTestJars.getHiveHcatalogCoreJar().getCanonicalPath
statement.executeQuery(s"ADD JAR $jarFile")
},
{
statement =>
val queries = Seq(
"CREATE TABLE smallKV(key INT, val STRING) USING hive",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE smallKV",
"""CREATE TABLE addJar(key string)
|ROW FORMAT SERDE 'org.apache.hive.hcatalog.data.JsonSerDe'
""".stripMargin)
queries.foreach(statement.execute)
statement.executeQuery(
"""
|INSERT INTO TABLE addJar SELECT 'k1' as key FROM smallKV limit 1
""".stripMargin)
val actualResult =
statement.executeQuery("SELECT key FROM addJar")
val actualResultBuffer = new collection.mutable.ArrayBuffer[String]()
while (actualResult.next()) {
actualResultBuffer += actualResult.getString(1)
}
actualResult.close()
val expectedResult =
statement.executeQuery("SELECT 'k1'")
val expectedResultBuffer = new collection.mutable.ArrayBuffer[String]()
while (expectedResult.next()) {
expectedResultBuffer += expectedResult.getString(1)
}
expectedResult.close()
assert(expectedResultBuffer === actualResultBuffer)
}
)
}
test("Checks Hive version via SET -v") {
withJdbcStatement() { statement =>
val resultSet = statement.executeQuery("SET -v")
val conf = mutable.Map.empty[String, String]
while (resultSet.next()) {
conf += resultSet.getString(1) -> resultSet.getString(2)
}
if (HiveUtils.isHive23) {
assert(conf.get(HiveUtils.FAKE_HIVE_VERSION.key) === Some("2.3.7"))
} else {
assert(conf.get(HiveUtils.FAKE_HIVE_VERSION.key) === Some("1.2.1"))
}
}
}
test("Checks Hive version via SET") {
withJdbcStatement() { statement =>
val resultSet = statement.executeQuery("SET")
val conf = mutable.Map.empty[String, String]
while (resultSet.next()) {
conf += resultSet.getString(1) -> resultSet.getString(2)
}
if (HiveUtils.isHive23) {
assert(conf.get(HiveUtils.FAKE_HIVE_VERSION.key) === Some("2.3.7"))
} else {
assert(conf.get(HiveUtils.FAKE_HIVE_VERSION.key) === Some("1.2.1"))
}
}
}
test("SPARK-11595 ADD JAR with input path having URL scheme") {
withJdbcStatement("test_udtf") { statement =>
try {
val jarPath = "../hive/src/test/resources/TestUDTF.jar"
val jarURL = s"file://${System.getProperty("user.dir")}/$jarPath"
Seq(
s"ADD JAR $jarURL",
s"""CREATE TEMPORARY FUNCTION udtf_count2
|AS 'org.apache.spark.sql.hive.execution.GenericUDTFCount2'
""".stripMargin
).foreach(statement.execute)
val rs1 = statement.executeQuery("DESCRIBE FUNCTION udtf_count2")
assert(rs1.next())
assert(rs1.getString(1) === "Function: udtf_count2")
assert(rs1.next())
assertResult("Class: org.apache.spark.sql.hive.execution.GenericUDTFCount2") {
rs1.getString(1)
}
assert(rs1.next())
assert(rs1.getString(1) === "Usage: N/A.")
val dataPath = "../hive/src/test/resources/data/files/kv1.txt"
Seq(
"CREATE TABLE test_udtf(key INT, value STRING) USING hive",
s"LOAD DATA LOCAL INPATH '$dataPath' OVERWRITE INTO TABLE test_udtf"
).foreach(statement.execute)
val rs2 = statement.executeQuery(
"SELECT key, cc FROM test_udtf LATERAL VIEW udtf_count2(value) dd AS cc")
assert(rs2.next())
assert(rs2.getInt(1) === 97)
assert(rs2.getInt(2) === 500)
assert(rs2.next())
assert(rs2.getInt(1) === 97)
assert(rs2.getInt(2) === 500)
} finally {
statement.executeQuery("DROP TEMPORARY FUNCTION udtf_count2")
}
}
}
test("SPARK-11043 check operation log root directory") {
val expectedLine =
"Operation log root directory is created: " + operationLogPath.getAbsoluteFile
val bufferSrc = Source.fromFile(logPath)
Utils.tryWithSafeFinally {
assert(bufferSrc.getLines().exists(_.contains(expectedLine)))
} {
bufferSrc.close()
}
}
test("SPARK-23547 Cleanup the .pipeout file when the Hive Session closed") {
def pipeoutFileList(sessionID: UUID): Array[File] = {
lScratchDir.listFiles(new FilenameFilter {
override def accept(dir: File, name: String): Boolean = {
name.startsWith(sessionID.toString) && name.endsWith(".pipeout")
}
})
}
withCLIServiceClient { client =>
val user = System.getProperty("user.name")
val sessionHandle = client.openSession(user, "")
val sessionID = sessionHandle.getSessionId
if (HiveUtils.isHive23) {
assert(pipeoutFileList(sessionID).length == 2)
} else {
assert(pipeoutFileList(sessionID).length == 1)
}
client.closeSession(sessionHandle)
assert(pipeoutFileList(sessionID).length == 0)
}
}
test("SPARK-24829 Checks cast as float") {
withJdbcStatement() { statement =>
val resultSet = statement.executeQuery("SELECT CAST('4.56' AS FLOAT)")
resultSet.next()
assert(resultSet.getString(1) === "4.56")
}
}
test("SPARK-28463: Thriftserver throws BigDecimal incompatible with HiveDecimal") {
withJdbcStatement() { statement =>
val rs = statement.executeQuery("SELECT CAST(1 AS decimal(38, 18))")
assert(rs.next())
assert(rs.getBigDecimal(1) === new java.math.BigDecimal("1.000000000000000000"))
}
}
test("Support interval type") {
withJdbcStatement() { statement =>
val rs = statement.executeQuery("SELECT interval 3 months 1 hours")
assert(rs.next())
assert(rs.getString(1) === "3 months 1 hours")
}
// Invalid interval value
withJdbcStatement() { statement =>
val e = intercept[SQLException] {
statement.executeQuery("SELECT interval 3 months 1 hou")
}
assert(e.getMessage.contains("org.apache.spark.sql.catalyst.parser.ParseException"))
}
}
test("ThriftCLIService FetchResults FETCH_FIRST, FETCH_NEXT, FETCH_PRIOR") {
def checkResult(rows: RowSet, start: Long, end: Long): Unit = {
assert(rows.getStartOffset() == start)
assert(rows.numRows() == end - start)
rows.iterator.asScala.zip((start until end).iterator).foreach { case (row, v) =>
assert(row(0).asInstanceOf[Long] === v)
}
}
withCLIServiceClient { client =>
val user = System.getProperty("user.name")
val sessionHandle = client.openSession(user, "")
val confOverlay = new java.util.HashMap[java.lang.String, java.lang.String]
val operationHandle = client.executeStatement(
sessionHandle,
"SELECT * FROM range(10)",
confOverlay) // 10 rows result with sequence 0, 1, 2, ..., 9
var rows: RowSet = null
// Fetch 5 rows with FETCH_NEXT
rows = client.fetchResults(
operationHandle, FetchOrientation.FETCH_NEXT, 5, FetchType.QUERY_OUTPUT)
checkResult(rows, 0, 5) // fetched [0, 5)
// Fetch another 2 rows with FETCH_NEXT
rows = client.fetchResults(
operationHandle, FetchOrientation.FETCH_NEXT, 2, FetchType.QUERY_OUTPUT)
checkResult(rows, 5, 7) // fetched [5, 7)
// FETCH_PRIOR 3 rows
rows = client.fetchResults(
operationHandle, FetchOrientation.FETCH_PRIOR, 3, FetchType.QUERY_OUTPUT)
checkResult(rows, 2, 5) // fetched [2, 5)
// FETCH_PRIOR again will scroll back to 0, and then the returned result
// may overlap the results of previous FETCH_PRIOR
rows = client.fetchResults(
operationHandle, FetchOrientation.FETCH_PRIOR, 3, FetchType.QUERY_OUTPUT)
checkResult(rows, 0, 3) // fetched [0, 3)
// FETCH_PRIOR again will stay at 0
rows = client.fetchResults(
operationHandle, FetchOrientation.FETCH_PRIOR, 4, FetchType.QUERY_OUTPUT)
checkResult(rows, 0, 4) // fetched [0, 4)
// FETCH_NEXT will continue moving forward from offset 4
rows = client.fetchResults(
operationHandle, FetchOrientation.FETCH_NEXT, 10, FetchType.QUERY_OUTPUT)
checkResult(rows, 4, 10) // fetched [4, 10) until the end of results
// FETCH_NEXT is at end of results
rows = client.fetchResults(
operationHandle, FetchOrientation.FETCH_NEXT, 5, FetchType.QUERY_OUTPUT)
checkResult(rows, 10, 10) // fetched empty [10, 10) (at end of results)
// FETCH_NEXT is at end of results again
rows = client.fetchResults(
operationHandle, FetchOrientation.FETCH_NEXT, 2, FetchType.QUERY_OUTPUT)
checkResult(rows, 10, 10) // fetched empty [10, 10) (at end of results)
// FETCH_PRIOR 1 rows yet again
rows = client.fetchResults(
operationHandle, FetchOrientation.FETCH_PRIOR, 1, FetchType.QUERY_OUTPUT)
checkResult(rows, 9, 10) // fetched [9, 10)
// FETCH_NEXT will return 0 yet again
rows = client.fetchResults(
operationHandle, FetchOrientation.FETCH_NEXT, 5, FetchType.QUERY_OUTPUT)
checkResult(rows, 10, 10) // fetched empty [10, 10) (at end of results)
// FETCH_FIRST results from first row
rows = client.fetchResults(
operationHandle, FetchOrientation.FETCH_FIRST, 3, FetchType.QUERY_OUTPUT)
checkResult(rows, 0, 3) // fetch [0, 3)
// Fetch the remaining rows till the end with FETCH_NEXT
rows = client.fetchResults(
operationHandle, FetchOrientation.FETCH_NEXT, 1000, FetchType.QUERY_OUTPUT)
checkResult(rows, 3, 10) // fetched [3, 10)
client.closeOperation(operationHandle)
client.closeSession(sessionHandle)
}
}
test("SPARK-29492: use add jar in sync mode") {
withCLIServiceClient { client =>
val user = System.getProperty("user.name")
val sessionHandle = client.openSession(user, "")
withJdbcStatement("smallKV", "addJar") { statement =>
val confOverlay = new java.util.HashMap[java.lang.String, java.lang.String]
val jarFile = HiveTestJars.getHiveHcatalogCoreJar().getCanonicalPath
Seq(s"ADD JAR $jarFile",
"CREATE TABLE smallKV(key INT, val STRING) USING hive",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE smallKV")
.foreach(query => client.executeStatement(sessionHandle, query, confOverlay))
client.executeStatement(sessionHandle,
"""CREATE TABLE addJar(key string)
|ROW FORMAT SERDE 'org.apache.hive.hcatalog.data.JsonSerDe'
""".stripMargin, confOverlay)
client.executeStatement(sessionHandle,
"INSERT INTO TABLE addJar SELECT 'k1' as key FROM smallKV limit 1", confOverlay)
val operationHandle = client.executeStatement(
sessionHandle,
"SELECT key FROM addJar",
confOverlay)
// Fetch result first time
assertResult(1, "Fetching result first time from next row") {
val rows_next = client.fetchResults(
operationHandle,
FetchOrientation.FETCH_NEXT,
1000,
FetchType.QUERY_OUTPUT)
rows_next.numRows()
}
}
}
}
test("SPARK-31859 Thriftserver works with spark.sql.datetime.java8API.enabled=true") {
withJdbcStatement() { st =>
st.execute("set spark.sql.datetime.java8API.enabled=true")
val rs = st.executeQuery("select date '2020-05-28', timestamp '2020-05-28 00:00:00'")
rs.next()
assert(rs.getDate(1).toString() == "2020-05-28")
assert(rs.getTimestamp(2).toString() == "2020-05-28 00:00:00.0")
}
}
test("SPARK-31861 Thriftserver respects spark.sql.session.timeZone") {
withJdbcStatement() { st =>
st.execute("set spark.sql.session.timeZone=+03:15") // different than Thriftserver's JVM tz
val rs = st.executeQuery("select timestamp '2020-05-28 10:00:00'")
rs.next()
// The timestamp as string is the same as the literal
assert(rs.getString(1) == "2020-05-28 10:00:00.0")
// Parsing it to java.sql.Timestamp in the client will always result in a timestamp
// in client default JVM timezone. The string value of the Timestamp will match the literal,
// but if the JDBC application cares about the internal timezone and UTC offset of the
// Timestamp object, it should set spark.sql.session.timeZone to match its client JVM tz.
assert(rs.getTimestamp(1).toString() == "2020-05-28 10:00:00.0")
}
}
test("SPARK-31863 Session conf should persist between Thriftserver worker threads") {
val iter = 20
withJdbcStatement() { statement =>
// date 'now' is resolved during parsing, and relies on SQLConf.get to
// obtain the current set timezone. We exploit this to run this test.
// If the timezones are set correctly to 25 hours apart across threads,
// the dates should reflect this.
// iterate a few times for the odd chance the same thread is selected
for (_ <- 0 until iter) {
statement.execute("SET spark.sql.session.timeZone=GMT-12")
val firstResult = statement.executeQuery("SELECT date 'now'")
firstResult.next()
val beyondDateLineWest = firstResult.getDate(1)
statement.execute("SET spark.sql.session.timeZone=GMT+13")
val secondResult = statement.executeQuery("SELECT date 'now'")
secondResult.next()
val dateLineEast = secondResult.getDate(1)
assert(
dateLineEast after beyondDateLineWest,
"SQLConf changes should persist across execution threads")
}
}
}
test("SPARK-30808: use Java 8 time API and Proleptic Gregorian calendar by default") {
withJdbcStatement() { st =>
// Proleptic Gregorian calendar has no gap in the range 1582-10-04..1582-10-15
val date = "1582-10-10"
val rs = st.executeQuery(s"select date '$date'")
rs.next()
val expected = java.sql.Date.valueOf(date)
assert(rs.getDate(1) === expected)
assert(rs.getString(1) === expected.toString)
}
}
}
class SingleSessionSuite extends HiveThriftJdbcTest {
override def mode: ServerMode.Value = ServerMode.binary
override protected def extraConf: Seq[String] =
s"--conf ${HIVE_THRIFT_SERVER_SINGLESESSION.key}=true" :: Nil
test("share the temporary functions across JDBC connections") {
withMultipleConnectionJdbcStatement("test_udtf")(
{ statement =>
val jarPath = "../hive/src/test/resources/TestUDTF.jar"
val jarURL = s"file://${System.getProperty("user.dir")}/$jarPath"
// Configurations and temporary functions added in this session should be visible to all
// the other sessions.
Seq(
"SET foo=bar",
s"ADD JAR $jarURL",
"CREATE TABLE test_udtf(key INT, value STRING) USING hive",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test_udtf",
s"""CREATE TEMPORARY FUNCTION udtf_count2
|AS 'org.apache.spark.sql.hive.execution.GenericUDTFCount2'
""".stripMargin
).foreach(statement.execute)
},
{ statement =>
try {
val rs1 = statement.executeQuery("SET foo")
assert(rs1.next())
assert(rs1.getString(1) === "foo")
assert(rs1.getString(2) === "bar")
val rs2 = statement.executeQuery("DESCRIBE FUNCTION udtf_count2")
assert(rs2.next())
assert(rs2.getString(1) === "Function: udtf_count2")
assert(rs2.next())
assertResult("Class: org.apache.spark.sql.hive.execution.GenericUDTFCount2") {
rs2.getString(1)
}
assert(rs2.next())
assert(rs2.getString(1) === "Usage: N/A.")
val rs3 = statement.executeQuery(
"SELECT key, cc FROM test_udtf LATERAL VIEW udtf_count2(value) dd AS cc")
assert(rs3.next())
assert(rs3.getInt(1) === 165)
assert(rs3.getInt(2) === 5)
assert(rs3.next())
assert(rs3.getInt(1) === 165)
assert(rs3.getInt(2) === 5)
} finally {
statement.executeQuery("DROP TEMPORARY FUNCTION udtf_count2")
}
}
)
}
test("unable to changing spark.sql.hive.thriftServer.singleSession using JDBC connections") {
withJdbcStatement() { statement =>
// JDBC connections are not able to set the conf spark.sql.hive.thriftServer.singleSession
val e = intercept[SQLException] {
statement.executeQuery("SET spark.sql.hive.thriftServer.singleSession=false")
}.getMessage
assert(e.contains(
"Cannot modify the value of a static config: spark.sql.hive.thriftServer.singleSession"))
}
}
test("share the current database and temporary tables across JDBC connections") {
withMultipleConnectionJdbcStatement()(
{ statement =>
statement.execute("CREATE DATABASE IF NOT EXISTS db1")
},
{ statement =>
val rs1 = statement.executeQuery("SELECT current_database()")
assert(rs1.next())
assert(rs1.getString(1) === "default")
statement.execute("USE db1")
val rs2 = statement.executeQuery("SELECT current_database()")
assert(rs2.next())
assert(rs2.getString(1) === "db1")
statement.execute("CREATE TEMP VIEW tempView AS SELECT 123")
},
{ statement =>
// the current database is set to db1 by another JDBC connection.
val rs1 = statement.executeQuery("SELECT current_database()")
assert(rs1.next())
assert(rs1.getString(1) === "db1")
val rs2 = statement.executeQuery("SELECT * from tempView")
assert(rs2.next())
assert(rs2.getString(1) === "123")
statement.execute("USE default")
statement.execute("DROP VIEW tempView")
statement.execute("DROP DATABASE db1 CASCADE")
}
)
}
}
class HiveThriftCleanUpScratchDirSuite extends HiveThriftJdbcTest {
var tempScratchDir: File = _
override protected def beforeAll(): Unit = {
tempScratchDir = Utils.createTempDir()
tempScratchDir.setWritable(true, false)
assert(tempScratchDir.list().isEmpty)
new File(tempScratchDir.getAbsolutePath + File.separator + "SPARK-31626").createNewFile()
assert(tempScratchDir.list().nonEmpty)
super.beforeAll()
}
override def mode: ServerMode.Value = ServerMode.binary
override protected def extraConf: Seq[String] =
s" --hiveconf ${ConfVars.HIVE_START_CLEANUP_SCRATCHDIR}=true " ::
s"--hiveconf ${ConfVars.SCRATCHDIR}=${tempScratchDir.getAbsolutePath}" :: Nil
test("Cleanup the Hive scratchdir when starting the Hive Server") {
assert(!tempScratchDir.exists())
withJdbcStatement() { statement =>
val rs = statement.executeQuery("SELECT id FROM range(1)")
assert(rs.next())
assert(rs.getLong(1) === 0L)
}
}
override protected def afterAll(): Unit = {
Utils.deleteRecursively(tempScratchDir)
super.afterAll()
}
}
class HiveThriftHttpServerSuite extends HiveThriftJdbcTest {
override def mode: ServerMode.Value = ServerMode.http
test("JDBC query execution") {
withJdbcStatement("test") { statement =>
val queries = Seq(
"SET spark.sql.shuffle.partitions=3",
"CREATE TABLE test(key INT, val STRING) USING hive",
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test",
"CACHE TABLE test")
queries.foreach(statement.execute)
assertResult(5, "Row count mismatch") {
val resultSet = statement.executeQuery("SELECT COUNT(*) FROM test")
resultSet.next()
resultSet.getInt(1)
}
}
}
test("Checks Hive version") {
withJdbcStatement() { statement =>
val resultSet = statement.executeQuery("SET spark.sql.hive.version")
resultSet.next()
assert(resultSet.getString(1) === "spark.sql.hive.version")
assert(resultSet.getString(2) === HiveUtils.builtinHiveVersion)
}
}
test("SPARK-24829 Checks cast as float") {
withJdbcStatement() { statement =>
val resultSet = statement.executeQuery("SELECT CAST('4.56' AS FLOAT)")
resultSet.next()
assert(resultSet.getString(1) === "4.56")
}
}
}
object ServerMode extends Enumeration {
val binary, http = Value
}
abstract class HiveThriftJdbcTest extends HiveThriftServer2Test {
Utils.classForName(classOf[HiveDriver].getCanonicalName)
private def jdbcUri = if (mode == ServerMode.http) {
s"""jdbc:hive2://localhost:$serverPort/
|default?
|hive.server2.transport.mode=http;
|hive.server2.thrift.http.path=cliservice;
|${hiveConfList}#${hiveVarList}
""".stripMargin.split("\\n").mkString.trim
} else {
s"jdbc:hive2://localhost:$serverPort/?${hiveConfList}#${hiveVarList}"
}
def withMultipleConnectionJdbcStatement(tableNames: String*)(fs: (Statement => Unit)*): Unit = {
val user = System.getProperty("user.name")
val connections = fs.map { _ => DriverManager.getConnection(jdbcUri, user, "") }
val statements = connections.map(_.createStatement())
try {
statements.zip(fs).foreach { case (s, f) => f(s) }
} finally {
tableNames.foreach { name =>
// TODO: Need a better way to drop the view.
if (name.toUpperCase(Locale.ROOT).startsWith("VIEW")) {
statements(0).execute(s"DROP VIEW IF EXISTS $name")
} else {
statements(0).execute(s"DROP TABLE IF EXISTS $name")
}
}
statements.foreach(_.close())
connections.foreach(_.close())
}
}
def withDatabase(dbNames: String*)(fs: (Statement => Unit)*): Unit = {
val user = System.getProperty("user.name")
val connections = fs.map { _ => DriverManager.getConnection(jdbcUri, user, "") }
val statements = connections.map(_.createStatement())
try {
statements.zip(fs).foreach { case (s, f) => f(s) }
} finally {
dbNames.foreach { name =>
statements(0).execute(s"DROP DATABASE IF EXISTS $name")
}
statements.foreach(_.close())
connections.foreach(_.close())
}
}
def withJdbcStatement(tableNames: String*)(f: Statement => Unit): Unit = {
withMultipleConnectionJdbcStatement(tableNames: _*)(f)
}
}
abstract class HiveThriftServer2Test extends SparkFunSuite with BeforeAndAfterAll with Logging {
def mode: ServerMode.Value
private val CLASS_NAME = HiveThriftServer2.getClass.getCanonicalName.stripSuffix("$")
private val LOG_FILE_MARK = s"starting $CLASS_NAME, logging to "
protected val startScript = "../../sbin/start-thriftserver.sh".split("/").mkString(File.separator)
protected val stopScript = "../../sbin/stop-thriftserver.sh".split("/").mkString(File.separator)
private var listeningPort: Int = _
protected def serverPort: Int = listeningPort
protected val hiveConfList = "a=avalue;b=bvalue"
protected val hiveVarList = "c=cvalue;d=dvalue"
protected def user = System.getProperty("user.name")
protected var warehousePath: File = _
protected var metastorePath: File = _
protected def metastoreJdbcUri = s"jdbc:derby:;databaseName=$metastorePath;create=true"
private val pidDir: File = Utils.createTempDir(namePrefix = "thriftserver-pid")
protected var logPath: File = _
protected var operationLogPath: File = _
protected var lScratchDir: File = _
private var logTailingProcess: Process = _
private var diagnosisBuffer: ArrayBuffer[String] = ArrayBuffer.empty[String]
protected def extraConf: Seq[String] = Nil
protected def serverStartCommand(port: Int) = {
val portConf = if (mode == ServerMode.binary) {
ConfVars.HIVE_SERVER2_THRIFT_PORT
} else {
ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT
}
val driverClassPath = {
// Writes a temporary log4j.properties and prepend it to driver classpath, so that it
// overrides all other potential log4j configurations contained in other dependency jar files.
val tempLog4jConf = Utils.createTempDir().getCanonicalPath
Files.write(
"""log4j.rootCategory=DEBUG, console
|log4j.appender.console=org.apache.log4j.ConsoleAppender
|log4j.appender.console.target=System.err
|log4j.appender.console.layout=org.apache.log4j.PatternLayout
|log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n
""".stripMargin,
new File(s"$tempLog4jConf/log4j.properties"),
StandardCharsets.UTF_8)
tempLog4jConf
}
s"""$startScript
| --master local
| --hiveconf ${ConfVars.METASTORECONNECTURLKEY}=$metastoreJdbcUri
| --hiveconf ${ConfVars.METASTOREWAREHOUSE}=$warehousePath
| --hiveconf ${ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST}=localhost
| --hiveconf ${ConfVars.HIVE_SERVER2_TRANSPORT_MODE}=$mode
| --hiveconf ${ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION}=$operationLogPath
| --hiveconf ${ConfVars.LOCALSCRATCHDIR}=$lScratchDir
| --hiveconf $portConf=$port
| --driver-class-path $driverClassPath
| --driver-java-options -Dlog4j.debug
| --conf spark.ui.enabled=false
| ${extraConf.mkString("\n")}
""".stripMargin.split("\\s+").toSeq
}
/**
* String to scan for when looking for the thrift binary endpoint running.
* This can change across Hive versions.
*/
val THRIFT_BINARY_SERVICE_LIVE = "Starting ThriftBinaryCLIService on port"
/**
* String to scan for when looking for the thrift HTTP endpoint running.
* This can change across Hive versions.
*/
val THRIFT_HTTP_SERVICE_LIVE = "Started ThriftHttpCLIService in http"
val SERVER_STARTUP_TIMEOUT = 3.minutes
private def startThriftServer(port: Int, attempt: Int) = {
warehousePath = Utils.createTempDir()
warehousePath.delete()
metastorePath = Utils.createTempDir()
metastorePath.delete()
operationLogPath = Utils.createTempDir()
operationLogPath.delete()
lScratchDir = Utils.createTempDir()
lScratchDir.delete()
logPath = null
logTailingProcess = null
val command = serverStartCommand(port)
diagnosisBuffer ++=
s"""
|### Attempt $attempt ###
|HiveThriftServer2 command line: $command
|Listening port: $port
|System user: $user
""".stripMargin.split("\\n")
logInfo(s"Trying to start HiveThriftServer2: port=$port, mode=$mode, attempt=$attempt")
logPath = {
val lines = Utils.executeAndGetOutput(
command = command,
extraEnvironment = Map(
// Disables SPARK_TESTING to exclude log4j.properties in test directories.
"SPARK_TESTING" -> "0",
// But set SPARK_SQL_TESTING to make spark-class happy.
"SPARK_SQL_TESTING" -> "1",
// Points SPARK_PID_DIR to SPARK_HOME, otherwise only 1 Thrift server instance can be
// started at a time, which is not Jenkins friendly.
"SPARK_PID_DIR" -> pidDir.getCanonicalPath),
redirectStderr = true)
logInfo(s"COMMAND: $command")
logInfo(s"OUTPUT: $lines")
lines.split("\\n").collectFirst {
case line if line.contains(LOG_FILE_MARK) => new File(line.drop(LOG_FILE_MARK.length))
}.getOrElse {
throw new RuntimeException("Failed to find HiveThriftServer2 log file.")
}
}
val serverStarted = Promise[Unit]()
// Ensures that the following "tail" command won't fail.
logPath.createNewFile()
val successLines = Seq(THRIFT_BINARY_SERVICE_LIVE, THRIFT_HTTP_SERVICE_LIVE)
logTailingProcess = {
val command = s"/usr/bin/env tail -n +0 -f ${logPath.getCanonicalPath}".split(" ")
// Using "-n +0" to make sure all lines in the log file are checked.
val builder = new ProcessBuilder(command: _*)
val captureOutput = (line: String) => diagnosisBuffer.synchronized {
diagnosisBuffer += line
successLines.foreach { r =>
if (line.contains(r)) {
serverStarted.trySuccess(())
}
}
}
val process = builder.start()
new ProcessOutputCapturer(process.getInputStream, captureOutput).start()
new ProcessOutputCapturer(process.getErrorStream, captureOutput).start()
process
}
ThreadUtils.awaitResult(serverStarted.future, SERVER_STARTUP_TIMEOUT)
}
private def stopThriftServer(): Unit = {
// The `spark-daemon.sh' script uses kill, which is not synchronous, have to wait for a while.
Utils.executeAndGetOutput(
command = Seq(stopScript),
extraEnvironment = Map("SPARK_PID_DIR" -> pidDir.getCanonicalPath))
Thread.sleep(3.seconds.toMillis)
warehousePath.delete()
warehousePath = null
metastorePath.delete()
metastorePath = null
operationLogPath.delete()
operationLogPath = null
lScratchDir.delete()
lScratchDir = null
Option(logPath).foreach(_.delete())
logPath = null
Option(logTailingProcess).foreach(_.destroy())
logTailingProcess = null
}
private def dumpLogs(): Unit = {
logError(
s"""
|=====================================
|HiveThriftServer2Suite failure output
|=====================================
|${diagnosisBuffer.mkString("\n")}
|=========================================
|End HiveThriftServer2Suite failure output
|=========================================
""".stripMargin)
}
override protected def beforeAll(): Unit = {
super.beforeAll()
// Chooses a random port between 10000 and 19999
listeningPort = 10000 + Random.nextInt(10000)
diagnosisBuffer.clear()
// Retries up to 3 times with different port numbers if the server fails to start
(1 to 3).foldLeft(Try(startThriftServer(listeningPort, 0))) { case (started, attempt) =>
started.orElse {
listeningPort += 1
stopThriftServer()
Try(startThriftServer(listeningPort, attempt))
}
}.recover {
case cause: Throwable =>
dumpLogs()
throw cause
}.get
logInfo(s"HiveThriftServer2 started successfully")
}
override protected def afterAll(): Unit = {
try {
stopThriftServer()
logInfo("HiveThriftServer2 stopped")
} finally {
super.afterAll()
}
}
}
| dbtsai/spark | sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala | Scala | apache-2.0 | 47,799 |
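The suites above drive a live Thrift server through the Hive JDBC driver, using the jdbc:hive2:// URI built in HiveThriftJdbcTest. As a standalone illustration only, a minimal client outside the test harness might look like the sketch below; the host, port, user, and absence of authentication are assumptions, not values taken from the suite:

// Hedged sketch, not part of the test file above: issuing one query against a
// Spark Thrift server in binary mode via the Hive JDBC driver.
import java.sql.DriverManager

object ThriftServerJdbcSketch {
  def main(args: Array[String]): Unit = {
    Class.forName("org.apache.hive.jdbc.HiveDriver")            // register the driver
    val conn = DriverManager.getConnection(
      "jdbc:hive2://localhost:10000/default", "spark", "")      // assumed host/port/user
    try {
      val rs = conn.createStatement().executeQuery("SELECT 42")
      while (rs.next()) println(rs.getInt(1))
    } finally conn.close()
  }
}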
package org.scala_tools.maven.plexus.converters
import org.junit.Assert._;
import org.junit.Test;
import ReflectionUtil._
/**
* Tests the reflection API for scala vars.
*/
class TestRelfectionUtil {
@Test
def mustInjectVars() {
val mojo = new DummyScalaMojo();
injectIntoVar(mojo, "dummyVar", "HAI");
assertEquals("HAI", mojo.dummyVar);
}
@Test
def mustFindFields() {
val mojo = new DummyScalaMojo();
val varType = getVarType(mojo, "dummyVar");
assertTrue(varType.isDefined);
assertEquals(classOf[String], varType.get);
val varType2 = getVarType(mojo, "dummyVar2");
assertFalse(varType2.isDefined);
val varType3 = getVarType(mojo, "otherVar");
assertTrue(varType3.isDefined);
assertEquals(classOf[Int], varType3.get);
}
}
| jsuereth/scala-mojo-support | src/test/java/org/scala_tools/maven/plexus/converters/TestRelfectionUtil.scala | Scala | bsd-3-clause | 768 |
package org.apache.spark.network.pmof
import org.apache.spark.network.BlockDataManager
import org.apache.spark.network.shuffle.BlockStoreClient
abstract class TransferService extends BlockStoreClient {
def init(blockDataManager: BlockDataManager): Unit
def close(): Unit
def hostname: String
def port: Int
}
| Intel-bigdata/OAP | oap-shuffle/RPMem-shuffle/core/src/main/scala/org/apache/spark/network/pmof/TransferService.scala | Scala | apache-2.0 | 320 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.history
import java.io.{File, FileNotFoundException, IOException}
import java.util.{Date, ServiceLoader, UUID}
import java.util.concurrent.{ExecutorService, TimeUnit}
import java.util.zip.{ZipEntry, ZipOutputStream}
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.util.Try
import scala.xml.Node
import com.fasterxml.jackson.annotation.JsonIgnore
import com.google.common.io.ByteStreams
import com.google.common.util.concurrent.MoreExecutors
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.hadoop.fs.permission.FsAction
import org.apache.hadoop.hdfs.DistributedFileSystem
import org.apache.hadoop.hdfs.protocol.HdfsConstants
import org.apache.hadoop.security.AccessControlException
import org.fusesource.leveldbjni.internal.NativeDB
import org.apache.spark.{SecurityManager, SparkConf, SparkException}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.io.CompressionCodec
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.ReplayListenerBus._
import org.apache.spark.status._
import org.apache.spark.status.KVUtils._
import org.apache.spark.status.api.v1.{ApplicationAttemptInfo, ApplicationInfo}
import org.apache.spark.status.config._
import org.apache.spark.ui.SparkUI
import org.apache.spark.util.{Clock, SystemClock, ThreadUtils, Utils}
import org.apache.spark.util.kvstore._
/**
* A class that provides application history from event logs stored in the file system.
* This provider checks for new finished applications in the background periodically and
* renders the history application UI by parsing the associated event logs.
*
* == How new and updated attempts are detected ==
*
* - New attempts are detected in [[checkForLogs]]: the log dir is scanned, and any
* entries in the log dir whose modification time is greater than the last scan time
* are considered new or updated. These are replayed to create a new attempt info entry
* and update or create a matching application info element in the list of applications.
* - Updated attempts are also found in [[checkForLogs]] -- if the attempt's log file has grown, the
* attempt is replaced by another one with a larger log size.
*
* The use of log size, rather than simply relying on modification times, is needed to
* address the following issues
* - some filesystems do not appear to update the `modtime` value whenever data is flushed to
* an open file output stream. Changes to the history may not be picked up.
* - the granularity of the `modtime` field may be 2+ seconds. Rapid changes to the FS can be
* missed.
*
* Tracking filesize works given the following invariant: the logs get bigger
* as new events are added. If a format was used in which this did not hold, the mechanism would
* break. Simple streaming of JSON-formatted events, as is implemented today, implicitly
* maintains this invariant.
*/
private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock)
extends ApplicationHistoryProvider with Logging {
def this(conf: SparkConf) = {
this(conf, new SystemClock())
}
import config._
import FsHistoryProvider._
// Interval between safemode checks.
private val SAFEMODE_CHECK_INTERVAL_S = conf.getTimeAsSeconds(
"spark.history.fs.safemodeCheck.interval", "5s")
// Interval between each check for event log updates
private val UPDATE_INTERVAL_S = conf.getTimeAsSeconds("spark.history.fs.update.interval", "10s")
// Interval between each cleaner checks for event logs to delete
private val CLEAN_INTERVAL_S = conf.getTimeAsSeconds("spark.history.fs.cleaner.interval", "1d")
// Number of threads used to replay event logs.
private val NUM_PROCESSING_THREADS = conf.getInt(SPARK_HISTORY_FS_NUM_REPLAY_THREADS,
Math.ceil(Runtime.getRuntime.availableProcessors() / 4f).toInt)
private val logDir = conf.get(EVENT_LOG_DIR)
private val HISTORY_UI_ACLS_ENABLE = conf.getBoolean("spark.history.ui.acls.enable", false)
private val HISTORY_UI_ADMIN_ACLS = conf.get("spark.history.ui.admin.acls", "")
private val HISTORY_UI_ADMIN_ACLS_GROUPS = conf.get("spark.history.ui.admin.acls.groups", "")
logInfo(s"History server ui acls " + (if (HISTORY_UI_ACLS_ENABLE) "enabled" else "disabled") +
"; users with admin permissions: " + HISTORY_UI_ADMIN_ACLS.toString +
"; groups with admin permissions" + HISTORY_UI_ADMIN_ACLS_GROUPS.toString)
private val hadoopConf = SparkHadoopUtil.get.newConfiguration(conf)
private val fs = new Path(logDir).getFileSystem(hadoopConf)
// Used by check event thread and clean log thread.
// Scheduled thread pool size must be one, otherwise it will have concurrent issues about fs
// and applications between check task and clean task.
private val pool = ThreadUtils.newDaemonSingleThreadScheduledExecutor("spark-history-task-%d")
// The modification time of the newest log detected during the last scan. Currently only
// used for logging msgs (logs are re-scanned based on file size, rather than modtime)
private val lastScanTime = new java.util.concurrent.atomic.AtomicLong(-1)
private val pendingReplayTasksCount = new java.util.concurrent.atomic.AtomicInteger(0)
private val storePath = conf.get(LOCAL_STORE_DIR).map(new File(_))
// Visible for testing.
private[history] val listing: KVStore = storePath.map { path =>
require(path.isDirectory(), s"Configured store directory ($path) does not exist.")
val dbPath = new File(path, "listing.ldb")
val metadata = new FsHistoryProviderMetadata(CURRENT_LISTING_VERSION,
AppStatusStore.CURRENT_VERSION, logDir.toString())
try {
open(dbPath, metadata)
} catch {
// If there's an error, remove the listing database and any existing UI database
// from the store directory, since it's extremely likely that they'll all contain
// incompatible information.
case _: UnsupportedStoreVersionException | _: MetadataMismatchException =>
logInfo("Detected incompatible DB versions, deleting...")
path.listFiles().foreach(Utils.deleteRecursively)
open(dbPath, metadata)
case dbExc: NativeDB.DBException =>
// Get rid of the corrupted listing.ldb and re-create it.
logWarning(s"Failed to load disk store $dbPath :", dbExc)
Utils.deleteRecursively(dbPath)
open(dbPath, metadata)
}
}.getOrElse(new InMemoryStore())
private val diskManager = storePath.map { path =>
new HistoryServerDiskManager(conf, path, listing, clock)
}
private val activeUIs = new mutable.HashMap[(String, Option[String]), LoadedAppUI]()
/**
* Return a runnable that performs the given operation on the event logs.
* This operation is expected to be executed periodically.
*/
private def getRunner(operateFun: () => Unit): Runnable = {
new Runnable() {
override def run(): Unit = Utils.tryOrExit {
operateFun()
}
}
}
/**
* Fixed size thread pool to fetch and parse log files.
*/
private val replayExecutor: ExecutorService = {
if (!Utils.isTesting) {
ThreadUtils.newDaemonFixedThreadPool(NUM_PROCESSING_THREADS, "log-replay-executor")
} else {
MoreExecutors.sameThreadExecutor()
}
}
val initThread = initialize()
private[history] def initialize(): Thread = {
if (!isFsInSafeMode()) {
startPolling()
null
} else {
startSafeModeCheckThread(None)
}
}
private[history] def startSafeModeCheckThread(
errorHandler: Option[Thread.UncaughtExceptionHandler]): Thread = {
// Cannot probe anything while the FS is in safe mode, so spawn a new thread that will wait
// for the FS to leave safe mode before enabling polling. This allows the main history server
// UI to be shown (so that the user can see the HDFS status).
val initThread = new Thread(new Runnable() {
override def run(): Unit = {
try {
while (isFsInSafeMode()) {
logInfo("HDFS is still in safe mode. Waiting...")
val deadline = clock.getTimeMillis() +
TimeUnit.SECONDS.toMillis(SAFEMODE_CHECK_INTERVAL_S)
clock.waitTillTime(deadline)
}
startPolling()
} catch {
case _: InterruptedException =>
}
}
})
initThread.setDaemon(true)
initThread.setName(s"${getClass().getSimpleName()}-init")
initThread.setUncaughtExceptionHandler(errorHandler.getOrElse(
new Thread.UncaughtExceptionHandler() {
override def uncaughtException(t: Thread, e: Throwable): Unit = {
logError("Error initializing FsHistoryProvider.", e)
System.exit(1)
}
}))
initThread.start()
initThread
}
private def startPolling(): Unit = {
diskManager.foreach(_.initialize())
// Validate the log directory.
val path = new Path(logDir)
try {
if (!fs.getFileStatus(path).isDirectory) {
throw new IllegalArgumentException(
"Logging directory specified is not a directory: %s".format(logDir))
}
} catch {
case f: FileNotFoundException =>
var msg = s"Log directory specified does not exist: $logDir"
if (logDir == DEFAULT_LOG_DIR) {
msg += " Did you configure the correct one through spark.history.fs.logDirectory?"
}
throw new FileNotFoundException(msg).initCause(f)
}
// Disable the background thread during tests.
if (!conf.contains("spark.testing")) {
// A task that periodically checks for event log updates on disk.
logDebug(s"Scheduling update thread every $UPDATE_INTERVAL_S seconds")
pool.scheduleWithFixedDelay(
getRunner(() => checkForLogs()), 0, UPDATE_INTERVAL_S, TimeUnit.SECONDS)
if (conf.getBoolean("spark.history.fs.cleaner.enabled", false)) {
// A task that periodically cleans event logs on disk.
pool.scheduleWithFixedDelay(
getRunner(() => cleanLogs()), 0, CLEAN_INTERVAL_S, TimeUnit.SECONDS)
}
} else {
logDebug("Background update thread disabled for testing")
}
}
override def getListing(): Iterator[ApplicationInfo] = {
// Return the listing in end time descending order.
listing.view(classOf[ApplicationInfoWrapper])
.index("endTime")
.reverse()
.iterator()
.asScala
.map(_.toApplicationInfo())
}
override def getApplicationInfo(appId: String): Option[ApplicationInfo] = {
try {
Some(load(appId).toApplicationInfo())
} catch {
case _: NoSuchElementException =>
None
}
}
override def getEventLogsUnderProcess(): Int = pendingReplayTasksCount.get()
override def getLastUpdatedTime(): Long = lastScanTime.get()
override def getAppUI(appId: String, attemptId: Option[String]): Option[LoadedAppUI] = {
val app = try {
load(appId)
} catch {
case _: NoSuchElementException =>
return None
}
val attempt = app.attempts.find(_.info.attemptId == attemptId).orNull
if (attempt == null) {
return None
}
val conf = this.conf.clone()
val secManager = new SecurityManager(conf)
secManager.setAcls(HISTORY_UI_ACLS_ENABLE)
// make sure to set admin acls before view acls so they are properly picked up
secManager.setAdminAcls(HISTORY_UI_ADMIN_ACLS + "," + attempt.adminAcls.getOrElse(""))
secManager.setViewAcls(attempt.info.sparkUser, attempt.viewAcls.getOrElse(""))
secManager.setAdminAclsGroups(HISTORY_UI_ADMIN_ACLS_GROUPS + "," +
attempt.adminAclsGroups.getOrElse(""))
secManager.setViewAclsGroups(attempt.viewAclsGroups.getOrElse(""))
val kvstore = try {
diskManager match {
case Some(sm) =>
loadDiskStore(sm, appId, attempt)
case _ =>
createInMemoryStore(attempt)
}
} catch {
case _: FileNotFoundException =>
return None
}
val ui = SparkUI.create(None, new AppStatusStore(kvstore), conf, secManager, app.info.name,
HistoryServer.getAttemptURI(appId, attempt.info.attemptId),
attempt.info.startTime.getTime(),
attempt.info.appSparkVersion)
loadPlugins().foreach(_.setupUI(ui))
val loadedUI = LoadedAppUI(ui)
synchronized {
activeUIs((appId, attemptId)) = loadedUI
}
Some(loadedUI)
}
override def getEmptyListingHtml(): Seq[Node] = {
<p>
Did you specify the correct logging directory? Please verify your setting of
<span style="font-style:italic">spark.history.fs.logDirectory</span>
listed above and whether you have the permissions to access it.
<br/>
It is also possible that your application did not run to
completion or did not stop the SparkContext.
</p>
}
override def getConfig(): Map[String, String] = {
val safeMode = if (isFsInSafeMode()) {
Map("HDFS State" -> "In safe mode, application logs not available.")
} else {
Map()
}
Map("Event log directory" -> logDir.toString) ++ safeMode
}
override def stop(): Unit = {
try {
if (initThread != null && initThread.isAlive()) {
initThread.interrupt()
initThread.join()
}
Seq(pool, replayExecutor).foreach { executor =>
executor.shutdown()
if (!executor.awaitTermination(5, TimeUnit.SECONDS)) {
executor.shutdownNow()
}
}
} finally {
activeUIs.foreach { case (_, loadedUI) => loadedUI.ui.store.close() }
activeUIs.clear()
listing.close()
}
}
override def onUIDetached(appId: String, attemptId: Option[String], ui: SparkUI): Unit = {
val uiOption = synchronized {
activeUIs.remove((appId, attemptId))
}
uiOption.foreach { loadedUI =>
loadedUI.lock.writeLock().lock()
try {
loadedUI.ui.store.close()
} finally {
loadedUI.lock.writeLock().unlock()
}
diskManager.foreach { dm =>
// If the UI is not valid, delete its files from disk, if any. This relies on the fact that
// ApplicationCache will never call this method concurrently with getAppUI() for the same
// appId / attemptId.
dm.release(appId, attemptId, delete = !loadedUI.valid)
}
}
}
/**
* Builds the application list based on the current contents of the log directory.
* Tries to reuse as much of the data already in memory as possible, by not reading
* applications that haven't been updated since last time the logs were checked.
*/
private[history] def checkForLogs(): Unit = {
try {
val newLastScanTime = getNewLastScanTime()
logDebug(s"Scanning $logDir with lastScanTime==$lastScanTime")
val updated = Option(fs.listStatus(new Path(logDir))).map(_.toSeq).getOrElse(Nil)
.filter { entry =>
!entry.isDirectory() &&
// FsHistoryProvider generates a hidden file which can't be read. Accidentally
// reading a garbage file is safe, but we would log an error which can be scary to
// the end-user.
!entry.getPath().getName().startsWith(".") &&
SparkHadoopUtil.get.checkAccessPermission(entry, FsAction.READ)
}
.filter { entry =>
try {
val info = listing.read(classOf[LogInfo], entry.getPath().toString())
if (info.fileSize < entry.getLen()) {
// Log size has changed, it should be parsed.
true
} else {
// If the SHS view has a valid application, update the time the file was last seen so
// that the entry is not deleted from the SHS listing.
if (info.appId.isDefined) {
listing.write(info.copy(lastProcessed = newLastScanTime))
}
false
}
} catch {
case _: NoSuchElementException =>
// If the file is currently not being tracked by the SHS, add an entry for it and try
// to parse it. This will allow the cleaner code to detect the file as stale later on
// if it was not possible to parse it.
listing.write(LogInfo(entry.getPath().toString(), newLastScanTime, None, None,
entry.getLen()))
entry.getLen() > 0
}
}
.sortWith { case (entry1, entry2) =>
entry1.getModificationTime() > entry2.getModificationTime()
}
if (updated.nonEmpty) {
logDebug(s"New/updated attempts found: ${updated.size} ${updated.map(_.getPath)}")
}
val tasks = updated.map { entry =>
try {
replayExecutor.submit(new Runnable {
override def run(): Unit = mergeApplicationListing(entry, newLastScanTime)
})
} catch {
// let the iteration over the updated entries break, since an exception on
// replayExecutor.submit (..) indicates the ExecutorService is unable
// to take any more submissions at this time
case e: Exception =>
logError(s"Exception while submitting event log for replay", e)
null
}
}.filter(_ != null)
pendingReplayTasksCount.addAndGet(tasks.size)
// Wait for all tasks to finish. This makes sure that checkForLogs
// is not scheduled again while some tasks are already running in
// the replayExecutor.
tasks.foreach { task =>
try {
task.get()
} catch {
case e: InterruptedException =>
throw e
case e: Exception =>
logError("Exception while merging application listings", e)
} finally {
pendingReplayTasksCount.decrementAndGet()
}
}
// Delete all information about applications whose log files disappeared from storage.
// This is done by identifying the event logs which were not touched by the current
// directory scan.
//
// Only entries with valid applications are cleaned up here. Cleaning up invalid log
// files is done by the periodic cleaner task.
val stale = listing.view(classOf[LogInfo])
.index("lastProcessed")
.last(newLastScanTime - 1)
.asScala
.toList
stale.foreach { log =>
log.appId.foreach { appId =>
cleanAppData(appId, log.attemptId, log.logPath)
listing.delete(classOf[LogInfo], log.logPath)
}
}
lastScanTime.set(newLastScanTime)
} catch {
case e: Exception => logError("Exception in checking for event log updates", e)
}
}
private def cleanAppData(appId: String, attemptId: Option[String], logPath: String): Unit = {
try {
val app = load(appId)
val (attempt, others) = app.attempts.partition(_.info.attemptId == attemptId)
assert(attempt.isEmpty || attempt.size == 1)
val isStale = attempt.headOption.exists { a =>
if (a.logPath != new Path(logPath).getName()) {
// If the log file name does not match, then probably the old log file was from an
// in progress application. Just return that the app should be left alone.
false
} else {
val maybeUI = synchronized {
activeUIs.remove(appId -> attemptId)
}
maybeUI.foreach { ui =>
ui.invalidate()
ui.ui.store.close()
}
diskManager.foreach(_.release(appId, attemptId, delete = true))
true
}
}
if (isStale) {
if (others.nonEmpty) {
val newAppInfo = new ApplicationInfoWrapper(app.info, others)
listing.write(newAppInfo)
} else {
listing.delete(classOf[ApplicationInfoWrapper], appId)
}
}
} catch {
case _: NoSuchElementException =>
}
}
private[history] def getNewLastScanTime(): Long = {
val fileName = "." + UUID.randomUUID().toString
val path = new Path(logDir, fileName)
val fos = fs.create(path)
try {
fos.close()
fs.getFileStatus(path).getModificationTime
} catch {
case e: Exception =>
logError("Exception encountered when attempting to update last scan time", e)
lastScanTime.get()
} finally {
if (!fs.delete(path, true)) {
logWarning(s"Error deleting ${path}")
}
}
}
override def writeEventLogs(
appId: String,
attemptId: Option[String],
zipStream: ZipOutputStream): Unit = {
/**
* This method compresses the files passed in, and writes the compressed data out into the
* [[OutputStream]] passed in. Each file is written as a new [[ZipEntry]] with its name being
* the name of the file being compressed.
*/
def zipFileToStream(file: Path, entryName: String, outputStream: ZipOutputStream): Unit = {
val fs = file.getFileSystem(hadoopConf)
val inputStream = fs.open(file, 1 * 1024 * 1024) // 1MB Buffer
try {
outputStream.putNextEntry(new ZipEntry(entryName))
ByteStreams.copy(inputStream, outputStream)
outputStream.closeEntry()
} finally {
inputStream.close()
}
}
val app = try {
load(appId)
} catch {
case _: NoSuchElementException =>
throw new SparkException(s"Logs for $appId not found.")
}
try {
// If no attempt is specified, or there is no attemptId for attempts, return all attempts
attemptId
.map { id => app.attempts.filter(_.info.attemptId == Some(id)) }
.getOrElse(app.attempts)
.map(_.logPath)
.foreach { log =>
zipFileToStream(new Path(logDir, log), log, zipStream)
}
} finally {
zipStream.close()
}
}
/**
* Replay the given log file, saving the application in the listing db.
*/
protected def mergeApplicationListing(fileStatus: FileStatus, scanTime: Long): Unit = {
val eventsFilter: ReplayEventsFilter = { eventString =>
eventString.startsWith(APPL_START_EVENT_PREFIX) ||
eventString.startsWith(APPL_END_EVENT_PREFIX) ||
eventString.startsWith(LOG_START_EVENT_PREFIX) ||
eventString.startsWith(ENV_UPDATE_EVENT_PREFIX)
}
val logPath = fileStatus.getPath()
val bus = new ReplayListenerBus()
val listener = new AppListingListener(fileStatus, clock)
bus.addListener(listener)
replay(fileStatus, bus, eventsFilter = eventsFilter)
val (appId, attemptId) = listener.applicationInfo match {
case Some(app) =>
// Invalidate the existing UI for the reloaded app attempt, if any. See LoadedAppUI for a
// discussion on the UI lifecycle.
synchronized {
activeUIs.get((app.info.id, app.attempts.head.info.attemptId)).foreach { ui =>
ui.invalidate()
ui.ui.store.close()
}
}
addListing(app)
(Some(app.info.id), app.attempts.head.info.attemptId)
case _ =>
// If the app hasn't written down its app ID to the logs, still record the entry in the
// listing db, with an empty ID. This will make the log eligible for deletion if the app
// does not make progress after the configured max log age.
(None, None)
}
listing.write(LogInfo(logPath.toString(), scanTime, appId, attemptId, fileStatus.getLen()))
}
/**
* Delete event logs from the log directory according to the clean policy defined by the user.
*/
private[history] def cleanLogs(): Unit = Utils.tryLog {
val maxTime = clock.getTimeMillis() - conf.get(MAX_LOG_AGE_S) * 1000
val expired = listing.view(classOf[ApplicationInfoWrapper])
.index("oldestAttempt")
.reverse()
.first(maxTime)
.asScala
.toList
expired.foreach { app =>
// Applications may have multiple attempts, some of which may not need to be deleted yet.
val (remaining, toDelete) = app.attempts.partition { attempt =>
attempt.info.lastUpdated.getTime() >= maxTime
}
if (remaining.nonEmpty) {
val newApp = new ApplicationInfoWrapper(app.info, remaining)
listing.write(newApp)
}
toDelete.foreach { attempt =>
logInfo(s"Deleting expired event log for ${attempt.logPath}")
val logPath = new Path(logDir, attempt.logPath)
listing.delete(classOf[LogInfo], logPath.toString())
cleanAppData(app.id, attempt.info.attemptId, logPath.toString())
deleteLog(logPath)
}
if (remaining.isEmpty) {
listing.delete(app.getClass(), app.id)
}
}
// Delete log files that don't have a valid application and exceed the configured max age.
val stale = listing.view(classOf[LogInfo])
.index("lastProcessed")
.reverse()
.first(maxTime)
.asScala
.toList
stale.foreach { log =>
if (log.appId.isEmpty) {
logInfo(s"Deleting invalid / corrupt event log ${log.logPath}")
deleteLog(new Path(log.logPath))
listing.delete(classOf[LogInfo], log.logPath)
}
}
}
/**
* Replays the events in the specified log file on the supplied `ReplayListenerBus`.
* `ReplayEventsFilter` determines what events are replayed.
*/
private def replay(
eventLog: FileStatus,
bus: ReplayListenerBus,
eventsFilter: ReplayEventsFilter = SELECT_ALL_FILTER): Unit = {
val logPath = eventLog.getPath()
val isCompleted = !logPath.getName().endsWith(EventLoggingListener.IN_PROGRESS)
logInfo(s"Replaying log path: $logPath")
// Note that the eventLog may have *increased* in size since when we grabbed the filestatus,
// and when we read the file here. That is OK -- it may result in an unnecessary refresh
// when there is no update, but will not result in missing an update. We *must* prevent
// an error the other way -- if we report a size bigger (ie later) than the file that is
// actually read, we may never refresh the app. FileStatus is guaranteed to be static
// after it's created, so we get a file size that is no bigger than what is actually read.
Utils.tryWithResource(EventLoggingListener.openEventLog(logPath, fs)) { in =>
bus.replay(in, logPath.toString, !isCompleted, eventsFilter)
logInfo(s"Finished parsing $logPath")
}
}
/**
* Rebuilds the application state store from its event log.
*/
private def rebuildAppStore(
store: KVStore,
eventLog: FileStatus,
lastUpdated: Long): Unit = {
// Disable async updates, since they cause higher memory usage, and it's ok to take longer
// to parse the event logs in the SHS.
val replayConf = conf.clone().set(ASYNC_TRACKING_ENABLED, false)
val trackingStore = new ElementTrackingStore(store, replayConf)
val replayBus = new ReplayListenerBus()
val listener = new AppStatusListener(trackingStore, replayConf, false,
lastUpdateTime = Some(lastUpdated))
replayBus.addListener(listener)
for {
plugin <- loadPlugins()
listener <- plugin.createListeners(conf, trackingStore)
} replayBus.addListener(listener)
try {
replay(eventLog, replayBus)
trackingStore.close(false)
} catch {
case e: Exception =>
Utils.tryLogNonFatalError {
trackingStore.close()
}
throw e
}
}
/**
* Checks whether HDFS is in safe mode.
*
   * Note that DistributedFileSystem is a `@LimitedPrivate` class, which for all practical purposes
* makes it more public than not.
*/
private[history] def isFsInSafeMode(): Boolean = fs match {
case dfs: DistributedFileSystem =>
isFsInSafeMode(dfs)
case _ =>
false
}
private[history] def isFsInSafeMode(dfs: DistributedFileSystem): Boolean = {
/* true to check only for Active NNs status */
dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET, true)
}
/**
* String description for diagnostics
* @return a summary of the component state
*/
override def toString: String = {
val count = listing.count(classOf[ApplicationInfoWrapper])
s"""|FsHistoryProvider{logdir=$logDir,
| storedir=$storePath,
| last scan time=$lastScanTime
| application count=$count}""".stripMargin
}
private def load(appId: String): ApplicationInfoWrapper = {
listing.read(classOf[ApplicationInfoWrapper], appId)
}
/**
* Write the app's information to the given store. Serialized to avoid the (notedly rare) case
* where two threads are processing separate attempts of the same application.
*/
private def addListing(app: ApplicationInfoWrapper): Unit = listing.synchronized {
val attempt = app.attempts.head
val oldApp = try {
load(app.id)
} catch {
case _: NoSuchElementException =>
app
}
def compareAttemptInfo(a1: AttemptInfoWrapper, a2: AttemptInfoWrapper): Boolean = {
a1.info.startTime.getTime() > a2.info.startTime.getTime()
}
val attempts = oldApp.attempts.filter(_.info.attemptId != attempt.info.attemptId) ++
List(attempt)
val newAppInfo = new ApplicationInfoWrapper(
app.info,
attempts.sortWith(compareAttemptInfo))
listing.write(newAppInfo)
}
private def loadDiskStore(
dm: HistoryServerDiskManager,
appId: String,
attempt: AttemptInfoWrapper): KVStore = {
val metadata = new AppStatusStoreMetadata(AppStatusStore.CURRENT_VERSION)
// First check if the store already exists and try to open it. If that fails, then get rid of
// the existing data.
dm.openStore(appId, attempt.info.attemptId).foreach { path =>
try {
return KVUtils.open(path, metadata)
} catch {
case e: Exception =>
logInfo(s"Failed to open existing store for $appId/${attempt.info.attemptId}.", e)
dm.release(appId, attempt.info.attemptId, delete = true)
}
}
// At this point the disk data either does not exist or was deleted because it failed to
// load, so the event log needs to be replayed.
val status = fs.getFileStatus(new Path(logDir, attempt.logPath))
val isCompressed = EventLoggingListener.codecName(status.getPath()).flatMap { name =>
Try(CompressionCodec.getShortName(name)).toOption
}.isDefined
logInfo(s"Leasing disk manager space for app $appId / ${attempt.info.attemptId}...")
val lease = dm.lease(status.getLen(), isCompressed)
val newStorePath = try {
Utils.tryWithResource(KVUtils.open(lease.tmpPath, metadata)) { store =>
rebuildAppStore(store, status, attempt.info.lastUpdated.getTime())
}
lease.commit(appId, attempt.info.attemptId)
} catch {
case e: Exception =>
lease.rollback()
throw e
}
KVUtils.open(newStorePath, metadata)
}
private def createInMemoryStore(attempt: AttemptInfoWrapper): KVStore = {
val store = new InMemoryStore()
val status = fs.getFileStatus(new Path(logDir, attempt.logPath))
rebuildAppStore(store, status, attempt.info.lastUpdated.getTime())
store
}
private def loadPlugins(): Iterable[AppHistoryServerPlugin] = {
ServiceLoader.load(classOf[AppHistoryServerPlugin], Utils.getContextOrSparkClassLoader).asScala
}
/** For testing. Returns internal data about a single attempt. */
private[history] def getAttempt(appId: String, attemptId: Option[String]): AttemptInfoWrapper = {
load(appId).attempts.find(_.info.attemptId == attemptId).getOrElse(
throw new NoSuchElementException(s"Cannot find attempt $attemptId of $appId."))
}
private def deleteLog(log: Path): Unit = {
try {
fs.delete(log, true)
} catch {
case _: AccessControlException =>
logInfo(s"No permission to delete $log, ignoring.")
case ioe: IOException =>
logError(s"IOException in cleaning $log", ioe)
}
}
}
private[history] object FsHistoryProvider {
private val SPARK_HISTORY_FS_NUM_REPLAY_THREADS = "spark.history.fs.numReplayThreads"
private val APPL_START_EVENT_PREFIX = "{\"Event\":\"SparkListenerApplicationStart\""
private val APPL_END_EVENT_PREFIX = "{\"Event\":\"SparkListenerApplicationEnd\""
private val LOG_START_EVENT_PREFIX = "{\"Event\":\"SparkListenerLogStart\""
private val ENV_UPDATE_EVENT_PREFIX = "{\"Event\":\"SparkListenerEnvironmentUpdate\","
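  // Illustrative only (not part of the upstream source): event logs are newline-delimited JSON,
  // so a line matched by APPL_START_EVENT_PREFIX would look roughly like
  //   {"Event":"SparkListenerApplicationStart","App Name":"MyApp","App ID":"app-20180101120000-0001",...}
  // where "MyApp" and the app ID are made up and the exact field set depends on the Spark version
  // that wrote the log.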
/**
* Current version of the data written to the listing database. When opening an existing
* db, if the version does not match this value, the FsHistoryProvider will throw away
* all data and re-generate the listing data from the event logs.
*/
private[history] val CURRENT_LISTING_VERSION = 1L
}
private[history] case class FsHistoryProviderMetadata(
version: Long,
uiVersion: Long,
logDir: String)
/**
* Tracking info for event logs detected in the configured log directory. Tracks both valid and
* invalid logs (e.g. unparseable logs, recorded as logs with no app ID) so that the cleaner
* can know what log files are safe to delete.
*/
private[history] case class LogInfo(
@KVIndexParam logPath: String,
@KVIndexParam("lastProcessed") lastProcessed: Long,
appId: Option[String],
attemptId: Option[String],
fileSize: Long)
private[history] class AttemptInfoWrapper(
val info: ApplicationAttemptInfo,
val logPath: String,
val fileSize: Long,
val adminAcls: Option[String],
val viewAcls: Option[String],
val adminAclsGroups: Option[String],
val viewAclsGroups: Option[String])
private[history] class ApplicationInfoWrapper(
val info: ApplicationInfo,
val attempts: List[AttemptInfoWrapper]) {
@JsonIgnore @KVIndexParam
def id: String = info.id
@JsonIgnore @KVIndexParam("endTime")
def endTime(): Long = attempts.head.info.endTime.getTime()
@JsonIgnore @KVIndexParam("oldestAttempt")
def oldestAttempt(): Long = attempts.map(_.info.lastUpdated.getTime()).min
def toApplicationInfo(): ApplicationInfo = info.copy(attempts = attempts.map(_.info))
}
private[history] class AppListingListener(log: FileStatus, clock: Clock) extends SparkListener {
private val app = new MutableApplicationInfo()
private val attempt = new MutableAttemptInfo(log.getPath().getName(), log.getLen())
override def onApplicationStart(event: SparkListenerApplicationStart): Unit = {
app.id = event.appId.orNull
app.name = event.appName
attempt.attemptId = event.appAttemptId
attempt.startTime = new Date(event.time)
attempt.lastUpdated = new Date(clock.getTimeMillis())
attempt.sparkUser = event.sparkUser
}
override def onApplicationEnd(event: SparkListenerApplicationEnd): Unit = {
attempt.endTime = new Date(event.time)
attempt.lastUpdated = new Date(log.getModificationTime())
attempt.duration = event.time - attempt.startTime.getTime()
attempt.completed = true
}
override def onEnvironmentUpdate(event: SparkListenerEnvironmentUpdate): Unit = {
val allProperties = event.environmentDetails("Spark Properties").toMap
attempt.viewAcls = allProperties.get("spark.ui.view.acls")
attempt.adminAcls = allProperties.get("spark.admin.acls")
attempt.viewAclsGroups = allProperties.get("spark.ui.view.acls.groups")
attempt.adminAclsGroups = allProperties.get("spark.admin.acls.groups")
}
override def onOtherEvent(event: SparkListenerEvent): Unit = event match {
case SparkListenerLogStart(sparkVersion) =>
attempt.appSparkVersion = sparkVersion
case _ =>
}
def applicationInfo: Option[ApplicationInfoWrapper] = {
if (app.id != null) {
Some(app.toView())
} else {
None
}
}
private class MutableApplicationInfo {
var id: String = null
var name: String = null
var coresGranted: Option[Int] = None
var maxCores: Option[Int] = None
var coresPerExecutor: Option[Int] = None
var memoryPerExecutorMB: Option[Int] = None
def toView(): ApplicationInfoWrapper = {
val apiInfo = ApplicationInfo(id, name, coresGranted, maxCores, coresPerExecutor,
memoryPerExecutorMB, Nil)
new ApplicationInfoWrapper(apiInfo, List(attempt.toView()))
}
}
private class MutableAttemptInfo(logPath: String, fileSize: Long) {
var attemptId: Option[String] = None
var startTime = new Date(-1)
var endTime = new Date(-1)
var lastUpdated = new Date(-1)
var duration = 0L
var sparkUser: String = null
var completed = false
var appSparkVersion = ""
var adminAcls: Option[String] = None
var viewAcls: Option[String] = None
var adminAclsGroups: Option[String] = None
var viewAclsGroups: Option[String] = None
def toView(): AttemptInfoWrapper = {
val apiInfo = ApplicationAttemptInfo(
attemptId,
startTime,
endTime,
lastUpdated,
duration,
sparkUser,
completed,
appSparkVersion)
new AttemptInfoWrapper(
apiInfo,
logPath,
fileSize,
adminAcls,
viewAcls,
adminAclsGroups,
viewAclsGroups)
}
}
}
|
brad-kaiser/spark
|
core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala
|
Scala
|
apache-2.0
| 37,907 |
package com.atomist.rug.runtime.js
import com.atomist.project.archive.{AtomistConfig, DefaultAtomistConfig}
import com.atomist.rug.RugArchiveReader
import com.atomist.rug.TestUtils.contentOf
import com.atomist.rug.ts.TypeScriptBuilder
import com.atomist.source.{SimpleFileBasedArtifactSource, StringFileArtifact}
import org.scalatest.{FunSpec, Matchers}
class JavaScriptUtilsTest extends FunSpec with Matchers {
val atomistConfig: AtomistConfig = DefaultAtomistConfig
val parameterInherritance = StringFileArtifact(atomistConfig.handlersRoot + "/Handler.ts",
contentOf(this, "HandlerWithInherritedParameters.ts"))
it("should inherit parameters from the prototype") {
val rugArchive = TypeScriptBuilder.compileWithExtendedModel(
SimpleFileBasedArtifactSource(parameterInherritance))
val rugs = RugArchiveReader(rugArchive)
val handler = rugs.commandHandlers.head
val params = handler.parameters
assert(params.size === 3)
assert(params(1).name === "foo")
assert(params(1).description === "child")
assert(params(2).name === "bar")
assert(params.head.name === "baz")
assert(params.head.description === "dup")
}
it("should inherit mapped parameters from the prototype") {
val rugArchive = TypeScriptBuilder.compileWithExtendedModel(
SimpleFileBasedArtifactSource(parameterInherritance))
val rugs = RugArchiveReader(rugArchive)
val handler = rugs.commandHandlers.head
val params = handler.mappedParameters
assert(params.size === 3)
assert(params.head.localKey === "really")
assert(params.head.foreignKey === "manual")
assert(params(1).localKey === "blah")
assert(params(1).foreignKey === "blah-child")
assert(params(2).localKey === "quz")
assert(params(2).foreignKey === "quz-parent")
}
}
|
atomist/rug
|
src/test/scala/com/atomist/rug/runtime/js/JavaScriptUtilsTest.scala
|
Scala
|
gpl-3.0
| 1,802 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.api.python
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.sql.catalyst.analysis.FunctionRegistry
import org.apache.spark.sql.catalyst.expressions.ExpressionInfo
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.execution.arrow.ArrowConverters
import org.apache.spark.sql.types.DataType
private[sql] object PythonSQLUtils {
def parseDataType(typeText: String): DataType = CatalystSqlParser.parseDataType(typeText)
// This is needed when generating SQL documentation for built-in functions.
def listBuiltinFunctionInfos(): Array[ExpressionInfo] = {
FunctionRegistry.functionSet.flatMap(f => FunctionRegistry.builtin.lookupFunction(f)).toArray
}
/**
* Python Callable function to convert ArrowPayloads into a [[DataFrame]].
*
* @param payloadRDD A JavaRDD of ArrowPayloads.
* @param schemaString JSON Formatted Schema for ArrowPayloads.
* @param sqlContext The active [[SQLContext]].
* @return The converted [[DataFrame]].
*/
def arrowPayloadToDataFrame(
payloadRDD: JavaRDD[Array[Byte]],
schemaString: String,
sqlContext: SQLContext): DataFrame = {
ArrowConverters.toDataFrame(payloadRDD, schemaString, sqlContext)
}
}
|
bravo-zhang/spark
|
sql/core/src/main/scala/org/apache/spark/sql/api/python/PythonSQLUtils.scala
|
Scala
|
apache-2.0
| 2,123 |
package com.mgu.csp
/**
* This `VariableOrdering` selects the unassigned [[Variable]] that has the fewest legal values
* left. It is also known as the "most-constrained value" or "fail-first" heuristic, because it
* select a variable that is most likely to cause a failure soon, thereby pruning the search tree.
*
* @author Markus Günther ([email protected])
*/
class MinimumRemainingValue[+A] extends VariableOrdering[A] {
override def selectUnassignedVariable[B >: A](assignment: Assignment[B]): Option[Variable[B]] =
assignment
.unassignedVariables()
.sortWith(mostConstrainedOf)
.headOption
private def mostConstrainedOf[B >: A](a: Variable[B], b: Variable[B]) =
(a.domain.size - b.domain.size) < 0
}
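// A minimal usage sketch (illustrative only; `assignment` stands for any fully initialised
// Assignment[Int] built elsewhere with this library):
//
//   val ordering = new MinimumRemainingValue[Int]
//   val next: Option[Variable[Int]] = ordering.selectUnassignedVariable(assignment)
//   // `next` is the unassigned variable with the fewest remaining domain values, if any remain.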
|
mguenther/csp-solver-scala
|
csp-core/src/main/scala/com/mgu/csp/MinimumRemainingValue.scala
|
Scala
|
mit
| 752 |
/*
* Copyright 2015 Shao Tian-Chen (Austin)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.au9ustine.puzzles.s99
/**
*
* Problem 15: Duplicate the elements of a list a given number of times.
* Example:
*
* scala> duplicateN(3, List('a, 'b, 'c, 'c, 'd))
* res0: List[Symbol] = List('a, 'a, 'a, 'b, 'b, 'b, 'c, 'c, 'c, 'c, 'c, 'c, 'd, 'd, 'd)
*/
object P15 {
def duplicateN[A](n: Int, lst: List[A]): List[A] = lst.flatMap(x => List.fill(n)(x))
}
|
au9ustine/org.au9ustine.puzzles.s99
|
src/main/scala/org/au9ustine/puzzles/s99/P15.scala
|
Scala
|
apache-2.0
| 983 |
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package mcts
import app.runutils.Globals
import com.mongodb.casbah.Imports.{BasicDBList, BasicDBObject}
import com.mongodb.casbah.MongoClient
import logic.Examples.Example
import logic.Theory
import com.mongodb.casbah.Imports._
import com.typesafe.scalalogging.LazyLogging
import mcts.HillClimbing._
/**
* Created by nkatz on 9/22/17.
*/
/**
* This is just a test to run MCTS with the MathExchange data
*/
object MathExchange extends App with LazyLogging {
Globals.glvalues("perfect-fit") = "false"
val chunkSize = 10
val globals = new Globals("/home/nkatz/dev/MathExchange-for-OLED")
val data = {
val mongoClient = MongoClient()
val collection = mongoClient("MathExchange")("examples")
val exmpls = collection.find().foldLeft(List[Example]()){ (accum, dbObj) =>
val time = dbObj.asInstanceOf[BasicDBObject].get("time").toString
val annotation = dbObj.get("annotation").asInstanceOf[BasicDBList].toList.map(x => x.toString)
val narrative = dbObj.get("narrative").asInstanceOf[BasicDBList].toList.map(x => x.toString)
accum :+ new Example(annot = annotation, nar = narrative, _time = time)
}
val chunked = exmpls.sliding(chunkSize, chunkSize - 1).toList
chunked map { x =>
val merged = x.foldLeft(Example()) { (z, y) =>
new Example(annot = z.annotation ++ y.annotation, nar = z.narrative ++ y.narrative, _time = x.head.time)
}
merged
}
}
val bottomTheory = HillClimbing.constructBottomTheory(data.toIterator, globals)
println(bottomTheory.tostring)
val iterations = 10
val exploreRate = 1.0 / Math.sqrt(2)
val f1 = (t: Theory) => t.stats._6
val rootNode = RootNode()
generateAndScoreChildren(rootNode, bottomTheory, globals, data, 0)
val bestNode = (1 to iterations).foldLeft(rootNode.asInstanceOf[TreeNode]) { (theorySearchedLast, iterCount) =>
logger.info(s"Iteration $iterCount")
val bestChild = rootNode.descendToBestChild(exploreRate)
logger.info(s"Best leaf node selected (MCTS score: ${bestChild.getMCTSScore(exploreRate)} | id: ${bestChild.id}):\n${bestChild.theory.tostring}")
val newNodes = generateAndScoreChildren(bestChild, bottomTheory, globals, data, iterCount)
val bestChildNode = newNodes.maxBy(x => f1(x.theory))
bestChildNode.propagateReward(f1(bestChildNode.theory))
if (theorySearchedLast.theory == Theory()) {
logger.info(s"Best theory so far (F1-score ${f1(bestChildNode.theory)} | id: ${bestChildNode.id}):\n${bestChildNode.theory.tostring}")
bestChildNode
} else {
if (f1(bestChildNode.theory) > f1(theorySearchedLast.theory)) {
logger.info(s"Best theory so far (F1-score ${f1(bestChildNode.theory)} | id: ${bestChildNode.id}):\n${bestChildNode.theory.tostring}")
bestChildNode //.theory
} else {
logger.info(s"Best theory so far (F1-score ${f1(theorySearchedLast.theory)} | id: ${theorySearchedLast.id}):\n${theorySearchedLast.theory.tostring}")
theorySearchedLast
}
}
}
logger.info("Done")
logger.info("Cross-validation...")
val theory_ = Theory(bestNode.theory.clauses).compress
crossVal(theory_, data.toIterator, "", globals) // generate new theory to clear the stats counter
logger.info(s"F1-score on test set: ${theory_.stats._6}")
def generateAndScoreChildren(fromNode: TreeNode, bottomTheory: Theory, gl: Globals, data: List[Example], iterationCount: Int) = {
require(fromNode.isLeafNode())
val newTheories = generateChildrenNodes(fromNode.theory, bottomTheory, data.toIterator, gl)
scoreNodes(newTheories, gl, data)
// The depth is used in the id generation of the children nodes.
val depth = fromNode.getDepth() + 1
val newNodes = newTheories.foldLeft(1, Vector[InnerNode]()) { (x, theory) =>
val (newNodeCount, newNodes) = (x._1, x._2)
val id = s"$iterationCount-$depth-$newNodeCount"
val newNode = InnerNode(id, theory, fromNode)
(newNodeCount + 1, newNodes :+ newNode)
}._2
newNodes foreach { node =>
// Add each theory's f1-score to the corresponding node's rewards vector
// and increment the node's visits counter.
node.updateRewards(node.theory.stats._6)
node.incrementVisits()
// Finally, add the new node as a child to the parent node.
fromNode.addChild(node)
}
/** FOR DEBUGGING ONLY */
//println(newNodes.map(x => x.theory.tostring + " " + x.theory.stats._6).foreach(x => println(x+"\n")))
newNodes
}
def scoreNodes(children: Vector[Theory], gl: Globals, data: List[Example]) = {
logger.info("Scoring children nodes")
children.foreach { childNode =>
crossVal(childNode, data.toIterator, "", gl)
}
//children.foreach(x => println(x.tostring + " " + x.stats._6))
}
}
|
nkatzz/OLED
|
src/main/scala/mcts/MathExchange.scala
|
Scala
|
gpl-3.0
| 5,490 |
package im.tox.antox.callbacks
import android.content.Context
import im.tox.antox.data.AntoxDB
import im.tox.antox.tox.ToxSingleton
import im.tox.antox.utils.AntoxFriend
import im.tox.tox4j.core.callbacks.FriendNameCallback
import scala.None
//remove if not needed
object AntoxOnNameChangeCallback {
private val TAG = "im.tox.antox.TAG"
}
class AntoxOnNameChangeCallback(private var ctx: Context) extends FriendNameCallback {
override def friendName(friendNumber: Int, name: Array[Byte]): Unit = {
val nameString = new String(name, "UTF-8")
ToxSingleton.getAntoxFriend(friendNumber) match {
case Some(friend) => friend.setName(nameString)
case None => throw new Exception("Friend not found.")
}
val db = new AntoxDB(ctx)
db.updateFriendName(ToxSingleton.getIdFromFriendNumber(friendNumber), nameString)
db.close()
ToxSingleton.updateFriendsList(ctx)
}
}
|
0xPoly/Antox
|
app/src/main/scala/im/tox/antox/callbacks/AntoxOnNameChangeCallback.scala
|
Scala
|
gpl-3.0
| 906 |
/*******************************************************************************
Copyright (c) 2012-2014, KAIST, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.concolic
import _root_.java.util.{List => JList}
import kr.ac.kaist.jsaf.scala_src.useful.Lists._
class TypeInfo(t: String) {
val paramType = t
def getType() = paramType
var constructorNames = List[String]()
def getConstructors() = constructorNames
  def addConstructors(x: List[String]) = {
    // Append and de-duplicate; the original discarded the result of `distinct`.
    constructorNames = (constructorNames ::: x).distinct
  }
var properties = List[String]()
def getProperties() = properties
  def setProperties(x: List[String]) = {
    // Append and de-duplicate; the original discarded the result of `distinct`.
    properties = (properties ::: x).distinct
  }
def getJavaConstructor(): String = constructorNames(0)
def getJavaProperties(): JList[String] = toJavaList(properties)
}
|
darkrsw/safe
|
src/main/scala/kr/ac/kaist/jsaf/concolic/TypeInfo.scala
|
Scala
|
bsd-3-clause
| 1,070 |
package cakesolutions
import epic.preprocess.MLSentenceSegmenter
trait EnglishParser {
def parseWords(text: String): IndexedSeq[IndexedSeq[String]] = {
val sentenceSplitter = MLSentenceSegmenter.bundled().get // load english by default
val tokenizer = new epic.preprocess.TreebankTokenizer()
sentenceSplitter(text)
.map(tokenizer)
.toIndexedSeq
}
}
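// A minimal usage sketch (illustrative only; the object name and input text are made up):
//
//   object DemoParser extends EnglishParser
//   val sentences = DemoParser.parseWords("Epic splits the text. Then each sentence is tokenized.")
//   // sentences(0) would hold the tokens of the first detected sentence.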
|
carlpulley/concordance
|
src/main/scala/cakesolutions/EnglishParser.scala
|
Scala
|
gpl-2.0
| 382 |
package org.jetbrains.plugins.scala.lang.psi.implicits
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult
final case class ExtensionMethodApplication(resultType: ScType,
implicitParameters: Seq[ScalaResolveResult] = Seq.empty)
|
JetBrains/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/implicits/ExtensionMethodApplication.scala
|
Scala
|
apache-2.0
| 346 |
/*
*/
package see
import org.junit._
import org.junit.Before
import org.junit.Assert._
/** Tests table handling.
*/
//@Ignore
class TableTest extends TestCase {
@Before
override def setUp() {
//TestCase.
super.setUp()
result("ta = table(0, 10->100)")
result("tb = table(0, 10->200)")
}
@Test
def testParsing() {
println("Table Parsing")
parse("table()")
parse("table(a, b, c)")
parse("table(a->x, b->y, c)")
parse("""table(0 -> 1L, 1.0 -> "abc", 10L->(1,2,3))""")
shouldFail("Failed to catch synax error."){
node = parse("table(1, 2, 2, )")
println (node)
}
}
@Test
def testComposition() {
println("Table Composition")
expect("ta@4", 40 )
shouldFail("Undefined key."){
expect("ta@true", 0 )
}
expect("table(1->4, 2->8, yy = 3->16)@0", 4)
expectTrue("yy == 3->16")
expectTrue("type(yy) == Assoc")
expectTrue("type(ta) == Table")
expectTrue("table(yy) == table(3->16)")
expectTrue("t1 = table(1->1, 2->2, 3->10.0)")
expect("len(t1)", 3)
result("t3 = table(1->1, 2->ta, 3->tb, 4->6)")
expect("len(t3)", 4)
shouldFail("Did not catch sequence error."){
result("t = table(1->1 , 2->3, -1 -> 5)")
}
shouldFail("Did not catch undefined."){
expectTrue("t = table(1->1 , 2->x)")
}
scope.set("x", 111)
println(result())
expect("t@2", 111)
expectTrue("defined(table(1->1, 1->x))")
expectFalse("defined(table(1->1, 2->y))")
shouldFail("Did not catch undefined."){
result("t2 = table(1->1 , y->2)")
}
result("t2 = table(1->1 , x->2)")
expect("t2@111", 2)
expectTrue("defined(table(1->1, x->2))")
expectFalse("defined(table(1->1, y->2))")
}
@Test
def testInterpolation() {
println("Table Interpolation")
result("t = table(-10->-5, 0, 0->10, 10 -> 1, 100 -> 100 )")
expect("t@-100", -5 )
expect("t@-10", -5 )
expect("t@-1", -0.5 )
expect("[email protected]", -0.00005 )
expect("t@0", 10.0 )
expect("t@1", 9.1 )
expect("t@10", 1.0 )
expect("t@11", 1 + (1.0 / 90) * 99 )
expect("t@100", 100 )
expect("t@1e500L", 100 )
}
@Test
def testComparison() {
println("Table Comparison")
expectTrue("table() == table()")
expectFalse("table() == ()")
expectFalse("table(1,2,3) == (1,2,3)")
expectFalse("table(1,10) == ta")
expectFalse("table(1->10) == table(2->10)")
expectFalse("ta == tb")
expectFalse("ta == table()")
expectFalse("ta == table(0, 10->1000)")
expectTrue("ta == table(0->0, 10->100)")
expectTrue("ta * 2 == tb")
expectTrue("ta != tb")
expectTrue("ta != table()")
expectTrue("ta != table(0, 10->1000)")
expectFalse("ta != table(0->0, 10->100)")
expectFalse("ta * 2 != tb")
// other relations work, but have no defined meaning
}
@Test
def testDefine() {
println("Table, Defined")
var prog = """
f(x) := { x * a};
g(x) := { x * 2};
tf = table(0, 0 -> f);
tg = table(0, 0 -> g)
"""
expectFalse(prog)
expectTrue(" defined(ta)")
expectTrue(" defined(tf)")
expectTrue(" defined(tg)")
expectTrue(" defined(ta(1))")
expectFalse(" defined(tf(1))")
expectTrue(" defined(tg(1))")
scope.set("a", 1)
expectTrue(" defined(tf(1))")
}
@Test
def testSubscript() {
println("Table Subscripting")
expect("ta@0", 0)
expect("ta@1", 10)
expect("tb@1", 20)
expect("ta@-1", 0)
expect("tb@-1", 0)
expect("ta@10", 100)
expect("ta@100", 100)
expect("[email protected]", 55.0)
expect("ta@2L", BI(20))
expectTrue("defined(ta@1)")
expectTrue("defined(table()@0)")
result("tz = table(1)")
expectTrue("defined(tz)")
expectTrue("defined(tz@0)")
expectFalse("defined(tz@false)")
expectFalse("defined(ta@index)")
scope.set("index", 2)
expectTrue("defined(ta@index)")
expect("ta@index", 20)
// Table with 2 dimensions:
result("tx = table(0 -> ta, 2 -> table(0, 10 -> 20), 3)")
expect("tx@1", 10) // one dim. interpolation, second table ignored
expect("[email protected]", 5.0) // one dim. interpolation, first table ignored
expect("tx@(1,4)", 24) // two dim.: both tables used
}
@Test
def testArith() {
println("Table Arithmetics")
expectTrue("1 + ta == table(0-> 1, 10 -> 101)")
expectTrue("ta - 1 == table(0-> -1, 10 -> 99)")
expectTrue("tx = ta * 2; tx == tb")
expectTrue("tx / 2 == ta")
expectTrue("-ta == table(0-> 0, 10 -> -100)")
shouldFail("Illegal operation."){
result("ta + tb")
}
expectTrue("ta + (1,2) == table(0-> (1,2), 10 -> (101,102))")
println(result("tx = (1,2) - ta"))
expectTrue("tx == table(0-> (1, 2), 10 -> (-99, -98))")
}
@Test
def testConcatenation() {
println("Table Concatenation")
// Concatenation
expectTrue("ta ++ table() == ta")
shouldFail("Illegal operand."){
result("ta ++ ()")
}
shouldFail("Cannot concat overlaps."){
result("tx = ta ++ tb")
}
expectTrue("ta ++ table(11->0) == table(0-> 0, 10 -> 100, 11 -> 0)")
shouldFail("Cannot concat overlaps."){
result("ta ++ table(0, -1)")
}
expectTrue("table(0 -> -1) ++ ta == table(0-> -1, 0 -> 0, 10 -> 100)")
expectTrue("-1 ++ ta == table(-1-> -1, 0 -> 0, 10 -> 100)")
expectTrue("ta ++ (11->0) == table(0-> 0, 10 -> 100, 11 -> 0)")
// !! but !!
    // !! although consistent, probably not what was intended
expectTrue("t = (-1 -> -10) ++ ta; type( t ) == Assoc")
expectTrue("table(-1 -> -10) ++ ta == table(-1 -> -10, 0 -> 0, 10 -> 100)")
expectTrue("ta ++ 1000 == table(0-> 0, 10 -> 100, 1000 -> 1000)")
expect("vx = ta +++ tb; len(vx)", 2)
expectTrue("type(vx) == Vector")
expectTrue("vx@0 == ta")
expectTrue("vx@1 == tb")
}
@Test
def testCall() {
println("Table Calling")
// Once in place, a table works more like a function than a container.
// Consequently, a table call is nearly identical to a subscription.
// The only difference is that subscription will not work, if the table
// contains a global reference, that hasn't been resolved yet.
// an empty table returns zero, whatever arguments
expectTrue("table()(Int) == 0")
// If elements are not callable, linear interpolation is used.
// Excessive arguments are ignored.
expectTrue("table(1,2,3)(1.5,true, Value) == 1.5")
expect("ta(1,true, Value)", 10)
expect("ta(-1)", 0)
expect("ta(10)", 100)
expect("ta(100)", 100)
// If a table contains a function it will be called instead of interpolation:
// Note that we have to supply some upper limit of the domain!
result("f(x) := {2 * x}; t = table(-1 -> 0, 0 -> f, 100->f(100))")
expect("t(-0.5)", 0)
expect("t(90)", 180)
expect("t(100)", 200)
expect("t(200)", 200)
// we may use a table to transform function calls into subscript syntax:
expect("t@10", 20)
    // 2-dim table using functions, somewhat contrived
var prog = """
g(x,y) := { 2 * y + x*x};
t = table(0,
0 -> table(0, 0 ->{g(0,_)}, 11),
10 -> table(0, 0 ->{g(10,_)}, 11),
10)
"""
result(prog)
expect("t(-1, -1)", 0)
expect("t(20, 20)", 10)
expect("t(5, 20)", 11)
expect("t(4,5)", 50) // interpolation between both curves, not g(4,5)!
}
}
|
RayRacine/scee
|
src/test/scala/see/TableTest.scala
|
Scala
|
bsd-3-clause
| 7,630 |
package io.buoyant.router.h2
import com.twitter.conversions.DurationOps._
import com.twitter.finagle.buoyant.h2.{Request, Response, param => h2Param}
import com.twitter.finagle.service.Retries
import com.twitter.finagle.{ServiceFactory, Stack, Stackable, param}
import com.twitter.util.Duration
import io.buoyant.router
import io.buoyant.router.ClassifiedRetries.Backoffs
object ClassifiedRetries {
val role = router.ClassifiedRetries.role
case class BufferSize(requestBufferSize: Long, responseBufferSize: Long)
implicit object BufferSize extends Stack.Param[BufferSize] {
override def default =
BufferSize(ClassifiedRetryFilter.DefaultBufferSize, ClassifiedRetryFilter.DefaultBufferSize)
}
case class ClassificationTimeout(timeout: Duration)
implicit object ClassificationTimeout extends Stack.Param[ClassificationTimeout] {
override def default = ClassificationTimeout(100.millis)
}
/**
* A stack module that installs a RetryFilter that uses the stack's
* ResponseClassifier.
*/
def module: Stackable[ServiceFactory[Request, Response]] = {
new Stack.Module[ServiceFactory[Request, Response]] {
val role = ClassifiedRetries.role
val description = "Retries requests that are classified to be retryable"
override def parameters: Seq[Stack.Param[_]] = Seq(
implicitly[Stack.Param[Backoffs]],
implicitly[Stack.Param[h2Param.H2Classifier]],
implicitly[Stack.Param[Retries.Budget]],
implicitly[Stack.Param[ClassificationTimeout]],
implicitly[Stack.Param[BufferSize]],
implicitly[Stack.Param[param.HighResTimer]],
implicitly[Stack.Param[param.Stats]]
)
def make(
params: Stack.Params,
next: Stack[ServiceFactory[Request, Response]]
): Stack[ServiceFactory[Request, Response]] = {
val filter = new ClassifiedRetryFilter(
params[param.Stats].statsReceiver,
params[h2Param.H2Classifier].classifier,
params[Backoffs].backoff,
params[Retries.Budget].retryBudget,
params[ClassificationTimeout].timeout,
params[BufferSize].requestBufferSize,
params[BufferSize].responseBufferSize
)(params[param.HighResTimer].timer)
Stack.leaf(role, filter.andThen(next.make(params)))
}
}
}
}
|
linkerd/linkerd
|
router/h2/src/main/scala/io/buoyant/router/h2/ClassifiedRetries.scala
|
Scala
|
apache-2.0
| 2,331 |
package me.fornever.platonus
sealed trait Word
case class PhraseBegin() extends Word
case class PhraseEnd() extends Word
case class OrdinarWord(word: String) extends Word
|
ForNeVeR/platonus
|
src/main/scala/me/fornever/platonus/Key.scala
|
Scala
|
mit
| 172 |
package com.twitter.util
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class TryTest extends FunSuite {
class MyException extends Exception
val e = new Exception("this is an exception")
test("Try.apply(): should catch exceptions and lift into the Try type") {
assert(Try[Int](1) == Return(1))
assert(Try[Int] { throw e } == Throw(e))
}
test("Try.apply(): should propagate fatal exceptions") {
intercept[AbstractMethodError] {
Try[Int] { throw new AbstractMethodError }
}
}
test("Try.withFatals works like Try.apply, but can handle fatals") {
val nonFatal = new Exception
val fatal = new AbstractMethodError
val handler: PartialFunction[Throwable, Try[Int]] = {
case e: AbstractMethodError => Throw(e)
case e: Exception => Return(1)
}
// Works like Try.apply for non fatal errors.
assert(Try.withFatals(1)(handler) == Return(1))
assert(Try.withFatals(throw nonFatal)(handler) == Throw(nonFatal))
// Handles fatal errors
assert(Try.withFatals(throw fatal)(handler) == Throw(fatal))
// Unhandled fatals are propagated
intercept[NoClassDefFoundError] {
Try.withFatals(throw new NoClassDefFoundError)(handler)
}
}
test("Try.throwable: should return e for Throw:s") {
assert(Throw(e).throwable == e)
}
test("Try.throwable: should throw IllegalStateException for Return:s") {
intercept[IllegalStateException] {
Return(1).throwable
}
}
test("Try.rescue") {
val result1 = Return(1) rescue { case _ => Return(2) }
val result2 = Throw(e) rescue { case _ => Return(2) }
val result3 = Throw(e) rescue { case _ => Throw(e) }
assert(result1 == Return(1))
assert(result2 == Return(2))
assert(result3 == Throw(e))
}
test("Try.getOrElse") {
assert(Return(1).getOrElse(2) == 1)
assert(Throw(e).getOrElse(2) == 2)
}
test("Try.apply") {
assert(Return(1)() == 1)
intercept[Exception] { Throw[Int](e)() }
}
test("Try.map: when there is no exception") {
val result1 = Return(1).map(1 + _)
val result2 = Throw[Int](e).map(1 + _)
assert(result1 == Return(2))
assert(result2 == Throw(e))
}
test("Try.map: when there is an exception") {
val result1 = Return(1) map(_ => throw e)
assert(result1 == Throw(e))
val e2 = new Exception
val result2 = Throw[Int](e) map(_ => throw e2)
assert(result2 == Throw(e))
}
test("Try.flatMap: when there is no exception") {
val result1 = Return(1) flatMap(x => Return(1 + x))
val result2 = Throw[Int](e) flatMap(x => Return(1 + x))
assert(result1 == Return(2))
assert(result2 == Throw(e))
}
test("Try.flatMap: when there is an exception") {
val result1 = Return(1).flatMap[Int](_ => throw e)
assert(result1 == Throw(e))
val e2 = new Exception
val result2 = Throw[Int](e).flatMap[Int](_ => throw e2)
assert(result2 == Throw(e))
}
test("Try.exists: should return true when predicate passes for a Return value") {
val t = Return(4)
assert(t.exists(_ > 0) == true)
}
test("Try.exists: should return false when predicate doesn't pass for a Return value") {
val t = Return(4)
assert(t.exists(_ < 0) == false)
}
test("Try.exists: should return false for Throw") {
val t = Throw(new Exception)
assert(t.exists(_ => true) == false)
}
test("Try.flatten: is a Return(Return)") {
assert(Return(Return(1)).flatten == Return(1))
}
test("Try.flatten: is a Return(Throw)") {
val e = new Exception
assert(Return(Throw(e)).flatten == Throw(e))
}
test("Try.flatten: is a Throw") {
val e = new Exception
assert(Throw[Try[Int]](e).flatten == Throw(e))
}
test("Try in for comprehension with no Throw values") {
val result = for {
i <- Return(1)
j <- Return(1)
} yield (i + j)
assert(result == Return(2))
}
test("Try in for comprehension with Throw values throws before") {
val result = for {
i <- Throw[Int](e)
j <- Return(1)
} yield (i + j)
assert(result == Throw(e))
}
test("Try in for comprehension with Throw values throws after") {
val result = for {
i <- Return(1)
j <- Throw[Int](e)
} yield (i + j)
assert(result == Throw(e))
}
test("Try in for comprehension with Throw values returns the FIRST Throw") {
val e2 = new Exception
val result = for {
i <- Throw[Int](e)
j <- Throw[Int](e2)
} yield (i + j)
assert(result == Throw(e))
}
test("Try.collect: with an empty Seq") {
assert(Try.collect(Seq.empty) == Return(Seq.empty))
}
test("Try.collect: with a Throw") {
assert(Try.collect(Seq(Return(1), Throw(e))) == Throw(e))
}
test("Try.collect: with Returns") {
assert(Try.collect(Seq(Return(1), Return(2))) == Return(Seq(1, 2)))
}
test("Try.orThrow: returns on Some") {
val exc = new Exception("boom!")
assert(Try.orThrow(Some("OK")) { () => exc } == Return("OK"))
}
test("Try.orThrow: fails on empty on Some") {
val exc = new Exception("boom!")
assert(Try.orThrow(None) { () => exc } == Throw(exc))
}
test("Try.orThrow: OK if you throw") {
val exc = new Exception("boom!")
assert(Try.orThrow(None) { () => throw exc } == Throw(exc))
}
test("OrThrow implicits in nicely") {
import Try._
val exc = new Exception("boom!")
assert(Some("OK").orThrow { exc } == Return("OK"))
}
}
|
edombowsky/util
|
util-core/src/test/scala/com/twitter/util/TryTest.scala
|
Scala
|
apache-2.0
| 5,517 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui
import scala.xml.Node
import org.apache.spark.SparkFunSuite
class PagedDataSourceSuite extends SparkFunSuite {
test("basic") {
val dataSource1 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
assert(dataSource1.pageData(1) === PageData(3, (1 to 2)))
val dataSource2 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
assert(dataSource2.pageData(2) === PageData(3, (3 to 4)))
val dataSource3 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
assert(dataSource3.pageData(3) === PageData(3, Seq(5)))
    // If the page number is greater than the maximum page, fall back to the last page
val dataSource4 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
assert(dataSource4.pageData(4) === PageData(3, Seq(5)))
// If the page number is less than or equal to zero, fall back to the first page
val dataSource5 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
assert(dataSource5.pageData(0) === PageData(3, 1 to 2))
}
}
class PagedTableSuite extends SparkFunSuite {
test("pageNavigation") {
// Create a fake PagedTable to test pageNavigation
val pagedTable = new PagedTable[Int] {
override def tableId: String = ""
override def tableCssClass: String = ""
override def dataSource: PagedDataSource[Int] = null
override def pageLink(page: Int): String = page.toString
override def headers: Seq[Node] = Nil
override def row(t: Int): Seq[Node] = Nil
override def pageSizeFormField: String = "pageSize"
override def pageNumberFormField: String = "page"
override def goButtonFormPath: String = ""
}
assert((pagedTable.pageNavigation(1, 10, 1).head \\ "li").map(_.text.trim) === Seq("1"))
assert(
(pagedTable.pageNavigation(1, 10, 2).head \\ "li").map(_.text.trim) === Seq("1", "2", ">"))
assert(
(pagedTable.pageNavigation(2, 10, 2).head \\ "li").map(_.text.trim) === Seq("<", "1", "2"))
assert((pagedTable.pageNavigation(1, 10, 100).head \\ "li").map(_.text.trim) ===
(1 to 10).map(_.toString) ++ Seq(">", ">>"))
assert((pagedTable.pageNavigation(2, 10, 100).head \\ "li").map(_.text.trim) ===
Seq("<") ++ (1 to 10).map(_.toString) ++ Seq(">", ">>"))
assert((pagedTable.pageNavigation(100, 10, 100).head \\ "li").map(_.text.trim) ===
Seq("<<", "<") ++ (91 to 100).map(_.toString))
assert((pagedTable.pageNavigation(99, 10, 100).head \\ "li").map(_.text.trim) ===
Seq("<<", "<") ++ (91 to 100).map(_.toString) ++ Seq(">"))
assert((pagedTable.pageNavigation(11, 10, 100).head \\ "li").map(_.text.trim) ===
Seq("<<", "<") ++ (11 to 20).map(_.toString) ++ Seq(">", ">>"))
assert((pagedTable.pageNavigation(93, 10, 97).head \\ "li").map(_.text.trim) ===
Seq("<<", "<") ++ (91 to 97).map(_.toString) ++ Seq(">"))
}
test("pageNavigation with different id") {
val pagedTable = new PagedTable[Int] {
override def tableId: String = "testTable"
override def tableCssClass: String = ""
override def dataSource: PagedDataSource[Int] = null
override def pageLink(page: Int): String = ""
override def headers: Seq[Node] = Nil
override def row(t: Int): Seq[Node] = Nil
override def pageSizeFormField: String = ""
override def pageNumberFormField: String = ""
override def goButtonFormPath: String = ""
}
val defaultIdNavigation = pagedTable.pageNavigation(1, 10, 2).head \\ "form"
assert(defaultIdNavigation \@ "id" === "form-testTable-page")
val customIdNavigation = pagedTable.pageNavigation(1, 10, 2, "customIdTable").head \\ "form"
assert(customIdNavigation \@ "id" === "form-customIdTable-page")
assert(defaultIdNavigation !== customIdNavigation)
}
}
private[spark] class SeqPagedDataSource[T](seq: Seq[T], pageSize: Int)
extends PagedDataSource[T](pageSize) {
override protected def dataSize: Int = seq.size
override protected def sliceData(from: Int, to: Int): Seq[T] = seq.slice(from, to)
}
|
maropu/spark
|
core/src/test/scala/org/apache/spark/ui/PagedTableSuite.scala
|
Scala
|
apache-2.0
| 4,830 |
package spk
import Generators._
import spk.model._
import scala.util.Random
case object Config {
val stringLength = 10
val multivaluedLength = 5
val stringGroupLength = 5
val stringGroupCount = 10
val intGroup = 10
val rows = 100
val groups = 10
val stringGroups: Seq[String] = (readableGroups(stringGroupLength, stringGroupCount) ~>)
def filterable(implicit rand: Random): Seq[Filterable] = Seq(
// Use trie fields for range queries.
RangeFilterable(Field("int1_ti"), int),
RangeFilterable(Field("double1_td"), double),
RangeFilterable(Field("date1_tdt"), formattedDate),
ExactFilterable(Field("str1_group_s"), oneOf(stringGroups)),
ExactFilterable(Field("str2_group_s"), oneOf(stringGroups)),
ExactFilterable(Field("int1_group_i"), int(intGroup)),
ExactFilterable(Field("int2_group_i"), int(intGroup)),
MultiFilerable(Field("str1_group_ss"), seq(int(1, multivaluedLength), oneOf(stringGroups))),
MultiFilerable(Field("str2_group_ss"), seq(int(1, multivaluedLength), oneOf(stringGroups)))
)
def sortable: Seq[Field] = Seq(
Field("str1_s"),
Field("str2_s"),
Field("int1_i"),
Field("double1_d"),
Field("date1_dt"),
Field("int1_ti"),
Field("double1_td"),
Field("date1_tdt")
)
def groupable: Seq[Field] = Seq(
Field("str1_group_s"),
Field("str2_group_s"),
Field("int1_group_i"),
Field("int2_group_i")
)
def facetable: Seq[Field] = groupable ++ Seq(
Field("str1_group_ss"),
Field("str2_group_ss")
)
}
|
eqw3rty/solr-performance-kit
|
src/main/scala/spk/Config.scala
|
Scala
|
mit
| 1,540 |
import sbt._
import Keys._
import sbt.contraband.ContrabandPlugin.autoImport._
object Dependencies {
// WARNING: Please update the Scala versions in PluginCross.scala too
val scala212 = "2.12.15"
val scala213 = "2.13.6"
val checkPluginCross = settingKey[Unit]("Make sure scalaVersion match up")
val baseScalaVersion = scala212
def nightlyVersion: Option[String] =
sys.env.get("BUILD_VERSION") orElse sys.props.get("sbt.build.version")
// sbt modules
private val ioVersion = nightlyVersion.getOrElse("1.6.0-M2")
private val lmVersion =
sys.props.get("sbt.build.lm.version").orElse(nightlyVersion).getOrElse("1.6.0-M2")
val zincVersion = nightlyVersion.getOrElse("1.6.0-M2")
private val sbtIO = "org.scala-sbt" %% "io" % ioVersion
private val libraryManagementCore = "org.scala-sbt" %% "librarymanagement-core" % lmVersion
private val libraryManagementIvy = "org.scala-sbt" %% "librarymanagement-ivy" % lmVersion
val launcherVersion = "1.3.3"
val launcherInterface = "org.scala-sbt" % "launcher-interface" % launcherVersion
val rawLauncher = "org.scala-sbt" % "launcher" % launcherVersion
val testInterface = "org.scala-sbt" % "test-interface" % "1.0"
val ipcSocket = "org.scala-sbt.ipcsocket" % "ipcsocket" % "1.3.1"
private val compilerInterface = "org.scala-sbt" % "compiler-interface" % zincVersion
private val compilerClasspath = "org.scala-sbt" %% "zinc-classpath" % zincVersion
private val compilerApiInfo = "org.scala-sbt" %% "zinc-apiinfo" % zincVersion
private val compilerBridge = "org.scala-sbt" %% "compiler-bridge" % zincVersion
private val zinc = "org.scala-sbt" %% "zinc" % zincVersion
private val zincCompile = "org.scala-sbt" %% "zinc-compile" % zincVersion
private val zincCompileCore = "org.scala-sbt" %% "zinc-compile-core" % zincVersion
def getSbtModulePath(key: String) = {
val localProps = new java.util.Properties()
IO.load(localProps, file("project/local.properties"))
val path = Option(localProps.getProperty(key)).orElse(sys.props.get(key))
path.foreach(f => println(s"Using $key=$f"))
path
}
lazy val sbtIoPath = getSbtModulePath("sbtio.path")
lazy val sbtUtilPath = getSbtModulePath("sbtutil.path")
lazy val sbtLmPath = getSbtModulePath("sbtlm.path")
lazy val sbtZincPath = getSbtModulePath("sbtzinc.path")
def addSbtModule(
path: Option[String],
projectName: String,
moduleId: ModuleID,
c: Option[Configuration] = None
) = (p: Project) => {
val m = moduleId.withConfigurations(c.map(_.name))
path match {
case Some(f) =>
p.dependsOn(ClasspathDependency(ProjectRef(file(f), projectName), c.map(_.name)))
case None => p.settings(libraryDependencies += m, dependencyOverrides += m)
}
}
def addSbtIO = addSbtModule(sbtIoPath, "io", sbtIO)
def addSbtLmCore = addSbtModule(sbtLmPath, "lmCore", libraryManagementCore)
def addSbtLmIvy = addSbtModule(sbtLmPath, "lmIvy", libraryManagementIvy)
def addSbtLmIvyTest = addSbtModule(sbtLmPath, "lmIvy", libraryManagementIvy, Some(Test))
def addSbtCompilerInterface = addSbtModule(sbtZincPath, "compilerInterfaceJVM", compilerInterface)
def addSbtCompilerClasspath = addSbtModule(sbtZincPath, "zincClasspathJVM2_12", compilerClasspath)
def addSbtCompilerApiInfo = addSbtModule(sbtZincPath, "zincApiInfoJVM2_12", compilerApiInfo)
def addSbtCompilerBridge = addSbtModule(sbtZincPath, "compilerBridgeJVM2_12", compilerBridge)
def addSbtZinc = addSbtModule(sbtZincPath, "zincJVM2_12", zinc)
def addSbtZincCompile = addSbtModule(sbtZincPath, "zincCompileJVM2_12", zincCompile)
def addSbtZincCompileCore = addSbtModule(sbtZincPath, "zincCompileCoreJVM2_12", zincCompileCore)
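  // Hedged usage note (illustrative only, not from the original file): a build
  // definition would typically apply one of the helpers above to a subproject,
  // e.g. `(project in file("internal/util-logging")).configure(addSbtIO)`;
  // the subproject path shown here is hypothetical.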
val lmCoursierShaded = "io.get-coursier" %% "lm-coursier-shaded" % "2.0.10"
def sjsonNew(n: String) =
Def.setting("com.eed3si9n" %% n % "0.9.1") // contrabandSjsonNewVersion.value
val sjsonNewScalaJson = sjsonNew("sjson-new-scalajson")
val sjsonNewMurmurhash = sjsonNew("sjson-new-murmurhash")
// JLine 3 version must be coordinated together with JAnsi version
// and the JLine 2 fork version, which uses the same JAnsi
val jline = "org.scala-sbt.jline" % "jline" % "2.14.7-sbt-a1b0ffbb8f64bb820f4f84a0c07a0c0964507493"
val jline3Version = "3.19.0"
val jline3Terminal = "org.jline" % "jline-terminal" % jline3Version
val jline3Jansi = "org.jline" % "jline-terminal-jansi" % jline3Version
val jline3JNA = "org.jline" % "jline-terminal-jna" % jline3Version
val jline3Reader = "org.jline" % "jline-reader" % jline3Version
val jline3Builtins = "org.jline" % "jline-builtins" % jline3Version
val jansi = "org.fusesource.jansi" % "jansi" % "2.1.0"
val scalatest = "org.scalatest" %% "scalatest" % "3.2.10"
val scalacheck = "org.scalacheck" %% "scalacheck" % "1.15.4"
val junit = "junit" % "junit" % "4.13.1"
val scalaVerify = "com.eed3si9n.verify" %% "verify" % "1.0.0"
val templateResolverApi = "org.scala-sbt" % "template-resolver" % "0.1"
val scalaXml = Def.setting(
if (scalaBinaryVersion.value == "3") {
"org.scala-lang.modules" %% "scala-xml" % "2.0.1"
} else {
"org.scala-lang.modules" %% "scala-xml" % "1.3.0"
}
)
val scalaParsers = Def.setting(
if (scalaBinaryVersion.value == "3") {
"org.scala-lang.modules" %% "scala-parser-combinators" % "2.1.0"
} else {
"org.scala-lang.modules" %% "scala-parser-combinators" % "1.1.2"
}
)
val scalaReflect = Def.setting(
if (scalaBinaryVersion.value == "3") {
"org.scala-lang" % "scala-reflect" % scala213
} else {
"org.scala-lang" % "scala-reflect" % scalaVersion.value
}
)
val scalaPar = "org.scala-lang.modules" %% "scala-parallel-collections" % "1.0.0"
// specify all of the log4j modules to prevent misalignment
def log4jModule = (n: String) => "org.apache.logging.log4j" % n % "2.16.0"
val log4jApi = log4jModule("log4j-api")
val log4jCore = log4jModule("log4j-core")
val log4jSlf4jImpl = log4jModule("log4j-slf4j-impl")
val log4jModules = Vector(log4jApi, log4jCore, log4jSlf4jImpl)
val caffeine = "com.github.ben-manes.caffeine" % "caffeine" % "2.8.5"
val hedgehog = "qa.hedgehog" %% "hedgehog-sbt" % "0.7.0"
val disruptor = "com.lmax" % "disruptor" % "3.4.2"
val kindProjector = ("org.typelevel" % "kind-projector" % "0.13.2").cross(CrossVersion.full)
}
|
sbt/sbt
|
project/Dependencies.scala
|
Scala
|
apache-2.0
| 6,370 |
package edu.colorado.plv.cuanto.jsy
package common
import edu.colorado.plv.cuanto.parsing.{ParserLike, RichParsers}
import scala.util.parsing.combinator.JavaTokenParsers
import scala.util.parsing.input.Reader
/** Common trait for a JavaScripty parser.
*
* Mixes [[scala.util.parsing.combinator.JavaTokenParsers]] for basic
* tokens (e.g., identifiers, numbers) with some utilities in
* [[RichParsers]] and a top-level interface in [[ParserLike]].
*
* @author Bor-Yuh Evan Chang
*/
trait JsyParserLike extends JavaTokenParsers with RichParsers with ParserLike[Expr] {
override def scan(in: Reader[Char]): Input = in
}
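// Hedged note (not part of the original file): a concrete parser would mix this
// trait into an object and define its productions over Expr, e.g.
// `object MyJsyParser extends JsyParserLike { ... }`; the object name is
// hypothetical and the actual productions depend on ParserLike/RichParsers,
// which are defined elsewhere.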
|
cuplv/cuanto
|
src/main/scala/edu/colorado/plv/cuanto/jsy/common/JsyParserLike.scala
|
Scala
|
apache-2.0
| 636 |
package com.socrata.datacoordinator.truth.sql
package sample
sealed abstract class SampleType
case object SampleSidColumn extends SampleType
case object SampleTextColumn extends SampleType
case object SamplePointColumn extends SampleType
|
socrata-platform/data-coordinator
|
coordinatorlib/src/main/scala/com/socrata/datacoordinator/truth/sql/sample/SampleType.scala
|
Scala
|
apache-2.0
| 239 |
package mesosphere.marathon.tasks
import mesosphere.marathon.core.launcher.impl.TaskOpFactoryHelper
import mesosphere.marathon.core.task.TaskStateOp
import mesosphere.marathon.test.{ MarathonSpec, MarathonTestHelper, Mockito }
import org.apache.mesos.{ Protos => Mesos }
import org.scalatest.{ GivenWhenThen, Matchers }
class TaskOpFactoryHelperTest extends MarathonSpec with GivenWhenThen with Mockito with Matchers {
test("exception when newTask.taskId and taskInfo.id don't match") {
val f = new Fixture
Given("A non-matching task and taskInfo")
val task = MarathonTestHelper.mininimalTask("123")
val taskInfo = MarathonTestHelper.makeOneCPUTask("456").build()
When("We create a launch operation")
val error = intercept[AssertionError] {
f.helper.launchEphemeral(taskInfo, task)
}
Then("An exception is thrown")
error.getMessage shouldEqual "assumption failed: marathon task id and mesos task id must be equal"
}
test("Create a launch TaskOp") {
val f = new Fixture
Given("a task and a taskInfo")
val task = MarathonTestHelper.mininimalTask("123")
val taskInfo = MarathonTestHelper.makeOneCPUTask(task.taskId.idString).build()
When("We create a launch operation")
val launch = f.helper.launchEphemeral(taskInfo, task)
Then("The result is as expected")
launch.stateOp shouldEqual TaskStateOp.LaunchEphemeral(task)
launch.taskInfo shouldEqual taskInfo
launch.oldTask shouldBe empty
launch.offerOperations should have size 1
launch.offerOperations.head.getType shouldEqual Mesos.Offer.Operation.Type.LAUNCH
}
class Fixture {
val helper = new TaskOpFactoryHelper(Some("principal"), Some("role"))
}
}
|
timcharper/marathon
|
src/test/scala/mesosphere/marathon/tasks/TaskOpFactoryHelperTest.scala
|
Scala
|
apache-2.0
| 1,716 |
package net.hamnaberg.json.collection.data
import net.hamnaberg.json.collection.Property
trait DataExtractor[A] {
def unapply(data: List[Property]): Option[A]
}
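// A hedged sketch (illustrative, not part of the original file): a trivial
// extractor that relies only on the trait contract shown above. Real extractors
// would inspect the Property fields, which are not visible here.
object PropertyCountExtractor extends DataExtractor[Int] {
  def unapply(data: List[Property]): Option[Int] =
    if (data.isEmpty) None else Some(data.size)
}
// Usage: `properties match { case PropertyCountExtractor(n) => ... }`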
|
hamnis/scala-collection-json
|
src/main/scala/net/hamnaberg/json/collection/data/DataExtractor.scala
|
Scala
|
apache-2.0
| 165 |
package webshop.webservice
import webshop.webservice.OrderStatus._
sealed trait OrderStatus {
override def toString: String = this match {
case InfoPending(pending) => "info-pending:" + pending.mkString(",")
case UnavailableItems(items) => "unavailable-items:" + items.length
case PaymentFailed => "payment-failed"
case ShippingItems => "shipping-items"
case ProcessingPayment => "processing-payment"
case Complete => "complete"
}
}
object OrderStatus {
case class InfoPending(pending: List[String]) extends OrderStatus
case class UnavailableItems(items: List[String]) extends OrderStatus
case object PaymentFailed extends OrderStatus
case object ShippingItems extends OrderStatus
case object ProcessingPayment extends OrderStatus
case object Complete extends OrderStatus
}
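// A hedged usage sketch (not part of the original file): it only illustrates the
// string encoding produced by the toString defined above.
object OrderStatusEncodingExample {
  def main(args: Array[String]): Unit = {
    println(InfoPending(List("address", "payment")))  // info-pending:address,payment
    println(UnavailableItems(List("itemA", "itemB"))) // unavailable-items:2
    println(Complete)                                 // complete
  }
}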
|
ing-bank/baker
|
examples/bakery-client-example/src/main/scala/webshop/webservice/OrderStatus.scala
|
Scala
|
mit
| 824 |
package org.jetbrains.plugins.scala
package annotator
import com.intellij.codeInsight.intention.IntentionAction
import com.intellij.codeInspection.ProblemHighlightType
import com.intellij.psi.PsiElement
import com.intellij.openapi.util.TextRange
import com.intellij.lang.annotation.AnnotationHolder
import com.intellij.lang.annotation.Annotation
import lang.psi.api.base.types.ScTypeElement
import quickfix.ReportHighlightingErrorQuickFix
import lang.psi.api.toplevel.ScTypeParametersOwner
import lang.psi.api.statements.params.ScParameters
import lang.psi.api.expr.{ScBlockExpr, ScExpression}
/**
* @author Aleksander Podkhalyuzin
* Date: 25.03.2009
*/
private[annotator] object AnnotatorUtils {
def proccessError(error: String, element: PsiElement, holder: AnnotationHolder, fixes: IntentionAction*) {
proccessError(error, element.getTextRange, holder, fixes: _*)
}
def proccessError(error: String, range: TextRange, holder: AnnotationHolder, fixes: IntentionAction*) {
val annotation = holder.createErrorAnnotation(range, error)
annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
for (fix <- fixes) annotation.registerFix(fix)
}
def proccessWarning(error: String, element: PsiElement, holder: AnnotationHolder, fixes: IntentionAction*) {
proccessWarning(error, element.getTextRange, holder, fixes: _*)
}
def proccessWarning(error: String, range: TextRange, holder: AnnotationHolder, fixes: IntentionAction*) {
val annotation: Annotation = holder.createWarningAnnotation(range, error)
annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
for (fix <- fixes) annotation.registerFix(fix)
}
def checkConformance(expression: ScExpression, typeElement: ScTypeElement, holder: AnnotationHolder) {
expression.getTypeAfterImplicitConversion().tr.foreach {actual =>
val expected = typeElement.calcType
if (!actual.conforms(expected)) {
val expr = expression match {
case b: ScBlockExpr => b.getRBrace.map(_.getPsi).getOrElse(b)
case _ => expression
}
val annotation = holder.createErrorAnnotation(expr,
"Type mismatch, found: %s, required: %s".format(actual.presentableText, expected.presentableText))
annotation.registerFix(ReportHighlightingErrorQuickFix)
}
}
}
def checkImplicitParametersAndBounds(paramOwner: ScTypeParametersOwner, parameters: Option[ScParameters], holder: AnnotationHolder) {
val hasImplicitBound = paramOwner.typeParameters.exists(_.hasImplicitBound)
val implicitToken: Option[PsiElement] = parameters.toList.flatMap(_.clauses).flatMap(_.implicitToken).headOption
(hasImplicitBound, implicitToken) match {
case (true, Some(element)) =>
val message = ScalaBundle.message("cannot.have.implicit.parameters.and.implicit.bounds")
holder.createErrorAnnotation(element, message)
case _ =>
}
}
}
|
consulo/consulo-scala
|
src/org/jetbrains/plugins/scala/annotator/AnnotatorUtils.scala
|
Scala
|
apache-2.0
| 2,947 |
/*******************************************************************************
* Copyright (c) 2019. Carl Minden
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package com.anathema_roguelike
package fov
import com.anathema_roguelike.entities.Entity
import squidpony.squidgrid.FOV
import squidpony.squidgrid.Radius
abstract class FOVProcessor(var width: Int, var height: Int, var resistances: Array[Array[Double]]) {
protected def visit(entity: Entity, x: Int, y: Int, light: Double): Unit
protected def doFOV(entity: Entity, radius: Double, angle: Double, span: Double, fov: FOV): Unit = {
val startx = entity.getX
val starty = entity.getY
val light = fov.calculateFOV(resistances, startx, starty, radius, Radius.CIRCLE, angle, span)
for (
x <- Math.max(startx - radius, 0).toInt until Math.min(startx + radius, width).toInt;
y <- Math.max(starty - radius, 0).toInt until Math.min(starty + radius, height).toInt
) {
visit(entity, x, y, light(x)(y) / 2)
}
}
}
|
carlminden/anathema-roguelike
|
src/com/anathema_roguelike/fov/FOVProcessor.scala
|
Scala
|
gpl-3.0
| 1,698 |
package code.model
import _root_.net.liftweb.mapper._
import _root_.net.liftweb.util._
import _root_.net.liftweb.common._
class MessageRecipient extends LongKeyedMapper[MessageRecipient] with IdPK {
def getSingleton = MessageRecipient
object message extends MappedLongForeignKey(this, Message)
object recipient extends MappedLongForeignKey(this, Recipient)
}
object MessageRecipient extends MessageRecipient with LongKeyedMetaMapper[MessageRecipient] {
override def dbTableName = "messagerecipients"
def join (rpt : Recipient, msg : Message) =
this.create.recipient(rpt).message(msg).save
def recentMessagesForRecipient(rcpt: Long, count: Int): List[Message] = {
val msgrcpts = MessageRecipient.findAll(
By(MessageRecipient.recipient,rcpt),
MaxRows(count),
OrderBy(MessageRecipient.id,Descending))
msgrcpts.map(_.message.obj.open_!)
}
}
|
scsibug/fermata
|
src/main/scala/code/model/MessageRecipient.scala
|
Scala
|
bsd-3-clause
| 887 |
package core
import me.mtrupkin.console.{Colors, ScreenChar}
import me.mtrupkin.core.{Point, Size}
import scala.Array._
/**
* Created by mtrupkin on 12/14/2014.
*/
trait Square {
def name: String
def sc: ScreenChar
def move: Boolean = true
var cost: Int = 1
}
trait GameMap {
def size: Size
val squares = ofDim[Square](size.width, size.height)
def apply(p: Point): Square = if (size.in(p)) squares(p.x)(p.y) else Bounds
def update(p: Point, value: Square): Unit = squares(p.x)(p.y) = value
}
object Bounds extends Square {
val name = "Out Of Bounds"
override val move = false
def sc = ScreenChar('\u25A0', fg = Colors.White)
}
class Space extends Square {
val name = "Space"
val sc = ScreenChar('\u00B7', fg = Colors.White)
}
class Star(val sc: ScreenChar) extends Square {
val name = "Star"
}
object Square {
implicit def toTile(s: ScreenChar): Square = {
s.c match {
case ' ' => new Space
case _ => new Star(s)
}
}
}
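// A hedged sketch (illustrative, not part of the original file): a minimal concrete
// GameMap that starts out filled with empty space. Squares could equally be
// populated from ScreenChar values via the implicit conversion in Square above.
class EmptySpaceMap(val size: Size) extends GameMap {
  for (x <- 0 until size.width; y <- 0 until size.height)
    squares(x)(y) = new Space
}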
|
mtrupkin/flagship
|
src/main/scala/core/Square.scala
|
Scala
|
mit
| 1,032 |
package org.mybatis.scala.infrastructure
import org.mybatis.scala.domain.User
import org.mybatis.scala.mapping._
import org.mybatis.scala.mapping.Binding._
import scala.language.postfixOps
object UserRepository {
val defaultResultMap = new ResultMap[User] {
idArg(column = "id", javaType = T[Int])
arg(column = "name", javaType = T[String])
arg(column = "email", javaType = T[String])
}
val create = new Insert[User] {
keyGenerator = JdbcGeneratedKey(null, "id")
def xsql = <xsql>INSERT INTO user(name, email) VALUES({"name" ?}, {"email" ?})</xsql>
}
val createFromTuple2 = new Insert[(String, String)] {
keyGenerator = JdbcGeneratedKey(null, "id")
def xsql = <xsql>INSERT INTO user(name, email) VALUES({"_1" ?}, {"_2" ?})</xsql>
}
val findById = new SelectOneBy[Int, User] {
resultMap = defaultResultMap
def xsql = <xsql>SELECT * FROM user WHERE id = {"id" ?}</xsql>
}
val findAll = new SelectList[User] {
resultMap = defaultResultMap
def xsql = <xsql>SELECT * FROM user</xsql>
}
val lastInsertId = new SelectOne[Int] {
def xsql = <xsql>CALL IDENTITY()</xsql>
}
def bind = Seq(create, createFromTuple2, findById, findAll, lastInsertId)
}
|
tempbottle/scala-1
|
mybatis-scala-core/src/test/scala/org/mybatis/scala/infrastructure/UserRepository.scala
|
Scala
|
apache-2.0
| 1,221 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.magic.builtin
import org.apache.toree.interpreter.{ExecuteFailure, Results, ExecuteAborted, ExecuteError}
import org.apache.toree.kernel.protocol.v5.MIMEType
import org.apache.toree.magic._
import org.apache.toree.magic.dependencies.{IncludeKernelInterpreter, IncludeInterpreter}
import org.apache.toree.utils.LogLike
import org.apache.toree.utils.json.RddToJson
import org.apache.spark.sql.{DataFrame, SchemaRDD}
/**
* Temporary magic to show an RDD as JSON
*/
class RDD extends CellMagic with IncludeKernelInterpreter with LogLike {
private def convertToJson(code: String) = {
val (result, message) = kernelInterpreter.interpret(code)
result match {
case Results.Success =>
val rddVarName = kernelInterpreter.lastExecutionVariableName.getOrElse("")
kernelInterpreter.read(rddVarName).map(rddVal => {
try{
CellMagicOutput(MIMEType.ApplicationJson -> RddToJson.convert(rddVal.asInstanceOf[DataFrame]))
} catch {
case _: Throwable =>
CellMagicOutput(MIMEType.PlainText -> s"Could note convert RDD to JSON: ${rddVarName}->${rddVal}")
}
}).getOrElse(CellMagicOutput(MIMEType.PlainText -> "No RDD Value found!"))
case _ =>
val errorMessage = message.right.toOption match {
case Some(executeFailure) => executeFailure match {
case _: ExecuteAborted => throw new Exception("RDD magic aborted!")
case executeError: ExecuteError => throw new Exception(executeError.value)
}
case _ => "No error information available!"
}
logger.error(s"Error retrieving RDD value: ${errorMessage}")
CellMagicOutput(MIMEType.PlainText ->
(s"An error occurred converting RDD to JSON.\n${errorMessage}"))
}
}
override def execute(code: String): CellMagicOutput =
convertToJson(code)
}
|
asorianostratio/incubator-toree
|
kernel/src/main/scala/org/apache/toree/magic/builtin/RDD.scala
|
Scala
|
apache-2.0
| 2,723 |
package livehl.common.db
import java.security.Key
import java.util.Date
import com.aliyun.openservices.ots._
import com.aliyun.openservices.ots.model._
import scala.collection.JavaConversions._
class BDBEntity(val tableName: String) extends DataStorage{
private lazy val conf = ConfigFactory.load()
private lazy val client = new OTSClient(conf.getString("ots.url"), conf.getString("ots.accessId"), conf.getString("ots.accessKey"), conf.getString("ots.instanceName"))
private lazy val created = getConstructorParamNames(this.getClass())
private lazy val methods = this.getClass().getMethods() filter {
m => !m.getName().contains("_") && m.getParameterTypes().length == 0
}
private def getFiledValue(key: String) = {
methods.filter(_.getName() == key)(0).invoke(this)
}
private def getBDBFiledValue(key: String) = {
val v = getFiledValue(key)
v match {
case null => null
case b: java.lang.Boolean => ColumnValue.fromBoolean(b)
case i: java.lang.Integer => ColumnValue.fromLong(i.toLong)
case d: java.lang.Double => ColumnValue.fromDouble(d)
case f: java.lang.Float => ColumnValue.fromDouble(f.toDouble)
case l: java.lang.Long => ColumnValue.fromLong(l)
case s: String => ColumnValue.fromString(s)
case d: Date => ColumnValue.fromLong(d.getTime)
case bd: BigDecimal => ColumnValue.fromDouble(bd.toDouble)
case b: Array[Byte] => ColumnValue.fromBinary(b)
case o: Any => ColumnValue.fromString(o.toString)
}
}
private def getBDBKeyFiledValue(key: String) = {
val v = getFiledValue(key)
v match {
case null => null
case i: Integer => PrimaryKeyValue.fromLong(i.toLong)
case l: java.lang.Long => PrimaryKeyValue.fromLong(l)
case s: String => PrimaryKeyValue.fromString(s)
}
}
private def getBDBKeyValue(key: AnyRef) = {
key match {
case null => null
case i: Integer => PrimaryKeyValue.fromLong(i.toLong)
case l: java.lang.Long => PrimaryKeyValue.fromLong(l)
case s: String => PrimaryKeyValue.fromString(s)
case o:AnyRef => PrimaryKeyValue.fromString(o.toString())
}
}
def getFieldKeys(fields: String*) = {
(if (fields.size == 0) {
getConstructorParamNames(this.getClass()).maxBy(_._2.length)._2 map (_._1)
} else {
fields.toList
})
}
def getFieldKeyValues(fields: String*) = {
(if (fields.size == 0) {
getConstructorParamNames(this.getClass()).maxBy(_._2.length)._2 map (_._1)
} else {
fields.toList
}) map (f => f -> methods.filter(_.getName() == f)(0).invoke(this)) filter (_._2 != null) map (kv => kv._1 -> (kv._2 match {
case b: java.lang.Boolean => b
case s: String => s
case d: Date => d.getTime
case bd: BigDecimal => bd.toDouble
case o: Any => o
}))
}
def getFieldKeyTypes(fields: String*) = {
val keys = (if (fields.size == 0) {
getConstructorParamNames(this.getClass()).maxBy(_._2.length)._2 map (_._1)
} else {
fields.toList
})
keys map (f => f -> methods.filter(_.getName() == f)(0).invoke(this)) filter (_._2 != null) map (kv => kv._1 -> (kv._2 match {
case b: java.lang.Boolean => "Boolean"
case s: String => "String"
case i: java.lang.Integer => "Integer"
case l: java.lang.Long => "BIGINT"
case d: java.lang.Double => "DOUBLE"
case d: Date => "Integer"
case bd: BigDecimal => "DOUBLE"
case o: Any => "String"
}))
}
/**
* Create the database table.
*/
def createTable() {
val tableMeta = new TableMeta(tableName)
tableMeta.addPrimaryKeyColumn("id", PrimaryKeyType.INTEGER)
// Reserve the read and write capacity units for this table
val capacityUnit = new CapacityUnit(1, 1)
val request = new CreateTableRequest()
request.setTableMeta(tableMeta)
request.setReservedThroughput(capacityUnit)
client.createTable(request)
}
def deleteTable() {
val request = new DeleteTableRequest()
request.setTableName(tableName)
client.deleteTable(request)
}
def update(where: String, fields: String*) = {
val rowChange = new RowUpdateChange(tableName)
val primaryKeys = new RowPrimaryKey()
val whereValue = getBDBKeyFiledValue(where)
if (whereValue == null) throw new EmptyFieldExcepiton
primaryKeys.addPrimaryKeyColumn(where, whereValue)
rowChange.setPrimaryKey(primaryKeys)
getFieldKeys(fields: _*).foreach { k =>
val v = getBDBFiledValue(k)
if (v == null) rowChange.deleteAttributeColumn(k)
else
rowChange.addAttributeColumn(k, v);
}
rowChange.setCondition(new Condition(RowExistenceExpectation.EXPECT_EXIST));
val request = new UpdateRowRequest();
request.setRowChange(rowChange);
val result = client.updateRow(request);
result.getConsumedCapacity.getCapacityUnit.getWriteCapacityUnit
}
def updateNoEmptyById() = {
val m = getFieldKeyValues() filter (v => v._2 != null && !DBEntity.isEmpty(v._2))
val updates = (m map (_._1) filter (_ != "id") toList)
update("id", updates: _*)
}
def insert(fields: String*) = {
val rowChange = new RowPutChange(tableName)
val primaryKey = new RowPrimaryKey()
val idValue = getBDBKeyFiledValue("id")
if (idValue == null) throw new EmptyFieldExcepiton
primaryKey.addPrimaryKeyColumn("id", idValue)
rowChange.setPrimaryKey(primaryKey)
getFieldKeys(fields: _*).filter(_ != "tableName").filter(_ != "id").foreach { k =>
val v = getBDBFiledValue(k)
if (v != null) rowChange.addAttributeColumn(k, v);
}
rowChange.setCondition(new Condition(RowExistenceExpectation.IGNORE))
val request = new PutRowRequest()
request.setRowChange(rowChange)
Tool.reTry(3,conf.getInt("ots.sleep")) {
val result = client.putRow(request)
result.getConsumedCapacity().getCapacityUnit().getWriteCapacityUnit()
}
}
def insertMutile(bdbes:List[_ <:BDBEntity],fields: String*)={
val request = new BatchWriteRowRequest()
bdbes.foreach{bdb=>
val rowChange = new RowPutChange(bdb.tableName)
val primaryKey = new RowPrimaryKey()
val idValue = bdb.getBDBKeyFiledValue("id")
if (idValue == null) throw new EmptyFieldExcepiton
primaryKey.addPrimaryKeyColumn("id", idValue)
rowChange.setPrimaryKey(primaryKey)
bdb.getFieldKeys(fields: _*).filter(_ != "tableName").filter(_ != "id").foreach { k =>
val v = bdb.getBDBFiledValue(k)
if (v != null) rowChange.addAttributeColumn(k, v);
}
rowChange.setCondition(new Condition(RowExistenceExpectation.IGNORE))
request.addRowPutChange(rowChange)
}
def dealFail(bwrr:BatchWriteRowResult,req:BatchWriteRowRequest):Unit={
val failedOperations = new BatchWriteRowRequest()
bwrr.getPutRowStatus.map { kv =>
val tableName = kv._1
0 until kv._2.size map { i =>
val status = kv._2(i)
if (status.isSucceed) None
else Some(req.getRowPutChange(tableName, i))
} filter (_.isDefined) foreach { v => failedOperations.addRowPutChange(v.get)}
}
if(failedOperations.getRowPutChange.size()>0){
Tool.reTry(3,conf.getInt("ots.sleep")) {
val result = client.batchWriteRow(failedOperations)
dealFail(result,failedOperations)
}
}
}
Tool.reTry(3,conf.getInt("ots.sleep")) {
val result = client.batchWriteRow(request)
dealFail(result,request)
}
}
def insertUpdate(updateFields: List[String], fields: String*) {
val rowChange = new RowUpdateChange(tableName)
val primaryKey = new RowPrimaryKey()
updateFields.foreach{updateField=>
val idValue = getBDBKeyFiledValue(updateField)
if (idValue == null) throw new EmptyFieldExcepiton
primaryKey.addPrimaryKeyColumn(updateField, idValue)
}
rowChange.setPrimaryKey(primaryKey)
getFieldKeys(fields: _*).filter(v=> !updateFields.contains(v)).foreach { k =>
val v = getBDBFiledValue(k)
if (v != null) rowChange.addAttributeColumn(k, v);
}
rowChange.setCondition(new Condition(RowExistenceExpectation.IGNORE))
val request = new UpdateRowRequest()
request.setRowChange(rowChange)
val result = client.updateRow(request)
result.getConsumedCapacity().getCapacityUnit().getWriteCapacityUnit()
}
def delete(where: String) {
val rowChange = new RowDeleteChange(tableName);
val primaryKeys = new RowPrimaryKey();
val idValue = getBDBKeyFiledValue(where)
if (idValue == null) throw new EmptyFieldExcepiton
primaryKeys.addPrimaryKeyColumn(where, idValue);
rowChange.setPrimaryKey(primaryKeys);
val request = new DeleteRowRequest();
request.setRowChange(rowChange);
val result = client.deleteRow(request);
result.getConsumedCapacity().getCapacityUnit().getWriteCapacityUnit();
}
def queryById(id: String, fields: String*): Option[_ <: BDBEntity] = {
val criteria = new SingleRowQueryCriteria(tableName)
val primaryKeys = new RowPrimaryKey()
val idValue = getBDBKeyValue(id)
// if (idValue == null) throw new EmptyFieldExcepiton
primaryKeys.addPrimaryKeyColumn("id", idValue)
criteria.setPrimaryKey(primaryKeys)
if(fields.size>0)criteria.addColumnsToGet(fields.toArray)
val request = new GetRowRequest()
request.setRowQueryCriteria(criteria)
var value:Option[_ <: BDBEntity]=None
Tool.reTry(3) {
val result = client.getRow(request)
val row = result.getRow()
if (result.getRow.getColumns.isEmpty)
value=None
else {
val dataMap = getFieldKeys(fields: _*).toList.map(k => k -> getColData(row.getColumns.get(k))).toMap + ("id" ->id)
value=Some(BDBEntity.apply(this.getClass(), dataMap))
}
}
return value;
}
def getColData(v:ColumnValue):Object={
if(v==null) return null
v.getType match{
case ColumnType.BINARY =>v.asBinary()
case ColumnType.BOOLEAN =>Boolean.box(v.asBoolean())
case ColumnType.DOUBLE=>Double.box(v.asDouble())
case ColumnType.INTEGER => Long.box(v.asLong())
case ColumnType.STRING => v.asString()
}
}
def queryByIds(idName: String, ids: List[Long], fields: String*): List[_ <: BDBEntity] = {
val request = new BatchGetRowRequest()
val tableRows = new MultiRowQueryCriteria(tableName)
ids.foreach { i =>
val primaryKeys = new RowPrimaryKey()
primaryKeys.addPrimaryKeyColumn(idName,
PrimaryKeyValue.fromLong(i))
tableRows.addRow(primaryKeys);
}
if(fields.size>0)tableRows.addColumnsToGet(fields.toArray)
request.addMultiRowQueryCriteria(tableRows)
val result = client.batchGetRow(request)
val status = result.getTableToRowsStatus()
status.values().map(v => v.filter(_.isSucceed)).flatten.map { v =>
val dataMap = getFieldKeys(fields: _*).toList.map(k => k ->getColData(v.getRow.getColumns.get(k))).toMap + (idName -> getColData(v.getRow.getColumns.get(idName)))
BDBEntity.apply(this.getClass(), dataMap)
} toList
}
// Range query
def queryRange(id: String, start: Long, end: Long, fields: String*): List[_ <: BDBEntity] = {
val criteria = new RangeRowQueryCriteria(tableName)
val inclusiveStartKey = new RowPrimaryKey()
inclusiveStartKey.addPrimaryKeyColumn(id, PrimaryKeyValue.fromLong(start))
inclusiveStartKey.addPrimaryKeyColumn(id, PrimaryKeyValue.INF_MIN)
// The range boundary must provide the complete primary key; if the query range does not constrain a column, set that column to negative or positive infinity.
val exclusiveEndKey = new RowPrimaryKey()
exclusiveEndKey.addPrimaryKeyColumn(id, PrimaryKeyValue.fromLong(end))
exclusiveEndKey.addPrimaryKeyColumn(id, PrimaryKeyValue.INF_MAX)
// The range boundary must provide the complete primary key; if the query range does not constrain a column, set that column to negative or positive infinity.
criteria.setInclusiveStartPrimaryKey(inclusiveStartKey)
criteria.setExclusiveEndPrimaryKey(exclusiveEndKey)
val request = new GetRangeRequest()
request.setRangeRowQueryCriteria(criteria)
val result = client.getRange(request)
val rows = result.getRows()
rows.map { v =>
val dataMap = getFieldKeys(fields: _*).toList.map(k => k -> getColData(v.getColumns.get(k))).toMap + (id -> getColData(v.getColumns.get(id)))
BDBEntity.apply(this.getClass(), dataMap)
} toList
}
def queryCount(where: String, param: String*): Int = {
throw new UnSupportExcepiton
}
def queryPage(where: String, pageNum: Int, pageSize: Int, fields: String*): List[_ <: BDBEntity] = {
val realPageNum = if (pageNum < 1) 1 else pageNum
val realPageSize = if (pageSize < 1) 1 else pageSize
queryRange(where, realPageNum * realPageSize, realPageSize, fields: _*)
}
}
object BDBEntity {
private var aesKey: Key = null
def setAesKey(s: Key) {
aesKey = s
}
def isActiveAES = aesKey == null
/* Construct an instance from a map */
def apply[T](clazz: Class[_ <: T], map: Map[String, Object]): T = {
val created = getConstructorParamNames(clazz).maxBy(_._2.length)
val params = created._2 map {
name_type =>
val value = map.getOrElse(name_type._1, null)
val t = name_type._2
if (null != value && (value.getClass().isInstance(t) || value.getClass() == t)) {
value
} else {
t.getName match {
case "java.sql.Date" => if (value == null) null else new java.sql.Date(value.asInstanceOf[java.util.Date].getTime())
case "java.sql.Time" => if (value == null) null else new java.sql.Time(value.asInstanceOf[java.util.Date].getTime())
case "java.sql.Timestamp" => if (value == null) null else new java.sql.Timestamp(value.asInstanceOf[java.util.Date].getTime())
case "java.lang.String" => if (value == null) null else value.asInstanceOf[String]
case "scala.math.BigDecimal" => if (value == null) null else BigDecimal(value.toString)
case "boolean" => if (value == null) null else if (value.isInstanceOf[Boolean]) value else Boolean.box(value.asInstanceOf[Int] == 1)
case _ => value
}
}
}
// params foreach (v=> if(v==null) print("null") else print(v.getClass() + ":"+v))
created._1.newInstance(params: _*).asInstanceOf[T]
}
def isEmpty(str: String) = {
(null == str || str.isEmpty)
}
def isEmpty(bean: Any): Boolean = {
bean match {
case s: String => isEmpty(bean.asInstanceOf[String])
case i: Int => bean.asInstanceOf[Int] == -1
case d: Double => bean.asInstanceOf[Double] == -1
case d: Boolean => !bean.asInstanceOf[Boolean]
case b: BigDecimal => b == null || b.asInstanceOf[BigDecimal] == -1
case _ => bean == null
}
}
}
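// Hedged usage note (comments only, illustrative): given a concrete entity such as
//   class User(val id: Long, val name: String) extends BDBEntity("user")
// a row read back from OTS can be rebuilt with the companion helper above, e.g.
//   BDBEntity.apply(classOf[User], Map("id" -> Long.box(1L), "name" -> "demo"))
// The entity and field names here are hypothetical.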
|
livehl/common
|
src/main/scala/livehl/common/db/BDBEntity.scala
|
Scala
|
apache-2.0
| 14,829 |
/*
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.summingbird.batch.state
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.fs.Path
import scala.collection.JavaConverters._
import scala.util.{ Try, Success, Failure }
import org.slf4j.LoggerFactory
private[summingbird] object FileVersionTracking {
@transient private val logger = LoggerFactory.getLogger(classOf[FileVersionTracking])
val FINISHED_VERSION_SUFFIX = ".version"
implicit def path(strPath: String): Path = new Path(strPath)
def path(basePath: String, fileName: String): Path = new Path(basePath, fileName)
}
private[summingbird] case class FileVersionTracking(root: String, fs: FileSystem) extends Versioning {
import FileVersionTracking._
fs.mkdirs(root)
override def mostRecentVersion: Option[Long] = getAllVersions.headOption
override def failVersion(version: Long) = deleteVersion(version)
override def deleteVersion(version: Long) = fs.delete(tokenPath(version), false)
override def succeedVersion(version: Long) = fs.createNewFile(tokenPath(version))
private def getOnDiskVersions: List[Try[Long]] =
listDir(root)
.filter(p => !(p.getName.startsWith("_")))
.filter(_.getName().endsWith(FINISHED_VERSION_SUFFIX))
.map(f => parseVersion(f.toString()))
private def logVersion(v: Try[Long]) = logger.debug("Version on disk : " + v.toString)
override def getAllVersions: List[Long] =
getOnDiskVersions
.map { v =>
logVersion(v)
v
}
.collect { case Success(s) => s }
.sorted
.reverse
override def hasVersion(version: Long) = getAllVersions.contains(version)
def tokenPath(version: Long): Path =
path(root, version.toString + FINISHED_VERSION_SUFFIX)
def parseVersion(p: String): Try[Long] =
Try(p.getName().dropRight(FINISHED_VERSION_SUFFIX.length()).toLong)
def listDir(dir: String): List[Path] = fs.listStatus(dir).map(_.getPath).toList
}
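// A hedged usage sketch (not part of the original file): it runs the tracking
// against the Hadoop local filesystem under a temporary directory, an assumption
// made only for illustration.
private[summingbird] object FileVersionTrackingExample {
  def main(args: Array[String]): Unit = {
    val fs = FileSystem.getLocal(new org.apache.hadoop.conf.Configuration())
    val root = java.nio.file.Files.createTempDirectory("version-tracking-demo").toString
    val tracking = FileVersionTracking(root, fs)
    tracking.succeedVersion(1L)
    tracking.succeedVersion(2L)
    println(tracking.mostRecentVersion) // expected: Some(2)
  }
}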
|
zirpins/summingbird
|
summingbird-batch-hadoop/src/main/scala/com/twitter/summingbird/batch/state/FileVersionTracking.scala
|
Scala
|
apache-2.0
| 2,494 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.io.{File, FileNotFoundException}
import java.nio.file.{Files, StandardOpenOption}
import java.util.Locale
import scala.collection.mutable
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkException
import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}
import org.apache.spark.sql.TestingUDT.{IntervalUDT, NullData, NullUDT}
import org.apache.spark.sql.catalyst.expressions.AttributeReference
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.catalyst.plans.logical.Filter
import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
import org.apache.spark.sql.execution.datasources.FilePartition
import org.apache.spark.sql.execution.datasources.v2.{BatchScanExec, DataSourceV2ScanRelation, FileScan}
import org.apache.spark.sql.execution.datasources.v2.parquet.ParquetTable
import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, SortMergeJoinExec}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
class FileBasedDataSourceSuite extends QueryTest
with SharedSparkSession
with AdaptiveSparkPlanHelper {
import testImplicits._
override def beforeAll(): Unit = {
super.beforeAll()
spark.sessionState.conf.setConf(SQLConf.ORC_IMPLEMENTATION, "native")
}
override def afterAll(): Unit = {
try {
spark.sessionState.conf.unsetConf(SQLConf.ORC_IMPLEMENTATION)
} finally {
super.afterAll()
}
}
private val allFileBasedDataSources = Seq("orc", "parquet", "csv", "json", "text")
private val nameWithSpecialChars = "sp&cial%c hars"
allFileBasedDataSources.foreach { format =>
test(s"Writing empty datasets should not fail - $format") {
withTempPath { dir =>
Seq("str").toDS().limit(0).write.format(format).save(dir.getCanonicalPath)
}
}
}
// `TEXT` data source always has a single column whose name is `value`.
allFileBasedDataSources.filterNot(_ == "text").foreach { format =>
test(s"SPARK-23072 Write and read back unicode column names - $format") {
withTempPath { path =>
val dir = path.getCanonicalPath
// scalastyle:off nonascii
val df = Seq("a").toDF("한글")
// scalastyle:on nonascii
df.write.format(format).option("header", "true").save(dir)
val answerDf = spark.read.format(format).option("header", "true").load(dir)
assert(df.schema.sameType(answerDf.schema))
checkAnswer(df, answerDf)
}
}
}
// Only ORC/Parquet support this. `CSV` and `JSON` returns an empty schema.
// `TEXT` data source always has a single column whose name is `value`.
Seq("orc", "parquet").foreach { format =>
test(s"SPARK-15474 Write and read back non-empty schema with empty dataframe - $format") {
withTempPath { file =>
val path = file.getCanonicalPath
val emptyDf = Seq((true, 1, "str")).toDF().limit(0)
emptyDf.write.format(format).save(path)
val df = spark.read.format(format).load(path)
assert(df.schema.sameType(emptyDf.schema))
checkAnswer(df, emptyDf)
}
}
}
Seq("orc", "parquet").foreach { format =>
test(s"SPARK-23271 empty RDD when saved should write a metadata only file - $format") {
withTempPath { outputPath =>
val df = spark.emptyDataFrame.select(lit(1).as("i"))
df.write.format(format).save(outputPath.toString)
val partFiles = outputPath.listFiles()
.filter(f => f.isFile && !f.getName.startsWith(".") && !f.getName.startsWith("_"))
assert(partFiles.length === 1)
// Now read the file.
val df1 = spark.read.format(format).load(outputPath.toString)
checkAnswer(df1, Seq.empty[Row])
assert(df1.schema.equals(df.schema.asNullable))
}
}
}
allFileBasedDataSources.foreach { format =>
test(s"SPARK-23372 error while writing empty schema files using $format") {
withTempPath { outputPath =>
val errMsg = intercept[AnalysisException] {
spark.emptyDataFrame.write.format(format).save(outputPath.toString)
}
assert(errMsg.getMessage.contains(
"Datasource does not support writing empty or nested empty schemas"))
}
// Nested empty schema
withTempPath { outputPath =>
val schema = StructType(Seq(
StructField("a", IntegerType),
StructField("b", StructType(Nil)),
StructField("c", IntegerType)
))
val df = spark.createDataFrame(sparkContext.emptyRDD[Row], schema)
val errMsg = intercept[AnalysisException] {
df.write.format(format).save(outputPath.toString)
}
assert(errMsg.getMessage.contains(
"Datasource does not support writing empty or nested empty schemas"))
}
}
}
allFileBasedDataSources.foreach { format =>
test(s"SPARK-22146 read files containing special characters using $format") {
withTempDir { dir =>
val tmpFile = s"$dir/$nameWithSpecialChars"
spark.createDataset(Seq("a", "b")).write.format(format).save(tmpFile)
val fileContent = spark.read.format(format).load(tmpFile)
checkAnswer(fileContent, Seq(Row("a"), Row("b")))
}
}
}
// Separate test case for formats that support multiLine as an option.
Seq("json", "csv").foreach { format =>
test("SPARK-23148 read files containing special characters " +
s"using $format with multiline enabled") {
withTempDir { dir =>
val tmpFile = s"$dir/$nameWithSpecialChars"
spark.createDataset(Seq("a", "b")).write.format(format).save(tmpFile)
val reader = spark.read.format(format).option("multiLine", true)
val fileContent = reader.load(tmpFile)
checkAnswer(fileContent, Seq(Row("a"), Row("b")))
}
}
}
allFileBasedDataSources.foreach { format =>
testQuietly(s"Enabling/disabling ignoreMissingFiles using $format") {
def testIgnoreMissingFiles(): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
Seq("0").toDF("a").write.format(format).save(new Path(basePath, "second").toString)
Seq("1").toDF("a").write.format(format).save(new Path(basePath, "fourth").toString)
val firstPath = new Path(basePath, "first")
val thirdPath = new Path(basePath, "third")
val fs = thirdPath.getFileSystem(spark.sessionState.newHadoopConf())
Seq("2").toDF("a").write.format(format).save(firstPath.toString)
Seq("3").toDF("a").write.format(format).save(thirdPath.toString)
val files = Seq(firstPath, thirdPath).flatMap { p =>
fs.listStatus(p).filter(_.isFile).map(_.getPath)
}
val df = spark.read.format(format).load(
new Path(basePath, "first").toString,
new Path(basePath, "second").toString,
new Path(basePath, "third").toString,
new Path(basePath, "fourth").toString)
// Make sure all data files are deleted and can't be opened.
files.foreach(f => fs.delete(f, false))
assert(fs.delete(thirdPath, true))
for (f <- files) {
intercept[FileNotFoundException](fs.open(f))
}
checkAnswer(df, Seq(Row("0"), Row("1")))
}
}
for {
ignore <- Seq("true", "false")
sources <- Seq("", format)
} {
withSQLConf(SQLConf.IGNORE_MISSING_FILES.key -> ignore,
SQLConf.USE_V1_SOURCE_LIST.key -> sources) {
if (ignore.toBoolean) {
testIgnoreMissingFiles()
} else {
val exception = intercept[SparkException] {
testIgnoreMissingFiles()
}
assert(exception.getMessage().contains("does not exist"))
}
}
}
}
}
// Text file format only supports string type
test("SPARK-24691 error handling for unsupported types - text") {
withTempDir { dir =>
// write path
val textDir = new File(dir, "text").getCanonicalPath
var msg = intercept[AnalysisException] {
Seq(1).toDF.write.text(textDir)
}.getMessage
assert(msg.contains("Text data source does not support int data type"))
msg = intercept[AnalysisException] {
Seq(1.2).toDF.write.text(textDir)
}.getMessage
assert(msg.contains("Text data source does not support double data type"))
msg = intercept[AnalysisException] {
Seq(true).toDF.write.text(textDir)
}.getMessage
assert(msg.contains("Text data source does not support boolean data type"))
msg = intercept[AnalysisException] {
Seq(1).toDF("a").selectExpr("struct(a)").write.text(textDir)
}.getMessage
assert(msg.contains("Text data source does not support struct<a:int> data type"))
msg = intercept[AnalysisException] {
Seq((Map("Tesla" -> 3))).toDF("cars").write.mode("overwrite").text(textDir)
}.getMessage
assert(msg.contains("Text data source does not support map<string,int> data type"))
msg = intercept[AnalysisException] {
Seq((Array("Tesla", "Chevy", "Ford"))).toDF("brands")
.write.mode("overwrite").text(textDir)
}.getMessage
assert(msg.contains("Text data source does not support array<string> data type"))
// read path
Seq("aaa").toDF.write.mode("overwrite").text(textDir)
msg = intercept[AnalysisException] {
val schema = StructType(StructField("a", IntegerType, true) :: Nil)
spark.read.schema(schema).text(textDir).collect()
}.getMessage
assert(msg.contains("Text data source does not support int data type"))
msg = intercept[AnalysisException] {
val schema = StructType(StructField("a", DoubleType, true) :: Nil)
spark.read.schema(schema).text(textDir).collect()
}.getMessage
assert(msg.contains("Text data source does not support double data type"))
msg = intercept[AnalysisException] {
val schema = StructType(StructField("a", BooleanType, true) :: Nil)
spark.read.schema(schema).text(textDir).collect()
}.getMessage
assert(msg.contains("Text data source does not support boolean data type"))
}
}
// Unsupported data types of csv, json, orc, and parquet are as follows;
// csv -> R/W: Null, Array, Map, Struct
// json -> R/W: Interval
// orc -> R/W: Interval, W: Null
// parquet -> R/W: Interval, Null
test("SPARK-24204 error handling for unsupported Array/Map/Struct types - csv") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
var msg = intercept[AnalysisException] {
Seq((1, "Tesla")).toDF("a", "b").selectExpr("struct(a, b)").write.csv(csvDir)
}.getMessage
assert(msg.contains("CSV data source does not support struct<a:int,b:string> data type"))
msg = intercept[AnalysisException] {
val schema = StructType.fromDDL("a struct<b: Int>")
spark.range(1).write.mode("overwrite").csv(csvDir)
spark.read.schema(schema).csv(csvDir).collect()
}.getMessage
assert(msg.contains("CSV data source does not support struct<b:int> data type"))
msg = intercept[AnalysisException] {
Seq((1, Map("Tesla" -> 3))).toDF("id", "cars").write.mode("overwrite").csv(csvDir)
}.getMessage
assert(msg.contains("CSV data source does not support map<string,int> data type"))
msg = intercept[AnalysisException] {
val schema = StructType.fromDDL("a map<int, int>")
spark.range(1).write.mode("overwrite").csv(csvDir)
spark.read.schema(schema).csv(csvDir).collect()
}.getMessage
assert(msg.contains("CSV data source does not support map<int,int> data type"))
msg = intercept[AnalysisException] {
Seq((1, Array("Tesla", "Chevy", "Ford"))).toDF("id", "brands")
.write.mode("overwrite").csv(csvDir)
}.getMessage
assert(msg.contains("CSV data source does not support array<string> data type"))
msg = intercept[AnalysisException] {
val schema = StructType.fromDDL("a array<int>")
spark.range(1).write.mode("overwrite").csv(csvDir)
spark.read.schema(schema).csv(csvDir).collect()
}.getMessage
assert(msg.contains("CSV data source does not support array<int> data type"))
msg = intercept[AnalysisException] {
Seq((1, new TestUDT.MyDenseVector(Array(0.25, 2.25, 4.25)))).toDF("id", "vectors")
.write.mode("overwrite").csv(csvDir)
}.getMessage
assert(msg.contains("CSV data source does not support array<double> data type"))
msg = intercept[AnalysisException] {
val schema = StructType(StructField("a", new TestUDT.MyDenseVectorUDT(), true) :: Nil)
spark.range(1).write.mode("overwrite").csv(csvDir)
spark.read.schema(schema).csv(csvDir).collect()
}.getMessage
assert(msg.contains("CSV data source does not support array<double> data type."))
}
}
test("SPARK-24204 error handling for unsupported Interval data types - csv, json, parquet, orc") {
withTempDir { dir =>
val tempDir = new File(dir, "files").getCanonicalPath
// TODO: test file source V2 after write path is fixed.
Seq(true).foreach { useV1 =>
val useV1List = if (useV1) {
"csv,json,orc,parquet"
} else {
""
}
def validateErrorMessage(msg: String): Unit = {
val msg1 = "cannot save interval data type into external storage."
val msg2 = "data source does not support interval data type."
assert(msg.toLowerCase(Locale.ROOT).contains(msg1) ||
msg.toLowerCase(Locale.ROOT).contains(msg2))
}
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> useV1List) {
// write path
Seq("csv", "json", "parquet", "orc").foreach { format =>
val msg = intercept[AnalysisException] {
sql("select interval 1 days").write.format(format).mode("overwrite").save(tempDir)
}.getMessage
validateErrorMessage(msg)
}
// read path
Seq("parquet", "csv").foreach { format =>
var msg = intercept[AnalysisException] {
val schema = StructType(StructField("a", CalendarIntervalType, true) :: Nil)
spark.range(1).write.format(format).mode("overwrite").save(tempDir)
spark.read.schema(schema).format(format).load(tempDir).collect()
}.getMessage
validateErrorMessage(msg)
msg = intercept[AnalysisException] {
val schema = StructType(StructField("a", new IntervalUDT(), true) :: Nil)
spark.range(1).write.format(format).mode("overwrite").save(tempDir)
spark.read.schema(schema).format(format).load(tempDir).collect()
}.getMessage
validateErrorMessage(msg)
}
}
}
}
}
test("SPARK-24204 error handling for unsupported Null data types - csv, parquet, orc") {
// TODO: test file source V2 after write path is fixed.
Seq(true).foreach { useV1 =>
val useV1List = if (useV1) {
"csv,orc,parquet"
} else {
""
}
def errorMessage(format: String): String = {
s"$format data source does not support null data type."
}
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> useV1List) {
withTempDir { dir =>
val tempDir = new File(dir, "files").getCanonicalPath
Seq("parquet", "csv", "orc").foreach { format =>
// write path
var msg = intercept[AnalysisException] {
sql("select null").write.format(format).mode("overwrite").save(tempDir)
}.getMessage
assert(msg.toLowerCase(Locale.ROOT)
.contains(errorMessage(format)))
msg = intercept[AnalysisException] {
spark.udf.register("testType", () => new NullData())
sql("select testType()").write.format(format).mode("overwrite").save(tempDir)
}.getMessage
assert(msg.toLowerCase(Locale.ROOT)
.contains(errorMessage(format)))
// read path
msg = intercept[AnalysisException] {
val schema = StructType(StructField("a", NullType, true) :: Nil)
spark.range(1).write.format(format).mode("overwrite").save(tempDir)
spark.read.schema(schema).format(format).load(tempDir).collect()
}.getMessage
assert(msg.toLowerCase(Locale.ROOT)
.contains(errorMessage(format)))
msg = intercept[AnalysisException] {
val schema = StructType(StructField("a", new NullUDT(), true) :: Nil)
spark.range(1).write.format(format).mode("overwrite").save(tempDir)
spark.read.schema(schema).format(format).load(tempDir).collect()
}.getMessage
assert(msg.toLowerCase(Locale.ROOT)
.contains(errorMessage(format)))
}
}
}
}
}
Seq("parquet", "orc").foreach { format =>
test(s"Spark native readers should respect spark.sql.caseSensitive - ${format}") {
withTempDir { dir =>
val tableName = s"spark_25132_${format}_native"
val tableDir = dir.getCanonicalPath + s"/$tableName"
withTable(tableName) {
val end = 5
val data = spark.range(end).selectExpr("id as A", "id * 2 as b", "id * 3 as B")
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
data.write.format(format).mode("overwrite").save(tableDir)
}
sql(s"CREATE TABLE $tableName (a LONG, b LONG) USING $format LOCATION '$tableDir'")
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
checkAnswer(sql(s"select a from $tableName"), data.select("A"))
checkAnswer(sql(s"select A from $tableName"), data.select("A"))
// RuntimeException is triggered at executor side, which is then wrapped as
// SparkException at driver side
val e1 = intercept[SparkException] {
sql(s"select b from $tableName").collect()
}
assert(
e1.getCause.isInstanceOf[RuntimeException] &&
e1.getCause.getMessage.contains(
"""Found duplicate field(s) "b": [b, B] in case-insensitive mode"""))
val e2 = intercept[SparkException] {
sql(s"select B from $tableName").collect()
}
assert(
e2.getCause.isInstanceOf[RuntimeException] &&
e2.getCause.getMessage.contains(
"""Found duplicate field(s) "b": [b, B] in case-insensitive mode"""))
}
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
checkAnswer(sql(s"select a from $tableName"), (0 until end).map(_ => Row(null)))
checkAnswer(sql(s"select b from $tableName"), data.select("b"))
}
}
}
}
}
test("SPARK-25237 compute correct input metrics in FileScanRDD") {
// TODO: Test CSV V2 as well after it implements [[SupportsReportStatistics]].
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "csv") {
withTempPath { p =>
val path = p.getAbsolutePath
spark.range(1000).repartition(1).write.csv(path)
val bytesReads = new mutable.ArrayBuffer[Long]()
val bytesReadListener = new SparkListener() {
override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = {
bytesReads += taskEnd.taskMetrics.inputMetrics.bytesRead
}
}
sparkContext.addSparkListener(bytesReadListener)
try {
spark.read.csv(path).limit(1).collect()
sparkContext.listenerBus.waitUntilEmpty()
assert(bytesReads.sum === 7860)
} finally {
sparkContext.removeSparkListener(bytesReadListener)
}
}
}
}
test("Do not use cache on overwrite") {
Seq("", "orc").foreach { useV1SourceReaderList =>
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> useV1SourceReaderList) {
withTempDir { dir =>
val path = dir.toString
spark.range(1000).write.mode("overwrite").orc(path)
val df = spark.read.orc(path).cache()
assert(df.count() == 1000)
spark.range(10).write.mode("overwrite").orc(path)
assert(df.count() == 10)
assert(spark.read.orc(path).count() == 10)
}
}
}
}
test("Do not use cache on append") {
Seq("", "orc").foreach { useV1SourceReaderList =>
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> useV1SourceReaderList) {
withTempDir { dir =>
val path = dir.toString
spark.range(1000).write.mode("append").orc(path)
val df = spark.read.orc(path).cache()
assert(df.count() == 1000)
spark.range(10).write.mode("append").orc(path)
assert(df.count() == 1010)
assert(spark.read.orc(path).count() == 1010)
}
}
}
}
test("UDF input_file_name()") {
Seq("", "orc").foreach { useV1SourceReaderList =>
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> useV1SourceReaderList) {
withTempPath { dir =>
val path = dir.getCanonicalPath
spark.range(10).write.orc(path)
val row = spark.read.orc(path).select(input_file_name).first()
assert(row.getString(0).contains(path))
}
}
}
}
test("Option pathGlobFilter: filter files correctly") {
withTempPath { path =>
val dataDir = path.getCanonicalPath
Seq("foo").toDS().write.text(dataDir)
Seq("bar").toDS().write.mode("append").orc(dataDir)
val df = spark.read.option("pathGlobFilter", "*.txt").text(dataDir)
checkAnswer(df, Row("foo"))
// Both glob pattern in option and path should be effective to filter files.
val df2 = spark.read.option("pathGlobFilter", "*.txt").text(dataDir + "/*.orc")
checkAnswer(df2, Seq.empty)
val df3 = spark.read.option("pathGlobFilter", "*.txt").text(dataDir + "/*xt")
checkAnswer(df3, Row("foo"))
}
}
test("Option pathGlobFilter: simple extension filtering should contains partition info") {
withTempPath { path =>
val input = Seq(("foo", 1), ("oof", 2)).toDF("a", "b")
input.write.partitionBy("b").text(path.getCanonicalPath)
Seq("bar").toDS().write.mode("append").orc(path.getCanonicalPath + "/b=1")
      // If we use a glob pattern in the path, the partition column won't be shown in the result.
val df = spark.read.text(path.getCanonicalPath + "/*/*.txt")
checkAnswer(df, input.select("a"))
val df2 = spark.read.option("pathGlobFilter", "*.txt").text(path.getCanonicalPath)
checkAnswer(df2, input)
}
}
test("Option recursiveFileLookup: recursive loading correctly") {
val expectedFileList = mutable.ListBuffer[String]()
def createFile(dir: File, fileName: String, format: String): Unit = {
val path = new File(dir, s"${fileName}.${format}")
Files.write(
path.toPath,
s"content of ${path.toString}".getBytes,
StandardOpenOption.CREATE, StandardOpenOption.WRITE
)
val fsPath = new Path(path.getAbsoluteFile.toURI).toString
expectedFileList.append(fsPath)
}
def createDir(path: File, dirName: String, level: Int): Unit = {
val dir = new File(path, s"dir${dirName}-${level}")
dir.mkdir()
createFile(dir, s"file${level}", "bin")
createFile(dir, s"file${level}", "text")
if (level < 4) {
// create sub-dir
createDir(dir, "sub0", level + 1)
createDir(dir, "sub1", level + 1)
}
}
withTempPath { path =>
path.mkdir()
createDir(path, "root", 0)
val dataPath = new File(path, "dirroot-0").getAbsolutePath
val fileList = spark.read.format("binaryFile")
.option("recursiveFileLookup", true)
.load(dataPath)
.select("path").collect().map(_.getString(0))
assert(fileList.toSet === expectedFileList.toSet)
val fileList2 = spark.read.format("binaryFile")
.option("recursiveFileLookup", true)
.option("pathGlobFilter", "*.bin")
.load(dataPath)
.select("path").collect().map(_.getString(0))
assert(fileList2.toSet === expectedFileList.filter(_.endsWith(".bin")).toSet)
}
}
test("Option recursiveFileLookup: disable partition inferring") {
val dataPath = Thread.currentThread().getContextClassLoader
.getResource("test-data/text-partitioned").toString
val df = spark.read.format("binaryFile")
.option("recursiveFileLookup", true)
.load(dataPath)
assert(!df.columns.contains("year"), "Expect partition inferring disabled")
val fileList = df.select("path").collect().map(_.getString(0))
val expectedFileList = Array(
dataPath + "/year=2014/data.txt",
dataPath + "/year=2015/data.txt"
).map(path => new Path(path).toString)
assert(fileList.toSet === expectedFileList.toSet)
}
test("Return correct results when data columns overlap with partition columns") {
Seq("parquet", "orc", "json").foreach { format =>
withTempPath { path =>
val tablePath = new File(s"${path.getCanonicalPath}/cOl3=c/cOl1=a/cOl5=e")
Seq((1, 2, 3, 4, 5)).toDF("cOl1", "cOl2", "cOl3", "cOl4", "cOl5")
.write.format(format).save(tablePath.getCanonicalPath)
val df = spark.read.format(format).load(path.getCanonicalPath)
.select("CoL1", "Col2", "CoL5", "CoL3")
checkAnswer(df, Row("a", 2, "e", "c"))
}
}
}
test("Return correct results when data columns overlap with partition columns (nested data)") {
Seq("parquet", "orc", "json").foreach { format =>
withSQLConf(SQLConf.NESTED_SCHEMA_PRUNING_ENABLED.key -> "true") {
withTempPath { path =>
val tablePath = new File(s"${path.getCanonicalPath}/c3=c/c1=a/c5=e")
val inputDF = sql("SELECT 1 c1, 2 c2, 3 c3, named_struct('c4_1', 2, 'c4_2', 3) c4, 5 c5")
inputDF.write.format(format).save(tablePath.getCanonicalPath)
val resultDF = spark.read.format(format).load(path.getCanonicalPath)
.select("c1", "c4.c4_1", "c5", "c3")
checkAnswer(resultDF, Row("a", 2, "e", "c"))
}
}
}
}
test("sizeInBytes should be the total size of all files") {
Seq("orc", "").foreach { useV1SourceReaderList =>
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> useV1SourceReaderList) {
withTempDir { dir =>
dir.delete()
spark.range(1000).write.orc(dir.toString)
val df = spark.read.orc(dir.toString)
assert(df.queryExecution.optimizedPlan.stats.sizeInBytes === BigInt(getLocalDirSize(dir)))
}
}
}
}
test("SPARK-22790,SPARK-27668: spark.sql.sources.compressionFactor takes effect") {
Seq(1.0, 0.5).foreach { compressionFactor =>
withSQLConf(SQLConf.FILE_COMPRESSION_FACTOR.key -> compressionFactor.toString,
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "250") {
withTempPath { workDir =>
// the file size is 486 bytes
val workDirPath = workDir.getAbsolutePath
val data1 = Seq(100, 200, 300, 400).toDF("count")
data1.write.orc(workDirPath + "/data1")
val df1FromFile = spark.read.orc(workDirPath + "/data1")
val data2 = Seq(100, 200, 300, 400).toDF("count")
data2.write.orc(workDirPath + "/data2")
val df2FromFile = spark.read.orc(workDirPath + "/data2")
val joinedDF = df1FromFile.join(df2FromFile, Seq("count"))
if (compressionFactor == 0.5) {
val bJoinExec = collect(joinedDF.queryExecution.executedPlan) {
case bJoin: BroadcastHashJoinExec => bJoin
}
assert(bJoinExec.nonEmpty)
val smJoinExec = collect(joinedDF.queryExecution.executedPlan) {
case smJoin: SortMergeJoinExec => smJoin
}
assert(smJoinExec.isEmpty)
} else {
// compressionFactor is 1.0
val bJoinExec = collect(joinedDF.queryExecution.executedPlan) {
case bJoin: BroadcastHashJoinExec => bJoin
}
assert(bJoinExec.isEmpty)
val smJoinExec = collect(joinedDF.queryExecution.executedPlan) {
case smJoin: SortMergeJoinExec => smJoin
}
assert(smJoinExec.nonEmpty)
}
}
}
}
}
test("File source v2: support partition pruning") {
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "") {
allFileBasedDataSources.foreach { format =>
withTempPath { dir =>
Seq(("a", 1, 2), ("b", 1, 2), ("c", 2, 1))
.toDF("value", "p1", "p2")
.write
.format(format)
.partitionBy("p1", "p2")
.option("header", true)
.save(dir.getCanonicalPath)
val df = spark
.read
.format(format)
.option("header", true)
.load(dir.getCanonicalPath)
.where("p1 = 1 and p2 = 2 and value != \\"a\\"")
val filterCondition = df.queryExecution.optimizedPlan.collectFirst {
case f: Filter => f.condition
}
assert(filterCondition.isDefined)
        // The partition filters should be pushed down and should not need to be re-evaluated.
assert(filterCondition.get.collectFirst {
case a: AttributeReference if a.name == "p1" || a.name == "p2" => a
}.isEmpty)
val fileScan = df.queryExecution.executedPlan collectFirst {
case BatchScanExec(_, f: FileScan) => f
}
assert(fileScan.nonEmpty)
assert(fileScan.get.partitionFilters.nonEmpty)
assert(fileScan.get.dataFilters.nonEmpty)
assert(fileScan.get.planInputPartitions().forall { partition =>
partition.asInstanceOf[FilePartition].files.forall { file =>
file.filePath.contains("p1=1") && file.filePath.contains("p2=2")
}
})
checkAnswer(df, Row("b", 1, 2))
}
}
}
}
test("File source v2: support passing data filters to FileScan without partitionFilters") {
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "") {
allFileBasedDataSources.foreach { format =>
withTempPath { dir =>
Seq(("a", 1, 2), ("b", 1, 2), ("c", 2, 1))
.toDF("value", "p1", "p2")
.write
.format(format)
.partitionBy("p1", "p2")
.option("header", true)
.save(dir.getCanonicalPath)
val df = spark
.read
.format(format)
.option("header", true)
.load(dir.getCanonicalPath)
.where("value = 'a'")
val filterCondition = df.queryExecution.optimizedPlan.collectFirst {
case f: Filter => f.condition
}
assert(filterCondition.isDefined)
val fileScan = df.queryExecution.executedPlan collectFirst {
case BatchScanExec(_, f: FileScan) => f
}
assert(fileScan.nonEmpty)
assert(fileScan.get.partitionFilters.isEmpty)
assert(fileScan.get.dataFilters.nonEmpty)
checkAnswer(df, Row("a", 1, 2))
}
}
}
}
test("File table location should include both values of option `path` and `paths`") {
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "") {
withTempPaths(3) { paths =>
paths.zipWithIndex.foreach { case (path, index) =>
Seq(index).toDF("a").write.mode("overwrite").parquet(path.getCanonicalPath)
}
val df = spark
.read
.option("path", paths.head.getCanonicalPath)
.parquet(paths(1).getCanonicalPath, paths(2).getCanonicalPath)
df.queryExecution.optimizedPlan match {
case PhysicalOperation(_, _, DataSourceV2ScanRelation(table: ParquetTable, _, _)) =>
assert(table.paths.toSet == paths.map(_.getCanonicalPath).toSet)
case _ =>
throw new AnalysisException("Can not match ParquetTable in the query.")
}
checkAnswer(df, Seq(0, 1, 2).map(Row(_)))
}
}
}
}
object TestingUDT {
@SQLUserDefinedType(udt = classOf[IntervalUDT])
class IntervalData extends Serializable
class IntervalUDT extends UserDefinedType[IntervalData] {
override def sqlType: DataType = CalendarIntervalType
override def serialize(obj: IntervalData): Any =
throw new UnsupportedOperationException("Not implemented")
override def deserialize(datum: Any): IntervalData =
throw new UnsupportedOperationException("Not implemented")
override def userClass: Class[IntervalData] = classOf[IntervalData]
}
@SQLUserDefinedType(udt = classOf[NullUDT])
private[sql] class NullData extends Serializable
private[sql] class NullUDT extends UserDefinedType[NullData] {
override def sqlType: DataType = NullType
override def serialize(obj: NullData): Any =
throw new UnsupportedOperationException("Not implemented")
override def deserialize(datum: Any): NullData =
throw new UnsupportedOperationException("Not implemented")
override def userClass: Class[NullData] = classOf[NullData]
}
}
|
goldmedal/spark
|
sql/core/src/test/scala/org/apache/spark/sql/FileBasedDataSourceSuite.scala
|
Scala
|
apache-2.0
| 34,457 |
import play.api.ApplicationLoader.Context
import play.api.{Application, ApplicationLoader, BuiltInComponentsFromContext}
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{BroadcastHub, Keep, Source}
import command.{AccountCommandService, AccountEvent}
import play.filters.HttpFiltersComponents
import query.AccountQueryService
import router.Routes
class MyApplicationLoader extends ApplicationLoader {
def load(context: Context): Application = {
new MyComponents(context).application
}
}
class MyComponents(context: Context)
extends BuiltInComponentsFromContext(context)
with HttpFiltersComponents {
val (eventQueue, eventSource) = Source.queue(100, OverflowStrategy.fail)
.toMat(BroadcastHub.sink[AccountEvent])(Keep.both)
.run()(materializer)
val accountCommandService = new AccountCommandService(actorSystem, eventQueue)
val accountQueryService = new AccountQueryService(actorSystem, eventSource, materializer)
lazy val router = new Routes(
httpErrorHandler,
new controllers.ApplicationController(controllerComponents),
new controllers.AccountController(controllerComponents, accountCommandService, accountQueryService)
)
}
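// Illustrative sketch only: how the queue/hub pair above is typically consumed. Commands
// push events into `eventQueue` via `offer`, and any number of consumers can attach to
// `eventSource` after materialization. The `logEvents` name is hypothetical.
//
//   def logEvents(components: MyComponents): Unit = {
//     import components.{eventSource, materializer}
//     eventSource.runForeach(event => println(s"account event: $event"))(materializer)
//   }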
|
srottenberg/es-cqrs-bank-account
|
app/MyApplicationLoader.scala
|
Scala
|
mit
| 1,191 |
/*
* Implements the Delta Table used to store and find valid decompositions (grammars) of a
* term set.
*
 * Implements the delta-difference of a set of terms
 * (E.g.: delta(f(a), f(b)) = [f(alpha)], [a, b])
*
*
* !NOTE!
* The delta-table method of cut-introduction is sound but NOT complete!
* First, a complete method would require a merging of keys across lines, not
* just the selection of subsets in the same line
* (see technical report deltavec.tex for details).
*
* Second, folding the delta-table only produces decompositions U,S which
* result in EXACTLY the termset T being decomposed, not a superset of it.
* That is, if a decomposition U ° S is returned, then T = U ° S, not
* T \subseteq U ° S.
*
* To give an example where producing supersets would be advantageous:
* imagine a proof where the terms P(0),...,P(2^n - 1) occur.
* The decomposition of this termset is very inelegant, but if we
 * we were to produce a decomposition for P(0),...,P(2^n), we'd have a
* much smaller and equally serviceable solution.
*
* */
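/*
 * Illustrative extension of the example above (informal notation, single-variable delta):
 *
 *   delta(f(a), f(b), f(c))  = [f(alpha)], [a, b, c]
 *   delta(f(g(a)), f(g(b)))  = [f(g(alpha))], [a, b]
 *   delta(f(a), g(b))        = [alpha], [f(a), g(b)]   (no common head symbol: trivial decomposition)
 *
 * The delta table below indexes such decompositions by their s-component so that grammars
 * covering subsets of the term set can be read off line by line.
 */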
package at.logic.gapt.proofs.lk.algorithms.cutIntroduction
import at.logic.gapt.language.fol._
import at.logic.gapt.expr._
import at.logic.gapt.language.fol.Utils._
import at.logic.gapt.proofs.occurrences._
import scala.collection.immutable.HashMap
import at.logic.gapt.utils.dssupport.ListSupport._
import at.logic.gapt.proofs.lk.algorithms.cutIntroduction.Deltas._
//package-global definitions
package object types {
/** A term with variables */
type U = FOLTerm
/** The s-vector for a single term (the elements are the substitutions for the term's variables) */
type SVector = List[FOLTerm]
/** The set of s-vectors of a substitution */
type S = Set[SVector]
/**
* A raw s-vector, computed inside a delta-vector.
* Raw s-vectors are transposed and turned into sets to become s-vectors.
*/
type RawS = List[List[FOLTerm]]
/** A raw decomposition, i.e. a decomposition with a raw s-vector. */
type RawDecomposition = ( U, RawS )
  /** A decomposition consisting of u and S */
type Decomposition = ( U, S )
}
class DeltaTableException( msg: String ) extends Exception( msg )
/**
* A generalized delta table whose rows contains the results
* of Delta_G(...) instead of Delta(...).
*
* A generalized delta table contains decompositions for subsets of a termset and
* one can extract grammars from it by simply iterating through its rows.
*
* For details, see "Algorithmic Introduction of Quantified Cuts (Hetzl et al 2013)"
* and deltavector.tex/.pdf in the /doc-directory.
*
* @param terms The terms occurring in an LK proof.
* @param eigenvariable The name of eigenvariable that should be introduced in the decompositions.
*/
class DeltaTable( terms: List[FOLTerm], eigenvariable: String, delta: DeltaVector ) {
var termsAdded: Int = 0
var table = new HashMap[types.S, List[( types.U, List[FOLTerm] )]]
val trivialEv = FOLVar( eigenvariable + "_0" )
// Initialize with empty decomposition
add( Set(), null, Nil )
for ( n <- 1 until terms.length + 1 ) {
// Take only the simple grammars of term sets of size (n-1) from the current delta table
// Filter the keys (S) according to size
val one_less = table.filter( e => e._1.size == n - 1 )
termsAdded = 0
    // Go through the decompositions for each (n-1)-sized key and try to add terms.
one_less.foreach {
case ( s, pairs ) =>
// Iterate over the list of decompositions
pairs.foreach {
case ( u, ti ) =>
// Only choose terms that are after the last term in tl
val maxIdx = terms.lastIndexWhere( e => ti.contains( e ) )
val termsToAdd = terms.slice( maxIdx + 1, ( terms.size + 1 ) )
// Compute delta of the incremented list
termsToAdd.foreach {
case e =>
val incrementedtermset = ti :+ e
val p = delta.computeDelta( incrementedtermset, eigenvariable )
termsAdded = termsAdded + 1
            // Add the decomposition if it is non-trivial, or if the term set has size 1
            // (for a term set of size 1 the decomposition is always trivial but should still be added)
// NOTE:
// When the delta algorithm 2 is applied to an
// f_i-prefixed set of terms as computed in step 1 and T_i corresponds to
// a formula with only a single quantifier then every subset of {
// f_i(t_1),...,f_i(t_l) } of f_i(T_i) will have the non-trivial
// decomposition f_i(\alpha) o (t_1,...,t_l). This will not happen if T_i
// corresponds to a formula with more than one quantifier. Right now, it
// is better to not worry about this and rather consider it a potential
// for further improvement.
p.foreach {
case ( u, s ) =>
if ( incrementedtermset.size == 1 || u != trivialEv ) add( s, u, incrementedtermset )
}
}
}
}
}
/**
* Adds a decomposition (u,s), under the key s, to the delta table.
   * Specifically, s is the key and (u,T) is the value, where (u,S) is
   * a decomposition of T.
   * If the key already exists, (u,T) is added to the list of existing values.
*/
def add( s: types.S, u: types.U, t: List[FOLTerm] ) {
if ( table.contains( s ) ) {
val lst = table( s )
table += ( s -> ( ( u, t ) :: lst ) )
} else {
table += ( s -> ( ( u, t ) :: Nil ) )
}
}
def numberOfPairs = table.foldRight( 0 ) { case ( ( k, lst ), acc ) => lst.size + acc }
def minNumOfPairsPerLine = table.foldRight( Int.MaxValue ) { case ( ( k, lst ), acc ) => acc.min( lst.size ) }
def maxNumOfPairsPerLine = table.foldRight( 0 ) { case ( ( k, lst ), acc ) => acc.max( lst.size ) }
/**
* compute and print statistics about this delta-table
   * @param prln the function used for printing
*/
def printStats( prln: String => Unit ) {
prln( "number of lines: " + table.size )
prln( "total number of pairs: " + numberOfPairs )
prln( "avg. number of pairs / line: " + ( numberOfPairs.toFloat / table.size ) )
prln( "min. number of pairs / line: " + minNumOfPairsPerLine )
prln( "max. number of pairs / line: " + maxNumOfPairsPerLine )
val linestats = table.foldRight( new HashMap[Int, Int]() ) {
case ( ( k, lst ), acc ) => acc + ( lst.size -> ( acc.getOrElse( lst.size, 0 ) + 1 ) )
}
prln( " k number of lines with k pairs" )
linestats.toSeq.sortBy( _._1 ).foreach {
case ( k, num ) => prln( "% 3d".format( k ) + " " + num )
}
}
}
|
gisellemnr/gapt
|
src/main/scala/at/logic/gapt/proofs/lk/algorithms/cutIntroduction/DeltaTable.scala
|
Scala
|
gpl-3.0
| 6,663 |
/*
* @author Philip Stutz
* @author Mihaela Verman
*
* Copyright 2013 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.triplerush.loading
import com.signalcollect.interfaces.AggregationOperation
import com.signalcollect.Vertex
case class EdgesPerIndexType() extends AggregationOperation[Map[String, Int]] {
def extract(v: Vertex[_, _, _, _]): Map[String, Int] = {
Map(v.getClass.toString -> v.edgeCount).withDefaultValue(0)
}
def reduce(elements: Stream[Map[String, Int]]): Map[String, Int] = {
val result: Map[String, Int] = elements.reduce { (m1: Map[String, Int], m2: Map[String, Int]) =>
val keys = m1.keys ++ m2.keys
val merged = keys.map(k => (k, m1(k) + m2(k)))
merged.toMap.withDefaultValue(0)
}
result
}
}
case class CountVerticesByType() extends AggregationOperation[Map[String, Int]] {
def extract(v: Vertex[_, _, _, _]): Map[String, Int] = {
Map(v.getClass.toString -> 1).withDefaultValue(0)
}
def reduce(elements: Stream[Map[String, Int]]): Map[String, Int] = {
val result: Map[String, Int] = elements.foldLeft(Map.empty[String, Int].withDefaultValue(0)) { (m1: Map[String, Int], m2: Map[String, Int]) =>
val keys = m1.keys ++ m2.keys
val merged = keys.map(k => (k, m1(k) + m2(k)))
merged.toMap.withDefaultValue(0)
}
result
}
}
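// Minimal, self-contained sketch of the key-union merge used by both reduce methods above,
// shown on plain Scala maps (the object name is hypothetical and only for illustration).
object MapMergeSketch {
  def merge(m1: Map[String, Int], m2: Map[String, Int]): Map[String, Int] = {
    val d1 = m1.withDefaultValue(0)
    val d2 = m2.withDefaultValue(0)
    // union of the key sets, summing the per-key counts
    (d1.keys ++ d2.keys).map(k => k -> (d1(k) + d2(k))).toMap.withDefaultValue(0)
  }
  // merge(Map("FooVertex" -> 2), Map("FooVertex" -> 1, "BarVertex" -> 3))
  //   == Map("FooVertex" -> 3, "BarVertex" -> 3)
}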
|
uzh/triplerush
|
src/main/scala/com/signalcollect/triplerush/loading/VertexCounting.scala
|
Scala
|
apache-2.0
| 1,913 |
package nl.rabobank.oss.rules.dsl.core.operators
import nl.rabobank.oss.rules.dsl.core.types.AddableValues
import scala.annotation.implicitNotFound
/**
* This type class allows values of different types to be added in the DSL.
*
* @tparam A type of the left hand side of the adding operation
* @tparam B type of the right hand side of the adding operation
* @tparam C type of the result of the adding operation
*/
@implicitNotFound("No member of type class Addable available in scope for combination ${A} + ${B} = ${C}")
sealed trait Addable[A, B, C] extends BinaryOperable[A, B, C] {
def operation(a: A, b: B): C
def identityLeft: A
def identityRight: B
def representation: String = "+"
}
object Addable {
implicit def valueAddedToValue[A, B, C](implicit ev: AddableValues[A, B, C]): Addable[A, B, C] = new Addable[A, B, C] {
override def operation(n: A, m: B): C = ev.plus(n, m)
override def identityLeft = ev.leftUnit
override def identityRight = ev.rightUnit
}
implicit def listAddedToList[A, B, C](implicit ev: AddableValues[A, B, C]): Addable[List[A], List[B], List[C]] = new Addable[List[A], List[B], List[C]] {
override def operation(n: List[A], m: List[B]): List[C] = n.zipAll(m, ev.leftUnit, ev.rightUnit).map(t => ev.plus(t._1, t._2))
override def identityLeft = List(ev.leftUnit)
override def identityRight = List(ev.rightUnit)
}
implicit def listAddedToValue[A, B, C](implicit ev: AddableValues[A, B, C]): Addable[List[A], B, List[C]] = new Addable[List[A], B, List[C]] {
override def operation(n: List[A], m: B): List[C] = n.map( ev.plus(_, m) )
override def identityLeft = List(ev.leftUnit)
override def identityRight = ev.rightUnit
}
implicit def valueAddedToList[A, B, C](implicit ev: AddableValues[A, B, C]): Addable[A, List[B], List[C]] = new Addable[A, List[B], List[C]] {
override def operation(n: A, m: List[B]): List[C] = m.map( ev.plus(n, _) )
override def identityLeft = ev.leftUnit
override def identityRight = List(ev.rightUnit)
}
}
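// Illustrative sketch only (the object name is hypothetical): `listAddedToList` pads the
// shorter list with the unit elements before adding element-wise; the same padding can be
// seen on plain Scala lists, with 0 standing in for both units.
object AddablePaddingSketch {
  val padded: List[Int] = List(1, 2, 3).zipAll(List(10), 0, 0).map(t => t._1 + t._2)
  // padded == List(11, 2, 3)
}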
|
scala-rules/scala-rules
|
engine/src/main/scala/nl/rabobank/oss/rules/dsl/core/operators/Addable.scala
|
Scala
|
mit
| 2,050 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.convert.avro
import org.apache.avro.generic._
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
class AvroPathTest extends Specification with AvroUtils {
sequential
"AvroPath" should {
"select a top level path" in {
val path = "/content"
val avroPath = AvroPath(path)
val result = avroPath.eval(gr1)
val gr = result.get.asInstanceOf[GenericRecord]
gr.getSchema.getName mustEqual "TObj"
}
"select from a union by schema type" in {
val path = "/content$type=TObj"
val avroPath = AvroPath(path)
val result = avroPath.eval(gr1)
val gr = result.get.asInstanceOf[GenericRecord]
gr.getSchema.getName mustEqual "TObj"
}
"return None when element in union has wrong type" in {
val path = "/content$type=TObj"
val avroPath = AvroPath(path)
val result = avroPath.eval(gr2)
result.isDefined mustEqual false
}
"return nested records" in {
val path = "/content$type=TObj/kvmap"
val avroPath = AvroPath(path)
val result = avroPath.eval(gr1).asInstanceOf[Option[AnyRef]]
result must beSome(beAnInstanceOf[java.util.List[AnyRef]])
val arr = result.get.asInstanceOf[java.util.List[AnyRef]]
arr.length mustEqual 5
arr.head must beAnInstanceOf[GenericRecord]
}
"filter arrays of records by a field predicate" in {
val path = "/content$type=TObj/kvmap[$k=lat]"
val avroPath = AvroPath(path)
val result = avroPath.eval(gr1)
result.isDefined mustEqual true
val r = result.get.asInstanceOf[GenericRecord]
r.get("v").asInstanceOf[Double] mustEqual 45.0
}
"select a property out of a record in an array" in {
"filter arrays of records by a field predicate" in {
val path = "/content$type=TObj/kvmap[$k=lat]/v"
val avroPath = AvroPath(path)
val result = avroPath.eval(gr1)
result.isDefined mustEqual true
val v = result.get.asInstanceOf[Double]
v mustEqual 45.0
}
}
}
}
|
aheyne/geomesa
|
geomesa-convert/geomesa-convert-avro/src/test/scala/org/locationtech/geomesa/convert/avro/AvroPathTest.scala
|
Scala
|
apache-2.0
| 2,659 |
//
// Copyright 2013 Mirko Nasato
//
// Licensed under the Apache License, Version 2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
package io.encoded.seriala
trait SerialWriter[T] {
def write(x: T)
def close()
}
|
mirkonasato/seriala
|
seriala-core/src/main/scala/io/encoded/seriala/SerialWriter.scala
|
Scala
|
apache-2.0
| 225 |
package org.scalatest.examples.fixture.propspec.multi
import org.scalatest._
import prop.PropertyChecks
import scala.collection.mutable.ListBuffer
class ExampleSpec extends fixture.PropSpec with PropertyChecks with ShouldMatchers {
case class FixtureParam(builder: StringBuilder, buffer: ListBuffer[String])
def withFixture(test: OneArgTest) = {
// Create needed mutable objects
val stringBuilder = new StringBuilder("ScalaTest is ")
val listBuffer = new ListBuffer[String]
val theFixture = FixtureParam(stringBuilder, listBuffer)
// Invoke the test function, passing in the mutable objects
withFixture(test.toNoArgTest(theFixture))
}
property("testing should be easy") { f =>
f.builder.append("easy!")
assert(f.builder.toString === "ScalaTest is easy!")
assert(f.buffer.isEmpty)
val firstChar = f.builder(0)
forAll { (c: Char) =>
whenever (c != 'S') {
c should not equal firstChar
}
}
f.buffer += "sweet"
}
property("testing should be fun") { f =>
f.builder.append("fun!")
assert(f.builder.toString === "ScalaTest is fun!")
assert(f.buffer.isEmpty)
val firstChar = f.builder(0)
forAll { (c: Char) =>
whenever (c != 'S') {
c should not equal firstChar
}
}
}
}
|
svn2github/scalatest
|
examples/src/main/scala/org/scalatest/examples/propspec/multi/ExampleSpec.scala
|
Scala
|
apache-2.0
| 1,298 |
/*
* Copyright 2013 Damien Lecan
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package play.core.server.servlet
import java.io.ByteArrayOutputStream
import java.net.URLDecoder
import java.util.concurrent.atomic.AtomicBoolean
import javax.servlet.http.{ Cookie => ServletCookie }
import javax.servlet.http.HttpServletRequest
import javax.servlet.http.HttpServletResponse
import play.api._
import play.api.Logger
import play.api.http.{HttpProtocol, HeaderNames}
import play.api.http.HeaderNames.CONTENT_LENGTH
import play.api.http.HeaderNames.X_FORWARDED_FOR
import play.api.libs.iteratee._
import play.api.libs.iteratee.Enumerator
import play.api.mvc._
import scala.concurrent.Future
import scala.util.control.Exception
import scala.util.{Failure, Success}
trait RequestHandler {
def apply(server: Play2WarServer)
}
trait HttpServletRequestHandler extends RequestHandler {
protected def getPlayHeaders(request: HttpServletRequest): Headers
protected def getPlayCookies(request: HttpServletRequest): Cookies
/**
* Get a list of cookies from "flat" cookie representation (one-line-string cookie).
*/
protected def getServletCookies(flatCookie: String): Seq[ServletCookie]
/**
* Get HTTP request.
*/
protected def getHttpRequest(): RichHttpServletRequest
/**
* Get HTTP response.
*/
protected def getHttpResponse(): RichHttpServletResponse
/**
* Call just before end of service(...).
*/
protected def onFinishService(): Unit
/**
* Call every time the HTTP response must be terminated (completed).
*/
protected def onHttpResponseComplete(): Unit
protected def feedBodyParser(bodyParser: Iteratee[Array[Byte], Result]): Future[Result] = {
// default synchronous blocking body enumerator
    // FIXME this default body enumerator reads the entire stream into memory;
    // uploading a lot of data can lead to an OutOfMemoryError
// For more details: https://github.com/dlecan/play2-war-plugin/issues/223
val bodyEnumerator = getHttpRequest().getRichInputStream.fold(Enumerator.eof[Array[Byte]]) { is =>
val output = new java.io.ByteArrayOutputStream()
val buffer = new Array[Byte](1024 * 8)
var length = is.read(buffer)
while(length != -1){
output.write(buffer, 0, length)
length = is.read(buffer)
}
Enumerator(output.toByteArray) andThen Enumerator.eof
}
bodyEnumerator |>>> bodyParser
}
protected def setHeaders(headers: Map[String, String], httpResponse: HttpServletResponse): Unit = {
// Set response headers
headers.foreach {
case (CONTENT_LENGTH, "-1") => // why is it skip?
// Fix a bug for Set-Cookie header.
// Multiple cookies could be merged in a single header
// but it's not properly supported by some browsers
case (name, value) if name.equalsIgnoreCase(play.api.http.HeaderNames.SET_COOKIE) =>
getServletCookies(value).foreach(httpResponse.addCookie)
case (name, value) if name.equalsIgnoreCase(HeaderNames.TRANSFER_ENCODING) && value == HttpProtocol.CHUNKED =>
// ignore this header
// the JEE container sets this header itself. Avoid duplication of header (issues/289)
case (name, value) =>
httpResponse.setHeader(name, value)
}
}
/**
* default implementation to push a play result to the servlet output stream
* @param futureResult the result of the play action
* @param cleanup clean up callback
*/
protected def pushPlayResultToServletOS(futureResult: Future[Result], cleanup: () => Unit): Unit = {
// TODO: should use the servlet thread here or use special thread pool for blocking IO operations
// (https://github.com/dlecan/play2-war-plugin/issues/223)
import play.api.libs.iteratee.Execution.Implicits.trampoline
futureResult.map { result =>
getHttpResponse().getHttpServletResponse.foreach { httpResponse =>
val status = result.header.status
val headers = result.header.headers
val body: Enumerator[Array[Byte]] = result.body
// TODO: handle connection KeepAlive and Close?
val connection = result.connection
Logger("play").trace("Sending simple result: " + result)
httpResponse.setStatus(status)
setHeaders(headers, httpResponse)
val withContentLength = headers.exists(_._1.equalsIgnoreCase(CONTENT_LENGTH))
val chunked = headers.exists {
case (key, value) => key.equalsIgnoreCase(HeaderNames.TRANSFER_ENCODING) && value == HttpProtocol.CHUNKED
}
// TODO do not allow chunked for http 1.0?
// if (chunked && connection == KeepAlive) { send Results.HttpVersionNotSupported("The response to this request is chunked and hence requires HTTP 1.1 to be sent, but this is a HTTP 1.0 request.") }
// Stream the result
if (withContentLength || chunked) {
val hasError: AtomicBoolean = new AtomicBoolean(false)
val bodyIteratee: Iteratee[Array[Byte], Unit] = {
def step(in: Input[Array[Byte]]): Iteratee[Array[Byte], Unit] = (!hasError.get, in) match {
case (true, Input.El(x)) =>
Iteratee.flatten(
Future.successful(
if (hasError.get) {
()
} else {
getHttpResponse().getRichOutputStream.map {
os =>
os.write(x)
os.flush()
}
})
.map { _ => if (!hasError.get) Cont(step) else Done((), Input.Empty: Input[Array[Byte]]) }
.andThen {
case Failure(ex) =>
hasError.set(true)
Logger("play").debug(ex.toString)
throw ex
})
case (true, Input.Empty) => Cont(step)
case (_, inp) => Done((), inp)
}
Iteratee.flatten(
Future.successful(())
.map(_ => if (!hasError.get) Cont(step) else Done((), Input.Empty: Input[Array[Byte]])))
}
val bodyConsumer = if (chunked) {
          // if the result body is chunked, the chunks are already encoded with metadata in Results.chunk.
          // The problem is that the servlet container adds metadata again, leading to the chunks being encoded twice.
          // As a workaround, we 'dechunk' the body once before sending it to the servlet container
body &> Results.dechunk |>>> bodyIteratee
} else {
body |>>> bodyIteratee
}
bodyConsumer.andThen {
case Success(_) =>
cleanup()
onHttpResponseComplete()
case Failure(ex) =>
Logger("play").debug(ex.toString)
hasError.set(true)
onHttpResponseComplete()
}
} else {
Logger("play").trace("Result without Content-length")
// No Content-Length header specified, buffer in-memory
val byteBuffer = new ByteArrayOutputStream
val byteArrayOSIteratee = Iteratee.fold(byteBuffer)((b, e: Array[Byte]) => {
b.write(e); b
})
val p = body |>>> Enumeratee.grouped(byteArrayOSIteratee) &>> Cont {
case Input.El(buffer) =>
Logger("play").trace("Buffer size to send: " + buffer.size)
getHttpResponse().getHttpServletResponse.map { response =>
// set the content length ourselves
response.setContentLength(buffer.size)
val os = response.getOutputStream
os.flush()
buffer.writeTo(os)
}
val p = Future.successful()
Iteratee.flatten(p.map(_ => Done(1, Input.Empty: Input[ByteArrayOutputStream])))
case other => Error("unexpected input", other)
}
p.andThen {
case Success(_) =>
cleanup()
onHttpResponseComplete()
case Failure(ex) =>
Logger("play").debug(ex.toString)
onHttpResponseComplete()
}
}
} // end match foreach
}.onComplete { _ => cleanup() }
}
}
/**
* Generic implementation of HttpServletRequestHandler.
* One instance per incoming HTTP request.
*
* <strong>/!\\ Warning: this class and its subclasses are intended to thread-safe.</strong>
*/
abstract class Play2GenericServletRequestHandler(val servletRequest: HttpServletRequest, val servletResponse: Option[HttpServletResponse]) extends HttpServletRequestHandler {
override def apply(server: Play2WarServer) = {
    // val keepAlive -> makes no sense here
    // val websocketableRequest -> makes no sense here
val httpVersion = servletRequest.getProtocol
val servletPath = servletRequest.getRequestURI
val servletUri = servletPath + Option(servletRequest.getQueryString).filterNot(_.isEmpty).fold("")("?" + _)
val parameters = getHttpParameters(servletRequest)
val rHeaders = getPlayHeaders(servletRequest)
val httpMethod = servletRequest.getMethod
val isSecure = servletRequest.isSecure
def rRemoteAddress = {
val remoteAddress = servletRequest.getRemoteAddr
(for {
xff <- rHeaders.get(X_FORWARDED_FOR)
app <- server.applicationProvider.get.toOption
trustxforwarded <- app.configuration.getBoolean("trustxforwarded").orElse(Some(false))
if remoteAddress == "127.0.0.1" || trustxforwarded
} yield xff).getOrElse(remoteAddress)
}
def tryToCreateRequest = createRequestHeader(parameters)
def createRequestHeader(parameters: Map[String, Seq[String]] = Map.empty[String, Seq[String]]) = {
//mapping servlet request to Play's
val untaggedRequestHeader = new RequestHeader {
val id = server.newRequestId
val tags = Map.empty[String,String]
def uri = servletUri
def path = servletPath
def method = httpMethod
def version = httpVersion
def queryString = parameters
def headers = rHeaders
lazy val remoteAddress = rRemoteAddress
def secure: Boolean = isSecure
}
untaggedRequestHeader
}
// get handler for request
val (requestHeader, handler: Either[Future[Result], (Handler,Application)]) = Exception
.allCatch[RequestHeader].either(tryToCreateRequest)
.fold(
e => {
val rh = createRequestHeader()
val r = server.applicationProvider.get.map(_.global).getOrElse(DefaultGlobal).onBadRequest(rh, e.getMessage)
(rh, Left(r))
},
rh => server.getHandlerFor(rh) match {
case directResult @ Left(_) => (rh, directResult)
case Right((taggedRequestHeader, handler, application)) => (taggedRequestHeader, Right((handler, application)))
}
)
    // Call onRequestCompletion after all request processing is done. Protected with an AtomicBoolean to ensure it can't be executed more than once.
val alreadyClean = new java.util.concurrent.atomic.AtomicBoolean(false)
def cleanup() {
if (!alreadyClean.getAndSet(true)) {
play.api.Play.maybeApplication.foreach(_.global.onRequestCompletion(requestHeader))
}
}
trait Response {
def handle(result: Result): Unit
}
def cleanFlashCookie(result: Result): Result = {
val header = result.header
val flashCookie = {
header.headers.get(HeaderNames.SET_COOKIE)
.map(Cookies.decode)
.flatMap(_.find(_.name == Flash.COOKIE_NAME)).orElse {
Option(requestHeader.flash).filterNot(_.isEmpty).map { _ =>
Flash.discard.toCookie
}
}
}
flashCookie.fold(result) { newCookie =>
result.withHeaders(HeaderNames.SET_COOKIE -> Cookies.merge(header.headers.getOrElse(HeaderNames.SET_COOKIE, ""), Seq(newCookie)))
}
}
handler match {
//execute normal action
case Right((action: EssentialAction, app)) =>
val a = EssentialAction { rh =>
import play.api.libs.iteratee.Execution.Implicits.trampoline
Iteratee.flatten(action(rh).unflatten.map(_.it).recover {
case error =>
Iteratee.flatten(
app.errorHandler.onServerError(requestHeader, error).map(result => Done(result, Input.Empty))
): Iteratee[Array[Byte], Result]
})
}
handleAction(a, Some(app))
      // handle all websocket requests as bad, since websockets are not supported
      // handle bad websocket request
case Right((WebSocket(_), app)) =>
Logger("play").trace("Bad websocket request")
val a = EssentialAction(_ => Done(Results.BadRequest, Input.Empty))
handleAction(a, Some(app))
case Left(e) =>
Logger("play").trace("No handler, got direct result: " + e)
import play.api.libs.iteratee.Execution.Implicits.trampoline
val a = EssentialAction(_ => Iteratee.flatten(e.map(result => Done(result, Input.Empty))))
handleAction(a,None)
}
def handleAction(action: EssentialAction, app: Option[Application]) {
val bodyParser = Iteratee.flatten(
scala.concurrent.Future(action(requestHeader))(play.api.libs.concurrent.Execution.defaultContext)
)
import play.api.libs.iteratee.Execution.Implicits.trampoline
      // Remove Expect: 100-continue handling, since it's impossible to handle it
//val expectContinue: Option[_] = requestHeader.headers.get("Expect").filter(_.equalsIgnoreCase("100-continue"))
val eventuallyResult: Future[Result] = feedBodyParser(bodyParser)
val eventuallyResultWithError = eventuallyResult.recoverWith {
case error =>
Logger("play").error("Cannot invoke the action, eventually got an error: " + error)
app.fold(DefaultGlobal.onError(requestHeader, error)) {
_.errorHandler.onServerError(requestHeader, error)
}
}.map { result => cleanFlashCookie(result) }
pushPlayResultToServletOS(eventuallyResultWithError, cleanup)
}
onFinishService()
}
private def getHttpParameters(request: HttpServletRequest): Map[String, Seq[String]] = {
request.getQueryString match {
case null | "" => Map.empty
case queryString => queryString.replaceFirst("^?", "").split("&").flatMap { queryElement =>
val array = queryElement.split("=")
array.length match {
case 0 => None
case 1 => Some(URLDecoder.decode(array(0), "UTF-8") -> "")
case _ => Some(URLDecoder.decode(array(0), "UTF-8") -> URLDecoder.decode(array(1), "UTF-8"))
}
}.groupBy(_._1).map { case (key, value) => key -> value.map(_._2).toSeq }
}
}
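  // Illustrative example of the parsing above: for a raw query string such as "a=1&b=2&a=3"
  // it yields Map("a" -> Seq("1", "3"), "b" -> Seq("2")); repeated parameters are grouped
  // under a single key, and a value-less element such as "flag" in "a=1&flag" maps to the
  // empty string.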
}
|
swatikiran123/play2-war-plugin
|
project-code/core/common/src/main/scala/play/core/server/servlet/RequestHandler.scala
|
Scala
|
apache-2.0
| 15,374 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.logical
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.planner.plan.optimize.program.{BatchOptimizeContext, FlinkChainedProgram, FlinkHepRuleSetProgramBuilder, HEP_RULES_EXECUTION_TYPE}
import org.apache.flink.table.planner.utils.TableTestBase
import org.apache.calcite.plan.hep.HepMatchOrder
import org.apache.calcite.rel.rules.{CoreRules, PruneEmptyRules}
import org.apache.calcite.tools.RuleSets
import org.junit.{Before, Test}
/**
* Test for [[FlinkPruneEmptyRules]].
*/
class FlinkPruneEmptyRulesTest extends TableTestBase {
private val util = batchTestUtil()
@Before
def setup(): Unit = {
val programs = new FlinkChainedProgram[BatchOptimizeContext]()
programs.addLast(
"rules",
FlinkHepRuleSetProgramBuilder.newBuilder
.setHepRulesExecutionType(HEP_RULES_EXECUTION_TYPE.RULE_SEQUENCE)
.setHepMatchOrder(HepMatchOrder.BOTTOM_UP)
.add(RuleSets.ofList(
FlinkSubQueryRemoveRule.FILTER,
CoreRules.FILTER_REDUCE_EXPRESSIONS,
CoreRules.PROJECT_REDUCE_EXPRESSIONS,
PruneEmptyRules.FILTER_INSTANCE,
PruneEmptyRules.PROJECT_INSTANCE,
FlinkPruneEmptyRules.JOIN_RIGHT_INSTANCE))
.build()
)
util.replaceBatchProgram(programs)
util.addTableSource[(Int, Long, String)]("T1", 'a, 'b, 'c)
util.addTableSource[(Int, Long, String)]("T2", 'd, 'e, 'f)
}
@Test
def testSemiJoinRightIsEmpty(): Unit = {
util.verifyPlan("SELECT * FROM T1 WHERE a IN (SELECT d FROM T2 WHERE 1=0)")
}
@Test
def testAntiJoinRightIsEmpty(): Unit = {
util.verifyPlan("SELECT * FROM T1 WHERE a NOT IN (SELECT d FROM T2 WHERE 1=0)")
}
}
|
greghogan/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/rules/logical/FlinkPruneEmptyRulesTest.scala
|
Scala
|
apache-2.0
| 2,577 |
package de.ahus1.bdd
import de.ahus1.bdd.calculator.CalculatorSUT
import org.scalatest.FeatureSpec
class CalculatorFeature extends FeatureSpec with GwenTrait {
val theCalculator = new CalculatorSUT();
feature("calculator") {
scenario("simple addition") {
given(theCalculator) turnedOn;
when(theCalculator) adds 4 adds 5
then(theCalculator) shows 9
}
scenario("multiply") {
given(theCalculator) isInitializedWith 2
when(theCalculator) multipliesBy 2
then(theCalculator) shows 4
}
scenario("power") {
given(theCalculator) isInitializedWith 4
when(theCalculator) powerBy 2
then(theCalculator) shows 16
}
}
}
|
ahus1/bdd-examples
|
gwen-calculator-scalatest/src/test/java/de/ahus1/bdd/CalculatorFeature.scala
|
Scala
|
apache-2.0
| 698 |
import sbt._
object Boilerplate {
val arities = 1 to 22
val aritiesExceptOne = 2 to 22
val arityChars: Map[Int, Char] = arities.map(n => (n, ('A' + n - 1).toChar)).toMap
def write(path: File, fileContents: String): File = {
IO.write(path, fileContents)
path
}
def gen(dir: File) = {
val generatedDecodeOE = write(dir / "com" / "tactix4" / "t4openerp-connector" / "GeneratedDecodeOE.scala", genDecodeOE)
val generatedEncodeOE = write(dir / "com" / "tactix4" / "t4openerp-connector" / "GeneratedEncodeOE.scala", genEncodeOE)
Seq(generatedDecodeOE, generatedEncodeOE)
}
def header = {
"""|
|package com.tactix4.t4openerp.connector.codecs
|import com.tactix4.t4openerp.connector.transport.OENull
|import scalaz._
|import Scalaz._
|
| """.stripMargin
}
def functionTypeParameters(arity: Int): String = (1 to arity).map(n => arityChars(n)).mkString(", ")
def tupleFields(arity: Int): String = (1 to arity).map(n => "x._" + n).mkString(", ")
def listPatternMatch(arity: Int): String = ((1 to arity).map(n => "c" + arityChars(n).toLower).toList ::: "Nil" :: Nil).mkString(" :: ")
def jsonStringParams(arity: Int): String = (1 to arity).map(n => "%sn: String".format(arityChars(n).toLower)).mkString(", ")
def tuples(arity: Int): String = (1 to arity).map(n => "%sn -> t._%d.encode".format(arityChars(n).toLower, n)).mkString(", ")
def encodeTupleValues(arity: Int): String = (1 to arity).map(n => "t._%d.encode".format(n)).mkString(", ")
def jsonStringParamNames(arity: Int): String = (1 to arity).map(n => "%sn".format(arityChars(n).toLower)).mkString(", ")
def genEncodeOE = {
def encodeOEContextArities(n: Int): String = (1 to n).map(n => "%s: OEDataEncoder".format(arityChars(n))).mkString(", ")
def content = {
val encode1M =
"""|
|
| def encode1M[A:OEDataEncoder,X](f: X => A)(an: String): OEDataEncoder[X] =
| OEDataEncoder[X](x => {
| val t = f(x).encode
| t.map(e => OEDictionary(an -> e) )
| })
| """.stripMargin
val encodeMs = aritiesExceptOne.map {
arity =>
"""|
| def encode%sM[%s, X](f: X => (%s))(%s): OEDataEncoder[X] =
| OEDataEncoder[X](x => {
| val t = f(x)
| val e = List(%s).sequence[CodecResult,OEType]
| e.map(l => OEDictionary(List(%s) zip l toMap))
| })
| """.format(
arity,
encodeOEContextArities(arity),
functionTypeParameters(arity),
jsonStringParams(arity),
encodeTupleValues(arity),
jsonStringParamNames(arity)
).stripMargin
}
(encode1M +: encodeMs).mkString
}
header +
"""|
|import com.tactix4.t4openerp.connector.transport.OEDictionary
|import com.tactix4.t4openerp.connector.transport.OEType
|import com.tactix4.t4openerp.connector._
|import scala.language.postfixOps
|
|object GeneratedEncodeOE {
|%s
|}
| """.format(content).stripMargin
}
def genDecodeOE = {
def decodeOEContextArities(n: Int): String = (1 to n).map(n => "%s: OEDataDecoder".format(arityChars(n))).mkString(", ")
def content = {
val jdecode1L =
"""|
| def decode1M[A: OEDataDecoder, X](f: A => X)(an: String): OEDataDecoder[X] =
| OEDataDecoder(c => {
| val r = c.asDictionary(d => for {
| f1 <- (d.get(an) | OENull).decodeAs[A]
| } yield f(f1))
| r | s"Unable to decode: $c".left
| })
|
| """.stripMargin
val jdecodeLs = aritiesExceptOne.map {
arity =>
val secondForComprehensionLines: String = (1 to arity).map {
n =>
val upperChar = arityChars(n)
val lowerChar = upperChar.toLower
" f%d <- (d.get(%sn) | OENull).decodeAs[%s]".format(n, lowerChar,upperChar)
}.mkString("\\n")
val secondYieldExpression: String = (1 to arity).map {
n =>
"f%d".format(n)
}.mkString(", ")
"""|
| def decode%sM[%s, X](f: (%s) => X)(%s): OEDataDecoder[X] =
| OEDataDecoder(c => {
| val s = c.asDictionary( d => for {
|%s
| } yield f(%s))
|
| s | s"Unable to decode $c".left
| })
| """.format(
arity,
decodeOEContextArities(arity),
functionTypeParameters(arity),
jsonStringParams(arity),
secondForComprehensionLines,
secondYieldExpression
).stripMargin
}
(jdecode1L +: jdecodeLs).mkString
}
header +
"""|
|
|object GeneratedDecodeOE {
|%s
|}
| """.format(content).stripMargin
}
}
|
NeovaHealth/t4openerp-connector
|
project/Boilerplate.scala
|
Scala
|
agpl-3.0
| 5,050 |
import sbt._
class RyuProject(info: ProjectInfo) extends DefaultProject(info) with posterous.Publish with GrowlPlugin {
// databinder
val databinderNet = "databinder.net repository" at "http://databinder.net/repo"
def databind(p:String) = "net.databinder" %% "dispatch-%s".format(p) % "0.7.2"
val ljs = databind("lift-json")
val hjs = databind("http-json")
val js = databind("json")
val mime = databind("mime")
// lift js
//val liftJson = "net.liftweb" %% "lift-json" % "2.0-M4"
// testing
val snapshots = "Scala Tools Snapshots" at "http://www.scala-tools.org/repo-snapshots/"
val specs = "org.scala-tools.testing" % "specs" % "1.6.2.1-SNAPSHOT" % "test"
}
|
softprops/ryu
|
project/build/RyuProject.scala
|
Scala
|
mit
| 688 |
package russoul.lib.common.math.geometry.simple
import russoul.lib.common.TypeClasses._
import russoul.lib.common.utils.Arr
import russoul.lib.common.immutable
import russoul.lib.common.Implicits._
import shapeless.Nat._
import russoul.lib.common._
import russoul.lib.common.math.geometry.simple.general.{CenteredShape, GeometricShape}
import shapeless.Nat
import Abstraction._
import scala.reflect.ClassTag
@immutable case class AABBOver[V[_,_ <: Nat], @tbsp F]private (override val center: V[F,_3],val extent: V[F,_3]) extends CenteredShape[V,F,_3] {
def genMin()(implicit ev1 : CES[V,F,_3], tensor1:T1[F,V,_3], field: Field[F]): V[F,_3] = center - extent
def genMax()(implicit ev1 : CES[V,F,_3], tensor1:T1[F,V,_3], field: Field[F]): V[F,_3] = center + extent
override def translate(v: V[F,_3])(implicit ev1 : CES[V,F,_3], tensor1:T1[F,V,_3], field: Field[F]): AABBOver[V,F] =
{
new AABBOver(center + v, extent)
}
/**
*
* @return scaled version (around AABB's center point)
*/
override def scale(s:F)(implicit ev1 : CES[V,F,_3], tensor1:T1[F,V,_3], field: Field[F]): AABBOver[V,F] =
{
new AABBOver(center, extent * s)
}
override def scaleAroundBasis(factor: F)(implicit ev1: CES[V, F, _3], ev2: T1[F, V, _3], ev3: Field[F]): AABBOver[V, F] = {
new AABBOver(center * factor, extent * factor)
}
def genVertices()(implicit ev1 : CES[V,F,_3], tensor1:T1[F,V,_3], field: Field[F], tag: ClassTag[V[F,_3]]): Array[V[F,_3]] =
{
val a = new Array[V[F,_3]](8)
val sx = extent.x
val sy = extent.y
val sz = extent.z
a(0) = makeVector(_3, center.x-sx, center.y-sy, center.z-sz)
a(1) = makeVector(_3, center.x-sx, center.y-sy, center.z+sz)
a(2) = makeVector(_3, center.x+sx, center.y-sy, center.z+sz)
a(3) = makeVector(_3, center.x+sx, center.y-sy, center.z-sz)
a(4) = makeVector(_3, center.x-sx, center.y+sy, center.z-sz)
a(5) = makeVector(_3, center.x-sx, center.y+sy, center.z+sz)
a(6) = makeVector(_3, center.x+sx, center.y+sy, center.z+sz)
a(7) = makeVector(_3, center.x+sx, center.y+sy, center.z-sz)
a
}
/**
    * @return the six face rectangles (top, bottom, left, right, back, front) of this AABB
*/
def genRectangles()(implicit ev1 : CES[V,F,_3], tensor1:T1[F,V,_3], field: Field[F], tag: ClassTag[V[F,_3]]): Array[RectangleOver[V,F]] =
{
val a = new Array[RectangleOver[V,F]](6)
val sx = extent.x
val sy = extent.y
val sz = extent.z
a(0) = RectangleOver[V,F](center + makeVector(_3,field.zero, sy, field.zero), makeVector(_3,sx, field.zero,field.zero), makeVector(_3,field.zero,field.zero,-sz))//top
a(1) = RectangleOver[V,F](center + makeVector(_3,field.zero, -sy, field.zero), makeVector(_3,sx, field.zero,field.zero), makeVector(_3,field.zero,field.zero,sz))//bottom
a(2) = RectangleOver[V,F](center + makeVector(_3,-sx, field.zero, field.zero), makeVector(_3,field.zero, field.zero,sz), makeVector(_3,field.zero,sy,field.zero))//left
a(3) = RectangleOver[V,F](center + makeVector(_3,sx, field.zero, field.zero), makeVector(_3,field.zero, field.zero,-sz), makeVector(_3,field.zero,sy,field.zero))//right
a(4) = RectangleOver[V,F](center + makeVector(_3,field.zero, field.zero, -sz), makeVector(_3,-sx, field.zero,field.zero), makeVector(_3,field.zero,sy,field.zero))//back
a(5) = RectangleOver[V,F](center + makeVector(_3,field.zero, field.zero, sz), makeVector(_3,sx, field.zero,field.zero), makeVector(_3,field.zero,sy,field.zero))//front
a
}
override def toString(): String =
{
"AABB(center = " + center + ";extent = " + extent + ")"
}
}
object AABBOver
{
def genFromMinMax[V[_,_ <: Nat],@tbsp F](min:V[F,_3], max:V[F,_3])(implicit ev1 : CES[V,F,_3], tensor1:T1[F,V,_3], field: Field[F], tag: ClassTag[V[F,_3]], con: Con[F]):AABBOver[V,F] =
{
val extent = (max-min) * 0.5D.as[F]
val center = min + extent
new AABBOver[V,F](center,extent)
}
def apply[V[_,_ <: Nat], @tbsp F](center: V[F,_3], extent: V[F,_3]) = new AABBOver[V,F](center, extent)
}
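// Worked example for genFromMinMax: with min = (0, 0, 0) and max = (2, 4, 6), the half-extent
// is (max - min) * 0.5 = (1, 2, 3) and the center is min + extent = (1, 2, 3), so the
// resulting AABB spans exactly [min, max].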
|
Russoul/UniScalaLib
|
src/main/scala/russoul/lib/common/math/geometry/simple/AABBOver.scala
|
Scala
|
mit
| 3,996 |
package pureconfig
import com.typesafe.config.{ ConfigObject, ConfigValue, ConfigValueType }
import pureconfig.error._
import pureconfig.syntax._
/**
* A trait that can be implemented to disambiguate between the different options of a coproduct or sealed family.
*
* @tparam T the type of the coproduct or sealed family for which this hint applies
*/
trait CoproductHint[T] {
/**
* Given a `ConfigValue` for the sealed family, disambiguate and extract the `ConfigValue` associated to the
* implementation for the given class or coproduct option name.
*
* If `cv` is a config for the given class name, this method returns `Right(Some(v))`, where `v` is the config
* related to the specific class (possibly the same as `cv`). If it determines that `cv` is a config for a different
* class, it returns `Right(None)`. If `cv` is missing information for disambiguation or has a wrong type, a
* `Left` containing a `Failure` is returned.
*
* @param cv the `ConfigValue` of the sealed family
* @param name the name of the class or coproduct option to try
* @return a `Either[ConfigReaderFailure, Option[ConfigValue]]` as defined above.
*/
def from(cv: ConfigValue, name: String): Either[ConfigReaderFailures, Option[ConfigValue]]
/**
* Given the `ConfigValue` for a specific class or coproduct option, encode disambiguation information and return a
* config for the sealed family or coproduct.
*
* @param cv the `ConfigValue` of the class or coproduct option
* @param name the name of the class or coproduct option
* @return the config for the sealed family or coproduct wrapped in a `Right`, or a `Left` with the failure if some error
* occurred.
*/
def to(cv: ConfigValue, name: String): Either[ConfigReaderFailures, ConfigValue]
/**
* Defines what to do if `from` returns `Success(Some(_))` for a class or coproduct option, but its `ConfigConvert`
* fails to deserialize the config.
*
* @param name the name of the class or coproduct option
* @return `true` if the next class or coproduct option should be tried, `false` otherwise.
*/
def tryNextOnFail(name: String): Boolean
}
/**
* Hint where the options are disambiguated by a `key = "value"` field inside the config.
*
* This hint will cause derived `ConfigConvert` instance to fail to convert configs to objects if the object has a
* field with the same name as the disambiguation key.
*
* By default, the field value written is the class or coproduct option name converted to lower case. This mapping can
* be changed by overriding the method `fieldValue` of this class.
*/
class FieldCoproductHint[T](key: String) extends CoproductHint[T] {
/**
* Returns the field value for a class or coproduct option name.
*
* @param name the name of the class or coproduct option
* @return the field value associated with the given class or coproduct option name.
*/
protected def fieldValue(name: String): String = name.toLowerCase
def from(cv: ConfigValue, name: String): Either[ConfigReaderFailures, Option[ConfigValue]] = cv match {
case co: ConfigObject =>
Option(co.get(key)) match {
case Some(fv) => fv.unwrapped match {
case v: String if v == fieldValue(name) => Right(Some(cv))
case _: String => Right(None)
case _ => Left(ConfigReaderFailures(WrongType(fv.valueType, Set(ConfigValueType.STRING), ConfigValueLocation(fv), Some(key))))
}
case None => Left(ConfigReaderFailures(KeyNotFound(key, ConfigValueLocation(co))))
}
case _ => Left(ConfigReaderFailures(WrongType(cv.valueType, Set(ConfigValueType.OBJECT), ConfigValueLocation(cv), None)))
}
def to(cv: ConfigValue, name: String): Either[ConfigReaderFailures, ConfigValue] = cv match {
case co: ConfigObject =>
if (co.containsKey(key)) Left(ConfigReaderFailures(CollidingKeys(key, co.get(key).toString, ConfigValueLocation(co))))
else Right(Map(key -> fieldValue(name)).toConfig.withFallback(co.toConfig))
case _ =>
Left(ConfigReaderFailures(WrongType(cv.valueType, Set(ConfigValueType.OBJECT), ConfigValueLocation(cv), None)))
}
def tryNextOnFail(name: String) = false
}
/**
* Hint applicable to sealed families of case objects where objects are written and read as strings with their type
* names. Trying to read or write values that are not case objects results in failure.
*
* @tparam T the type of the coproduct or sealed family for which this hint applies
*/
class EnumCoproductHint[T] extends CoproductHint[T] {
/**
* Returns the field value for a class or coproduct option name.
*
* @param name the name of the class or coproduct option
* @return the field value associated with the given class or coproduct option name.
*/
protected def fieldValue(name: String): String = name.toLowerCase
def from(cv: ConfigValue, name: String) = cv.valueType match {
case ConfigValueType.STRING if cv.unwrapped.toString == fieldValue(name) => Right(Some(Map.empty[String, String].toConfig))
case ConfigValueType.STRING => Right(None)
case typ => Left(ConfigReaderFailures(WrongType(typ, Set(ConfigValueType.STRING), ConfigValueLocation(cv), None)))
}
def to(cv: ConfigValue, name: String) = cv match {
case co: ConfigObject if co.isEmpty => Right(fieldValue(name).toConfig)
case _: ConfigObject => Left(ConfigReaderFailures(NonEmptyObjectFound(name, ConfigValueLocation(cv), None)))
case _ => Left(ConfigReaderFailures(WrongType(cv.valueType, Set(ConfigValueType.OBJECT), ConfigValueLocation(cv), None)))
}
def tryNextOnFail(name: String) = false
}
/**
* Hint where all coproduct options are tried in order. `from` will choose the first option able to deserialize
* the config without errors, while `to` will write the config as is, with no disambiguation information.
*/
class FirstSuccessCoproductHint[T] extends CoproductHint[T] {
def from(cv: ConfigValue, name: String) = Right(Some(cv))
def to(cv: ConfigValue, name: String) = Right(cv)
def tryNextOnFail(name: String) = true
}
object CoproductHint {
implicit def default[T]: CoproductHint[T] = new FieldCoproductHint[T]("type")
}
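// Illustrative sketch of how a FieldCoproductHint is typically brought into scope for a
// sealed family; the AnimalConf names and the "kind" key are hypothetical.
//
//   sealed trait AnimalConf
//   case class DogConf(age: Int) extends AnimalConf
//   case class BirdConf(canFly: Boolean) extends AnimalConf
//
//   implicit val animalHint: CoproductHint[AnimalConf] = new FieldCoproductHint[AnimalConf]("kind")
//
// With this hint in scope, a config object such as `{ kind = "dogconf", age = 3 }` is
// disambiguated to DogConf, because FieldCoproductHint.fieldValue lower-cases the option name.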
|
derekmorr/pureconfig
|
core/src/main/scala/pureconfig/CoproductHint.scala
|
Scala
|
mpl-2.0
| 6,209 |
/*
* Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api
import org.specs2.mutable.Specification
class PlayGlobalAppSpec extends Specification {
sequential
def testApp(allowGlobalApp: Boolean): PlayCoreTestApplication =
PlayCoreTestApplication(Map(
"play.allowGlobalApplication" -> allowGlobalApp,
"play.akka.config" -> "akka",
"play.akka.actor-system" -> "global-app-spec",
"akka.coordinated-shutdown.phases.actor-system-terminate.timeout" -> "90 second"
))
"play.api.Play" should {
"start apps with global state enabled" in {
val app = testApp(true)
Play.start(app)
Play.privateMaybeApplication must beSuccessfulTry.withValue(app)
Play.stop(app)
success
}
"start apps with global state disabled" in {
val app = testApp(false)
Play.start(app)
Play.privateMaybeApplication must throwA[RuntimeException]
Play.stop(app)
success
}
"shut down the first app when starting a second app with global state enabled" in {
val app1 = testApp(true)
Play.start(app1)
val app2 = testApp(true)
Play.start(app2)
app1.isTerminated must beTrue
app2.isTerminated must beFalse
Play.privateMaybeApplication must beSuccessfulTry.withValue(app2)
Play.current must_== app2
Play.stop(app1)
Play.stop(app2)
success
}
"start one app with global state after starting another without global state" in {
val app1 = testApp(false)
Play.start(app1)
val app2 = testApp(true)
Play.start(app2)
app1.isTerminated must beFalse
app2.isTerminated must beFalse
Play.privateMaybeApplication must beSuccessfulTry.withValue(app2)
Play.stop(app1)
Play.stop(app2)
success
}
"start one app without global state after starting another with global state" in {
val app1 = testApp(true)
Play.start(app1)
val app2 = testApp(false)
Play.start(app2)
app1.isTerminated must beFalse
app2.isTerminated must beFalse
Play.privateMaybeApplication must beSuccessfulTry.withValue(app1)
Play.stop(app1)
Play.stop(app2)
success
}
"start multiple apps with global state disabled" in {
val app1 = testApp(false)
Play.start(app1)
val app2 = testApp(false)
Play.start(app2)
app1.isTerminated must beFalse
app2.isTerminated must beFalse
Play.privateMaybeApplication must throwA[RuntimeException]
Play.stop(app1)
Play.stop(app2)
success
}
"should stop an app with global state disabled" in {
val app = testApp(false)
Play.start(app)
Play.privateMaybeApplication must throwA[RuntimeException]
Play.stop(app)
app.isTerminated must beTrue
}
"should unset current app when stopping with global state enabled" in {
val app = testApp(true)
Play.start(app)
Play.privateMaybeApplication must beSuccessfulTry.withValue(app)
Play.stop(app)
app.isTerminated must beTrue
Play.privateMaybeApplication must throwA[RuntimeException]
}
}
}
|
Shenker93/playframework
|
framework/src/play/src/test/scala/play/api/PlayGlobalAppSpec.scala
|
Scala
|
apache-2.0
| 3,176 |
package vggames.shared
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import vggames.shared.task.Descriptions
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class DescriptionsSpec extends Specification {
"descriptions" should {
"read description according to group name" in {
new Descriptions("asdrubal").forGroup("test") must_== "<p>test <br /></p>\\n"
}
"return no description for group when group has no description" in {
new Descriptions("asdrubal").forGroup("test.not.exists") must_== "No description for group test.not.exists"
}
"cache description to avoid lookup" in {
val descriptions = new Descriptions("asdrubal")
descriptions.forGroup("test") must_== descriptions.forGroup("test")
descriptions.forGroup("test") must_== descriptions.forGroup("test")
descriptions.forGroup("test") must_== descriptions.forGroup("test")
}
"compile markdown syntax" in {
new Descriptions("asdrubal").forGroup("markdown") must_== "<h1>abc</h1>\\n<p>cde</p>\\n"
}
"add prettyprint class to code elements inside pre" in {
new Descriptions("asdrubal").forGroup("code") must contain("""<pre class="prettyprint"><code class="language-asdrubal">""")
}
}
}
|
vidageek/games
|
games/game/src/test/scala/vggames/shared/DescriptionsSpec.scala
|
Scala
|
gpl-3.0
| 1,281 |
package com.socrata.datacoordinator
package truth.metadata
import com.rojoma.json.v3.util.{JsonKey, AutomaticJsonCodecBuilder}
import com.socrata.datacoordinator.id.DatasetId
import com.rojoma.json.v3.codec.{DecodeError, JsonDecode, JsonEncode}
import com.rojoma.json.v3.ast.{JString, JValue}
trait DatasetInfoLike extends Product {
val systemId: DatasetId
val nextCounterValue: Long
val localeName: String
val obfuscationKey: Array[Byte]
val resourceName: Option[String]
lazy val tableBase = "t" + systemId.underlying
lazy val auditTableName = tableBase + "_audit"
lazy val logTableName = tableBase + "_log"
}
case class UnanchoredDatasetInfo(@JsonKey("sid") systemId: DatasetId,
@JsonKey("ctr") nextCounterValue: Long,
@JsonKey("locale") localeName: String,
@JsonKey("obfkey") obfuscationKey: Array[Byte],
@JsonKey("resource") resourceName: Option[String]) extends DatasetInfoLike
object UnanchoredDatasetInfo extends ((DatasetId, Long, String, Array[Byte], Option[String]) => UnanchoredDatasetInfo) {
override def toString = "DatasetInfo"
private implicit val byteCodec = new JsonDecode[Array[Byte]] with JsonEncode[Array[Byte]] {
def encode(x: Array[Byte]): JValue =
JString(new sun.misc.BASE64Encoder().encode(x))
def decode(x: JValue): JsonDecode.DecodeResult[Array[Byte]] = x match {
case JString(s) =>
try { Right(new sun.misc.BASE64Decoder().decodeBuffer(s)) }
catch { case _: java.io.IOException => Left(DecodeError.InvalidValue(x)) }
case other =>
Left(DecodeError.InvalidType(JString, other.jsonType))
}
}
implicit val jCodec = AutomaticJsonCodecBuilder[UnanchoredDatasetInfo]
}
/** This class should not be instantiated except by a [[com.socrata.datacoordinator.truth.metadata.DatasetMapReader]]
* or [[com.socrata.datacoordinator.truth.metadata.DatasetMapWriter]].
 * @param tag Guard against a non-map accidentally instantiating this.
*/
case class DatasetInfo(systemId: DatasetId, nextCounterValue: Long, val latestDataVersion: Long, localeName: String, obfuscationKey: Array[Byte], resourceName: Option[String])(implicit tag: com.socrata.datacoordinator.truth.metadata.`-impl`.Tag) extends DatasetInfoLike {
def unanchored: UnanchoredDatasetInfo = UnanchoredDatasetInfo(systemId, nextCounterValue, localeName, obfuscationKey, resourceName)
override def equals(o: Any) = o match {
case that: DatasetInfo =>
this.systemId == that.systemId &&
this.nextCounterValue == that.nextCounterValue &&
this.latestDataVersion == that.latestDataVersion &&
this.localeName == that.localeName &&
java.util.Arrays.equals(this.obfuscationKey, that.obfuscationKey) && // thanks, java
this.resourceName == that.resourceName
case _ =>
false
}
}
|
socrata-platform/data-coordinator
|
coordinatorlib/src/main/scala/com/socrata/datacoordinator/truth/metadata/DatasetInfo.scala
|
Scala
|
apache-2.0
| 2,931 |
/*
* Copyright 2015 ligaDATA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ligadata.pmml.transforms.xmltoraw.common
import scala.collection.mutable.ArrayBuffer
import org.xml.sax.Attributes
import com.ligadata.pmml.compiler._
import com.ligadata.pmml.traits._
import com.ligadata.pmml.support._
import com.ligadata.pmml.syntaxtree.raw.common._
import com.ligadata.pmml.transforms.xmltoraw.common._
class DefineFunctionPmmlNodeGenerator extends PmmlNodeGenerator {
/**
      Build a PmmlNode from the supplied XML arguments and return it to the calling dispatcher.
@param namespaceURI: String
@param localName: String
@param qName:String
@param atts: Attributes
@param lineNumber : Int
@param columnNumber : Int
@return a PmmlNode
*/
def make(namespaceURI: String
, localName: String
, qName:String
, atts: Attributes
, lineNumber : Int
, columnNumber : Int) : PmmlNode = {
val ofInterest : ArrayBuffer[String] = ArrayBuffer("name", "optype", "dataType")
val selectedValues = PmmlNode.hlpOrganizeAttributes(atts, ofInterest).asInstanceOf[ArrayBuffer[_]]
val name : String = selectedValues.apply(0).asInstanceOf[String]
val optype : String = selectedValues.apply(1).asInstanceOf[String]
val dataType : String = selectedValues.apply(2).asInstanceOf[String]
new PmmlDefineFunction(namespaceURI, localName , qName, lineNumber, columnNumber, name, optype, dataType)
}
}
class ParameterFieldPmmlNodeGenerator extends PmmlNodeGenerator {
/**
      Build a PmmlNode from the supplied XML arguments and return it to the calling dispatcher.
@param namespaceURI: String
@param localName: String
@param qName:String
@param atts: Attributes
@param lineNumber : Int
@param columnNumber : Int
@return a PmmlNode
*/
def make(namespaceURI: String
, localName: String
, qName:String
, atts: Attributes
, lineNumber : Int
, columnNumber : Int) : PmmlNode = {
val ofInterest : ArrayBuffer[String] = ArrayBuffer("name", "optype", "dataType")
val selectedValues = PmmlNode.hlpOrganizeAttributes(atts, ofInterest).asInstanceOf[ArrayBuffer[_]]
val name : String = selectedValues.apply(0).asInstanceOf[String]
val optype : String = selectedValues.apply(1).asInstanceOf[String]
val dataType : String = selectedValues.apply(2).asInstanceOf[String]
new PmmlParameterField(namespaceURI, localName , qName, lineNumber, columnNumber, name, optype, dataType)
}
}
|
traytonwhite/Kamanja
|
trunk/Pmml/PmmlCompiler/src/main/scala/com/ligadata/pmml/transforms/xmltoraw/common/FunctionNodes.scala
|
Scala
|
apache-2.0
| 3,048 |
package mesosphere.marathon.api.v2
import java.util
import mesosphere.marathon.core.appinfo.AppInfo
import mesosphere.marathon.core.appinfo.AppInfo.Embed
import org.slf4j.LoggerFactory
/**
* Resolves AppInfo.Embed from query parameters.
*/
private[v2] object AppInfoEmbedResolver {
private[this] val log = LoggerFactory.getLogger(getClass)
private[this] val EmbedAppsPrefixes = Set("apps.", "app.")
private[this] val EmbedTasks = "tasks"
private[this] val EmbedDeployments = "deployments"
/* deprecated, use lastTaskFailure, tasks, deployments instead */
private[this] val EmbedTasksAndFailures = "failures"
private[this] val EmbedLastTaskFailure = "lastTaskFailure"
private[this] val EmbedCounts = "counts"
/**
* Converts embed arguments to our internal representation.
*
   * Accepts the arguments with any of the prefixes or with no prefix at all,
   * to avoid subtle user errors caused by confusing the two forms.
*/
def resolve(embed: util.Set[String]): Set[Embed] = {
def mapEmbedStrings(prefix: String, withoutPrefix: String): Set[AppInfo.Embed] = withoutPrefix match {
case EmbedTasks => Set(AppInfo.Embed.Tasks, /* deprecated */ AppInfo.Embed.Deployments)
case EmbedTasksAndFailures =>
        log.warn(s"Using deprecated embed=$prefix$withoutPrefix. " +
s"Use ${prefix}tasks, ${prefix}lastTaskFailure, ${prefix}deployments instead.")
Set(AppInfo.Embed.Tasks, AppInfo.Embed.LastTaskFailure, AppInfo.Embed.Deployments)
case EmbedDeployments => Set(AppInfo.Embed.Deployments)
case EmbedLastTaskFailure => Set(AppInfo.Embed.LastTaskFailure)
case EmbedCounts => Set(AppInfo.Embed.Counts)
case unknown: String =>
log.warn(s"unknown embed argument: $prefix$unknown")
Set.empty
}
def separatePrefix(embedMe: String): (String, String) = {
val removablePrefix = EmbedAppsPrefixes.find(embedMe.startsWith(_)).getOrElse("")
val withoutPrefix = embedMe.substring(removablePrefix.length)
(removablePrefix, withoutPrefix)
}
import scala.collection.JavaConverters._
val embedWithSeparatedPrefixes = embed.asScala.map(separatePrefix)
embedWithSeparatedPrefixes.flatMap { case (prefix, withoutPrefix) => mapEmbedStrings(prefix, withoutPrefix) }.toSet
}
}
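// Illustrative sketch (hypothetical, not from the original source): prefixed and
// unprefixed arguments resolve to the same embeds, e.g. "app.tasks" and "tasks" both
// yield Embed.Tasks plus the deprecated implicit Embed.Deployments.
private[v2] object AppInfoEmbedResolverSketch {
  import scala.collection.JavaConverters._
  val embeds: Set[AppInfo.Embed] =
    AppInfoEmbedResolver.resolve(Set("app.tasks", "counts").asJava)
  // embeds == Set(AppInfo.Embed.Tasks, AppInfo.Embed.Deployments, AppInfo.Embed.Counts)
}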
|
cgvarela/marathon
|
src/main/scala/mesosphere/marathon/api/v2/AppInfoEmbedResolver.scala
|
Scala
|
apache-2.0
| 2,303 |
package com.mentatlabs.nsa
package scalac
package options
/* -Y
* ==
* 2.6.0 - 2.6.1: Print a synopsis of private options
* 2.7.0 - 2.7.7: !! missing !!
* 2.8.0 - 2.8.2: Print a synopsis of private options
* 2.9.0 - 2.12.0: Print a synopsis of private options.
*/
case object ScalacY
extends ScalacOptionBoolean("-Y", ScalacVersions.`2.6.0`)
|
melezov/sbt-nsa
|
nsa-core/src/main/scala/com/mentatlabs/nsa/scalac/options/advanced/ScalacY.scala
|
Scala
|
bsd-3-clause
| 368 |
package tethys.circe
import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpec
import io.circe.{Json, JsonObject}
import tethys.commons.TokenNode
import tethys.commons.TokenNode.{value => token, _}
import tethys.circe.SimpleTokenWriterRaw._
class CirceSupportTest extends AnyFlatSpec with Matchers {
behavior of "Circe ast JsonReader"
it should "parse Int" in {
token(100L).tokensAs[Json] shouldBe Json.fromInt(100)
}
it should "parse Long" in {
token(100L).tokensAs[Json] shouldBe Json.fromLong(100L)
}
it should "parse Float" in {
token(100.0f).tokensAs[Json] shouldBe Json.fromFloatOrNull(100.0f)
}
it should "parse Double" in {
token(100.0D).tokensAs[Json] shouldBe Json.fromDoubleOrNull(100.0D)
}
it should "parse BigInt" in {
token(BigInt(100L)).tokensAs[Json] shouldBe Json.fromBigInt(BigInt(100L))
}
it should "parse BigDecimal" in {
token(BigDecimal(100.0D)).tokensAs[Json] shouldBe Json.fromBigDecimal(100.0D)
}
it should "parse String" in {
token("str").tokensAs[Json] shouldBe Json.fromString("str")
}
it should "parse Boolean.True" in {
token(true).tokensAs[Json] shouldBe Json.True
}
it should "parse Boolean.False" in {
token(false).tokensAs[Json] shouldBe Json.False
}
it should "parse Null" in {
List(TokenNode.NullValueNode).tokensAs[Json] shouldBe Json.Null
}
it should "parse Array" in {
arr(1, 2L, 3).tokensAs[Json] shouldBe
Json.fromValues(List(Json.fromLong(1L), Json.fromLong(2L), Json.fromLong(3L)))
}
it should "parse JsonObject" in {
obj(
"a" -> arr(1L, 2L),
"b" -> obj("c" -> null),
"c" -> token("demo"),
"d" -> token(true),
"e" -> token(false)
).tokensAs[JsonObject] shouldBe JsonObject(
"a" -> Json.fromValues(List(Json.fromLong(1L), Json.fromLong(2L))),
"b" -> Json.fromJsonObject(JsonObject("c" -> Json.Null)),
"c" -> Json.fromString("demo"),
"d" -> Json.True,
"e" -> Json.False
)
}
it should "parse Array of JsonObject" in {
arr(obj("a" -> "b"), obj("c" -> "d")).tokensAs[Json] shouldBe Json.fromValues(List(
Json.fromJsonObject(JsonObject("a" -> Json.fromString("b"))),
Json.fromJsonObject(JsonObject("c" -> Json.fromString("d")))
))
}
behavior of "Circe ast JsonWriter"
it should "write Int" in {
Json.fromInt(100).asTokenList shouldBe token(100L)
}
it should "write Long" in {
Json.fromLong(10000000000L).asTokenList shouldBe token(10000000000L)
}
it should "write Float" in {
Json.fromFloat(100.0f).asTokenList shouldBe token(100.0f)
}
it should "write Double" in {
Json.fromDouble(100.0D).asTokenList shouldBe token(100.0D)
}
it should "write BigInt" in {
Json.fromBigInt(BigInt("10000000000")).asTokenList match {
case DoubleValueNode(d) :: Nil => d shouldBe 1.0e10 // 2.11 only behavior
case LongValueNode(l) :: Nil => l shouldBe 10000000000L
case _ => fail()
}
}
it should "write BigDecimal" in {
Json.fromBigDecimal(BigDecimal(100.0D)).asTokenList shouldBe token(BigDecimal(100.0D))
}
it should "write String" in {
Json.fromString("str").asTokenList shouldBe token("str")
}
it should "write Boolean.True" in {
Json.fromBoolean(true).asTokenList shouldBe token(true)
}
it should "write Boolean.False" in {
Json.fromBoolean(false).asTokenList shouldBe token(false)
}
it should "write Null" in {
Json.Null.asTokenList shouldBe List(TokenNode.NullValueNode)
}
it should "write Array" in {
Json.fromValues(List(
Json.fromInt(1),
Json.fromInt(2),
Json.fromInt(3)
)).asTokenList shouldBe arr(1L, 2L, 3L)
}
it should "write JsonObject" in {
val jobj = JsonObject(
"a" -> Json.fromValues(List(Json.fromInt(1), Json.fromInt(2))),
"b" -> Json.fromJsonObject(JsonObject("c" -> Json.Null)),
"c" -> Json.fromString("demo"),
"d" -> Json.True,
"e" -> Json.False
)
jobj.asTokenList shouldBe obj(
"a" -> arr(1L, 2L),
"b" -> obj("c" -> null),
"c" -> token("demo"),
"d" -> token(true),
"e" -> token(false)
)
}
}
|
tethys-json/tethys
|
modules/circe/src/test/scala/tethys/circe/CirceSupportTest.scala
|
Scala
|
apache-2.0
| 4,204 |
/*
* EditAddRemoveOutput.scala
* (Mellite)
*
* Copyright (c) 2012-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* [email protected]
*/
package de.sciss.mellite.edit
import de.sciss.lucre.edit.UndoManager
import de.sciss.lucre.edit.impl.BasicUndoableEdit
import de.sciss.lucre.{Source, Txn}
import de.sciss.proc.Proc
// direction: true = insert, false = remove
// XXX TODO - should disconnect links and restore them in undo
private[edit] final class EditAddRemoveProcOutput[T <: Txn[T]](isAdd: Boolean,
procH: Source[T, Proc[T]],
key: String)
extends BasicUndoableEdit[T] {
override protected def undoImpl()(implicit tx: T): Unit =
perform(isUndo = true)
override protected def redoImpl()(implicit tx: T): Unit =
perform()
def perform()(implicit tx: T): Unit = perform(isUndo = false)
private def perform(isUndo: Boolean)(implicit tx: T): Unit = {
val proc = procH()
val outputs = proc.outputs
if (isAdd ^ isUndo)
outputs.add (key)
else
outputs.remove(key)
}
override def name: String = s"${if (isAdd) "Add" else "Remove"} Output"
}
object EditAddProcOutput {
def apply[T <: Txn[T]](proc: Proc[T], key: String)
(implicit tx: T, undo: UndoManager[T]): Unit = {
val procH = tx.newHandle(proc)
val res = new EditAddRemoveProcOutput(isAdd = true, procH = procH, key = key)
res.perform()
undo.addEdit(res)
}
}
object EditRemoveProcOutput {
def apply[T <: Txn[T]](proc: Proc[T], key: String)
(implicit tx: T, undo: UndoManager[T]): Unit = {
val procH = tx.newHandle(proc)
val res = new EditAddRemoveProcOutput(isAdd = false, procH = procH, key = key)
res.perform()
undo.addEdit(res)
}
}
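// Illustrative sketch (hypothetical, not from the original source): inside a transaction,
// and with an UndoManager[T] in implicit scope, an output named "out" would be added
// undo-ably via
//   EditAddProcOutput(proc, "out")
// and removed again via
//   EditRemoveProcOutput(proc, "out")
// with both registering the resulting edit with the undo manager.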
|
Sciss/Mellite
|
app/src/main/scala/de/sciss/mellite/edit/EditAddRemoveProcOutput.scala
|
Scala
|
agpl-3.0
| 2,018 |
package net.caoticode
import akka.actor.ActorRef
package object synergy {
object MasterWorkerProtocol {
// Messages from Workers
case class WorkerCreated(worker: ActorRef)
case class WorkerRequestsWork(worker: ActorRef)
case class WorkIsDone(worker: ActorRef)
// Messages to Workers
case class WorkToBeDone(work: Any)
case object WorkIsReady
case object NoWorkToBeDone
}
object CommonProtocol {
case class SubscriberConnected(worker: ActorRef)
}
object ChannelMasterProtocol {
// messages from client
case class ChannelCreate(name: String)
case class ChannelDelete(name: String)
case class ChannelJoin(name: String)
case class ChannelLeave(name: String)
case class ChannelExists(name: String)
case class ChannelJoinCreate(name: String)
// messages to clients
case class ChannelJoinSuccess(channel: ActorRef)
case class ChannelJoinFail(exception: Exception)
case class ChannelExistsResponse(exists: Boolean)
case class ChannelCreated(channel: ActorRef)
}
object Channel2ClientProtocol {
private val DefaultRoutingTag = ""
case class SubscribePull(routingTag: String = DefaultRoutingTag)
case class SubscribePush(routingTag: String = DefaultRoutingTag)
case class UnsubscribePull(routingTag: String = DefaultRoutingTag)
case class UnsubscribePush(routingTag: String = DefaultRoutingTag)
case class Publish(message: Any, routingTag: String = DefaultRoutingTag)
case object InitiateShutdown
}
}
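// Illustrative sketch (hypothetical, not from the original source): a minimal worker
// speaking MasterWorkerProtocol. It announces itself, requests work when work is
// announced, and reports completion.
object MasterWorkerProtocolExample {
  import akka.actor.Actor
  import net.caoticode.synergy.MasterWorkerProtocol._

  class ExampleWorker(master: ActorRef) extends Actor {
    override def preStart(): Unit = master ! WorkerCreated(self)
    def receive: Receive = {
      case WorkIsReady        => master ! WorkerRequestsWork(self)
      case WorkToBeDone(work) =>
        // process `work` here, then report back to the master
        master ! WorkIsDone(self)
      case NoWorkToBeDone     => () // stay idle until more work is announced
    }
  }
}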
|
mdread/synergy
|
src/main/scala/net/caoticode/synergy/package.scala
|
Scala
|
mit
| 1,587 |
/*
* Wire
* Copyright (C) 2016 Wire Swiss GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.waz.znet2.http
import java.io.{ByteArrayInputStream, File, FileInputStream}
trait RequestSerializer[T] {
def serialize(request: Request[T]): Request[Body]
def contramap[B](f: B => T): RequestSerializer[B] =
RequestSerializer.create { request =>
request.copy(body = this.serialize(request.copy(body = f(request.body))).body)
}
}
object RequestSerializer {
def apply[T](implicit rs: RequestSerializer[T]): RequestSerializer[T] = rs
def create[T](f: Request[T] => Request[Body]): RequestSerializer[T] = new RequestSerializer[T] {
override def serialize(request: Request[T]): Request[Body] = f(request)
}
}
trait BodySerializer[T] {
def serialize(body: T): Body
def contramap[B](f: B => T): BodySerializer[B] =
BodySerializer.create(f andThen this.serialize)
}
object BodySerializer {
def apply[T](implicit bs: BodySerializer[T]): BodySerializer[T] = bs
def create[T](f: T => Body): BodySerializer[T] = new BodySerializer[T] {
override def serialize(body: T): Body = f(body)
}
}
trait RawBodySerializer[T] {
def serialize(value: T): RawBody
def contramap[B](f: B => T): RawBodySerializer[B] =
RawBodySerializer.create(f andThen this.serialize)
}
object RawBodySerializer {
def apply[T](implicit rbs: RawBodyDeserializer[T]): RawBodyDeserializer[T] = rbs
def create[T](f: T => RawBody): RawBodySerializer[T] = new RawBodySerializer[T] {
override def serialize(body: T): RawBody = f(body)
}
}
trait BasicAutoDerivationRulesForSerializers {
implicit val StringBodySerializer: RawBodySerializer[String] =
RawBodySerializer.create(str => {
val bytes = str.getBytes("utf-8")
RawBody(Some(MediaType.PlainText), () => new ByteArrayInputStream(bytes), Some(bytes.length))
})
implicit val IntBodySerializer: RawBodySerializer[Int] =
RawBodySerializer.create(value => {
val bytes = value.toString.getBytes("utf-8")
RawBody(Some(MediaType.PlainText), () => new ByteArrayInputStream(bytes), Some(bytes.length))
})
implicit val BooleanBodySerializer: RawBodySerializer[Boolean] =
    RawBodySerializer.create(value => {
val bytes = value.toString.getBytes("utf-8")
RawBody(Some(MediaType.PlainText), () => new ByteArrayInputStream(bytes), Some(bytes.length))
})
implicit val BytesBodySerializer: RawBodySerializer[Array[Byte]] =
RawBodySerializer.create(bytes => RawBody(Some(MediaType.Bytes), () => new ByteArrayInputStream(bytes), Some(bytes.length)))
implicit val FileBodySerializer: RawBodySerializer[File] =
RawBodySerializer.create(file => RawBody(None, () => new FileInputStream(file), Some(file.length())))
implicit val MultipartMixedBodySerializer: BodySerializer[MultipartBodyMixed] =
BodySerializer.create { body =>
RawMultipartBodyMixed(body.parts.map(p => RawMultipartBodyMixed.Part(p.serialize, p.headers)))
}
implicit val MultipartFormDataBodySerializer: BodySerializer[MultipartBodyFormData] =
BodySerializer.create { body =>
RawMultipartBodyFormData(body.parts.map(p => RawMultipartBodyFormData.Part(p.serialize, p.name, p.fileName)))
}
implicit def bodySerializerFromRawBodySerializer[T](implicit rbs: RawBodySerializer[T]): BodySerializer[T] =
BodySerializer.create(rbs.serialize)
implicit val EmptyBodyRequestSerializer: RequestSerializer[EmptyBody] =
RequestSerializer.create(request => request.copy(body = EmptyBodyImpl))
implicit def serializerFromBodySerializer[T](implicit bs: BodySerializer[T]): RequestSerializer[T] =
RequestSerializer.create(request => request.copy(body = bs.serialize(request.body)))
}
trait AutoDerivationRulesForSerializersOld extends BasicAutoDerivationRulesForSerializers {
import org.json.JSONObject
import com.waz.utils.JsonEncoder
implicit val JsonBodySerializer: RawBodySerializer[JSONObject] =
RawBodySerializer.create(json => {
val bytes = json.toString.getBytes("utf8")
RawBody(Some(MediaType.Json), () => new ByteArrayInputStream(bytes), Some(bytes.length))
})
implicit def objectToJsonBodySerializer[T](implicit e: JsonEncoder[T]): RawBodySerializer[T] =
JsonBodySerializer.contramap(e.apply)
}
trait AutoDerivationRulesForSerializers extends BasicAutoDerivationRulesForSerializers {
import io.circe.{Encoder, Json}
implicit val CirceJsonBodySerializer: RawBodySerializer[Json] = RawBodySerializer.create(json => {
val bytes = json.noSpaces.getBytes("utf8")
RawBody(Some(MediaType.Json), () => new ByteArrayInputStream(bytes), Some(bytes.length))
})
implicit def objectToCirceJsonBodySerializer[T](implicit e: Encoder[T]): RawBodySerializer[T] =
CirceJsonBodySerializer.contramap(e.apply)
}
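// Illustrative sketch (hypothetical, not from the original source; UserName is made up):
// serializers for new types are usually derived from the provided ones via contramap
// rather than written by hand.
object SerializerContramapSketch extends BasicAutoDerivationRulesForSerializers {
  final case class UserName(value: String)

  implicit val userNameSerializer: RawBodySerializer[UserName] =
    StringBodySerializer.contramap((u: UserName) => u.value)
}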
|
wireapp/wire-android-sync-engine
|
zmessaging/src/main/scala/com/waz/znet2/http/Serializers.scala
|
Scala
|
gpl-3.0
| 5,418 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.spark
import java.io.Serializable
import org.apache.hadoop.hbase.util.Bytes
/**
* This is the key to be used for sorting and shuffling.
*
* We will only partition on the rowKey but we will sort on all three
*
* @param rowKey Record RowKey
* @param family Record ColumnFamily
* @param qualifier Cell Qualifier
*/
class KeyFamilyQualifier(val rowKey:Array[Byte], val family:Array[Byte], val qualifier:Array[Byte])
extends Comparable[KeyFamilyQualifier] with Serializable {
override def compareTo(o: KeyFamilyQualifier): Int = {
var result = Bytes.compareTo(rowKey, o.rowKey)
if (result == 0) {
result = Bytes.compareTo(family, o.family)
if (result == 0) result = Bytes.compareTo(qualifier, o.qualifier)
}
result
}
override def toString: String = {
Bytes.toString(rowKey) + ":" + Bytes.toString(family) + ":" + Bytes.toString(qualifier)
}
}
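// Illustrative sketch (hypothetical, not from the original source): ordering is by rowKey
// first, then family, then qualifier, so two keys for the same row and family sort by
// qualifier alone.
object KeyFamilyQualifierSketch {
  private val a = new KeyFamilyQualifier(Bytes.toBytes("row1"), Bytes.toBytes("cf"), Bytes.toBytes("a"))
  private val b = new KeyFamilyQualifier(Bytes.toBytes("row1"), Bytes.toBytes("cf"), Bytes.toBytes("b"))
  require(a.compareTo(b) < 0)
}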
|
tmalaska/SparkOnHBase
|
src/main/scala/org/apache/hadoop/hbase/spark/KeyFamilyQualifier.scala
|
Scala
|
apache-2.0
| 1,731 |
package chandu0101.scalajs.react.components.demo.pages
import chandu0101.scalajs.react.components.demo.components.LeftNavPage
import chandu0101.scalajs.react.components.demo.routes.{LeftRoute, ReactPopoverRouteModule}
import japgolly.scalajs.react.ReactComponentB
import japgolly.scalajs.react.extra.router2.RouterCtl
/**
 * Created by chandrasekharkode.
*/
object ReactPopoverPage {
val component = ReactComponentB[Props]("ReactPopOverPage")
.render((P) => {
LeftNavPage(ReactPopoverRouteModule.menu, P.selectedPage, P.ctrl)
})
.build
case class Props(selectedPage: LeftRoute, ctrl: RouterCtl[LeftRoute])
def apply(selectedPage: LeftRoute, ctrl: RouterCtl[LeftRoute]) = component(Props(selectedPage, ctrl))
}
|
coreyauger/scalajs-react-components
|
demo/src/main/scala/chandu0101/scalajs/react/components/demo/pages/ReactPopoverPage.scala
|
Scala
|
apache-2.0
| 738 |
package dao
import dao.UserDao.systemIdFilter
import dao.helper.TableFilter.{abbreviationFilter, idFilter}
import dao.helper.{DBResult, TableFilter}
import database.helper.LdapUserStatus
import database.helper.LdapUserStatus._
import database.{DegreeDb, UserDb, UserTable}
import models._
import models.helper._
import slick.dbio.Effect
import slick.jdbc.PostgresProfile.api._
import slick.sql.SqlAction
import java.util.UUID
import javax.inject.Inject
import scala.concurrent.{ExecutionContext, Future}
object UserDao extends TableFilter[UserTable] {
def enrollmentFilter(enrollment: UUID): TableFilterPredicate =
_.enrollment.map(_ === enrollment).getOrElse(false)
def firstnameFilter(firstname: String): TableFilterPredicate =
_.firstname.toLowerCase like s"%${firstname.toLowerCase}%"
def lastnameFilter(lastname: String): TableFilterPredicate =
_.lastname.toLowerCase like s"%${lastname.toLowerCase}%"
def systemIdFilter(systemId: String): TableFilterPredicate =
_.systemId.toLowerCase === systemId.toLowerCase
def campusIdFilter(campusId: String): TableFilterPredicate =
_.campusId.toLowerCase === campusId.toLowerCase
def statusFilter(status: LdapUserStatus): TableFilterPredicate =
_.status.toLowerCase === status.label.toLowerCase
}
trait UserDao extends AbstractDao[UserTable, UserDb, User] {
override val tableQuery: TableQuery[UserTable] = TableQuery[UserTable]
def degreeDao: DegreeDao
def authorityDao: AuthorityDao
def labworkApplicationDao: LabworkApplicationDao
def getBySystemId(systemId: String, atomic: Boolean): Future[Option[User]]
def userId(systemId: String): SqlAction[Option[UUID], NoStream, Effect.Read]
def buddyResult(
requesterId: UUID,
requesteeSystemId: String,
labwork: UUID
): Future[BuddyResult]
def makeUserModel(
systemId: String,
campusId: String,
lastname: String,
firstname: String,
email: String,
status: String,
registrationId: Option[String],
enrollment: Option[String]
): Future[UserDb]
def createOrUpdateWithBasicAuthority(user: UserDb): Future[DBResult[UserDb]]
def createOrUpdateWithBasicAuthority(
systemId: String,
campusId: String,
lastname: String,
firstname: String,
email: String,
status: String,
registrationId: Option[String],
enrollment: Option[String]
): Future[DBResult[UserDb]]
def getByCampusIds(campusIds: List[String]): Future[Seq[User]]
}
final class UserDaoImpl @Inject() (
val db: Database,
val authorityDao: AuthorityDao,
val degreeDao: DegreeDao,
val labworkApplicationDao: LabworkApplicationDao,
implicit val executionContext: ExecutionContext
) extends UserDao {
def getByCampusIds(campusIds: List[String]): Future[Seq[User]] =
toUniqueEntity(filterValidOnly(_.campusId.toLowerCase.inSet(campusIds)))
def getBySystemId(systemId: String, atomic: Boolean): Future[Option[User]] =
getSingleWhere(u => u.systemId === systemId, atomic)
def userId(systemId: String): SqlAction[Option[UUID], NoStream, Effect.Read] =
filterValidOnly(systemIdFilter(systemId))
.map(_.id)
.take(1)
.result
.headOption
def makeUserModel(
systemId: String,
campusId: String,
lastname: String,
firstname: String,
email: String,
status: String,
registrationId: Option[String],
enrollment: Option[String]
): Future[UserDb] = {
def createDegree(abbrev: String): Future[Degree] =
degreeDao.create(DegreeDb("", abbrev)).map(_.toUniqueEntity)
for {
status <- Future.fromTry(LdapUserStatus(status))
maybeDegree <- status match {
case StudentStatus
if enrollment.isDefined && registrationId.isDefined =>
val degreeAbbrev = enrollment.get
for {
maybeDegree <- degreeDao.getSingleWhere(
abbreviationFilter(degreeAbbrev).apply
)
degree <- maybeDegree.fold(createDegree(degreeAbbrev))(
Future.successful
)
} yield Some(degree.id)
case EmployeeStatus | LecturerStatus =>
Future.successful(Option.empty[UUID])
case _ =>
Future.failed(
new Throwable(
s"user with $status label must have a associated registration-id and degree abbreviation, but was $registrationId and $enrollment"
)
)
}
user = UserDb(
systemId,
campusId,
lastname,
firstname,
email,
status,
registrationId,
maybeDegree
)
} yield user
}
def createOrUpdateWithBasicAuthority(
user: UserDb
): Future[DBResult[UserDb]] = {
val result = for {
existing <- userId(user.systemId)
createOrUpdated <- existing match {
case Some(_) =>
updateQuery(user).map(u => DBResult.Updated(u))
case None =>
createWithBasicAuthorityQuery(user).map(t => DBResult.Created(t._1))
}
} yield createOrUpdated
db.run(result)
}
def createOrUpdateWithBasicAuthority(
systemId: String,
campusId: String,
lastname: String,
firstname: String,
email: String,
status: String,
registrationId: Option[String],
enrollment: Option[String]
): Future[DBResult[UserDb]] = {
for {
user <- makeUserModel(
systemId,
campusId,
lastname,
firstname,
email,
status,
registrationId,
enrollment
)
res <- createOrUpdateWithBasicAuthority(user)
} yield res
}
def createWithBasicAuthorityQuery(user: UserDb) = {
(for {
createdUser <- createQuery(user) // 1
baseAuth <- authorityDao.createBasicAuthorityFor(user)
} yield (createdUser, baseAuth)).transactionally
}
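  // Resolves the buddy relation between requester and requestee for a labwork: Allowed if
  // both share the same degree and have applied as friends, Almost if only the degree
  // matches, Denied on differing degrees, NotExisting if the requestee's system id is unknown.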
def buddyResult(
requesterId: UUID,
requesteeSystemId: String,
labwork: UUID
): Future[BuddyResult] = {
val requesteeSystemIdFilter = systemIdFilter(requesteeSystemId)
val requesterIdFilter = idFilter(requesterId)
val buddy = for {
requestee <- filterBy(List(requesteeSystemIdFilter))
requester <- filterBy(List(requesterIdFilter))
sameDegree = requestee.enrollment === requester.enrollment
} yield (requestee, sameDegree.getOrElse(false))
val friends = for {
b <- buddy
friends <- labworkApplicationDao.friendsOf(b._1.id, labwork)
} yield friends
val action = for {
b <- buddy.result
f <- friends.result
} yield {
val optRequestee = b.headOption.map(_._1.toUniqueEntity)
val optSameDegree = b.map(_._2).reduceOption(_ && _)
val friends = f.exists(_.id == requesterId)
(optRequestee, optSameDegree) match {
case (Some(requestee), Some(sameDegree)) =>
if (sameDegree)
if (friends) Allowed(requestee) else Almost(requestee)
else
Denied(requestee)
case _ => NotExisting(requesteeSystemId)
}
}
db.run(action)
}
override protected def shouldUpdate(
existing: UserDb,
toUpdate: UserDb
): Boolean = {
existing.systemId == toUpdate.systemId
}
override protected def existsQuery(
entity: UserDb
): Query[UserTable, UserDb, Seq] = {
filterBy(List(systemIdFilter(entity.systemId)))
}
override protected def toUniqueEntity(
query: Query[UserTable, UserDb, Seq]
): Future[Seq[User]] = {
db.run(query.result.map(_.map(_.toUniqueEntity)))
}
override protected def toAtomic(
query: Query[UserTable, UserDb, Seq]
): Future[Seq[User]] = {
db.run(
query
.joinLeft(degreeDao.tableQuery)
.on(_.enrollment === _.id)
.result
.map(_.map {
case (s, Some(d)) =>
StudentAtom(
s.systemId,
s.campusId,
s.lastname,
s.firstname,
s.email,
s.registrationId.head,
d.toUniqueEntity,
s.id
)
case (dbUser, None) => dbUser.toUniqueEntity
})
)
}
}
|
THK-ADV/lwm-reloaded
|
app/dao/UserDao.scala
|
Scala
|
mit
| 8,183 |
package rpm4s.repo.data.primary
import rpm4s.repo.data.Bytes
case class SizeInfo(
pack: Bytes,
installed: Bytes,
archive: Bytes
)
|
lucidd/rpm4s
|
repo-utils/shared/src/main/scala/rpm4s/repo/data/primary/SizeInfo.scala
|
Scala
|
mit
| 137 |
package springnz.sparkplug.core
import org.apache.spark.rdd.RDD
import scala.reflect.ClassTag
object RDDPimpers {
class RDDExtensions[A: ClassTag](rdd: RDD[A]) {
def mapPartial[B: ClassTag](defaultValue: ⇒ B)(f: PartialFunction[A, B]): RDD[B] = {
rdd.map {
a ⇒ if (f.isDefinedAt(a)) f(a) else defaultValue
}
}
def mapWithFilter[B: ClassTag](f: A ⇒ Option[B]): RDD[B] = {
rdd.map(f).filter(_.isDefined)
.map(_.get)
}
def mapPartialWithFilter[B: ClassTag](f: PartialFunction[A, Option[B]]): RDD[B] = {
rdd.map {
a ⇒ if (f.isDefinedAt(a)) f(a) else None
}.filter(_.isDefined)
.map(_.get)
}
}
}
|
springnz/sparkplug
|
sparkplug-core/src/main/scala/springnz/sparkplug/core/RDDPimpers.scala
|
Scala
|
mit
| 697 |
package me.frmr.wepay.api {
import net.liftweb.json._
import JsonDSL._
case class WithdrawalResponse(withdrawal_id:Long, withdrawal_uri:Option[String] = None)
/**
* Represents an instance of a Withdrawal.
*
* @param account_id The WePay Account ID associated with the Withdrawal.
* @param amount The withdrawal amount.
* @param redirect_uri The URI to redirect users to when finishing the Withdraw flow.
* @param callback_uri The URI for WePay to send IPN messages to.
* @param note The note to be attached to the withdrawal.
* @param withdrawal_id The ID of the Withdrawal on WePay's system. This should only be populated by WePay.
* @param state The current state of the Withdrawal.
* @param withdrawal_uri URI to view the Withdrawal on WePay's site.
* @param recipient_confirmed Actually, I'm not entirely sure what this represents. Ooops.
* @param create_time The time of creation, ya dig?
* @define THIS Withdrawal
**/
case class Withdrawal(account_id:Long, amount:Option[Double] = None, redirect_uri:Option[String] = None,
callback_uri:Option[String] = None, note:Option[String] = None,
withdrawal_id:Option[Long] = None, state:Option[String] = None,
withdrawal_uri:Option[String] = None, recipient_confirmed:Option[Boolean] = None,
create_time:Option[Long] = None) extends ImmutableWePayResource[Withdrawal, WithdrawalResponse] {
val meta = Withdrawal
val _id = withdrawal_id
}
/**
   * Manipulates, retrieves, and searches for Withdrawal objects on WePay's system.
*
* @define INSTANCE Withdrawal
* @define CRUDRESPONSETYPE WithdrawalResponse
**/
object Withdrawal extends ImmutableWePayResourceMeta[Withdrawal, WithdrawalResponse] {
protected def extract(json:JValue) = json.extract[Withdrawal]
protected def extractFindResults(json:JValue) = json.extract[List[Withdrawal]]
protected def extractCrudResponse(json:JValue) = json.extract[WithdrawalResponse]
/**
* Search for Withdrawals matching the various parameters.
*
* @param account_id The account ID to search. Required.
* @param state The state of the withdrawal you're interested in. Optional.
* @param limit The maximum number of Withdrawals to return. Optional.
* @param start The number of Withdrawals to skip in the result set. Optional.
* @return A Box containing a List of matching Withdrawal instances.
**/
def find(account_id:Long, state:Option[String] = None, limit:Option[Long] = None, start:Option[Long] = None) =
findQuery(
("account_id" -> account_id) ~
("state" -> state) ~
("limit" -> limit) ~
("start" -> start)
)
}
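  // Illustrative sketch (hypothetical parameter values, not from the original source; any
  // implicits required by the underlying meta class, such as an OAuth token, are assumed
  // to be in scope):
  //   Withdrawal.find(account_id = 12345L, state = Some("settled"), limit = Some(10L))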
}
|
farmdawgnation/wepay-scala
|
src/main/scala/me/frmr/wepay/api/Withdrawal.scala
|
Scala
|
apache-2.0
| 2,784 |
package im.tox.antox.fragments
import android.app.{Activity, AlertDialog, Dialog}
import android.content.DialogInterface
import android.os.Bundle
import android.support.v4.app.DialogFragment
import android.view.View
import android.widget.EditText
import im.tox.antox.R
import im.tox.antox.fragments.PinDialogFragment._
//remove if not needed
object PinDialogFragment {
trait PinDialogListener {
def onDialogPositiveClick(dialog: DialogFragment, pin: String): Unit
def onDialogNegativeClick(dialog: DialogFragment): Unit
}
}
class PinDialogFragment extends DialogFragment {
var mListener: PinDialogListener = _
var pin: EditText = _
override def onAttach(activity: Activity) {
super.onAttach(activity)
mListener = activity.asInstanceOf[PinDialogListener]
}
override def onCreateDialog(savedInstanceState: Bundle): Dialog = {
val builder = new AlertDialog.Builder(getActivity)
val inflater = getActivity.getLayoutInflater
val view = inflater.inflate(R.layout.dialog_pin, null)
builder.setView(view)
pin = view.findViewById(R.id.pin).asInstanceOf[EditText]
builder.setMessage(getResources.getString(R.string.dialog_pin))
.setPositiveButton(getResources.getString(R.string.button_confirm), new DialogInterface.OnClickListener() {
def onClick(dialog: DialogInterface, id: Int) {
mListener.onDialogPositiveClick(PinDialogFragment.this, pin.getText.toString)
}
})
.setNegativeButton(getResources.getString(R.string.button_cancel), new DialogInterface.OnClickListener() {
def onClick(dialog: DialogInterface, id: Int) {
mListener.onDialogNegativeClick(PinDialogFragment.this)
}
})
builder.create()
}
override def onCancel(dialog: DialogInterface) {
mListener.onDialogNegativeClick(PinDialogFragment.this)
}
}
|
0xPoly/Antox
|
app/src/main/scala/im/tox/antox/fragments/PinDialogFragment.scala
|
Scala
|
gpl-3.0
| 1,858 |
/*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.model
import java.time.Instant
import java.time.ZoneOffset
import java.time.ZonedDateTime
import java.time.temporal.ChronoField
import com.netflix.atlas.core.util.Math
trait MathExpr extends TimeSeriesExpr
object MathExpr {
case class Constant(v: Double) extends TimeSeriesExpr {
def dataExprs: List[DataExpr] = Nil
override def toString: String = s"$v,:const"
def isGrouped: Boolean = false
def groupByKey(tags: Map[String, String]): Option[String] = None
def eval(context: EvalContext, data: Map[DataExpr, List[TimeSeries]]): ResultSet = {
val seq = new FunctionTimeSeq(DsType.Gauge, context.step, _ => v)
val ts = TimeSeries(Map("name" -> v.toString), v.toString, seq)
ResultSet(this, List(ts), context.state)
}
}
case object Random extends TimeSeriesExpr {
def dataExprs: List[DataExpr] = Nil
override def toString: String = s":random"
def isGrouped: Boolean = false
def groupByKey(tags: Map[String, String]): Option[String] = None
def eval(context: EvalContext, data: Map[DataExpr, List[TimeSeries]]): ResultSet = {
val seq = new FunctionTimeSeq(DsType.Gauge, context.step, rand)
val ts = TimeSeries(Map("name" -> "random"), "random", seq)
ResultSet(this, List(ts), context.state)
}
private def rand(t: Long): Double = {
scala.util.Random.nextDouble()
}
}
case class Time(mode: String) extends TimeSeriesExpr {
private val chronoField = mode match {
case "secondOfMinute" => ChronoField.SECOND_OF_MINUTE
case "secondOfDay" => ChronoField.SECOND_OF_DAY
case "minuteOfHour" => ChronoField.MINUTE_OF_HOUR
case "minuteOfDay" => ChronoField.MINUTE_OF_DAY
case "hourOfDay" => ChronoField.HOUR_OF_DAY
case "dayOfWeek" => ChronoField.DAY_OF_WEEK
case "dayOfMonth" => ChronoField.DAY_OF_MONTH
case "dayOfYear" => ChronoField.DAY_OF_YEAR
case "monthOfYear" => ChronoField.MONTH_OF_YEAR
case "yearOfCentury" => ChronoField.YEAR
case "yearOfEra" => ChronoField.YEAR_OF_ERA
case "seconds" => ChronoField.INSTANT_SECONDS
case "minutes" => ChronoField.INSTANT_SECONDS
case "hours" => ChronoField.INSTANT_SECONDS
case "days" => ChronoField.INSTANT_SECONDS
case "weeks" => ChronoField.INSTANT_SECONDS
case s => ChronoField.valueOf(s)
}
private val valueFunc = {
if (chronoField != ChronoField.INSTANT_SECONDS) usingCalendar _ else {
mode match {
case "seconds" => sinceEpoch(1000L) _
case "minutes" => sinceEpoch(1000L * 60L) _
case "hours" => sinceEpoch(1000L * 60L * 60L) _
case "days" => sinceEpoch(1000L * 60L * 60L * 24L) _
case "weeks" => sinceEpoch(1000L * 60L * 60L * 24L * 7L) _
}
}
}
private def usingCalendar(t: Long): Double = {
ZonedDateTime.ofInstant(Instant.ofEpochMilli(t), ZoneOffset.UTC).get(chronoField)
}
private def sinceEpoch(divisor: Long)(t: Long): Double = t / divisor
def dataExprs: List[DataExpr] = Nil
override def toString: String = s"$mode,:time"
def isGrouped: Boolean = false
def groupByKey(tags: Map[String, String]): Option[String] = None
def eval(context: EvalContext, data: Map[DataExpr, List[TimeSeries]]): ResultSet = {
val seq = new FunctionTimeSeq(DsType.Gauge, context.step, valueFunc)
val ts = TimeSeries(Map("name" -> mode), mode, seq)
ResultSet(this, List(ts), context.state)
}
}
trait UnaryMathExpr extends TimeSeriesExpr with UnaryOp {
def name: String
def expr: TimeSeriesExpr
def dataExprs: List[DataExpr] = expr.dataExprs
override def toString: String = s"$expr,:$name"
def isGrouped: Boolean = expr.isGrouped
def groupByKey(tags: Map[String, String]): Option[String] = expr.groupByKey(tags)
def eval(context: EvalContext, data: Map[DataExpr, List[TimeSeries]]): ResultSet = {
val rs = expr.eval(context, data)
ResultSet(this, rs.data.map { t => t.unaryOp(s"$name(%s)", this) }, rs.state)
}
}
case class Abs(expr: TimeSeriesExpr) extends UnaryMathExpr {
def name: String = "abs"
def apply(v: Double): Double = math.abs(v)
}
case class Negate(expr: TimeSeriesExpr) extends UnaryMathExpr {
def name: String = "neg"
def apply(v: Double): Double = -v
}
case class Sqrt(expr: TimeSeriesExpr) extends UnaryMathExpr {
def name: String = "sqrt"
def apply(v: Double): Double = math.sqrt(v)
}
case class PerStep(expr: TimeSeriesExpr) extends UnaryMathExpr {
def name: String = "per-step"
// Not used, required by base-class
def apply(v: Double): Double = v
override def eval(context: EvalContext, data: Map[DataExpr, List[TimeSeries]]): ResultSet = {
val rs = expr.eval(context, data)
val newData = rs.data.map { t =>
// Assumes rate-per-second counter
val multiple = t.data.step / 1000
t.unaryOp(s"$name(%s)", v => v * multiple)
}
ResultSet(this, newData, rs.state)
}
}
trait BinaryMathExpr extends TimeSeriesExpr with BinaryOp {
def name: String
def labelFmt: String
def expr1: TimeSeriesExpr
def expr2: TimeSeriesExpr
def dataExprs: List[DataExpr] = expr1.dataExprs ::: expr2.dataExprs
override def toString: String = s"$expr1,$expr2,:$name"
def isGrouped: Boolean = expr1.isGrouped || expr2.isGrouped
def groupByKey(tags: Map[String, String]): Option[String] = {
expr1.groupByKey(tags).orElse(expr2.groupByKey(tags))
}
def eval(context: EvalContext, data: Map[DataExpr, List[TimeSeries]]): ResultSet = {
val rs1 = expr1.eval(context, data)
val rs2 = expr2.eval(context, data)
val result = (expr1.isGrouped, expr2.isGrouped) match {
case (_, false) =>
require(rs2.data.size == 1)
val t2 = rs2.data.head
rs1.data.map(_.binaryOp(t2, labelFmt, this))
case (false, _) =>
require(rs1.data.size == 1)
val t1 = rs1.data.head
// Normally tags are kept for the lhs, in this case we want to prefer the tags from
// the grouped expr on the rhs
rs2.data.map(t2 => t1.binaryOp(t2, labelFmt, this).withTags(t2.tags))
case (true, true) =>
val g2 = rs2.data.groupBy(t => groupByKey(t.tags))
rs1.data.flatMap { t1 =>
val k = groupByKey(t1.tags)
g2.get(k).map {
case t2 :: Nil => t1.binaryOp(t2, labelFmt, this)
case _ => throw new IllegalStateException("too many values for key")
}
}
}
ResultSet(this, result, rs1.state ++ rs2.state)
}
}
case class Add(expr1: TimeSeriesExpr, expr2: TimeSeriesExpr) extends BinaryMathExpr {
def name: String = "add"
def labelFmt: String = "(%s + %s)"
def apply(v1: Double, v2: Double): Double = Math.addNaN(v1, v2)
}
case class Subtract(expr1: TimeSeriesExpr, expr2: TimeSeriesExpr) extends BinaryMathExpr {
def name: String = "sub"
def labelFmt: String = "(%s - %s)"
def apply(v1: Double, v2: Double): Double = Math.subtractNaN(v1, v2)
}
case class Multiply(expr1: TimeSeriesExpr, expr2: TimeSeriesExpr) extends BinaryMathExpr {
def name: String = "mul"
def labelFmt: String = "(%s * %s)"
def apply(v1: Double, v2: Double): Double = v1 * v2
}
case class Divide(expr1: TimeSeriesExpr, expr2: TimeSeriesExpr) extends BinaryMathExpr {
def name: String = "div"
def labelFmt: String = "(%s / %s)"
def apply(v1: Double, v2: Double): Double = {
if (v2 == 0.0) {
// Infinite is not very useful as a value in a visualization and tends to make other
// values difficult to see. So normally a divide by 0 will report a value of NaN so the
// user can visually see the value is unknown/misbehaving for that time.
//
// However, if the numerator is also 0, this likely means no activity on the counters
// during a given interval. Reporting NaN can become confusing as users think there is an
// issue with the data not getting reported. If both are 0 we report 0 as it tends to
// convey the intent that we received data, but there was no activity rather than we
// failed to receive any data.
//
// The :fdiv operator can be used if a strict floating point division is actually
// desirable.
if (v1 == 0.0) 0.0 else Double.NaN
} else {
v1 / v2
}
}
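    // For example: apply(0.0, 0.0) == 0.0 (no activity), apply(1.0, 0.0) is NaN, and
    // apply(6.0, 3.0) == 2.0.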
}
case class GreaterThan(expr1: TimeSeriesExpr, expr2: TimeSeriesExpr) extends BinaryMathExpr {
def name: String = "gt"
def labelFmt: String = "(%s > %s)"
def apply(v1: Double, v2: Double): Double = if (v1 > v2) 1.0 else 0.0
}
case class GreaterThanEqual(expr1: TimeSeriesExpr, expr2: TimeSeriesExpr) extends BinaryMathExpr {
def name: String = "ge"
def labelFmt: String = "(%s >= %s)"
def apply(v1: Double, v2: Double): Double = if (v1 >= v2) 1.0 else 0.0
}
case class LessThan(expr1: TimeSeriesExpr, expr2: TimeSeriesExpr) extends BinaryMathExpr {
def name: String = "lt"
def labelFmt: String = "(%s < %s)"
def apply(v1: Double, v2: Double): Double = if (v1 < v2) 1.0 else 0.0
}
case class LessThanEqual(expr1: TimeSeriesExpr, expr2: TimeSeriesExpr) extends BinaryMathExpr {
def name: String = "le"
def labelFmt: String = "(%s <= %s)"
def apply(v1: Double, v2: Double): Double = if (v1 <= v2) 1.0 else 0.0
}
case class FAdd(expr1: TimeSeriesExpr, expr2: TimeSeriesExpr) extends BinaryMathExpr {
def name: String = "fadd"
def labelFmt: String = "(%s + %s)"
def apply(v1: Double, v2: Double): Double = v1 + v2
}
case class FSubtract(expr1: TimeSeriesExpr, expr2: TimeSeriesExpr) extends BinaryMathExpr {
def name: String = "fsub"
def labelFmt: String = "(%s - %s)"
def apply(v1: Double, v2: Double): Double = v1 - v2
}
case class FMultiply(expr1: TimeSeriesExpr, expr2: TimeSeriesExpr) extends BinaryMathExpr {
def name: String = "fmul"
def labelFmt: String = "(%s * %s)"
def apply(v1: Double, v2: Double): Double = v1 * v2
}
case class FDivide(expr1: TimeSeriesExpr, expr2: TimeSeriesExpr) extends BinaryMathExpr {
def name: String = "fdiv"
def labelFmt: String = "(%s / %s)"
def apply(v1: Double, v2: Double): Double = v1 / v2
}
case class And(expr1: TimeSeriesExpr, expr2: TimeSeriesExpr) extends BinaryMathExpr {
def name: String = "and"
def labelFmt: String = "(%s AND %s)"
def apply(v1: Double, v2: Double): Double = {
if (Math.toBoolean(v1) && Math.toBoolean(v2)) 1.0 else 0.0
}
}
case class Or(expr1: TimeSeriesExpr, expr2: TimeSeriesExpr) extends BinaryMathExpr {
def name: String = "or"
def labelFmt: String = "(%s OR %s)"
def apply(v1: Double, v2: Double): Double = {
if (Math.toBoolean(v1) || Math.toBoolean(v2)) 1.0 else 0.0
}
}
trait AggrMathExpr extends TimeSeriesExpr with BinaryOp {
def name: String
def expr: TimeSeriesExpr
def dataExprs: List[DataExpr] = expr.dataExprs
override def toString: String = s"$expr,:$name"
def isGrouped: Boolean = false
def groupByKey(tags: Map[String, String]): Option[String] = None
def eval(context: EvalContext, data: Map[DataExpr, List[TimeSeries]]): ResultSet = {
val rs = expr.eval(context, data)
val t = TimeSeries.aggregate(rs.data.iterator, context.start, context.end, this)
ResultSet(this, List(TimeSeries(t.tags, s"$name(${t.label})", t.data)), rs.state)
}
}
case class Sum(expr: TimeSeriesExpr) extends AggrMathExpr {
def name: String = "sum"
def apply(v1: Double, v2: Double): Double = Math.addNaN(v1, v2)
}
case class Count(expr: TimeSeriesExpr) extends AggrMathExpr {
def name: String = "count"
def apply(v1: Double, v2: Double): Double = Math.addNaN(v1, v2)
override def eval(context: EvalContext, data: Map[DataExpr, List[TimeSeries]]): ResultSet = {
val rs = expr.eval(context, data)
val init = rs.data.map { t =>
TimeSeries(t.tags, t.label, t.data.mapValues(v => if (v.isNaN) Double.NaN else 1.0))
}
val t = TimeSeries.aggregate(init.iterator, context.start, context.end, this)
ResultSet(this, List(TimeSeries(t.tags, s"$name(${t.label})", t.data)), rs.state)
}
}
case class Min(expr: TimeSeriesExpr) extends AggrMathExpr {
def name: String = "min"
def apply(v1: Double, v2: Double): Double = Math.minNaN(v1, v2)
}
case class Max(expr: TimeSeriesExpr) extends AggrMathExpr {
def name: String = "max"
def apply(v1: Double, v2: Double): Double = Math.maxNaN(v1, v2)
}
}
|
rspieldenner/atlas
|
atlas-core/src/main/scala/com/netflix/atlas/core/model/MathExpr.scala
|
Scala
|
apache-2.0
| 13,447 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import org.apache.kafka.common.config.internals.BrokerSecurityConfigs
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.security.auth._
import org.junit.{Before, Test}
import org.junit.Assert._
import org.apache.kafka.common.errors.TopicAuthorizationException
import org.scalatest.Assertions.intercept
// This test case uses a separate listener for client and inter-broker communication, from
// which we derive corresponding principals
object PlaintextEndToEndAuthorizationTest {
@volatile
private var clientListenerName = None: Option[String]
@volatile
private var serverListenerName = None: Option[String]
class TestClientPrincipalBuilder extends KafkaPrincipalBuilder {
override def build(context: AuthenticationContext): KafkaPrincipal = {
clientListenerName = Some(context.listenerName)
context match {
case ctx: PlaintextAuthenticationContext if ctx.clientAddress != null =>
new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "client")
case _ =>
KafkaPrincipal.ANONYMOUS
}
}
}
class TestServerPrincipalBuilder extends KafkaPrincipalBuilder {
override def build(context: AuthenticationContext): KafkaPrincipal = {
serverListenerName = Some(context.listenerName)
context match {
case ctx: PlaintextAuthenticationContext =>
new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "server")
case _ =>
KafkaPrincipal.ANONYMOUS
}
}
}
}
class PlaintextEndToEndAuthorizationTest extends EndToEndAuthorizationTest {
import PlaintextEndToEndAuthorizationTest.{TestClientPrincipalBuilder, TestServerPrincipalBuilder}
override protected def securityProtocol = SecurityProtocol.PLAINTEXT
override protected def listenerName: ListenerName = new ListenerName("CLIENT")
override protected def interBrokerListenerName: ListenerName = new ListenerName("SERVER")
this.serverConfig.setProperty("listener.name.client." + BrokerSecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG,
classOf[TestClientPrincipalBuilder].getName)
this.serverConfig.setProperty("listener.name.server." + BrokerSecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG,
classOf[TestServerPrincipalBuilder].getName)
override val clientPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "client")
override val kafkaPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "server")
@Before
override def setUp(): Unit = {
startSasl(jaasSections(List.empty, None, ZkSasl))
super.setUp()
}
@Test
def testListenerName(): Unit = {
// To check the client listener name, establish a session on the server by sending any request eg sendRecords
val producer = createProducer()
intercept[TopicAuthorizationException](sendRecords(producer, numRecords = 1, tp))
assertEquals(Some("CLIENT"), PlaintextEndToEndAuthorizationTest.clientListenerName)
assertEquals(Some("SERVER"), PlaintextEndToEndAuthorizationTest.serverListenerName)
}
}
|
sslavic/kafka
|
core/src/test/scala/integration/kafka/api/PlaintextEndToEndAuthorizationTest.scala
|
Scala
|
apache-2.0
| 3,819 |
package scorex.transaction.state.database.blockchain
import scorex.crypto.encode.Base58
import scorex.transaction._
import scorex.transaction.assets.{AssetIssuance, BurnTransaction, IssueTransaction, ReissueTransaction}
import scorex.transaction.state.database.state.extension.StateExtension
import scorex.transaction.state.database.state.storage.{AssetsExtendedStateStorageI, StateStorageI}
import scorex.utils.ScorexLogging
import scala.util.{Failure, Success}
//TODO move to state.extension package
class AssetsExtendedState(storage: StateStorageI with AssetsExtendedStateStorageI) extends ScorexLogging
with StateExtension {
override def isValid(tx: Transaction, height: Int): Boolean = tx match {
case tx: ReissueTransaction =>
val reissueValid: Boolean = {
val sameSender = isIssuerAddress(tx.assetId, tx.sender.address)
val reissuable = isReissuable(tx.assetId)
sameSender && reissuable
}
reissueValid
case tx: BurnTransaction =>
isIssuerAddress(tx.assetId, tx.sender.address)
case _ => true
}
override def process(tx: Transaction, blockTs: Long, height: Int): Unit = tx match {
case tx: AssetIssuance =>
addAsset(tx.assetId, height, tx.id, tx.quantity, tx.reissuable)
case tx: BurnTransaction =>
burnAsset(tx.assetId, height, tx.id, -tx.amount)
case _ =>
}
private def isIssuerAddress(assetId: Array[Byte], address: String): Boolean = {
storage.getTransactionBytes(assetId).exists(b =>
IssueTransaction.parseBytes(b) match {
case Success(issue) =>
issue.sender.address == address
case Failure(f) =>
log.debug(s"Can't deserialise issue tx", f)
false
})
}
private[blockchain] def addAsset(assetId: AssetId, height: Int, transactionId: Array[Byte], quantity: Long, reissuable: Boolean): Unit = {
val asset = Base58.encode(assetId)
val transaction = Base58.encode(transactionId)
val assetAtHeight = s"$asset@$height"
val assetAtTransaction = s"$asset@$transaction"
storage.addHeight(asset, height)
storage.addTransaction(assetAtHeight, transaction)
storage.setQuantity(assetAtTransaction, quantity)
storage.setReissuable(assetAtTransaction, reissuable)
}
private[blockchain] def burnAsset(assetId: AssetId, height: Int, transactionId: Array[Byte], quantity: Long): Unit = {
    require(quantity <= 0, "Quantity of burned asset must be non-positive")
val asset = Base58.encode(assetId)
val transaction = Base58.encode(transactionId)
val assetAtHeight = s"$asset@$height"
val assetAtTransaction = s"$asset@$transaction"
storage.addHeight(asset, height)
storage.addTransaction(assetAtHeight, transaction)
storage.setQuantity(assetAtTransaction, quantity)
}
def rollbackTo(assetId: AssetId, height: Int): Unit = {
val asset = Base58.encode(assetId)
val heights = storage.getHeights(asset)
val heightsToRemove = heights.filter(h => h > height)
storage.setHeight(asset, heights -- heightsToRemove)
val transactionsToRemove: Seq[String] = heightsToRemove.foldLeft(Seq.empty[String]) { (result, h) =>
result ++ storage.getTransactions(s"$asset@$h")
}
val keysToRemove = transactionsToRemove.map(t => s"$asset@$t")
keysToRemove.foreach { key =>
storage.removeKey(key)
}
}
def getAssetQuantity(assetId: AssetId): Long = {
val asset = Base58.encode(assetId)
val heights = storage.getHeights(asset)
val sortedHeights = heights.toSeq.sorted
val transactions: Seq[String] = sortedHeights.foldLeft(Seq.empty[String]) { (result, h) =>
result ++ storage.getTransactions(s"$asset@$h")
}
transactions.foldLeft(0L) { (result, transaction) =>
result + storage.getQuantity(s"$asset@$transaction")
}
}
def isReissuable(assetId: AssetId): Boolean = {
val asset = Base58.encode(assetId)
val heights = storage.getHeights(asset)
val reverseSortedHeight = heights.toSeq.reverse
if (reverseSortedHeight.nonEmpty) {
val lastHeight = reverseSortedHeight.head
val transactions = storage.getTransactions(s"$asset@$lastHeight")
if (transactions.nonEmpty) {
val transaction = transactions.toSeq.reverse.head
storage.isReissuable(s"$asset@$transaction")
} else false
} else false
}
}
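// A minimal sketch (plain Scala, hypothetical numbers) of the bookkeeping above: issues and
// reissues are recorded as positive quantities, burns as non-positive ones, and the current
// total supply of an asset is simply the sum of all quantities recorded for it.
private object AssetQuantitySketch {
  val recordedQuantities: Seq[Long] = Seq(1000L, 500L, -200L) // issue, reissue, burn
  val totalSupply: Long = recordedQuantities.sum              // 1300
}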
|
B83YPoj/Waves
|
src/main/scala/scorex/transaction/state/database/blockchain/AssetsExtendedState.scala
|
Scala
|
apache-2.0
| 4,361 |
//package com.sksamuel.elastic4s
//
//import com.sksamuel.elastic4s.ElasticDsl._
//import com.sksamuel.elastic4s.mappings.FieldType.NestedType
//import org.scalatest.{FreeSpec, Matchers}
//import com.sksamuel.elastic4s.testkit.ElasticSugar
//import org.elasticsearch.common.text.Text
//import org.elasticsearch.search.highlight.HighlightField
//
//class NestedQueryTest extends FreeSpec with Matchers with ElasticSugar {
//
// client.execute {
// create index "nested" mappings {
// mapping("show") as {
// field("actor") typed NestedType
// }
// }
// }.await
//
// client.execute(
// index into "nested/show" fields (
// field("name") -> "game of thrones",
// field("actor") -> Seq(
// Map("name" -> "peter dinklage", "birthplace" -> "Morristown"),
// Map("name" -> "pedro pascal", "birthplace" -> "Santiago")
// )
// )
// ).await
//
// refresh("nested")
// blockUntilCount(1, "nested")
//
// "nested object" - {
// "should be searchable by nested field" in {
// val resp1 = client.execute {
// search in "nested/show" query nestedQuery("actor").query(termQuery("actor.name" -> "dinklage"))
// }.await
// resp1.totalHits shouldEqual 1
//
// val resp2 = client.execute {
// search in "nested/show" query nestedQuery("actor").query(termQuery("actor.name" -> "simon"))
// }.await
// resp2.totalHits shouldEqual 0
// }
// }
//
// "nested object" - {
// "should be presented in highlighting" in {
// val resp1 = client.execute {
// search in "nested/show" query nestedQuery("actor").query(termQuery("actor.name" -> "dinklage")).inner {
// innerHits("actor").highlighting(highlight.field("actor.name").matchedFields("actor.name").fragmentSize(20))
// }
// }.await
// resp1.totalHits shouldEqual 1
// val maybeHits = resp1.hits(0).innerHits.get("actor")
// maybeHits.isDefined shouldBe true
// maybeHits.get.getTotalHits shouldEqual 1
// val fields = maybeHits.get.getAt(0).highlightFields
// fields.containsKey("actor.name") shouldBe true
// val fragments = fields.get("actor.name").fragments()
// fragments.length shouldBe 1
// fragments(0).string shouldBe "peter <em>dinklage</em>"
// }
// }
//
// "nested object" - {
// "should have correct inner hit source" in {
// val resp1 = client.execute {
// search in "nested/show" query nestedQuery("actor").query(termQuery("actor.name" -> "dinklage")).inner {
// innerHits("actor").sourceExclude("birthplace")
// }
// }.await
// resp1.totalHits shouldEqual 1
// val maybeInnerHits = resp1.hits(0).innerHits.get("actor")
// maybeInnerHits.isDefined shouldBe true
// maybeInnerHits.get.getTotalHits shouldEqual 1
// maybeInnerHits.get.getAt(0).sourceAsMap.containsKey("birthplace") shouldBe false
// maybeInnerHits.get.getAt(0).sourceAsMap.containsKey("name") shouldBe true
// }
// }
//}
|
aroundus-inc/elastic4s
|
elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/search/NestedQueryTest.scala
|
Scala
|
apache-2.0
| 3,000 |
package org.scalameta.data
import scala.language.experimental.macros
import scala.reflect.macros.blackbox.Context
import org.scalameta.adt.{Reflection => AdtReflection}
import org.scalameta.internal.MacroHelpers
// Parts of @data logic that need a typer context and can't be run in a macro annotation.
object DataTyperMacros {
def nullCheck[T](x: T): Unit = macro DataTyperMacrosBundle.nullCheck
def emptyCheck[T](x: T): Unit = macro DataTyperMacrosBundle.emptyCheck
}
// NOTE: can't call this `DataTyperMacros`, because then typechecking the macro defs will produce spurious cyclic errors
class DataTyperMacrosBundle(val c: Context) extends AdtReflection with MacroHelpers {
lazy val u: c.universe.type = c.universe
lazy val mirror: u.Mirror = c.mirror
import c.universe._
import definitions._
def nullCheck(x: c.Tree): c.Tree = {
if (x.tpe.baseClasses.contains(ObjectClass)) q"$InvariantsRequireMethod($x != null)"
else q"()"
}
def emptyCheck(x: c.Tree): c.Tree = {
val emptyCheckRequested =
try x.symbol.asTerm.accessed.nonEmpty
catch { case _: AssertionError => x.symbol.nonEmpty }
if (emptyCheckRequested) q"$InvariantsRequireMethod($x != null && $x.nonEmpty)"
else q"()"
}
}
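// A minimal sketch (hypothetical, plain Scala, not produced by the bundle above) of what the
// expanded checks amount to at runtime: a null guard for reference types and a non-emptiness
// guard for collection-like fields.
private[data] object ExpansionSketch {
  def nullCheckExpanded(x: AnyRef): Unit = require(x != null)
  def emptyCheckExpanded(x: Iterable[_]): Unit = require(x != null && x.nonEmpty)
}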
|
scalameta/scalameta
|
scalameta/common/shared/src/main/scala/org/scalameta/data/Macros.scala
|
Scala
|
bsd-3-clause
| 1,238 |
package controllers.dev
import anorm._
import play.api.mvc.{Request, ChunkedResult, Action, Controller}
import models.{Schema => S, Location, SQ, User}
import play.api.db.DB
import controllers.Common
import SQ._
import play.api.Play
import java.io.File
import core.LatLng
import play.api.libs.iteratee.{Concurrent, Enumerator}
object Schema extends Common {
def rebuild = Action { request =>
transaction {
models.Schema.drop
models.Schema.create
}
// initialize(Some(Settings.zipcodeSubset))
// initialize(None)
transaction {
seed(request)
}
Ok("schema rebuilt")
}
def setupZipcodes(zipSubset:Option[Set[String]] = None) = DB.withConnection { implicit c =>
def build() {
DB.withConnection { implicit c =>
SQL(
"""
|DROP TABLE IF EXISTS locations;
""".stripMargin).execute()
//
// Seq(S.zipcodes, S.haves, S.wants).foreach { table =>
// SQL(s"SELECT AddGeometryColumn('${table.name}', 'latlng', 4326, 'POINT', 2 );").execute()
// }
SQL(
"""
|CREATE TABLE locations (
| id SERIAL,
| zipcode VARCHAR(5) NULL,
| text VARCHAR(256) NULL,
| latlng Geography(Point)
|);
""".stripMargin).execute()
}
}
def populate() {
val lines:Iterator[String] = io.Source.fromInputStream(Play.classloader.getResourceAsStream("conf/data/Gaz_zcta_national.txt")).getLines.drop(1)
val locations = for {
(line, i) <- lines.zipWithIndex
chunks = line.split("\\t")
zip = chunks(0)
lat = chunks(7)
lng = chunks(8)
if zipSubset.isEmpty || zipSubset.get.contains(zip)
} yield {
Location(Some(zip), LatLng(lat.toFloat, lng.toFloat), 0L)
}
val plan = """PREPARE fooplan (text, geography, int) AS
INSERT INTO locations (zipcode, latlng) VALUES($1, $2);"""
val query = plan + locations.map( loc => s"EXECUTE fooplan('${loc.zipcode.get}', ${loc.latlng.geoString});").mkString("\\n")
SQL(query).execute()
}
build()
populate()
}
def seed(request:Request[_]) = inTransaction {
User.register(User("[email protected]", "q1w2e3"))
Populator.buildCategories()()(request)
}
}
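// A minimal sketch (hypothetical row values) of the Gazetteer parsing done in populate() above:
// the zipcode is the first tab-separated column and the latitude/longitude are the columns at
// indices 7 and 8.
private object GazetteerLineSketch {
  val chunks = Array("00601", "c1", "c2", "c3", "c4", "c5", "c6", "18.18", "-66.75")
  val (zip, lat, lng) = (chunks(0), chunks(7).toFloat, chunks(8).toFloat)
}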
|
maackle/bartera
|
app/controllers/dev/Schema.scala
|
Scala
|
mit
| 2,149 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.regression
import scala.util.Random
import org.apache.spark.SparkFunSuite
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.util.{LinearDataGenerator, LocalClusterSparkContext,
MLlibTestSparkContext}
import org.apache.spark.util.Utils
private object LinearRegressionSuite {
/** 3 features */
val model = new LinearRegressionModel(weights = Vectors.dense(0.1, 0.2, 0.3), intercept = 0.5)
}
class LinearRegressionSuite extends SparkFunSuite with MLlibTestSparkContext {
def validatePrediction(predictions: Seq[Double], input: Seq[LabeledPoint]) {
val numOffPredictions = predictions.zip(input).count { case (prediction, expected) =>
// A prediction is off if the prediction is more than 0.5 away from expected value.
math.abs(prediction - expected.label) > 0.5
}
// At least 80% of the predictions should be on.
assert(numOffPredictions < input.length / 5)
}
// Test if we can correctly learn Y = 3 + 10*X1 + 10*X2
test("linear regression") {
val testRDD = sc.parallelize(LinearDataGenerator.generateLinearInput(
3.0, Array(10.0, 10.0), 100, 42), 2).cache()
val linReg = new LinearRegressionWithSGD(1.0, 100, 0.0, 1.0).setIntercept(true)
linReg.optimizer.setNumIterations(1000).setStepSize(1.0)
val model = linReg.run(testRDD)
assert(model.intercept >= 2.5 && model.intercept <= 3.5)
val weights = model.weights
assert(weights.size === 2)
assert(weights(0) >= 9.0 && weights(0) <= 11.0)
assert(weights(1) >= 9.0 && weights(1) <= 11.0)
val validationData = LinearDataGenerator.generateLinearInput(
3.0, Array(10.0, 10.0), 100, 17)
val validationRDD = sc.parallelize(validationData, 2).cache()
// Test prediction on RDD.
validatePrediction(model.predict(validationRDD.map(_.features)).collect(), validationData)
// Test prediction on Array.
validatePrediction(validationData.map(row => model.predict(row.features)), validationData)
}
// Test if we can correctly learn Y = 10*X1 + 10*X2
test("linear regression without intercept") {
val testRDD = sc.parallelize(LinearDataGenerator.generateLinearInput(
0.0, Array(10.0, 10.0), 100, 42), 2).cache()
val linReg = new LinearRegressionWithSGD(1.0, 100, 0.0, 1.0).setIntercept(false)
linReg.optimizer.setNumIterations(1000).setStepSize(1.0)
val model = linReg.run(testRDD)
assert(model.intercept === 0.0)
val weights = model.weights
assert(weights.size === 2)
assert(weights(0) >= 9.0 && weights(0) <= 11.0)
assert(weights(1) >= 9.0 && weights(1) <= 11.0)
val validationData = LinearDataGenerator.generateLinearInput(
0.0, Array(10.0, 10.0), 100, 17)
val validationRDD = sc.parallelize(validationData, 2).cache()
// Test prediction on RDD.
validatePrediction(model.predict(validationRDD.map(_.features)).collect(), validationData)
// Test prediction on Array.
validatePrediction(validationData.map(row => model.predict(row.features)), validationData)
}
// Test if we can correctly learn Y = 10*X1 + 10*X10000
test("sparse linear regression without intercept") {
val denseRDD = sc.parallelize(
LinearDataGenerator.generateLinearInput(0.0, Array(10.0, 10.0), 100, 42), 2)
val sparseRDD = denseRDD.map { case LabeledPoint(label, v) =>
val sv = Vectors.sparse(10000, Seq((0, v(0)), (9999, v(1))))
LabeledPoint(label, sv)
}.cache()
val linReg = new LinearRegressionWithSGD(1.0, 100, 0.0, 1.0).setIntercept(false)
linReg.optimizer.setNumIterations(1000).setStepSize(1.0)
val model = linReg.run(sparseRDD)
assert(model.intercept === 0.0)
val weights = model.weights
assert(weights.size === 10000)
assert(weights(0) >= 9.0 && weights(0) <= 11.0)
assert(weights(9999) >= 9.0 && weights(9999) <= 11.0)
val validationData = LinearDataGenerator.generateLinearInput(0.0, Array(10.0, 10.0), 100, 17)
val sparseValidationData = validationData.map { case LabeledPoint(label, v) =>
val sv = Vectors.sparse(10000, Seq((0, v(0)), (9999, v(1))))
LabeledPoint(label, sv)
}
val sparseValidationRDD = sc.parallelize(sparseValidationData, 2)
// Test prediction on RDD.
validatePrediction(
model.predict(sparseValidationRDD.map(_.features)).collect(), sparseValidationData)
// Test prediction on Array.
validatePrediction(
sparseValidationData.map(row => model.predict(row.features)), sparseValidationData)
}
test("model save/load") {
val model = LinearRegressionSuite.model
val tempDir = Utils.createTempDir()
val path = tempDir.toURI.toString
// Save model, load it back, and compare.
try {
model.save(sc, path)
val sameModel = LinearRegressionModel.load(sc, path)
assert(model.weights == sameModel.weights)
assert(model.intercept == sameModel.intercept)
} finally {
Utils.deleteRecursively(tempDir)
}
}
}
class LinearRegressionClusterSuite extends SparkFunSuite with LocalClusterSparkContext {
test("task size should be small in both training and prediction") {
val m = 4
val n = 200000
val points = sc.parallelize(0 until m, 2).mapPartitionsWithIndex { (idx, iter) =>
val random = new Random(idx)
iter.map(i => LabeledPoint(1.0, Vectors.dense(Array.fill(n)(random.nextDouble()))))
}.cache()
// If we serialize data directly in the task closure, the size of the serialized task would be
// greater than 1MB and hence Spark would throw an error.
val model = new LinearRegressionWithSGD(1.0, 2, 0.0, 1.0).run(points)
val predictions = model.predict(points.map(_.features))
}
}
|
pgandhi999/spark
|
mllib/src/test/scala/org/apache/spark/mllib/regression/LinearRegressionSuite.scala
|
Scala
|
apache-2.0
| 6,512 |
/*!
* Copyright 2013-2014 Dennis Hörsch.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.dennishoersch.util.dropwizard.config
import io.dropwizard.{ConfiguredBundle => DWConfiguredBundle, Configuration}
import io.dropwizard.setup.Bootstrap
trait ConfiguredBundle[A <: Configuration] extends DWConfiguredBundle[A] {
override final def initialize(bootstrap: Bootstrap[_]) =
init(bootstrap.asInstanceOf[Bootstrap[A]])
def init(bootstrap: Bootstrap[A])
}
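// A minimal usage sketch (hypothetical configuration and bundle names): a concrete bundle only
// implements the typed init hook, since the untyped initialize from Dropwizard is already
// bridged by the trait above.
private object ConfiguredBundleSketch {
  class MyConfiguration extends Configuration
  class MyBundle extends ConfiguredBundle[MyConfiguration] {
    override def init(bootstrap: Bootstrap[MyConfiguration]): Unit = ()
    override def run(configuration: MyConfiguration, environment: io.dropwizard.setup.Environment): Unit = ()
  }
}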
|
dhs3000/dropwizard-scala
|
src/main/scala/de/dennishoersch/util/dropwizard/config/ConfiguredBundle.scala
|
Scala
|
apache-2.0
| 991 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.python
import org.apache.spark.sql.execution.{FileSourceScanExec, SparkPlan, SparkPlanTest}
import org.apache.spark.sql.execution.datasources.v2.BatchScanExec
import org.apache.spark.sql.execution.datasources.v2.parquet.ParquetScan
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
class ExtractPythonUDFsSuite extends SparkPlanTest with SharedSparkSession {
import testImplicits._
val batchedPythonUDF = new MyDummyPythonUDF
val scalarPandasUDF = new MyDummyScalarPandasUDF
private def collectBatchExec(plan: SparkPlan): Seq[BatchEvalPythonExec] = plan.collect {
case b: BatchEvalPythonExec => b
}
private def collectArrowExec(plan: SparkPlan): Seq[ArrowEvalPythonExec] = plan.collect {
case b: ArrowEvalPythonExec => b
}
test("Chained Batched Python UDFs should be combined to a single physical node") {
val df = Seq(("Hello", 4)).toDF("a", "b")
val df2 = df.withColumn("c", batchedPythonUDF(col("a")))
.withColumn("d", batchedPythonUDF(col("c")))
val pythonEvalNodes = collectBatchExec(df2.queryExecution.executedPlan)
assert(pythonEvalNodes.size == 1)
}
test("Chained Scalar Pandas UDFs should be combined to a single physical node") {
val df = Seq(("Hello", 4)).toDF("a", "b")
val df2 = df.withColumn("c", scalarPandasUDF(col("a")))
.withColumn("d", scalarPandasUDF(col("c")))
val arrowEvalNodes = collectArrowExec(df2.queryExecution.executedPlan)
assert(arrowEvalNodes.size == 1)
}
test("Mixed Batched Python UDFs and Pandas UDF should be separate physical node") {
val df = Seq(("Hello", 4)).toDF("a", "b")
val df2 = df.withColumn("c", batchedPythonUDF(col("a")))
.withColumn("d", scalarPandasUDF(col("b")))
val pythonEvalNodes = collectBatchExec(df2.queryExecution.executedPlan)
val arrowEvalNodes = collectArrowExec(df2.queryExecution.executedPlan)
assert(pythonEvalNodes.size == 1)
assert(arrowEvalNodes.size == 1)
}
test("Independent Batched Python UDFs and Scalar Pandas UDFs should be combined separately") {
val df = Seq(("Hello", 4)).toDF("a", "b")
val df2 = df.withColumn("c1", batchedPythonUDF(col("a")))
.withColumn("c2", batchedPythonUDF(col("c1")))
.withColumn("d1", scalarPandasUDF(col("a")))
.withColumn("d2", scalarPandasUDF(col("d1")))
val pythonEvalNodes = collectBatchExec(df2.queryExecution.executedPlan)
val arrowEvalNodes = collectArrowExec(df2.queryExecution.executedPlan)
assert(pythonEvalNodes.size == 1)
assert(arrowEvalNodes.size == 1)
}
test("Dependent Batched Python UDFs and Scalar Pandas UDFs should not be combined") {
val df = Seq(("Hello", 4)).toDF("a", "b")
val df2 = df.withColumn("c1", batchedPythonUDF(col("a")))
.withColumn("d1", scalarPandasUDF(col("c1")))
.withColumn("c2", batchedPythonUDF(col("d1")))
.withColumn("d2", scalarPandasUDF(col("c2")))
val pythonEvalNodes = collectBatchExec(df2.queryExecution.executedPlan)
val arrowEvalNodes = collectArrowExec(df2.queryExecution.executedPlan)
assert(pythonEvalNodes.size == 2)
assert(arrowEvalNodes.size == 2)
}
test("Python UDF should not break column pruning/filter pushdown -- Parquet V1") {
withSQLConf(SQLConf.USE_V1_SOURCE_READER_LIST.key -> "parquet") {
withTempPath { f =>
spark.range(10).select($"id".as("a"), $"id".as("b"))
.write.parquet(f.getCanonicalPath)
val df = spark.read.parquet(f.getCanonicalPath)
withClue("column pruning") {
val query = df.filter(batchedPythonUDF($"a")).select($"a")
val pythonEvalNodes = collectBatchExec(query.queryExecution.executedPlan)
assert(pythonEvalNodes.length == 1)
val scanNodes = query.queryExecution.executedPlan.collect {
case scan: FileSourceScanExec => scan
}
assert(scanNodes.length == 1)
assert(scanNodes.head.output.map(_.name) == Seq("a"))
}
withClue("filter pushdown") {
val query = df.filter($"a" > 1 && batchedPythonUDF($"a"))
val pythonEvalNodes = collectBatchExec(query.queryExecution.executedPlan)
assert(pythonEvalNodes.length == 1)
val scanNodes = query.queryExecution.executedPlan.collect {
case scan: FileSourceScanExec => scan
}
assert(scanNodes.length == 1)
// 'a is not null and 'a > 1
assert(scanNodes.head.dataFilters.length == 2)
assert(scanNodes.head.dataFilters.flatMap(_.references.map(_.name)).distinct == Seq("a"))
}
}
}
}
test("Python UDF should not break column pruning/filter pushdown -- Parquet V2") {
withSQLConf(SQLConf.USE_V1_SOURCE_READER_LIST.key -> "") {
withTempPath { f =>
spark.range(10).select($"id".as("a"), $"id".as("b"))
.write.parquet(f.getCanonicalPath)
val df = spark.read.parquet(f.getCanonicalPath)
withClue("column pruning") {
val query = df.filter(batchedPythonUDF($"a")).select($"a")
val pythonEvalNodes = collectBatchExec(query.queryExecution.executedPlan)
assert(pythonEvalNodes.length == 1)
val scanNodes = query.queryExecution.executedPlan.collect {
case scan: BatchScanExec => scan
}
assert(scanNodes.length == 1)
assert(scanNodes.head.output.map(_.name) == Seq("a"))
}
withClue("filter pushdown") {
val query = df.filter($"a" > 1 && batchedPythonUDF($"a"))
val pythonEvalNodes = collectBatchExec(query.queryExecution.executedPlan)
assert(pythonEvalNodes.length == 1)
val scanNodes = query.queryExecution.executedPlan.collect {
case scan: BatchScanExec => scan
}
assert(scanNodes.length == 1)
// 'a is not null and 'a > 1
val filters = scanNodes.head.scan.asInstanceOf[ParquetScan].pushedFilters
assert(filters.length == 2)
assert(filters.flatMap(_.references).distinct === Array("a"))
}
}
}
}
}
|
techaddict/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/python/ExtractPythonUDFsSuite.scala
|
Scala
|
apache-2.0
| 7,002 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.iud
import java.io.File
import org.apache.spark.sql.{CarbonEnv, Row, SaveMode}
import org.apache.spark.sql.hive.CarbonRelation
import org.apache.spark.sql.test.SparkTestQueryExecutor
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datastore.filesystem.{CarbonFile, CarbonFileFilter}
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.mutate.CarbonUpdateUtil
import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil}
import org.apache.carbondata.core.util.path.CarbonTablePath
class DeleteCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
override def beforeAll {
sql("use default")
sql("drop database if exists iud_db cascade")
sql("create database iud_db")
sql(
"""create table iud_db.source2 (c11 string,c22 int,c33 string,c55 string, c66 int) STORED
|AS carbondata""".stripMargin)
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/source2.csv' INTO table iud_db.source2""")
sql("use iud_db")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_CLEAN_FILES_FORCE_ALLOWED, "true")
}
test("delete data from carbon table with alias [where clause ]") {
sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
sql("""delete from iud_db.dest d where d.c1 = 'a'""").collect()
checkAnswer(
sql("""select c2 from iud_db.dest"""),
Seq(Row(2), Row(3), Row(4), Row(5))
)
}
test("delete data from carbon table[where clause ]") {
sql("""drop table if exists iud_db.dest""")
sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
sql("""delete from iud_db.dest where c2 = 2""").collect()
checkAnswer(
sql("""select c1 from iud_db.dest"""),
Seq(Row("a"), Row("c"), Row("d"), Row("e"))
)
}
test("delete data from carbon table[where IN ]") {
sql("""drop table if exists iud_db.dest""")
sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
sql("""delete from dest where c1 IN ('d', 'e')""").collect()
checkAnswer(
sql("""select c1 from dest"""),
Seq(Row("a"), Row("b"), Row("c"))
)
}
test("delete data from carbon table[with alias No where clause]") {
sql("""drop table if exists iud_db.dest""")
sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
sql("""delete from iud_db.dest a""").collect()
checkAnswer(
sql("""select c1 from iud_db.dest"""),
Seq()
)
}
test("delete data from carbon table[No alias No where clause]") {
sql("""drop table if exists iud_db.dest""")
sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
sql("""delete from dest""").collect()
checkAnswer(
sql("""select c1 from dest"""),
Seq()
)
}
test("delete data from carbon table[ JOIN with another table ]") {
sql("""drop table if exists iud_db.dest""")
sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
sql(""" DELETE FROM dest t1 INNER JOIN source2 t2 ON t1.c1 = t2.c11""").collect()
checkAnswer(
sql("""select c1 from iud_db.dest"""),
Seq(Row("c"), Row("d"), Row("e"))
)
}
test("delete data from carbon table[where numeric condition ]") {
sql("""drop table if exists iud_db.dest""")
sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
sql("""delete from iud_db.dest where c2 >= 4""").collect()
checkAnswer(
sql("""select count(*) from iud_db.dest"""),
Seq(Row(3))
)
}
test("partition delete data from carbon table with alias [where clause ]") {
sql("drop table if exists iud_db.dest")
sql(
"""create table iud_db.dest (c1 string,c2 int,c5 string) PARTITIONED BY(c3 string) STORED AS
|carbondata""".stripMargin)
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
sql("""delete from iud_db.dest d where d.c1 = 'a'""").collect()
checkAnswer(
sql("""select c2 from iud_db.dest"""),
Seq(Row(2), Row(3), Row(4), Row(5))
)
}
test("partition delete data from carbon table[where clause ]") {
sql("""drop table if exists iud_db.dest""")
sql(
"""create table iud_db.dest (c1 string,c2 int,c5 string) PARTITIONED BY(c3 string) STORED
|AS carbondata""".stripMargin)
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
sql("""delete from iud_db.dest where c2 = 2""").collect()
checkAnswer(
sql("""select c1 from iud_db.dest"""),
Seq(Row("a"), Row("c"), Row("d"), Row("e"))
)
}
test("test delete for partition table without merge index files for segment") {
try {
sql("DROP TABLE IF EXISTS iud_db.partition_nomerge_index")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT, "false")
sql(
s"""CREATE TABLE iud_db.partition_nomerge_index (a INT, b INT) PARTITIONED BY (country
|STRING) STORED AS carbondata"""
.stripMargin)
sql("INSERT INTO iud_db.partition_nomerge_index PARTITION(country='India') SELECT 1,2")
sql("INSERT INTO iud_db.partition_nomerge_index PARTITION(country='India') SELECT 3,4")
sql("INSERT INTO iud_db.partition_nomerge_index PARTITION(country='China') SELECT 5,6")
sql("INSERT INTO iud_db.partition_nomerge_index PARTITION(country='China') SELECT 7,8")
checkAnswer(sql("select * from iud_db.partition_nomerge_index"),
Seq(Row(1, 2, "India"), Row(3, 4, "India"), Row(5, 6, "China"), Row(7, 8, "China")))
sql("DELETE FROM iud_db.partition_nomerge_index WHERE b = 4")
checkAnswer(sql("select * from iud_db.partition_nomerge_index"),
Seq(Row(1, 2, "India"), Row(5, 6, "China"), Row(7, 8, "China")))
} finally {
CarbonProperties.getInstance()
.removeProperty(CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT)
}
}
test("Records more than one pagesize after delete operation ") {
sql("DROP TABLE IF EXISTS carbon2")
import sqlContext.implicits._
val df = sqlContext.sparkContext.parallelize(1 to 2000000)
.map(x => (x + "a", "b", x))
.toDF("c1", "c2", "c3")
df.write
.format("carbondata")
.option("tableName", "carbon2")
.option("tempCSV", "true")
.option("compress", "true")
.mode(SaveMode.Overwrite)
.save()
checkAnswer(sql("select count(*) from carbon2"), Seq(Row(2000000)))
sql("delete from carbon2 where c1 = '99999a'").collect()
checkAnswer(sql("select count(*) from carbon2"), Seq(Row(1999999)))
checkAnswer(sql("select * from carbon2 where c1 = '99999a'"), Seq())
sql("DROP TABLE IF EXISTS carbon2")
}
test("test select query after compaction, delete and clean files") {
sql("drop table if exists select_after_clean")
sql("create table select_after_clean(id int, name string) STORED AS carbondata")
sql("insert into select_after_clean select 1,'abc'")
sql("insert into select_after_clean select 2,'def'")
sql("insert into select_after_clean select 3,'uhj'")
sql("insert into select_after_clean select 4,'frg'")
sql("alter table select_after_clean compact 'minor'")
sql("clean files for table select_after_clean options('force'='true')")
sql("delete from select_after_clean where name='def'")
sql("clean files for table select_after_clean options('force'='true')")
assertResult(false)(new File(
CarbonTablePath.getSegmentPath(s"$storeLocation/iud_db.db/select_after_clean", "0")).exists())
checkAnswer(sql("""select * from select_after_clean"""),
Seq(Row(1, "abc"), Row(3, "uhj"), Row(4, "frg")))
}
test("test number of update table status files after delete query where no records are deleted") {
sql("drop table if exists update_status_files")
sql("create table update_status_files(name string,age int) STORED AS carbondata")
sql("insert into update_status_files select 'abc',1")
sql("insert into update_status_files select 'def',2")
sql("insert into update_status_files select 'xyz',4")
sql("insert into update_status_files select 'abc',6")
sql("alter table update_status_files compact 'minor'")
sql("delete from update_status_files where age=3").collect()
sql("delete from update_status_files where age=5").collect()
val carbonTable = CarbonEnv
.getCarbonTable(Some("iud_db"), "update_status_files")(sqlContext.sparkSession)
val metaPath = carbonTable.getMetadataPath
val files = FileFactory.getCarbonFile(metaPath)
val result = CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.getClass
if (result.getCanonicalName.contains("CarbonFileMetastore")) {
assert(files.listFiles(new CarbonFileFilter {
override def accept(file: CarbonFile): Boolean = !file.isDirectory
}).length == 2)
}
else {
assert(files.listFiles().length == 2)
}
sql("drop table update_status_files")
}
test("tuple-id for partition table ") {
sql("drop table if exists iud_db.dest_tuple_part")
sql(
"""create table iud_db.dest_tuple_part (c1 string,c2 int,c5 string) PARTITIONED BY(c3
|string) STORED AS carbondata"""
.stripMargin)
sql(
s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest_tuple_part"""
.stripMargin)
sql("drop table if exists iud_db.dest_tuple")
sql(
"""create table iud_db.dest_tuple (c1 string,c2 int,c5 string,c3 string) STORED AS
|carbondata"""
.stripMargin)
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest_tuple""")
val dataframe_part = sql("select getTupleId() as tupleId from iud_db.dest_tuple_part").collect()
val listOfTupleId_part = dataframe_part.map(df => df.get(0).toString).sorted
assert(listOfTupleId_part(0).startsWith("c3=aa/0-100100000100001_0-0-0-") &&
listOfTupleId_part(0).endsWith("/0/0/0"))
assert(listOfTupleId_part(1).startsWith("c3=bb/0-100100000100002_0-0-0-") &&
listOfTupleId_part(1).endsWith("/0/0/0"))
assert(listOfTupleId_part(2).startsWith("c3=cc/0-100100000100003_0-0-0-") &&
listOfTupleId_part(2).endsWith("/0/0/0"))
assert(listOfTupleId_part(3).startsWith("c3=dd/0-100100000100004_0-0-0-") &&
listOfTupleId_part(3).endsWith("/0/0/0"))
assert(listOfTupleId_part(4).startsWith("c3=ee/0-100100000100005_0-0-0-") &&
listOfTupleId_part(4).endsWith("/0/0/0"))
val dataframe = sql("select getTupleId() as tupleId from iud_db.dest_tuple")
val listOfTupleId = dataframe.collect().map(df => df.get(0).toString).sorted
assert(
listOfTupleId(0).contains("0/0-0_0-0-0-") && listOfTupleId(0).endsWith("/0/0/0"))
assert(
listOfTupleId(1).contains("0/0-0_0-0-0-") && listOfTupleId(1).endsWith("/0/0/1"))
assert(
listOfTupleId(2).contains("0/0-0_0-0-0-") && listOfTupleId(2).endsWith("/0/0/2"))
assert(
listOfTupleId(3).contains("0/0-0_0-0-0-") && listOfTupleId(3).endsWith("/0/0/3"))
assert(
listOfTupleId(4).contains("0/0-0_0-0-0-") && listOfTupleId(4).endsWith("/0/0/4"))
val carbonTable_part = CarbonEnv.getInstance(SparkTestQueryExecutor.spark).carbonMetaStore
.lookupRelation(Option("iud_db"), "dest_tuple_part")(SparkTestQueryExecutor.spark)
.asInstanceOf[CarbonRelation].carbonTable
val carbonTable = CarbonEnv.getInstance(SparkTestQueryExecutor.spark).carbonMetaStore
.lookupRelation(Option("iud_db"), "dest_tuple")(SparkTestQueryExecutor.spark)
.asInstanceOf[CarbonRelation].carbonTable
val carbonDataFilename = new File(carbonTable.getTablePath + "/Fact/Part0/Segment_0/")
.listFiles().filter(fn => fn.getName.endsWith(".carbondata"))
val blockId = CarbonUtil.getBlockId(carbonTable.getAbsoluteTableIdentifier,
carbonDataFilename(0).getAbsolutePath,
"0",
carbonTable.isTransactionalTable,
CarbonUtil.isStandardCarbonTable(carbonTable))
assert(blockId.startsWith("Part0/Segment_0/part-0-0_batchno0-0-0-"))
val carbonDataFilename_part = new File(carbonTable_part.getTablePath + "/c3=aa").listFiles()
.filter(fn => fn.getName.endsWith(".carbondata"))
val blockId_part = CarbonUtil.getBlockId(carbonTable.getAbsoluteTableIdentifier,
carbonDataFilename_part(0).getAbsolutePath,
"0",
carbonTable.isTransactionalTable,
CarbonUtil.isStandardCarbonTable(carbonTable))
assert(blockId_part.startsWith("Part0/Segment_0/part-0-100100000100001_batchno0-0-0-"))
val tableBlockPath = CarbonUpdateUtil
.getTableBlockPath(listOfTupleId(0),
carbonTable.getTablePath,
CarbonUtil.isStandardCarbonTable(carbonTable), true)
val tableBl0ckPath_part = CarbonUpdateUtil
.getTableBlockPath(listOfTupleId_part(0),
carbonTable_part.getTablePath,
CarbonUtil.isStandardCarbonTable(carbonTable_part), true)
assert(tableBl0ckPath_part.endsWith("iud_db.db/dest_tuple_part/c3=aa"))
assert(tableBlockPath.endsWith("iud_db.db/dest_tuple/Fact/Part0/Segment_0"))
sql("drop table if exists iud_db.dest_tuple_part")
sql("drop table if exists iud_db.dest_tuple")
}
test("block deleting records from table which has index") {
sql("drop table if exists test_dm_index")
sql("create table test_dm_index (a string, b string, c string) STORED AS carbondata")
sql("insert into test_dm_index select 'ccc','bbb','ccc'")
sql(
s"""
| CREATE INDEX dm_test_dm_index
| ON TABLE test_dm_index (a)
| AS 'bloomfilter'
| Properties('BLOOM_SIZE'='640000')
""".stripMargin)
assert(intercept[MalformedCarbonCommandException] {
sql("delete from test_dm_index where a = 'ccc'")
}.getMessage.contains("delete operation is not supported for index"))
sql("drop table if exists test_dm_index")
}
test("test delete on table with decimal column") {
sql("drop table if exists decimal_table")
sql(
s"""create table decimal_table(smallIntField smallInt,intField int,bigIntField bigint,
floatField float,
doubleField double,decimalField decimal(25, 4),timestampField timestamp,dateField date,
stringField string,
varcharField varchar(10),charField char(10))stored as carbondata
""".stripMargin)
sql(s"load data local inpath '$resourcesPath/decimalData.csv' into table decimal_table")
val frame = sql(
"select decimalfield from decimal_table where smallIntField = -1 or smallIntField = 3")
sql(s"delete from decimal_table where smallIntField = 2")
checkAnswer(frame, Seq(
Row(-1.1234),
Row(3.1234)
))
sql("drop table if exists decimal_table")
}
test("delete and insert overwrite partition") {
sql("""drop table if exists deleteinpartition""")
sql(
"""CREATE TABLE deleteinpartition (id STRING, sales STRING)
| PARTITIONED BY (dtm STRING)
| STORED AS carbondata""".stripMargin)
sql(
s"""load data local inpath '$resourcesPath/IUD/updateinpartition.csv'
| into table deleteinpartition""".stripMargin)
sql("""delete from deleteinpartition where dtm=20200907 and id='001'""")
sql("""delete from deleteinpartition where dtm=20200907 and id='002'""")
checkAnswer(
sql("""select count(1), dtm from deleteinpartition group by dtm"""),
Seq(Row(8, "20200907"), Row(10, "20200908"))
)
    // insert overwrite a partition which already exists.
    // make sure the delete executed before still works.
sql(
"""insert overwrite table deleteinpartition
| partition (dtm=20200908)
| select id, sales from deleteinpartition
| where dtm = 20200907""".stripMargin)
checkAnswer(
sql("""select count(1), dtm from deleteinpartition group by dtm"""),
Seq(Row(8, "20200907"), Row(8, "20200908"))
)
    // insert overwrite a partition which does not exist.
    // make sure the delete executed before still works.
sql(
"""insert overwrite table deleteinpartition
| partition (dtm=20200909)
| select id, sales from deleteinpartition
| where dtm = 20200907""".stripMargin)
checkAnswer(
sql("""select count(1), dtm from deleteinpartition group by dtm"""),
Seq(Row(8, "20200907"), Row(8, "20200908"), Row(8, "20200909"))
)
// drop a partition. make sure the delete executed before still works.
sql("""alter table deleteinpartition drop partition (dtm=20200908)""")
checkAnswer(
sql("""select count(1), dtm from deleteinpartition group by dtm"""),
Seq(Row(8, "20200907"), Row(8, "20200909"))
)
sql("""drop table deleteinpartition""")
}
test("[CARBONDATA-3491] Return updated/deleted rows count when execute update/delete sql") {
sql("drop table if exists test_return_row_count")
sql("create table test_return_row_count (a string, b string, c string) STORED AS carbondata")
.collect()
sql("insert into test_return_row_count select 'aaa','bbb','ccc'").collect()
sql("insert into test_return_row_count select 'bbb','bbb','ccc'").collect()
sql("insert into test_return_row_count select 'ccc','bbb','ccc'").collect()
sql("insert into test_return_row_count select 'ccc','bbb','ccc'").collect()
checkAnswer(sql("delete from test_return_row_count where a = 'aaa'"),
Seq(Row(1))
)
checkAnswer(sql("select * from test_return_row_count"),
Seq(Row("bbb", "bbb", "ccc"), Row("ccc", "bbb", "ccc"), Row("ccc", "bbb", "ccc"))
)
sql("drop table if exists test_return_row_count").collect()
}
test(
"[CARBONDATA-3561] Fix incorrect results after execute delete/update operation if there are " +
"null values")
{
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER, "true")
val tableName = "fix_incorrect_results_for_iud"
sql(s"drop table if exists ${ tableName }")
sql(s"create table ${ tableName } (a string, b string, c string) STORED AS carbondata")
.collect()
sql(
s"""insert into table ${ tableName }
select '1','1','2017' union all
select '2','2','2017' union all
select '3','3','2017' union all
select '4','4','2017' union all
select '5',null,'2017' union all
select '6',null,'2017' union all
select '7','7','2017' union all
select '8','8','2017' union all
select '9',null,'2017' union all
select '10',null,'2017'""").collect()
checkAnswer(sql(s"select count(1) from ${ tableName } where b is null"), Seq(Row(4)))
checkAnswer(sql(s"delete from ${ tableName } where b ='4'"), Seq(Row(1)))
checkAnswer(sql(s"delete from ${ tableName } where a ='9'"), Seq(Row(1)))
checkAnswer(sql(s"update ${ tableName } set (b) = ('10') where a = '10'"), Seq(Row(1)))
checkAnswer(sql(s"select count(1) from ${ tableName } where b is null"), Seq(Row(2)))
checkAnswer(sql(s"select * from ${ tableName } where a = '1'"), Seq(Row("1", "1", "2017")))
checkAnswer(sql(s"select * from ${ tableName } where a = '10'"), Seq(Row("10", "10", "2017")))
sql(s"drop table if exists ${ tableName }").collect()
}
test("test partition table delete and horizontal compaction") {
sql("drop table if exists iud_db.partition_hc")
sql(
"create table iud_db.partition_hc (c1 string,c2 int,c5 string) PARTITIONED BY(c3 string) " +
"STORED AS carbondata")
sql(
"insert into iud_db.partition_hc values ('a',1,'aaa','aa'),('a',5,'aaa','aa'),('a',9,'aaa'," +
"'aa'),('a',4,'aaa','aa'),('a',2,'aaa','aa'),('a',3,'aaa'," +
"'aa')")
sql("delete from iud_db.partition_hc where c2 = 1").show()
sql("delete from iud_db.partition_hc where c2 = 5").show()
checkAnswer(
sql("""select c2 from iud_db.partition_hc"""),
Seq(Row(9), Row(4), Row(2), Row(3))
)
// verify if the horizontal compaction happened or not
val carbonTable = CarbonEnv.getCarbonTable(Some("iud_db"), "partition_hc")(sqlContext
.sparkSession)
val partitionPath = carbonTable.getTablePath + "/c3=aa"
val deltaFiles = FileFactory.getCarbonFile(partitionPath).listFiles(new CarbonFileFilter {
override def accept(file: CarbonFile): Boolean = {
file.getName.endsWith(CarbonCommonConstants.DELETE_DELTA_FILE_EXT)
}
})
assert(deltaFiles.size == 3)
val updateStatusFiles = FileFactory.getCarbonFile(CarbonTablePath.getMetadataPath(carbonTable
.getTablePath)).listFiles(new CarbonFileFilter {
override def accept(file: CarbonFile): Boolean = {
file.getName.startsWith(CarbonCommonConstants.TABLEUPDATESTATUS_FILENAME)
}
})
assert(updateStatusFiles.size == 3)
}
override def afterAll {
sql("use default")
sql("drop database if exists iud_db cascade")
CarbonProperties.getInstance()
.removeProperty(CarbonCommonConstants.CARBON_CLEAN_FILES_FORCE_ALLOWED)
}
}
|
zzcclp/carbondata
|
integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
|
Scala
|
apache-2.0
| 23,033 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import org.apache.hadoop.hive.common.StatsSetupConst
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.analysis.CastSupport
import org.apache.spark.sql.catalyst.catalog.{CatalogStatistics, CatalogTable, CatalogTablePartition, ExternalCatalogUtils, HiveTableRelation}
import org.apache.spark.sql.catalyst.expressions.{And, AttributeSet, Expression, ExpressionSet, SubqueryExpression}
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.catalyst.plans.logical.{Filter, LogicalPlan, Project}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.datasources.DataSourceStrategy
import org.apache.spark.sql.internal.SQLConf
/**
* Prune hive table partitions using partition filters on [[HiveTableRelation]]. The pruned
* partitions will be kept in [[HiveTableRelation.prunedPartitions]], and the statistics of
* the hive table relation will be updated based on pruned partitions.
*
* This rule is executed in optimization phase, so the statistics can be updated before physical
 * planning, which is useful for some Spark strategies, e.g.
* [[org.apache.spark.sql.execution.SparkStrategies.JoinSelection]].
*
* TODO: merge this with PruneFileSourcePartitions after we completely make hive as a data source.
*/
private[sql] class PruneHiveTablePartitions(session: SparkSession)
extends Rule[LogicalPlan] with CastSupport {
override val conf: SQLConf = session.sessionState.conf
/**
* Extract the partition filters from the filters on the table.
*/
private def getPartitionKeyFilters(
filters: Seq[Expression],
relation: HiveTableRelation): ExpressionSet = {
val normalizedFilters = DataSourceStrategy.normalizeExprs(
filters.filter(f => f.deterministic && !SubqueryExpression.hasSubquery(f)), relation.output)
val partitionColumnSet = AttributeSet(relation.partitionCols)
ExpressionSet(normalizedFilters.filter { f =>
!f.references.isEmpty && f.references.subsetOf(partitionColumnSet)
})
}
/**
* Prune the hive table using filters on the partitions of the table.
*/
private def prunePartitions(
relation: HiveTableRelation,
partitionFilters: ExpressionSet): Seq[CatalogTablePartition] = {
if (conf.metastorePartitionPruning) {
session.sessionState.catalog.listPartitionsByFilter(
relation.tableMeta.identifier, partitionFilters.toSeq)
} else {
ExternalCatalogUtils.prunePartitionsByFilter(relation.tableMeta,
session.sessionState.catalog.listPartitions(relation.tableMeta.identifier),
partitionFilters.toSeq, conf.sessionLocalTimeZone)
}
}
/**
* Update the statistics of the table.
*/
private def updateTableMeta(
tableMeta: CatalogTable,
prunedPartitions: Seq[CatalogTablePartition]): CatalogTable = {
val sizeOfPartitions = prunedPartitions.map { partition =>
val rawDataSize = partition.parameters.get(StatsSetupConst.RAW_DATA_SIZE).map(_.toLong)
val totalSize = partition.parameters.get(StatsSetupConst.TOTAL_SIZE).map(_.toLong)
if (rawDataSize.isDefined && rawDataSize.get > 0) {
rawDataSize.get
} else if (totalSize.isDefined && totalSize.get > 0L) {
totalSize.get
} else {
0L
}
}
if (sizeOfPartitions.forall(_ > 0)) {
val sizeInBytes = sizeOfPartitions.sum
tableMeta.copy(stats = Some(CatalogStatistics(sizeInBytes = BigInt(sizeInBytes))))
} else {
tableMeta
}
}
override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case op @ PhysicalOperation(projections, filters, relation: HiveTableRelation)
if filters.nonEmpty && relation.isPartitioned && relation.prunedPartitions.isEmpty =>
val partitionKeyFilters = getPartitionKeyFilters(filters, relation)
if (partitionKeyFilters.nonEmpty) {
val newPartitions = prunePartitions(relation, partitionKeyFilters)
val newTableMeta = updateTableMeta(relation.tableMeta, newPartitions)
val newRelation = relation.copy(
tableMeta = newTableMeta, prunedPartitions = Some(newPartitions))
// Keep partition filters so that they are visible in physical planning
Project(projections, Filter(filters.reduceLeft(And), newRelation))
} else {
op
}
}
}
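// A minimal sketch (hypothetical, deliberately simplified types) of the pruning idea documented
// above: keep only the filters that reference nothing but partition columns, then use them to
// select the matching partitions; the remaining filters stay in the regular Filter node.
private object PartitionPruningSketch {
  final case class Part(spec: Map[String, String])
  def prune(
      parts: Seq[Part],
      partitionCols: Set[String],
      equalityFilters: Map[String, String]): Seq[Part] = {
    val partitionKeyFilters = equalityFilters.filter { case (col, _) => partitionCols.contains(col) }
    parts.filter(p => partitionKeyFilters.forall { case (col, v) => p.spec.get(col).contains(v) })
  }
}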
|
goldmedal/spark
|
sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/PruneHiveTablePartitions.scala
|
Scala
|
apache-2.0
| 5,209 |
package com.themillhousegroup.l7
import java.io.File
import com.typesafe.scalalogging.LazyLogging
import scala.util.Try
import com.themillhousegroup.l7.commands._
import scala.util.Failure
object Automerge {
def apply(existingDirectoryName: String, newerDirectoryName: String) = {
new Automerge(
new File(existingDirectoryName),
new File(newerDirectoryName)
)
}
}
class Automerge(val existingDir: File, val newerDir: File) extends LazyLogging {
def dryRun = merge(true)
import DirectoryHelper.xmlFilesIn
def merge(dryRun: Boolean = false): Try[DifferenceSet[File]] = {
val maybeExistingDir = xmlFilesIn(existingDir)
val maybeNewerDir = xmlFilesIn(newerDir)
for {
existingDir <- maybeExistingDir
newerDir <- maybeNewerDir
result <- DirectoryDifferenceInspector.diff(existingDir, newerDir)
} yield result
}
}
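// A minimal usage sketch (hypothetical directory names): a dry run only computes the
// difference set between the two directories, while merge(dryRun = false) would apply it.
private object AutomergeUsageSketch {
  def example(): Unit =
    Automerge("layer7/existing", "layer7/newer").dryRun.foreach(diff => println(diff))
}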
object Failures {
def failWith(msg: String) = Failure(new IllegalArgumentException(msg))
}
object AutomergeApp extends App with CommandProcessor {
val knownCommands = Seq[Command](
SingleDocumentComparisonCommand,
SingleDocumentMergeCommand,
VisualiserCommand)
if (args.isEmpty) {
displayCommands
} else {
runCommand(args)
}
}
|
themillhousegroup/l7-merge
|
src/main/scala/com/themillhousegroup/l7/Automerge.scala
|
Scala
|
mit
| 1,243 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.recorder.ui.swing.component
import scala.swing._
import scala.swing.BorderPanel.Position._
import io.gatling.recorder.ui.swing.frame.ConfigurationFrame
import io.gatling.recorder.ui.swing.util.UIHelper._
private[swing] object DialogFileSelector {
val message = """|A Swing bug on Mac OS X prevents the Recorder from getting
|the correct path for file with some known extensions.
|Those files closely matches the file you selected, please select
|the correct one :
|""".stripMargin
}
@SuppressWarnings(Array("org.wartremover.warts.LeakingSealed"))
// error is in scala-swing
private[swing] class DialogFileSelector(configurationFrame: ConfigurationFrame, possibleFiles: List[String]) extends Dialog(configurationFrame) {
var selectedFile: Option[String] = None
val radioButtons = possibleFiles.map(new RadioButton(_))
val radiosGroup = new ButtonGroup(radioButtons: _*)
val cancelButton = Button("Cancel")(close())
val okButton =
Button("OK") {
radiosGroup.selected.foreach(button => selectedFile = Some(button.text))
close()
}
val defaultBackground = background
contents = new BorderPanel {
val messageLabel = new TextArea(DialogFileSelector.message) { background = defaultBackground }
val radiosPanel = new BoxPanel(Orientation.Vertical) { radioButtons.foreach(contents += _) }
val buttonsPanel = new CenterAlignedFlowPanel {
contents += okButton
contents += cancelButton
}
layout(messageLabel) = North
layout(radiosPanel) = Center
layout(buttonsPanel) = South
}
modal = true
setLocationRelativeTo(configurationFrame)
}
|
gatling/gatling
|
gatling-recorder/src/main/scala/io/gatling/recorder/ui/swing/component/DialogFileSelector.scala
|
Scala
|
apache-2.0
| 2,326 |
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* *
* Copyright © 2012 Christian Krause *
* *
* Christian Krause <[email protected]> *
* <[email protected]> *
* *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* *
* This file is part of 'ClusterKit'. *
* *
* This project is free software: you can redistribute it and/or modify it under the terms *
* of the GNU General Public License as published by the Free Software Foundation, either *
* version 3 of the License, or any later version. *
* *
* This project is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; *
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License along with this project. *
* If not, see <http://www.gnu.org/licenses/>. *
* *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package ckit
package client
package swing
package action
import scala.swing.Dialog
/** Tries to quit this application. */
object Quit extends Action("Quit") {
accelerator = Some(keyStroke(Key.Q, Modifier.Control))
mnemonic = Key.Q.id
toolTip = "Logout if necessary and quit."
override def apply = Dialog.showConfirmation(message = "Are you sure you want to quit?") match {
case Dialog.Result.Yes ⇒ SwingClient.quit()
case _ ⇒
}
}
|
wookietreiber/ckit
|
client/swing/main/scala/action/Quit.scala
|
Scala
|
gpl-3.0
| 2,743 |
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
package elements
import com.intellij.openapi.diagnostic.Logger
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs.{IndexSink, StubElement, StubInputStream, StubOutputStream}
import com.intellij.util.io.StringRef
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.{ScClassParents, ScTemplateParents}
import org.jetbrains.plugins.scala.lang.psi.stubs.impl.ScTemplateParentsStubImpl
import scala.collection.mutable.ArrayBuffer
/**
* User: Alexander Podkhalyuzin
* Date: 17.06.2009
*/
abstract class ScTemplateParentsElementType[Func <: ScTemplateParents](debugName: String)
extends ScStubElementType[ScTemplateParentsStub, ScTemplateParents](debugName) {
def serialize(stub: ScTemplateParentsStub, dataStream: StubOutputStream) {
val seq = stub.getTemplateParentsTypesTexts
dataStream.writeInt(seq.length)
for (s <- seq) dataStream.writeName(s)
stub.getConstructor match {
case Some(str) =>
dataStream.writeBoolean(true)
dataStream.writeName(str)
case _ => dataStream.writeBoolean(false)
}
}
def createStubImpl[ParentPsi <: PsiElement](psi: ScTemplateParents, parentStub: StubElement[ParentPsi]): ScTemplateParentsStub = {
val constr = psi match {
case p: ScClassParents => p.constructor.map(_.getText)
case _ => None
}
new ScTemplateParentsStubImpl(parentStub, this, constr.map(StringRef.fromString),
psi.typeElementsWithoutConstructor.map(te => StringRef.fromString(te.getText)))
}
def deserializeImpl(dataStream: StubInputStream, parentStub: Any): ScTemplateParentsStub = {
val length = dataStream.readInt
if (length >= 0) {
val res = new ArrayBuffer[StringRef]
for (i <- 0 until length) res += dataStream.readName
val constr = dataStream.readBoolean() match {
case true => Some(dataStream.readName())
case false => None
}
new ScTemplateParentsStubImpl(parentStub.asInstanceOf[StubElement[PsiElement]], this, constr, res)
} else {
ScTemplateParentsElementType.LOG.error("Negative byte deserialized for array")
new ScTemplateParentsStubImpl(parentStub.asInstanceOf[StubElement[PsiElement]], this, None, Seq.empty)
}
}
def indexStub(stub: ScTemplateParentsStub, sink: IndexSink) {}
}
object ScTemplateParentsElementType {
private val LOG = Logger.getInstance("#org.jetbrains.plugins.scala.lang.psi.stubs.elements.ScTemplateParentsElementType")
}
|
triggerNZ/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/psi/stubs/elements/ScTemplateParentsElementType.scala
|
Scala
|
apache-2.0
| 2,535 |
/*
* Copyright (c) <2012-2013>, Amanj Sherwany <http://www.amanj.me>
* All rights reserved.
* */
package ch.usi.inf.l3.moolc.evaluator
import _root_.ch.usi.inf.l3.moolc.ast._
class MethodBank{
private var methods: List[MMethod] = Nil
private var specializedMethods:
Map[(String, List[PEValue]), MMethod] = Map.empty
private var nextMethodID = 0
private def nullify(args: List[PEValue]) = {
var temp: List[PEValue] = Nil
for(arg <- args) {
arg match{
case x: CTValue => temp = x :: temp
case _ => temp = Bottom :: temp
}
}
temp.reverse
}
def getSpecializedMethodsList = methods
def getMethodName(base: String) = {
val newName = base + "_" + nextMethodID
nextMethodID += 1
newName
}
def add(name: String, args: List[PEValue], method: MMethod) = {
methods = method :: methods
specializedMethods = specializedMethods + ((name, nullify(args)) -> method)
}
def get(name: String, args: List[PEValue]): MMethod = {
println(args)
specializedMethods((name, nullify(args)))
}
def getOption(name: String, args: List[PEValue]) : Option[MMethod] = {
specializedMethods.get((name, nullify(args)))
}
def has(name: String, args: List[PEValue]) : Boolean = {
getOption(name, args) match {
case Some(x) => true
case None => false
}
}
}
class ClassBank{
private var nextClassID = 0
private var classes: List[MClass] = Nil
def getAllSpecializedClasses = classes
private def nullify(args: List[PEValue]) = {
var temp: List[PEValue] = Nil
for(arg <- args) {
arg match{
case x: CTValue => temp = x :: temp
case _ => temp = Bottom :: temp
}
}
temp.reverse
}
def getClassName(base: ClassName) = {
val newName = base.name + "_" + nextClassID
nextClassID = nextClassID + 1
ClassName(newName, NoPosition)
}
private var specializedClasses:
Map[(ClassName, List[PEValue]), MClass] = Map.empty
def add(cname: ClassName, args: List[PEValue], clazz: MClass) = {
classes = clazz :: classes
specializedClasses = specializedClasses + ((cname, nullify(args)) -> clazz)
}
def get(cname: ClassName, args: List[PEValue]): MClass = {
specializedClasses((cname, nullify(args)))
}
def getOption(cname: ClassName, args: List[PEValue]) : Option[MClass] = {
specializedClasses.get((cname, nullify(args)))
}
	def has(cname: ClassName, args: List[PEValue]): Boolean = getOption(cname, args).isDefined
}
/*
 * Represents a compile-time value
 */
case class CTValue(expr: Expression) extends PEValue with PEKnownValue {
def getExpr = expr
}
/*
 * Represents a runtime value, ⊤
 */
case object Top extends PEValue{}
/*
 * Represents an unknown value, ⊥
 */
case object Bottom extends PEValue{}
/*
* Represents an abstract value
*/
case class AbsValue(expr: Expression) extends PEValue with PEKnownValue {
def getExpr = expr
}
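/*
 * The value domain used during specialization; only CTValue and AbsValue
 * carry a concrete expression.
 */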
sealed trait PEValue{}
sealed trait PEKnownValue {
def getExpr: Expression
}
|
amanjpro/mool-compiler
|
src/main/scala/ch/usi/inf/l3/moolc/eval/StoreUtil.scala
|
Scala
|
bsd-3-clause
| 2,989 |
package amailp.intellij.robot.psi
import com.intellij.lang.ASTNode
import com.intellij.extapi.psi.ASTWrapperPsiElement
import com.intellij.psi._
import amailp.intellij.robot.psi.reference.ResourceValueReference
class ResourceValue(node: ASTNode) extends ASTWrapperPsiElement(node) {
override def getReference: ResourceValueReference = new ResourceValueReference(this)
}
|
AmailP/robot-plugin
|
src/main/scala/amailp/intellij/robot/psi/ResourceValue.scala
|
Scala
|
gpl-3.0
| 374 |
import annotation.experimental
@main
def run(): Unit = f // error
@experimental
def f = 2
|
dotty-staging/dotty
|
tests/neg-custom-args/no-experimental/i13848.scala
|
Scala
|
apache-2.0
| 92 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.sql.{Date, Timestamp}
import java.text.{ParseException, SimpleDateFormat}
import java.time.{DateTimeException, Duration, Instant, LocalDate, LocalDateTime, Period, ZoneId}
import java.time.format.DateTimeParseException
import java.time.temporal.ChronoUnit
import java.util.{Calendar, Locale, TimeZone}
import java.util.concurrent.TimeUnit._
import scala.language.postfixOps
import scala.reflect.ClassTag
import scala.util.Random
import org.apache.spark.{SparkFunSuite, SparkUpgradeException}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
import org.apache.spark.sql.catalyst.util.{DateTimeUtils, IntervalUtils, TimestampFormatter}
import org.apache.spark.sql.catalyst.util.DateTimeConstants._
import org.apache.spark.sql.catalyst.util.DateTimeTestUtils._
import org.apache.spark.sql.catalyst.util.DateTimeUtils.{getZoneId, TimeZoneUTC}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.TimestampTypes
import org.apache.spark.sql.types._
import org.apache.spark.sql.types.DataTypeTestUtils.{dayTimeIntervalTypes, yearMonthIntervalTypes}
import org.apache.spark.unsafe.types.{CalendarInterval, UTF8String}
class DateExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper {
import IntegralLiteralTestUtils._
private val PST_OPT = Option(PST.getId)
private val JST_OPT = Option(JST.getId)
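  // Parses a "yyyy-MM-dd HH:mm:ss" string in UTC and returns the epoch milliseconds.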
def toMillis(timestamp: String): Long = {
val tf = TimestampFormatter("yyyy-MM-dd HH:mm:ss", UTC, isParsing = true)
DateTimeUtils.microsToMillis(tf.parse(timestamp))
}
val date = "2015-04-08 13:10:15"
val d = new Date(toMillis(date))
val time = "2013-11-08 13:10:15"
val ts = new Timestamp(toMillis(time))
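  // Build a literal input and the expected value for TimestampType or TimestampNTZType.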
private def timestampLiteral(s: String, sdf: SimpleDateFormat, dt: DataType): Literal = {
dt match {
case _: TimestampType =>
Literal(new Timestamp(sdf.parse(s).getTime))
case _: TimestampNTZType =>
Literal(LocalDateTime.parse(s.replace(" ", "T")))
}
}
private def timestampAnswer(s: String, sdf: SimpleDateFormat, dt: DataType): Any = {
dt match {
case _: TimestampType =>
DateTimeUtils.fromJavaTimestamp(
new Timestamp(sdf.parse(s).getTime))
case _: TimestampNTZType =>
LocalDateTime.parse(s.replace(" ", "T"))
}
}
test("datetime function current_date") {
val d0 = DateTimeUtils.currentDate(UTC)
val cd = CurrentDate(UTC_OPT).eval(EmptyRow).asInstanceOf[Int]
val d1 = DateTimeUtils.currentDate(UTC)
assert(d0 <= cd && cd <= d1 && d1 - d0 <= 1)
val cdjst = CurrentDate(JST_OPT).eval(EmptyRow).asInstanceOf[Int]
val cdpst = CurrentDate(PST_OPT).eval(EmptyRow).asInstanceOf[Int]
assert(cdpst <= cd && cd <= cdjst)
}
test("datetime function current_timestamp") {
val ct = DateTimeUtils.toJavaTimestamp(CurrentTimestamp().eval(EmptyRow).asInstanceOf[Long])
val t1 = System.currentTimeMillis()
assert(math.abs(t1 - ct.getTime) < 5000)
}
test("datetime function localtimestamp") {
    // Verify with several outstanding time zones that have no daylight saving time.
Seq("UTC", "Africa/Dakar", "Asia/Hong_Kong").foreach { zid =>
val zoneId = DateTimeUtils.getZoneId(zid)
val ct = LocalTimestamp(Some(zid)).eval(EmptyRow).asInstanceOf[Long]
val t1 = DateTimeUtils.localDateTimeToMicros(LocalDateTime.now(zoneId))
assert(math.abs(t1 - ct) < 1000000)
}
}
test("DayOfYear") {
val sdfDay = new SimpleDateFormat("D", Locale.US)
val c = Calendar.getInstance()
(0 to 3).foreach { m =>
(0 to 5).foreach { i =>
c.set(2000, m, 28, 0, 0, 0)
c.add(Calendar.DATE, i)
checkEvaluation(DayOfYear(Literal(new Date(c.getTimeInMillis))),
sdfDay.format(c.getTime).toInt)
}
}
checkEvaluation(DayOfYear(Literal.create(null, DateType)), null)
checkEvaluation(DayOfYear(Cast(Literal("1582-10-15 13:10:15"), DateType)), 288)
checkEvaluation(DayOfYear(Cast(Literal("1582-10-04 13:10:15"), DateType)), 277)
checkConsistencyBetweenInterpretedAndCodegen(DayOfYear, DateType)
}
test("Year") {
checkEvaluation(Year(Literal.create(null, DateType)), null)
checkEvaluation(Year(Literal(d)), 2015)
checkEvaluation(Year(Cast(Literal(date), DateType, UTC_OPT)), 2015)
checkEvaluation(Year(Cast(Literal(ts), DateType, UTC_OPT)), 2013)
val c = Calendar.getInstance()
(2000 to 2002).foreach { y =>
(0 to 11 by 11).foreach { m =>
c.set(y, m, 28)
(0 to 12).foreach { i =>
c.add(Calendar.HOUR_OF_DAY, 10)
checkEvaluation(Year(Literal(new Date(c.getTimeInMillis))),
c.get(Calendar.YEAR))
}
}
}
checkEvaluation(Year(Cast(Literal("1582-01-01 13:10:15"), DateType)), 1582)
checkEvaluation(Year(Cast(Literal("1581-12-31 13:10:15"), DateType)), 1581)
checkConsistencyBetweenInterpretedAndCodegen(Year, DateType)
}
test("Quarter") {
checkEvaluation(Quarter(Literal.create(null, DateType)), null)
checkEvaluation(Quarter(Literal(d)), 2)
checkEvaluation(Quarter(Cast(Literal(date), DateType, UTC_OPT)), 2)
checkEvaluation(Quarter(Cast(Literal(ts), DateType, UTC_OPT)), 4)
val c = Calendar.getInstance()
(2003 to 2004).foreach { y =>
(0 to 11 by 3).foreach { m =>
c.set(y, m, 28, 0, 0, 0)
(0 to 5 * 24).foreach { i =>
c.add(Calendar.HOUR_OF_DAY, 1)
checkEvaluation(Quarter(Literal(new Date(c.getTimeInMillis))),
c.get(Calendar.MONTH) / 3 + 1)
}
}
}
checkEvaluation(Quarter(Cast(Literal("1582-10-01 13:10:15"), DateType)), 4)
checkEvaluation(Quarter(Cast(Literal("1582-09-30 13:10:15"), DateType)), 3)
checkConsistencyBetweenInterpretedAndCodegen(Quarter, DateType)
}
test("Month") {
checkEvaluation(Month(Literal.create(null, DateType)), null)
checkEvaluation(Month(Literal(d)), 4)
checkEvaluation(Month(Cast(Literal(date), DateType, UTC_OPT)), 4)
checkEvaluation(Month(Cast(Literal(ts), DateType, UTC_OPT)), 11)
checkEvaluation(Month(Cast(Literal("1582-04-28 13:10:15"), DateType)), 4)
checkEvaluation(Month(Cast(Literal("1582-10-04 13:10:15"), DateType)), 10)
checkEvaluation(Month(Cast(Literal("1582-10-15 13:10:15"), DateType)), 10)
val c = Calendar.getInstance()
(2003 to 2004).foreach { y =>
(0 to 3).foreach { m =>
(0 to 2 * 24).foreach { i =>
c.set(y, m, 28, 0, 0, 0)
c.add(Calendar.HOUR_OF_DAY, i)
checkEvaluation(Month(Literal(new Date(c.getTimeInMillis))),
c.get(Calendar.MONTH) + 1)
}
}
}
checkConsistencyBetweenInterpretedAndCodegen(Month, DateType)
}
test("Day / DayOfMonth") {
checkEvaluation(DayOfMonth(Cast(Literal("2000-02-29"), DateType)), 29)
checkEvaluation(DayOfMonth(Literal.create(null, DateType)), null)
checkEvaluation(DayOfMonth(Literal(d)), 8)
checkEvaluation(DayOfMonth(Cast(Literal(date), DateType, UTC_OPT)), 8)
checkEvaluation(DayOfMonth(Cast(Literal(ts), DateType, UTC_OPT)), 8)
checkEvaluation(DayOfMonth(Cast(Literal("1582-04-28 13:10:15"), DateType)), 28)
checkEvaluation(DayOfMonth(Cast(Literal("1582-10-15 13:10:15"), DateType)), 15)
checkEvaluation(DayOfMonth(Cast(Literal("1582-10-04 13:10:15"), DateType)), 4)
val c = Calendar.getInstance()
(1999 to 2000).foreach { y =>
c.set(y, 0, 1, 0, 0, 0)
val random = new Random(System.nanoTime)
random.shuffle(0 to 365 toList).take(10).foreach { d =>
c.set(Calendar.DAY_OF_YEAR, d)
checkEvaluation(DayOfMonth(Literal(new Date(c.getTimeInMillis))),
c.get(Calendar.DAY_OF_MONTH))
}
}
checkConsistencyBetweenInterpretedAndCodegen(DayOfMonth, DateType)
}
test("Seconds") {
assert(Second(Literal.create(null, DateType), UTC_OPT).resolved === false)
    assert(Second(Cast(Literal(d), TimestampType, UTC_OPT), UTC_OPT).resolved)
Seq(TimestampType, TimestampNTZType).foreach { dt =>
checkEvaluation(Second(Cast(Literal(d), dt, UTC_OPT), UTC_OPT), 0)
checkEvaluation(Second(Cast(Literal(date), dt, UTC_OPT), UTC_OPT), 15)
}
checkEvaluation(Second(Literal(ts), UTC_OPT), 15)
val c = Calendar.getInstance()
for (zid <- outstandingZoneIds) {
val timeZoneId = Option(zid.getId)
c.setTimeZone(TimeZone.getTimeZone(zid))
(0 to 59 by 5).foreach { s =>
// validate timestamp with local time zone
c.set(2015, 18, 3, 3, 5, s)
checkEvaluation(
Second(Literal(new Timestamp(c.getTimeInMillis)), timeZoneId),
c.get(Calendar.SECOND))
// validate timestamp without time zone
checkEvaluation(
Second(Literal(LocalDateTime.of(2015, 1, 3, 3, 5, s))),
s)
}
Seq(TimestampType, TimestampNTZType).foreach { dt =>
checkConsistencyBetweenInterpretedAndCodegen(
(child: Expression) => Second(child, timeZoneId), dt)
}
}
}
test("DayOfWeek") {
checkEvaluation(DayOfWeek(Literal.create(null, DateType)), null)
checkEvaluation(DayOfWeek(Literal(d)), Calendar.WEDNESDAY)
checkEvaluation(DayOfWeek(Cast(Literal(date), DateType, UTC_OPT)),
Calendar.WEDNESDAY)
checkEvaluation(DayOfWeek(Cast(Literal(ts), DateType, UTC_OPT)), Calendar.FRIDAY)
checkEvaluation(DayOfWeek(Cast(Literal("2011-05-06"), DateType, UTC_OPT)), Calendar.FRIDAY)
checkEvaluation(DayOfWeek(Literal(new Date(toMillis("2017-05-27 13:10:15")))),
Calendar.SATURDAY)
checkEvaluation(DayOfWeek(Literal(new Date(toMillis("1582-10-15 13:10:15")))),
Calendar.FRIDAY)
checkConsistencyBetweenInterpretedAndCodegen(DayOfWeek, DateType)
}
test("WeekDay") {
checkEvaluation(WeekDay(Literal.create(null, DateType)), null)
checkEvaluation(WeekDay(Literal(d)), 2)
checkEvaluation(WeekDay(Cast(Literal(date), DateType, UTC_OPT)), 2)
checkEvaluation(WeekDay(Cast(Literal(ts), DateType, UTC_OPT)), 4)
checkEvaluation(WeekDay(Cast(Literal("2011-05-06"), DateType, UTC_OPT)), 4)
checkEvaluation(WeekDay(Literal(new Date(toMillis("2017-05-27 13:10:15")))), 5)
checkEvaluation(WeekDay(Literal(new Date(toMillis("1582-10-15 13:10:15")))), 4)
checkConsistencyBetweenInterpretedAndCodegen(WeekDay, DateType)
}
test("WeekOfYear") {
checkEvaluation(WeekOfYear(Literal.create(null, DateType)), null)
checkEvaluation(WeekOfYear(Literal(d)), 15)
checkEvaluation(WeekOfYear(Cast(Literal(date), DateType, UTC_OPT)), 15)
checkEvaluation(WeekOfYear(Cast(Literal(ts), DateType, UTC_OPT)), 45)
checkEvaluation(WeekOfYear(Cast(Literal("2011-05-06"), DateType, UTC_OPT)), 18)
checkEvaluation(WeekOfYear(Cast(Literal("1582-10-15 13:10:15"), DateType, UTC_OPT)), 41)
checkEvaluation(WeekOfYear(Cast(Literal("1582-10-04 13:10:15"), DateType, UTC_OPT)), 40)
checkConsistencyBetweenInterpretedAndCodegen(WeekOfYear, DateType)
}
test("DateFormat") {
Seq("legacy", "corrected").foreach { legacyParserPolicy =>
withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> legacyParserPolicy) {
checkEvaluation(
DateFormatClass(Literal.create(null, TimestampType), Literal("y"), UTC_OPT),
null)
checkEvaluation(DateFormatClass(Cast(Literal(d), TimestampType, UTC_OPT),
Literal.create(null, StringType), UTC_OPT), null)
checkEvaluation(DateFormatClass(Cast(Literal(d), TimestampType, UTC_OPT),
Literal("y"), UTC_OPT), "2015")
checkEvaluation(DateFormatClass(Literal(ts), Literal("y"), UTC_OPT), "2013")
checkEvaluation(DateFormatClass(Cast(Literal(d), TimestampType, UTC_OPT),
Literal("H"), UTC_OPT), "0")
checkEvaluation(DateFormatClass(Literal(ts), Literal("H"), UTC_OPT), "13")
checkEvaluation(DateFormatClass(Cast(Literal(d), TimestampType, PST_OPT),
Literal("y"), PST_OPT), "2015")
checkEvaluation(DateFormatClass(Literal(ts), Literal("y"), PST_OPT), "2013")
checkEvaluation(DateFormatClass(Cast(Literal(d), TimestampType, PST_OPT),
Literal("H"), PST_OPT), "0")
checkEvaluation(DateFormatClass(Literal(ts), Literal("H"), PST_OPT), "5")
checkEvaluation(DateFormatClass(Cast(Literal(d), TimestampType, JST_OPT),
Literal("y"), JST_OPT), "2015")
checkEvaluation(DateFormatClass(Literal(ts), Literal("y"), JST_OPT), "2013")
checkEvaluation(DateFormatClass(Cast(Literal(d), TimestampType, JST_OPT),
Literal("H"), JST_OPT), "0")
checkEvaluation(DateFormatClass(Literal(ts), Literal("H"), JST_OPT), "22")
// Test escaping of format
GenerateUnsafeProjection.generate(
DateFormatClass(Literal(ts), Literal("\\""), JST_OPT) :: Nil)
// SPARK-28072 The codegen path should work
checkEvaluation(
expression = DateFormatClass(
BoundReference(ordinal = 0, dataType = TimestampType, nullable = true),
BoundReference(ordinal = 1, dataType = StringType, nullable = true),
JST_OPT),
expected = "22",
inputRow = InternalRow(DateTimeUtils.fromJavaTimestamp(ts), UTF8String.fromString("H")))
}
}
}
test("Hour") {
assert(Hour(Literal.create(null, DateType), UTC_OPT).resolved === false)
assert(Hour(Literal(ts), UTC_OPT).resolved)
Seq(TimestampType, TimestampNTZType).foreach { dt =>
checkEvaluation(Hour(Cast(Literal(d), dt, UTC_OPT), UTC_OPT), 0)
checkEvaluation(Hour(Cast(Literal(date), dt, UTC_OPT), UTC_OPT), 13)
}
checkEvaluation(Hour(Literal(ts), UTC_OPT), 13)
val c = Calendar.getInstance()
for (zid <- outstandingZoneIds) {
val timeZoneId = Option(zid.getId)
c.setTimeZone(TimeZone.getTimeZone(zid))
(0 to 24 by 5).foreach { h =>
// validate timestamp with local time zone
c.set(2015, 18, 3, h, 29, 59)
checkEvaluation(
Hour(Literal(new Timestamp(c.getTimeInMillis)), timeZoneId),
c.get(Calendar.HOUR_OF_DAY))
// validate timestamp without time zone
val localDateTime = LocalDateTime.of(2015, 1, 3, h, 29, 59)
checkEvaluation(Hour(Literal(localDateTime), timeZoneId), h)
}
Seq(TimestampType, TimestampNTZType).foreach { dt =>
checkConsistencyBetweenInterpretedAndCodegen(
(child: Expression) => Hour(child, timeZoneId), dt)
}
}
}
test("Minute") {
assert(Minute(Literal.create(null, DateType), UTC_OPT).resolved === false)
assert(Minute(Literal(ts), UTC_OPT).resolved)
Seq(TimestampType, TimestampNTZType).foreach { dt =>
checkEvaluation(Minute(Cast(Literal(d), dt, UTC_OPT), UTC_OPT), 0)
checkEvaluation(Minute(Cast(Literal(date), dt, UTC_OPT), UTC_OPT), 10)
}
checkEvaluation(Minute(Literal(ts), UTC_OPT), 10)
val c = Calendar.getInstance()
for (zid <- outstandingZoneIds) {
val timeZoneId = Option(zid.getId)
c.setTimeZone(TimeZone.getTimeZone(zid))
(0 to 59 by 5).foreach { m =>
// validate timestamp with local time zone
c.set(2015, 18, 3, 3, m, 3)
checkEvaluation(
Minute(Literal(new Timestamp(c.getTimeInMillis)), timeZoneId),
c.get(Calendar.MINUTE))
// validate timestamp without time zone
val localDateTime = LocalDateTime.of(2015, 1, 3, 3, m, 3)
checkEvaluation(Minute(Literal(localDateTime), timeZoneId), m)
}
Seq(TimestampType, TimestampNTZType).foreach { dt =>
checkConsistencyBetweenInterpretedAndCodegen(
(child: Expression) => Minute(child, timeZoneId), dt)
}
}
}
test("date_add") {
checkEvaluation(
DateAdd(Literal(Date.valueOf("2016-02-28")), Literal(1.toByte)),
DateTimeUtils.fromJavaDate(Date.valueOf("2016-02-29")))
checkEvaluation(
DateAdd(Literal(Date.valueOf("2016-02-28")), Literal(1.toShort)),
DateTimeUtils.fromJavaDate(Date.valueOf("2016-02-29")))
checkEvaluation(
DateAdd(Literal(Date.valueOf("2016-02-28")), Literal(1)),
DateTimeUtils.fromJavaDate(Date.valueOf("2016-02-29")))
checkEvaluation(
DateAdd(Literal(Date.valueOf("2016-02-28")), Literal(-365)),
DateTimeUtils.fromJavaDate(Date.valueOf("2015-02-28")))
checkEvaluation(DateAdd(Literal.create(null, DateType), Literal(1)), null)
checkEvaluation(DateAdd(Literal(Date.valueOf("2016-02-28")), Literal.create(null, IntegerType)),
null)
checkEvaluation(DateAdd(Literal.create(null, DateType), Literal.create(null, IntegerType)),
null)
checkEvaluation(
DateAdd(Literal(Date.valueOf("2016-02-28")), positiveIntLit), 49627)
checkEvaluation(
DateAdd(Literal(Date.valueOf("2016-02-28")), negativeIntLit), -15910)
checkConsistencyBetweenInterpretedAndCodegen(DateAdd, DateType, ByteType)
checkConsistencyBetweenInterpretedAndCodegen(DateAdd, DateType, ShortType)
checkConsistencyBetweenInterpretedAndCodegen(DateAdd, DateType, IntegerType)
}
test("date add interval") {
val d = Date.valueOf("2016-02-28")
Seq("true", "false") foreach { flag =>
withSQLConf((SQLConf.ANSI_ENABLED.key, flag)) {
checkEvaluation(
DateAddInterval(Literal(d), Literal(new CalendarInterval(0, 1, 0))),
DateTimeUtils.fromJavaDate(Date.valueOf("2016-02-29")))
checkEvaluation(
DateAddInterval(Literal(d), Literal(new CalendarInterval(1, 1, 0))),
DateTimeUtils.fromJavaDate(Date.valueOf("2016-03-29")))
checkEvaluation(DateAddInterval(Literal(d), Literal.create(null, CalendarIntervalType)),
null)
checkEvaluation(DateAddInterval(Literal.create(null, DateType),
Literal(new CalendarInterval(1, 1, 0))),
null)
}
}
withSQLConf((SQLConf.ANSI_ENABLED.key, "true")) {
checkExceptionInExpression[IllegalArgumentException](
DateAddInterval(Literal(d), Literal(new CalendarInterval(1, 1, 25 * MICROS_PER_HOUR))),
"Cannot add hours, minutes or seconds, milliseconds, microseconds to a date")
}
withSQLConf((SQLConf.ANSI_ENABLED.key, "false")) {
checkEvaluation(
DateAddInterval(Literal(d), Literal(new CalendarInterval(1, 1, 25))),
DateTimeUtils.fromJavaDate(Date.valueOf("2016-03-29")))
checkEvaluation(
DateAddInterval(Literal(d), Literal(new CalendarInterval(1, 1, 25 * MICROS_PER_HOUR))),
DateTimeUtils.fromJavaDate(Date.valueOf("2016-03-30")))
}
}
test("date_sub") {
checkEvaluation(
DateSub(Literal(Date.valueOf("2015-01-01")), Literal(1.toByte)),
DateTimeUtils.fromJavaDate(Date.valueOf("2014-12-31")))
checkEvaluation(
DateSub(Literal(Date.valueOf("2015-01-01")), Literal(1.toShort)),
DateTimeUtils.fromJavaDate(Date.valueOf("2014-12-31")))
checkEvaluation(
DateSub(Literal(Date.valueOf("2015-01-01")), Literal(1)),
DateTimeUtils.fromJavaDate(Date.valueOf("2014-12-31")))
checkEvaluation(
DateSub(Literal(Date.valueOf("2015-01-01")), Literal(-1)),
DateTimeUtils.fromJavaDate(Date.valueOf("2015-01-02")))
checkEvaluation(DateSub(Literal.create(null, DateType), Literal(1)), null)
checkEvaluation(DateSub(Literal(Date.valueOf("2016-02-28")), Literal.create(null, IntegerType)),
null)
checkEvaluation(DateSub(Literal.create(null, DateType), Literal.create(null, IntegerType)),
null)
checkEvaluation(
DateSub(Literal(Date.valueOf("2016-02-28")), positiveIntLit), -15909)
checkEvaluation(
DateSub(Literal(Date.valueOf("2016-02-28")), negativeIntLit), 49628)
checkConsistencyBetweenInterpretedAndCodegen(DateSub, DateType, ByteType)
checkConsistencyBetweenInterpretedAndCodegen(DateSub, DateType, ShortType)
checkConsistencyBetweenInterpretedAndCodegen(DateSub, DateType, IntegerType)
}
test("time_add") {
val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS", Locale.US)
Seq(TimestampType, TimestampNTZType).foreach { dt =>
for (zid <- outstandingZoneIds) {
val timeZoneId = Option(zid.getId)
sdf.setTimeZone(TimeZone.getTimeZone(zid))
checkEvaluation(
TimeAdd(
timestampLiteral("2016-01-29 10:00:00.000", sdf, dt),
Literal(new CalendarInterval(1, 2, 123000L)),
timeZoneId),
timestampAnswer("2016-03-02 10:00:00.123", sdf, dt))
checkEvaluation(
TimeAdd(
Literal.create(null, dt),
Literal(new CalendarInterval(1, 2, 123000L)),
timeZoneId),
null)
checkEvaluation(
TimeAdd(
timestampLiteral("2016-01-29 10:00:00.000", sdf, dt),
Literal.create(null, CalendarIntervalType),
timeZoneId),
null)
checkEvaluation(
TimeAdd(
Literal.create(null, dt),
Literal.create(null, CalendarIntervalType),
timeZoneId),
null)
checkConsistencyBetweenInterpretedAndCodegen(
(start: Expression, interval: Expression) => TimeAdd(start, interval, timeZoneId),
dt, CalendarIntervalType)
}
}
}
test("time_sub") {
val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS", Locale.US)
for (zid <- outstandingZoneIds) {
val timeZoneId = Option(zid.getId)
sdf.setTimeZone(TimeZone.getTimeZone(zid))
checkEvaluation(
TimeAdd(
Literal(new Timestamp(sdf.parse("2016-03-31 10:00:00.000").getTime)),
UnaryMinus(Literal(new CalendarInterval(1, 0, 0))),
timeZoneId),
DateTimeUtils.fromJavaTimestamp(
new Timestamp(sdf.parse("2016-02-29 10:00:00.000").getTime)))
checkEvaluation(
TimeAdd(
Literal(new Timestamp(sdf.parse("2016-03-31 10:00:00.000").getTime)),
UnaryMinus(Literal(new CalendarInterval(1, 1, 0))),
timeZoneId),
DateTimeUtils.fromJavaTimestamp(
new Timestamp(sdf.parse("2016-02-28 10:00:00.000").getTime)))
checkEvaluation(
TimeAdd(
Literal(new Timestamp(sdf.parse("2016-03-30 00:00:01.000").getTime)),
UnaryMinus(Literal(new CalendarInterval(1, 0, 2000000.toLong))),
timeZoneId),
DateTimeUtils.fromJavaTimestamp(
new Timestamp(sdf.parse("2016-02-28 23:59:59.000").getTime)))
checkEvaluation(
TimeAdd(
Literal(new Timestamp(sdf.parse("2016-03-30 00:00:01.000").getTime)),
UnaryMinus(Literal(new CalendarInterval(1, 1, 2000000.toLong))),
timeZoneId),
DateTimeUtils.fromJavaTimestamp(
new Timestamp(sdf.parse("2016-02-27 23:59:59.000").getTime)))
checkEvaluation(
TimeAdd(
Literal.create(null, TimestampType),
UnaryMinus(Literal(new CalendarInterval(1, 2, 123000L))),
timeZoneId),
null)
checkEvaluation(
TimeAdd(
Literal(new Timestamp(sdf.parse("2016-01-29 10:00:00.000").getTime)),
UnaryMinus(Literal.create(null, CalendarIntervalType)),
timeZoneId),
null)
checkEvaluation(
TimeAdd(
Literal.create(null, TimestampType),
UnaryMinus(Literal.create(null, CalendarIntervalType)),
timeZoneId),
null)
checkConsistencyBetweenInterpretedAndCodegen((start: Expression, interval: Expression) =>
TimeAdd(start, UnaryMinus(interval), timeZoneId),
TimestampType, CalendarIntervalType)
}
}
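  // Exercises month addition both as an integer month count (AddMonths) and as a
  // year-month interval (DateAddYMInterval).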
private def testAddMonths(dataType: DataType): Unit = {
def addMonths(date: Literal, months: Any): AddMonthsBase = dataType match {
case IntegerType => AddMonths(date, Literal.create(months, dataType))
case _: YearMonthIntervalType =>
val period = if (months == null) null else Period.ofMonths(months.asInstanceOf[Int])
DateAddYMInterval(date, Literal.create(period, dataType))
}
checkEvaluation(addMonths(Literal(Date.valueOf("2015-01-30")), 1),
DateTimeUtils.fromJavaDate(Date.valueOf("2015-02-28")))
checkEvaluation(addMonths(Literal(Date.valueOf("2016-03-30")), -1),
DateTimeUtils.fromJavaDate(Date.valueOf("2016-02-29")))
checkEvaluation(
addMonths(Literal(Date.valueOf("2015-01-30")), null),
null)
checkEvaluation(addMonths(Literal.create(null, DateType), 1), null)
checkEvaluation(addMonths(Literal.create(null, DateType), null),
null)
// Valid range of DateType is [0001-01-01, 9999-12-31]
val maxMonthInterval = 10000 * 12
checkEvaluation(
addMonths(Literal(LocalDate.parse("0001-01-01")), maxMonthInterval),
LocalDate.of(10001, 1, 1).toEpochDay.toInt)
checkEvaluation(
addMonths(Literal(Date.valueOf("9999-12-31")), -1 * maxMonthInterval), -719529)
}
test("add_months") {
testAddMonths(IntegerType)
// Test evaluation results between Interpreted mode and Codegen mode
forAll (
LiteralGenerator.randomGen(DateType),
LiteralGenerator.monthIntervalLiterGen
) { (l1: Literal, l2: Literal) =>
cmpInterpretWithCodegen(EmptyRow, AddMonths(l1, l2))
}
}
test("SPARK-34721: add a year-month interval to a date") {
testAddMonths(YearMonthIntervalType())
// Test evaluation results between Interpreted mode and Codegen mode
forAll (
LiteralGenerator.randomGen(DateType),
LiteralGenerator.yearMonthIntervalLiteralGen
) { (l1: Literal, l2: Literal) =>
cmpInterpretWithCodegen(EmptyRow, DateAddYMInterval(l1, l2))
}
}
test("months_between") {
val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", Locale.US)
for (zid <- outstandingZoneIds) {
val timeZoneId = Option(zid.getId)
sdf.setTimeZone(TimeZone.getTimeZone(zid))
checkEvaluation(
MonthsBetween(
Literal(new Timestamp(sdf.parse("1997-02-28 10:30:00").getTime)),
Literal(new Timestamp(sdf.parse("1996-10-30 00:00:00").getTime)),
Literal.TrueLiteral,
timeZoneId = timeZoneId), 3.94959677)
checkEvaluation(
MonthsBetween(
Literal(new Timestamp(sdf.parse("1997-02-28 10:30:00").getTime)),
Literal(new Timestamp(sdf.parse("1996-10-30 00:00:00").getTime)),
Literal.FalseLiteral,
timeZoneId = timeZoneId), 3.9495967741935485)
      Seq(Literal.FalseLiteral, Literal.TrueLiteral).foreach { roundOff =>
checkEvaluation(
MonthsBetween(
Literal(new Timestamp(sdf.parse("2015-01-30 11:52:00").getTime)),
Literal(new Timestamp(sdf.parse("2015-01-30 11:50:00").getTime)),
roundOff,
timeZoneId = timeZoneId), 0.0)
checkEvaluation(
MonthsBetween(
Literal(new Timestamp(sdf.parse("2015-01-31 00:00:00").getTime)),
Literal(new Timestamp(sdf.parse("2015-03-31 22:00:00").getTime)),
roundOff,
timeZoneId = timeZoneId), -2.0)
checkEvaluation(
MonthsBetween(
Literal(new Timestamp(sdf.parse("2015-03-31 22:00:00").getTime)),
Literal(new Timestamp(sdf.parse("2015-02-28 00:00:00").getTime)),
roundOff,
timeZoneId = timeZoneId), 1.0)
}
val t = Literal(Timestamp.valueOf("2015-03-31 22:00:00"))
val tnull = Literal.create(null, TimestampType)
checkEvaluation(MonthsBetween(t, tnull, Literal.TrueLiteral, timeZoneId = timeZoneId), null)
checkEvaluation(MonthsBetween(tnull, t, Literal.TrueLiteral, timeZoneId = timeZoneId), null)
checkEvaluation(
MonthsBetween(tnull, tnull, Literal.TrueLiteral, timeZoneId = timeZoneId), null)
checkEvaluation(
MonthsBetween(t, t, Literal.create(null, BooleanType), timeZoneId = timeZoneId), null)
checkConsistencyBetweenInterpretedAndCodegen(
(time1: Expression, time2: Expression, roundOff: Expression) =>
MonthsBetween(time1, time2, roundOff, timeZoneId = timeZoneId),
TimestampType, TimestampType, BooleanType)
}
}
test("last_day") {
checkEvaluation(LastDay(Literal(Date.valueOf("2015-02-28"))), Date.valueOf("2015-02-28"))
checkEvaluation(LastDay(Literal(Date.valueOf("2015-03-27"))), Date.valueOf("2015-03-31"))
checkEvaluation(LastDay(Literal(Date.valueOf("2015-04-26"))), Date.valueOf("2015-04-30"))
checkEvaluation(LastDay(Literal(Date.valueOf("2015-05-25"))), Date.valueOf("2015-05-31"))
checkEvaluation(LastDay(Literal(Date.valueOf("2015-06-24"))), Date.valueOf("2015-06-30"))
checkEvaluation(LastDay(Literal(Date.valueOf("2015-07-23"))), Date.valueOf("2015-07-31"))
checkEvaluation(LastDay(Literal(Date.valueOf("2015-08-01"))), Date.valueOf("2015-08-31"))
checkEvaluation(LastDay(Literal(Date.valueOf("2015-09-02"))), Date.valueOf("2015-09-30"))
checkEvaluation(LastDay(Literal(Date.valueOf("2015-10-03"))), Date.valueOf("2015-10-31"))
checkEvaluation(LastDay(Literal(Date.valueOf("2015-11-04"))), Date.valueOf("2015-11-30"))
checkEvaluation(LastDay(Literal(Date.valueOf("2015-12-05"))), Date.valueOf("2015-12-31"))
checkEvaluation(LastDay(Literal(Date.valueOf("2016-01-06"))), Date.valueOf("2016-01-31"))
checkEvaluation(LastDay(Literal(Date.valueOf("2016-02-07"))), Date.valueOf("2016-02-29"))
checkEvaluation(LastDay(Literal.create(null, DateType)), null)
checkConsistencyBetweenInterpretedAndCodegen(LastDay, DateType)
}
test("next_day") {
def testNextDay(input: String, dayOfWeek: String, output: String): Unit = {
checkEvaluation(
NextDay(Literal(Date.valueOf(input)), NonFoldableLiteral(dayOfWeek)),
DateTimeUtils.fromJavaDate(Date.valueOf(output)))
checkEvaluation(
NextDay(Literal(Date.valueOf(input)), Literal(dayOfWeek)),
DateTimeUtils.fromJavaDate(Date.valueOf(output)))
}
testNextDay("2015-07-23", "Mon", "2015-07-27")
testNextDay("2015-07-23", "mo", "2015-07-27")
testNextDay("2015-07-23", "Tue", "2015-07-28")
testNextDay("2015-07-23", "tu", "2015-07-28")
testNextDay("2015-07-23", "we", "2015-07-29")
testNextDay("2015-07-23", "wed", "2015-07-29")
testNextDay("2015-07-23", "Thu", "2015-07-30")
testNextDay("2015-07-23", "TH", "2015-07-30")
testNextDay("2015-07-23", "Fri", "2015-07-24")
testNextDay("2015-07-23", "fr", "2015-07-24")
Seq(true, false).foreach { ansiEnabled =>
withSQLConf(SQLConf.ANSI_ENABLED.key -> ansiEnabled.toString) {
var expr: Expression = NextDay(Literal(Date.valueOf("2015-07-23")), Literal("xx"))
if (ansiEnabled) {
val errMsg = "Illegal input for day of week: xx"
checkExceptionInExpression[Exception](expr, errMsg)
} else {
checkEvaluation(expr, null)
}
expr = NextDay(Literal.create(null, DateType), Literal("xx"))
checkEvaluation(expr, null)
expr = NextDay(Literal(Date.valueOf("2015-07-23")), Literal.create(null, StringType))
checkEvaluation(expr, null)
// Test escaping of dayOfWeek
expr = NextDay(Literal(Date.valueOf("2015-07-23")), Literal("\\"quote"))
GenerateUnsafeProjection.generate(expr :: Nil)
if (ansiEnabled) {
val errMsg = """Illegal input for day of week: "quote"""
checkExceptionInExpression[Exception](expr, errMsg)
} else {
checkEvaluation(expr, null)
}
}
}
}
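  // Checks TruncDate with both foldable and non-foldable format arguments.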
private def testTruncDate(input: Date, fmt: String, expected: Date): Unit = {
checkEvaluation(TruncDate(Literal.create(input, DateType), Literal.create(fmt, StringType)),
expected)
checkEvaluation(
TruncDate(Literal.create(input, DateType), NonFoldableLiteral.create(fmt, StringType)),
expected)
}
test("TruncDate") {
val date = Date.valueOf("2015-07-22")
Seq("yyyy", "YYYY", "year", "YEAR", "yy", "YY").foreach { fmt =>
testTruncDate(date, fmt, Date.valueOf("2015-01-01"))
}
Seq("month", "MONTH", "mon", "MON", "mm", "MM").foreach { fmt =>
testTruncDate(date, fmt, Date.valueOf("2015-07-01"))
}
testTruncDate(date, "DD", null)
testTruncDate(date, "SECOND", null)
testTruncDate(date, "HOUR", null)
testTruncDate(null, "MON", null)
// Test escaping of format
GenerateUnsafeProjection.generate(TruncDate(Literal(0, DateType), Literal("\\"quote")) :: Nil)
}
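  // Checks TruncTimestamp with both foldable and non-foldable format arguments.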
private def testTruncTimestamp(input: Timestamp, fmt: String, expected: Timestamp): Unit = {
checkEvaluation(
TruncTimestamp(Literal.create(fmt, StringType), Literal.create(input, TimestampType)),
expected)
checkEvaluation(
TruncTimestamp(
NonFoldableLiteral.create(fmt, StringType), Literal.create(input, TimestampType)),
expected)
}
test("TruncTimestamp") {
withDefaultTimeZone(UTC) {
val inputDate = Timestamp.valueOf("2015-07-22 05:30:06")
Seq("yyyy", "YYYY", "year", "YEAR", "yy", "YY").foreach { fmt =>
testTruncTimestamp(
inputDate, fmt,
Timestamp.valueOf("2015-01-01 00:00:00"))
}
Seq("month", "MONTH", "mon", "MON", "mm", "MM").foreach { fmt =>
testTruncTimestamp(
inputDate, fmt,
Timestamp.valueOf("2015-07-01 00:00:00"))
}
Seq("DAY", "day", "DD", "dd").foreach { fmt =>
testTruncTimestamp(
inputDate, fmt,
Timestamp.valueOf("2015-07-22 00:00:00"))
}
Seq("HOUR", "hour").foreach { fmt =>
testTruncTimestamp(
inputDate, fmt,
Timestamp.valueOf("2015-07-22 05:00:00"))
}
Seq("MINUTE", "minute").foreach { fmt =>
testTruncTimestamp(
inputDate, fmt,
Timestamp.valueOf("2015-07-22 05:30:00"))
}
Seq("SECOND", "second").foreach { fmt =>
testTruncTimestamp(
inputDate, fmt,
Timestamp.valueOf("2015-07-22 05:30:06"))
}
Seq("WEEK", "week").foreach { fmt =>
testTruncTimestamp(
inputDate, fmt,
Timestamp.valueOf("2015-07-20 00:00:00"))
}
Seq("QUARTER", "quarter").foreach { fmt =>
testTruncTimestamp(
inputDate, fmt,
Timestamp.valueOf("2015-07-01 00:00:00"))
}
testTruncTimestamp(null, "MON", null)
}
}
test("unsupported fmt fields for trunc/date_trunc results null") {
Seq("INVALID", "decade", "century", "millennium", "whatever", null).foreach { field =>
testTruncDate(Date.valueOf("2000-03-08"), field, null)
testTruncDate(null, field, null)
testTruncTimestamp(Timestamp.valueOf("2000-03-08 11:12:13"), field, null)
testTruncTimestamp(null, field, null)
}
}
test("from_unixtime") {
Seq("legacy", "corrected").foreach { legacyParserPolicy =>
withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> legacyParserPolicy) {
val fmt1 = "yyyy-MM-dd HH:mm:ss"
val sdf1 = new SimpleDateFormat(fmt1, Locale.US)
val fmt2 = "yyyy-MM-dd HH:mm:ss.SSS"
val sdf2 = new SimpleDateFormat(fmt2, Locale.US)
for (zid <- outstandingZoneIds) {
val timeZoneId = Option(zid.getId)
val tz = TimeZone.getTimeZone(zid)
sdf1.setTimeZone(tz)
sdf2.setTimeZone(tz)
checkEvaluation(
FromUnixTime(Literal(0L), Literal(fmt1), timeZoneId),
sdf1.format(new Timestamp(0)))
checkEvaluation(FromUnixTime(
Literal(1000L), Literal(fmt1), timeZoneId),
sdf1.format(new Timestamp(1000000)))
checkEvaluation(
FromUnixTime(Literal(-1000L), Literal(fmt2), timeZoneId),
sdf2.format(new Timestamp(-1000000)))
checkEvaluation(
FromUnixTime(
Literal.create(null, LongType),
Literal.create(null, StringType), timeZoneId),
null)
checkEvaluation(
FromUnixTime(Literal.create(null, LongType), Literal(fmt1), timeZoneId),
null)
checkEvaluation(
FromUnixTime(Literal(1000L), Literal.create(null, StringType), timeZoneId),
null)
// SPARK-28072 The codegen path for non-literal input should also work
checkEvaluation(
expression = FromUnixTime(
BoundReference(ordinal = 0, dataType = LongType, nullable = true),
BoundReference(ordinal = 1, dataType = StringType, nullable = true),
timeZoneId),
expected = UTF8String.fromString(sdf1.format(new Timestamp(0))),
inputRow = InternalRow(0L, UTF8String.fromString(fmt1)))
}
}
}
// Test escaping of format
GenerateUnsafeProjection.generate(FromUnixTime(Literal(0L), Literal("\\""), UTC_OPT) :: Nil)
}
test("unix_timestamp") {
Seq("legacy", "corrected").foreach { legacyParserPolicy =>
withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> legacyParserPolicy) {
val sdf1 = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", Locale.US)
val fmt2 = "yyyy-MM-dd HH:mm:ss.SSS"
val sdf2 = new SimpleDateFormat(fmt2, Locale.US)
val fmt3 = "yy-MM-dd"
val sdf3 = new SimpleDateFormat(fmt3, Locale.US)
sdf3.setTimeZone(TimeZoneUTC)
withDefaultTimeZone(UTC) {
for (zid <- outstandingZoneIds) {
val timeZoneId = Option(zid.getId)
val tz = TimeZone.getTimeZone(zid)
sdf1.setTimeZone(tz)
sdf2.setTimeZone(tz)
val date1 = Date.valueOf("2015-07-24")
checkEvaluation(UnixTimestamp(
Literal(sdf1.format(new Timestamp(0))),
Literal("yyyy-MM-dd HH:mm:ss"), timeZoneId), 0L)
checkEvaluation(UnixTimestamp(
Literal(sdf1.format(new Timestamp(1000000))),
Literal("yyyy-MM-dd HH:mm:ss"), timeZoneId),
1000L)
checkEvaluation(
UnixTimestamp(
Literal(new Timestamp(1000000)), Literal("yyyy-MM-dd HH:mm:ss"), timeZoneId),
1000L)
checkEvaluation(
UnixTimestamp(
Literal(DateTimeUtils.microsToLocalDateTime(DateTimeUtils.millisToMicros(1000000))),
Literal("yyyy-MM-dd HH:mm:ss"), timeZoneId),
1000L)
checkEvaluation(
UnixTimestamp(Literal(date1), Literal("yyyy-MM-dd HH:mm:ss"), timeZoneId),
MICROSECONDS.toSeconds(
DateTimeUtils.daysToMicros(DateTimeUtils.fromJavaDate(date1), tz.toZoneId)))
checkEvaluation(
UnixTimestamp(Literal(sdf2.format(new Timestamp(-1000000))),
Literal(fmt2), timeZoneId),
-1000L)
checkEvaluation(UnixTimestamp(
Literal(sdf3.format(Date.valueOf("2015-07-24"))), Literal(fmt3), timeZoneId),
MICROSECONDS.toSeconds(DateTimeUtils.daysToMicros(
DateTimeUtils.fromJavaDate(Date.valueOf("2015-07-24")), tz.toZoneId)))
val t1 = UnixTimestamp(
CurrentTimestamp(), Literal("yyyy-MM-dd HH:mm:ss")).eval().asInstanceOf[Long]
val t2 = UnixTimestamp(
CurrentTimestamp(), Literal("yyyy-MM-dd HH:mm:ss")).eval().asInstanceOf[Long]
assert(t2 - t1 <= 1)
checkEvaluation(
UnixTimestamp(
Literal.create(null, DateType), Literal.create(null, StringType), timeZoneId),
null)
checkEvaluation(
UnixTimestamp(
Literal.create(null, DateType),
Literal("yyyy-MM-dd HH:mm:ss"), timeZoneId),
null)
checkEvaluation(
UnixTimestamp(Literal(date1), Literal.create(null, StringType), timeZoneId),
MICROSECONDS.toSeconds(
DateTimeUtils.daysToMicros(DateTimeUtils.fromJavaDate(date1), tz.toZoneId)))
}
}
}
}
// Test escaping of format
GenerateUnsafeProjection.generate(
UnixTimestamp(Literal("2015-07-24"), Literal("\\""), UTC_OPT) :: Nil)
}
test("to_unix_timestamp") {
Seq("legacy", "corrected").foreach { legacyParserPolicy =>
withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> legacyParserPolicy) {
val fmt1 = "yyyy-MM-dd HH:mm:ss"
val sdf1 = new SimpleDateFormat(fmt1, Locale.US)
val fmt2 = "yyyy-MM-dd HH:mm:ss.SSS"
val sdf2 = new SimpleDateFormat(fmt2, Locale.US)
val fmt3 = "yy-MM-dd"
val sdf3 = new SimpleDateFormat(fmt3, Locale.US)
sdf3.setTimeZone(TimeZoneUTC)
withDefaultTimeZone(UTC) {
for (zid <- outstandingZoneIds) {
val timeZoneId = Option(zid.getId)
val tz = TimeZone.getTimeZone(zid)
sdf1.setTimeZone(tz)
sdf2.setTimeZone(tz)
val date1 = Date.valueOf("2015-07-24")
checkEvaluation(ToUnixTimestamp(
Literal(sdf1.format(new Timestamp(0))), Literal(fmt1), timeZoneId), 0L)
checkEvaluation(ToUnixTimestamp(
Literal(sdf1.format(new Timestamp(1000000))), Literal(fmt1), timeZoneId),
1000L)
checkEvaluation(ToUnixTimestamp(
Literal(new Timestamp(1000000)), Literal(fmt1)),
1000L)
checkEvaluation(ToUnixTimestamp(
Literal(DateTimeUtils.microsToLocalDateTime(DateTimeUtils.millisToMicros(1000000))),
Literal(fmt1)),
1000L)
checkEvaluation(
ToUnixTimestamp(Literal(date1), Literal(fmt1), timeZoneId),
MICROSECONDS.toSeconds(
DateTimeUtils.daysToMicros(DateTimeUtils.fromJavaDate(date1), zid)))
checkEvaluation(
ToUnixTimestamp(
Literal(sdf2.format(new Timestamp(-1000000))),
Literal(fmt2), timeZoneId),
-1000L)
checkEvaluation(ToUnixTimestamp(
Literal(sdf3.format(Date.valueOf("2015-07-24"))), Literal(fmt3), timeZoneId),
MICROSECONDS.toSeconds(DateTimeUtils.daysToMicros(
DateTimeUtils.fromJavaDate(Date.valueOf("2015-07-24")), zid)))
val t1 = ToUnixTimestamp(
CurrentTimestamp(), Literal(fmt1)).eval().asInstanceOf[Long]
val t2 = ToUnixTimestamp(
CurrentTimestamp(), Literal(fmt1)).eval().asInstanceOf[Long]
assert(t2 - t1 <= 1)
checkEvaluation(ToUnixTimestamp(
Literal.create(null, DateType), Literal.create(null, StringType), timeZoneId), null)
checkEvaluation(
ToUnixTimestamp(
Literal.create(null, DateType), Literal(fmt1), timeZoneId),
null)
checkEvaluation(ToUnixTimestamp(
Literal(date1), Literal.create(null, StringType), timeZoneId),
MICROSECONDS.toSeconds(
DateTimeUtils.daysToMicros(DateTimeUtils.fromJavaDate(date1), zid)))
// SPARK-28072 The codegen path for non-literal input should also work
checkEvaluation(
expression = ToUnixTimestamp(
BoundReference(ordinal = 0, dataType = StringType, nullable = true),
BoundReference(ordinal = 1, dataType = StringType, nullable = true),
timeZoneId),
expected = 0L,
inputRow = InternalRow(
UTF8String.fromString(sdf1.format(new Timestamp(0))), UTF8String.fromString(fmt1)))
}
}
}
}
// Test escaping of format
GenerateUnsafeProjection.generate(
ToUnixTimestamp(Literal("2015-07-24"), Literal("\\""), UTC_OPT) :: Nil)
}
test("datediff") {
checkEvaluation(
DateDiff(Literal(Date.valueOf("2015-07-24")), Literal(Date.valueOf("2015-07-21"))), 3)
checkEvaluation(
DateDiff(Literal(Date.valueOf("2015-07-21")), Literal(Date.valueOf("2015-07-24"))), -3)
checkEvaluation(DateDiff(Literal.create(null, DateType), Literal(Date.valueOf("2015-07-24"))),
null)
checkEvaluation(DateDiff(Literal(Date.valueOf("2015-07-24")), Literal.create(null, DateType)),
null)
checkEvaluation(
DateDiff(Literal.create(null, DateType), Literal.create(null, DateType)),
null)
}
test("to_utc_timestamp") {
def test(t: String, tz: String, expected: String): Unit = {
checkEvaluation(
ToUTCTimestamp(
Literal.create(if (t != null) Timestamp.valueOf(t) else null, TimestampType),
Literal.create(tz, StringType)),
if (expected != null) Timestamp.valueOf(expected) else null)
checkEvaluation(
ToUTCTimestamp(
Literal.create(if (t != null) Timestamp.valueOf(t) else null, TimestampType),
NonFoldableLiteral.create(tz, StringType)),
if (expected != null) Timestamp.valueOf(expected) else null)
}
test("2015-07-24 00:00:00", LA.getId, "2015-07-24 07:00:00")
test("2015-01-24 00:00:00", LA.getId, "2015-01-24 08:00:00")
test(null, "UTC", null)
test("2015-07-24 00:00:00", null, null)
test(null, null, null)
}
test("to_utc_timestamp - invalid time zone id") {
Seq("Invalid time zone", "\\"quote", "UTC*42").foreach { invalidTz =>
val msg = intercept[java.time.DateTimeException] {
GenerateUnsafeProjection.generate(
ToUTCTimestamp(
Literal(Timestamp.valueOf("2015-07-24 00:00:00")), Literal(invalidTz)) :: Nil)
}.getMessage
assert(msg.contains(invalidTz))
}
}
test("from_utc_timestamp") {
def test(t: String, tz: String, expected: String): Unit = {
checkEvaluation(
FromUTCTimestamp(
Literal.create(if (t != null) Timestamp.valueOf(t) else null, TimestampType),
Literal.create(tz, StringType)),
if (expected != null) Timestamp.valueOf(expected) else null)
checkEvaluation(
FromUTCTimestamp(
Literal.create(if (t != null) Timestamp.valueOf(t) else null, TimestampType),
NonFoldableLiteral.create(tz, StringType)),
if (expected != null) Timestamp.valueOf(expected) else null)
}
test("2015-07-24 00:00:00", LA.getId, "2015-07-23 17:00:00")
test("2015-01-24 00:00:00", LA.getId, "2015-01-23 16:00:00")
test(null, "UTC", null)
test("2015-07-24 00:00:00", null, null)
test(null, null, null)
}
test("from_utc_timestamp - invalid time zone id") {
Seq("Invalid time zone", "\\"quote", "UTC*42").foreach { invalidTz =>
val msg = intercept[java.time.DateTimeException] {
GenerateUnsafeProjection.generate(FromUTCTimestamp(Literal(0), Literal(invalidTz)) :: Nil)
}.getMessage
assert(msg.contains(invalidTz))
}
}
test("creating values of DateType via make_date") {
Seq(true, false).foreach({ ansi =>
withSQLConf(SQLConf.ANSI_ENABLED.key -> ansi.toString) {
checkEvaluation(MakeDate(Literal(2013), Literal(7), Literal(15)), Date.valueOf("2013-7-15"))
checkEvaluation(MakeDate(Literal.create(null, IntegerType), Literal(7), Literal(15)), null)
checkEvaluation(MakeDate(Literal(2019), Literal.create(null, IntegerType), Literal(19)),
null)
checkEvaluation(MakeDate(Literal(2019), Literal(7), Literal.create(null, IntegerType)),
null)
}
})
// ansi test
withSQLConf(SQLConf.ANSI_ENABLED.key -> "true") {
checkExceptionInExpression[DateTimeException](MakeDate(Literal(Int.MaxValue), Literal(13),
Literal(19)), EmptyRow, "Invalid value for Year")
checkExceptionInExpression[DateTimeException](MakeDate(Literal(2019),
Literal(13), Literal(19)), EmptyRow, "Invalid value for Month")
checkExceptionInExpression[DateTimeException](MakeDate(Literal(2019), Literal(7),
Literal(32)), EmptyRow, "Invalid value for Day")
}
// non-ansi test
withSQLConf(SQLConf.ANSI_ENABLED.key -> "false") {
checkEvaluation(MakeDate(Literal(Int.MaxValue), Literal(13), Literal(19)), null)
checkEvaluation(MakeDate(Literal(2019), Literal(13), Literal(19)), null)
checkEvaluation(MakeDate(Literal(2019), Literal(7), Literal(32)), null)
}
}
test("creating values of Timestamp/TimestampNTZ via make_timestamp") {
Seq(TimestampTypes.TIMESTAMP_NTZ, TimestampTypes.TIMESTAMP_LTZ).foreach { tsType =>
def expectedAnswer(s: String): Any = tsType match {
case TimestampTypes.TIMESTAMP_NTZ => LocalDateTime.parse(s.replace(" ", "T"))
case TimestampTypes.TIMESTAMP_LTZ => Timestamp.valueOf(s)
}
withSQLConf(SQLConf.TIMESTAMP_TYPE.key -> tsType.toString) {
val expected = expectedAnswer("2013-07-15 08:15:23.5")
Seq(true, false).foreach { ansi =>
withSQLConf(SQLConf.ANSI_ENABLED.key -> ansi.toString) {
var makeTimestampExpr = MakeTimestamp(
Literal(2013), Literal(7), Literal(15), Literal(8), Literal(15),
Literal(Decimal(BigDecimal(23.5), 16, 6)),
Some(Literal(ZoneId.systemDefault().getId)))
checkEvaluation(makeTimestampExpr, expected)
checkEvaluation(makeTimestampExpr.copy(year = Literal.create(null, IntegerType)), null)
checkEvaluation(makeTimestampExpr.copy(month = Literal.create(null, IntegerType)), null)
checkEvaluation(makeTimestampExpr.copy(day = Literal.create(null, IntegerType)), null)
checkEvaluation(makeTimestampExpr.copy(hour = Literal.create(null, IntegerType)), null)
checkEvaluation(makeTimestampExpr.copy(min = Literal.create(null, IntegerType)), null)
checkEvaluation(makeTimestampExpr.copy(sec = Literal.create(null, DecimalType(16, 6))),
null)
checkEvaluation(makeTimestampExpr.copy(timezone = None), expected)
Seq(
(makeTimestampExpr.copy(year = Literal(Int.MaxValue)), "Invalid value for Year"),
(makeTimestampExpr.copy(month = Literal(13)), "Invalid value for Month"),
(makeTimestampExpr.copy(day = Literal(32)), "Invalid value for Day"),
(makeTimestampExpr.copy(hour = Literal(25)), "Invalid value for Hour"),
(makeTimestampExpr.copy(min = Literal(65)), "Invalid value for Min"),
(makeTimestampExpr.copy(sec = Literal(Decimal(
BigDecimal(70.0), 16, 6))), "Invalid value for Second")
).foreach { entry =>
if (ansi) {
checkExceptionInExpression[DateTimeException](entry._1, EmptyRow, entry._2)
} else {
checkEvaluation(entry._1, null)
}
}
makeTimestampExpr = MakeTimestamp(Literal(2019), Literal(6), Literal(30),
Literal(23), Literal(59), Literal(Decimal(BigDecimal(60.0), 16, 6)))
if (ansi) {
checkExceptionInExpression[DateTimeException](makeTimestampExpr.copy(sec = Literal(
Decimal(BigDecimal(60.5), 16, 6))), EmptyRow, "The fraction of sec must be zero")
} else {
checkEvaluation(makeTimestampExpr, expectedAnswer("2019-07-01 00:00:00"))
}
makeTimestampExpr = MakeTimestamp(Literal(2019), Literal(8), Literal(12), Literal(0),
Literal(0), Literal(Decimal(BigDecimal(58.000001), 16, 6)))
checkEvaluation(makeTimestampExpr, expectedAnswer("2019-08-12 00:00:58.000001"))
}
}
// non-ansi test
withSQLConf(SQLConf.ANSI_ENABLED.key -> "false") {
val makeTimestampExpr = MakeTimestamp(Literal(2019), Literal(6), Literal(30),
Literal(23), Literal(59), Literal(Decimal(BigDecimal(60.0), 16, 6)))
checkEvaluation(makeTimestampExpr.copy(sec = Literal(Decimal(BigDecimal(60.5), 16, 6))),
null)
}
Seq(true, false).foreach { ansi =>
withSQLConf(SQLConf.ANSI_ENABLED.key -> ansi.toString) {
val makeTimestampExpr = MakeTimestamp(Literal(2019), Literal(8), Literal(12),
Literal(0), Literal(0), Literal(Decimal(BigDecimal(58.000001), 16, 6)))
checkEvaluation(makeTimestampExpr, expectedAnswer("2019-08-12 00:00:58.000001"))
}
}
}
}
}
test("ISO 8601 week-numbering year") {
checkEvaluation(YearOfWeek(MakeDate(Literal(2006), Literal(1), Literal(1))), 2005)
checkEvaluation(YearOfWeek(MakeDate(Literal(2006), Literal(1), Literal(2))), 2006)
}
test("extract the seconds part with fraction from timestamps") {
outstandingTimezonesIds.foreach { timezone =>
val timestamp = MakeTimestamp(Literal(2019), Literal(8), Literal(10),
Literal(0), Literal(0), Literal(Decimal(10.123456, 16, 6)),
Some(Literal(timezone)), Some(timezone))
def secFrac(ts: MakeTimestamp): SecondWithFraction = SecondWithFraction(ts, Some(timezone))
checkEvaluation(secFrac(timestamp), Decimal(10.123456, 16, 6))
checkEvaluation(
secFrac(timestamp.copy(sec = Literal(Decimal(59000001, 16, 6)))),
Decimal(59000001, 16, 6))
checkEvaluation(
secFrac(timestamp.copy(sec = Literal(Decimal(1, 16, 6)))),
Decimal(0.000001, 16, 6))
checkEvaluation(
secFrac(timestamp.copy(year = Literal(10))),
Decimal(10.123456, 16, 6))
}
}
test("SPARK-34903: timestamps difference") {
val end = Instant.parse("2019-10-04T11:04:01.123456Z")
outstandingTimezonesIds.foreach { tz =>
def sub(left: Instant, right: Instant): Expression = {
SubtractTimestamps(
Literal(left),
Literal(right),
legacyInterval = true,
timeZoneId = Some(tz))
}
checkEvaluation(sub(end, end), new CalendarInterval(0, 0, 0))
checkEvaluation(sub(end, Instant.EPOCH),
IntervalUtils.stringToInterval(UTF8String.fromString("interval " +
"436163 hours 4 minutes 1 seconds 123 milliseconds 456 microseconds")))
checkEvaluation(sub(Instant.EPOCH, end),
IntervalUtils.stringToInterval(UTF8String.fromString("interval " +
"-436163 hours -4 minutes -1 seconds -123 milliseconds -456 microseconds")))
checkEvaluation(
sub(
Instant.parse("9999-12-31T23:59:59.999999Z"),
Instant.parse("0001-01-01T00:00:00Z")),
IntervalUtils.stringToInterval(UTF8String.fromString("interval " +
"87649415 hours 59 minutes 59 seconds 999 milliseconds 999 microseconds")))
}
outstandingTimezonesIds.foreach { tz =>
def check(left: Instant, right: Instant): Unit = {
checkEvaluation(
SubtractTimestamps(
Literal(left),
Literal(right),
legacyInterval = false,
timeZoneId = Some(tz)),
Duration.between(
right.atZone(getZoneId(tz)).toLocalDateTime,
left.atZone(getZoneId(tz)).toLocalDateTime))
}
check(end, end)
check(end, Instant.EPOCH)
check(Instant.EPOCH, end)
check(Instant.parse("9999-12-31T23:59:59.999999Z"), Instant.parse("0001-01-01T00:00:00Z"))
val errMsg = intercept[ArithmeticException] {
checkEvaluation(
SubtractTimestamps(
Literal(Instant.MIN),
Literal(Instant.MAX),
legacyInterval = false,
timeZoneId = Some(tz)),
Duration.ZERO)
}.getMessage
assert(errMsg.contains("overflow"))
Seq(false, true).foreach { legacy =>
checkConsistencyBetweenInterpretedAndCodegen(
(end: Expression, start: Expression) => SubtractTimestamps(end, start, legacy, Some(tz)),
TimestampType, TimestampType)
}
}
}
test("SPARK-35916: timestamps without time zone difference") {
val end = LocalDateTime.parse("2019-10-04T11:04:01.123456")
val epoch = LocalDateTime.ofEpochSecond(0, 0, java.time.ZoneOffset.UTC)
outstandingTimezonesIds.foreach { tz =>
def sub(left: LocalDateTime, right: LocalDateTime): Expression = {
SubtractTimestamps(
Literal(left),
Literal(right),
legacyInterval = true,
timeZoneId = Some(tz))
}
checkEvaluation(sub(end, end), new CalendarInterval(0, 0, 0))
checkEvaluation(sub(end, epoch),
IntervalUtils.stringToInterval(UTF8String.fromString("interval " +
"436163 hours 4 minutes 1 seconds 123 milliseconds 456 microseconds")))
checkEvaluation(sub(epoch, end),
IntervalUtils.stringToInterval(UTF8String.fromString("interval " +
"-436163 hours -4 minutes -1 seconds -123 milliseconds -456 microseconds")))
checkEvaluation(
sub(
LocalDateTime.parse("9999-12-31T23:59:59.999999"),
LocalDateTime.parse("0001-01-01T00:00:00")),
IntervalUtils.stringToInterval(UTF8String.fromString("interval " +
"87649415 hours 59 minutes 59 seconds 999 milliseconds 999 microseconds")))
}
outstandingTimezonesIds.foreach { tz =>
def check(left: LocalDateTime, right: LocalDateTime): Unit = {
checkEvaluation(
SubtractTimestamps(
Literal(left),
Literal(right),
legacyInterval = false,
timeZoneId = Some(tz)),
Duration.between(
right.atZone(getZoneId(tz)).toLocalDateTime,
left.atZone(getZoneId(tz)).toLocalDateTime))
}
check(end, end)
check(end, epoch)
check(epoch, end)
check(LocalDateTime.parse("9999-12-31T23:59:59.999999"),
LocalDateTime.parse("0001-01-01T00:00:00"))
val errMsg = intercept[ArithmeticException] {
checkEvaluation(
SubtractTimestamps(
Literal(LocalDateTime.MIN),
Literal(LocalDateTime.MAX),
legacyInterval = false,
timeZoneId = Some(tz)),
Duration.ZERO)
}.getMessage
assert(errMsg.contains("overflow"))
Seq(false, true).foreach { legacy =>
checkConsistencyBetweenInterpretedAndCodegen(
(end: Expression, start: Expression) => SubtractTimestamps(end, start, legacy, Some(tz)),
TimestampNTZType, TimestampNTZType)
}
}
}
test("SPARK-34896: subtract dates") {
val end = LocalDate.of(2019, 10, 5)
val epochDate = Literal(LocalDate.ofEpochDay(0))
withSQLConf(SQLConf.LEGACY_INTERVAL_ENABLED.key -> "true") {
checkEvaluation(SubtractDates(Literal(end), Literal(end)),
new CalendarInterval(0, 0, 0))
checkEvaluation(SubtractDates(Literal(end.plusDays(1)), Literal(end)),
IntervalUtils.stringToInterval(UTF8String.fromString("interval 1 days")))
checkEvaluation(SubtractDates(Literal(end.minusDays(1)), Literal(end)),
IntervalUtils.stringToInterval(UTF8String.fromString("interval -1 days")))
checkEvaluation(SubtractDates(Literal(end), epochDate),
IntervalUtils.stringToInterval(UTF8String.fromString("interval 49 years 9 months 4 days")))
checkEvaluation(SubtractDates(epochDate, Literal(end)),
IntervalUtils.stringToInterval(
UTF8String.fromString("interval -49 years -9 months -4 days")))
checkEvaluation(
SubtractDates(
Literal(LocalDate.of(10000, 1, 1)),
Literal(LocalDate.of(1, 1, 1))),
IntervalUtils.stringToInterval(UTF8String.fromString("interval 9999 years")))
}
withSQLConf(SQLConf.LEGACY_INTERVAL_ENABLED.key -> "false") {
checkEvaluation(SubtractDates(Literal(end), Literal(end)), Duration.ZERO)
checkEvaluation(SubtractDates(Literal(end.plusDays(1)), Literal(end)), Duration.ofDays(1))
checkEvaluation(SubtractDates(Literal(end.minusDays(1)), Literal(end)), Duration.ofDays(-1))
checkEvaluation(SubtractDates(Literal(end), epochDate), Duration.ofDays(end.toEpochDay))
checkEvaluation(SubtractDates(epochDate, Literal(end)),
Duration.ofDays(end.toEpochDay).negated())
checkEvaluation(
SubtractDates(
Literal(LocalDate.of(10000, 1, 1)),
Literal(LocalDate.of(1, 1, 1))),
        Duration.ofDays(ChronoUnit.DAYS.between(LocalDate.of(1, 1, 1), LocalDate.of(10000, 1, 1))))
checkExceptionInExpression[ArithmeticException](
SubtractDates(Literal(LocalDate.MAX), Literal(LocalDate.MIN)),
"overflow")
}
Seq(false, true).foreach { ansiIntervals =>
checkConsistencyBetweenInterpretedAndCodegen(
(end: Expression, start: Expression) => SubtractDates(end, start, ansiIntervals),
DateType, DateType)
}
}
test("to_timestamp_ntz") {
val specialTs = Seq(
"0001-01-01T00:00:00", // the fist timestamp of Common Era
"1582-10-15T23:59:59", // the cutover date from Julian to Gregorian calendar
"1970-01-01T00:00:00", // the epoch timestamp
"9999-12-31T23:59:59" // the last supported timestamp according to SQL standard
)
outstandingZoneIds.foreach { zoneId =>
withDefaultTimeZone(zoneId) {
specialTs.foreach { s =>
val input = s.replace("T", " ")
val expectedTs = LocalDateTime.parse(s)
checkEvaluation(
GetTimestamp(Literal(input), Literal("yyyy-MM-dd HH:mm:ss"), TimestampNTZType),
expectedTs)
Seq(".123456", ".123456PST", ".123456CST", ".123456UTC").foreach { segment =>
val input2 = input + segment
val expectedTs2 = LocalDateTime.parse(s + ".123456")
checkEvaluation(
GetTimestamp(Literal(input2), Literal("yyyy-MM-dd HH:mm:ss.SSSSSS[zzz]"),
TimestampNTZType),
expectedTs2)
}
}
}
}
}
test("to_timestamp exception mode") {
withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> "legacy") {
checkEvaluation(
GetTimestamp(
Literal("2020-01-27T20:06:11.847-0800"),
Literal("yyyy-MM-dd'T'HH:mm:ss.SSSz"), TimestampType),
1580184371847000L)
}
withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> "corrected") {
checkEvaluation(
GetTimestamp(
Literal("2020-01-27T20:06:11.847-0800"),
Literal("yyyy-MM-dd'T'HH:mm:ss.SSSz"),
TimestampType), null)
}
withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> "exception") {
checkExceptionInExpression[SparkUpgradeException](
GetTimestamp(
Literal("2020-01-27T20:06:11.847-0800"),
Literal("yyyy-MM-dd'T'HH:mm:ss.SSSz"),
TimestampType), "Fail to parse")
}
}
test("Consistent error handling for datetime formatting and parsing functions") {
def checkException[T <: Exception : ClassTag](c: String): Unit = {
checkExceptionInExpression[T](new ParseToTimestamp(Literal("1"), Literal(c)).child, c)
checkExceptionInExpression[T](new ParseToDate(Literal("1"), Literal(c)).child, c)
checkExceptionInExpression[T](ToUnixTimestamp(Literal("1"), Literal(c)), c)
checkExceptionInExpression[T](UnixTimestamp(Literal("1"), Literal(c)), c)
if (!Set("E", "F", "q", "Q").contains(c)) {
checkExceptionInExpression[T](DateFormatClass(CurrentTimestamp(), Literal(c)), c)
checkExceptionInExpression[T](FromUnixTime(Literal(0L), Literal(c)), c)
}
}
Seq('Y', 'W', 'w', 'E', 'u', 'F').foreach { l =>
checkException[SparkUpgradeException](l.toString)
}
Seq('q', 'Q', 'e', 'c', 'A', 'n', 'N', 'p').foreach { l =>
checkException[IllegalArgumentException](l.toString)
}
}
test("SPARK-31896: Handle am-pm timestamp parsing when hour is missing") {
checkEvaluation(
new ParseToTimestamp(Literal("PM"), Literal("a")).child,
Timestamp.valueOf("1970-01-01 12:00:00.0"))
checkEvaluation(
new ParseToTimestamp(Literal("11:11 PM"), Literal("mm:ss a")).child,
Timestamp.valueOf("1970-01-01 12:11:11.0"))
}
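  // Runs the check on boundary values, narrowed to the smallest integral type that fits.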
def testIntegralInput(testFunc: Number => Unit): Unit = {
def checkResult(input: Long): Unit = {
if (input.toByte == input) {
testFunc(input.toByte)
} else if (input.toShort == input) {
testFunc(input.toShort)
} else if (input.toInt == input) {
testFunc(input.toInt)
} else {
testFunc(input)
}
}
checkResult(0)
checkResult(Byte.MaxValue)
checkResult(Byte.MinValue)
checkResult(Short.MaxValue)
checkResult(Short.MinValue)
checkResult(Int.MaxValue)
checkResult(Int.MinValue)
checkResult(Int.MaxValue.toLong + 100)
checkResult(Int.MinValue.toLong - 100)
}
test("DATE_FROM_UNIX_DATE") {
def testIntegralFunc(value: Number): Unit = {
checkEvaluation(
DateFromUnixDate(Literal(value.intValue())),
LocalDate.ofEpochDay(value.intValue()))
}
// test null input
checkEvaluation(DateFromUnixDate(Literal(null, IntegerType)), null)
// test integral input
testIntegralInput(testIntegralFunc)
}
test("UNIX_DATE") {
def testIntegralFunc(value: Number): Unit = {
checkEvaluation(
UnixDate(Literal(LocalDate.ofEpochDay(value.intValue()))),
value.intValue())
}
// test null input
checkEvaluation(UnixDate(Literal(null, DateType)), null)
// test various inputs
testIntegralInput(testIntegralFunc)
}
test("UNIX_SECONDS") {
checkEvaluation(UnixSeconds(Literal(null, TimestampType)), null)
var timestamp = Literal(new Timestamp(0L))
checkEvaluation(UnixSeconds(timestamp), 0L)
timestamp = Literal(new Timestamp(1000L))
checkEvaluation(UnixSeconds(timestamp), 1L)
timestamp = Literal(new Timestamp(-1000L))
checkEvaluation(UnixSeconds(timestamp), -1L)
// -1ms is considered to be in -1st second, as 0-999ms is in 0th second.
timestamp = Literal(new Timestamp(-1L))
checkEvaluation(UnixSeconds(timestamp), -1L)
timestamp = Literal(new Timestamp(-1000L))
checkEvaluation(UnixSeconds(timestamp), -1L)
// Truncates higher levels of precision
timestamp = Literal(new Timestamp(1999L))
checkEvaluation(UnixSeconds(timestamp), 1L)
}
test("UNIX_MILLIS") {
checkEvaluation(UnixMillis(Literal(null, TimestampType)), null)
var timestamp = Literal(new Timestamp(0L))
checkEvaluation(UnixMillis(timestamp), 0L)
timestamp = Literal(new Timestamp(1000L))
checkEvaluation(UnixMillis(timestamp), 1000L)
timestamp = Literal(new Timestamp(-1000L))
checkEvaluation(UnixMillis(timestamp), -1000L)
// Truncates higher levels of precision
val timestampWithNanos = new Timestamp(1000L)
timestampWithNanos.setNanos(999999)
checkEvaluation(UnixMillis(Literal(timestampWithNanos)), 1000L)
}
test("UNIX_MICROS") {
checkEvaluation(UnixMicros(Literal(null, TimestampType)), null)
var timestamp = Literal(new Timestamp(0L))
checkEvaluation(UnixMicros(timestamp), 0L)
timestamp = Literal(new Timestamp(1000L))
checkEvaluation(UnixMicros(timestamp), 1000000L)
timestamp = Literal(new Timestamp(-1000L))
checkEvaluation(UnixMicros(timestamp), -1000000L)
val timestampWithNanos = new Timestamp(1000L)
timestampWithNanos.setNanos(1000) // 1 microsecond
checkEvaluation(UnixMicros(Literal(timestampWithNanos)), 1000001L)
}
test("TIMESTAMP_SECONDS") {
def testIntegralFunc(value: Number): Unit = {
checkEvaluation(
SecondsToTimestamp(Literal(value)),
Instant.ofEpochSecond(value.longValue()))
}
// test null input
checkEvaluation(
SecondsToTimestamp(Literal(null, IntegerType)),
null)
// test integral input
testIntegralInput(testIntegralFunc)
// test overflow
checkExceptionInExpression[ArithmeticException](
SecondsToTimestamp(Literal(Long.MaxValue, LongType)), EmptyRow, "long overflow")
def testFractionalInput(input: String): Unit = {
Seq(input.toFloat, input.toDouble, Decimal(input)).foreach { value =>
checkEvaluation(
SecondsToTimestamp(Literal(value)),
(input.toDouble * MICROS_PER_SECOND).toLong)
}
}
testFractionalInput("1.0")
testFractionalInput("-1.0")
testFractionalInput("1.234567")
testFractionalInput("-1.234567")
// test overflow for decimal input
checkExceptionInExpression[ArithmeticException](
SecondsToTimestamp(Literal(Decimal("9" * 38))), "Overflow"
)
// test truncation error for decimal input
checkExceptionInExpression[ArithmeticException](
SecondsToTimestamp(Literal(Decimal("0.1234567"))), "Rounding necessary"
)
// test NaN
checkEvaluation(
SecondsToTimestamp(Literal(Double.NaN)),
null)
checkEvaluation(
SecondsToTimestamp(Literal(Float.NaN)),
null)
// double input can truncate
checkEvaluation(
SecondsToTimestamp(Literal(123.456789123)),
Instant.ofEpochSecond(123, 456789000))
checkEvaluation(SecondsToTimestamp(Literal(16777215.0f)), Instant.ofEpochSecond(16777215))
}
test("TIMESTAMP_MILLIS") {
def testIntegralFunc(value: Number): Unit = {
checkEvaluation(
MillisToTimestamp(Literal(value)),
Instant.ofEpochMilli(value.longValue()))
}
// test null input
checkEvaluation(
MillisToTimestamp(Literal(null, IntegerType)),
null)
// test integral input
testIntegralInput(testIntegralFunc)
// test overflow
checkExceptionInExpression[ArithmeticException](
MillisToTimestamp(Literal(Long.MaxValue, LongType)), EmptyRow, "long overflow")
}
test("TIMESTAMP_MICROS") {
def testIntegralFunc(value: Number): Unit = {
checkEvaluation(
MicrosToTimestamp(Literal(value)),
value.longValue())
}
// test null input
checkEvaluation(
MicrosToTimestamp(Literal(null, IntegerType)),
null)
// test integral input
testIntegralInput(testIntegralFunc)
// test max/min input
testIntegralFunc(Long.MaxValue)
testIntegralFunc(Long.MinValue)
}
test("SPARK-33498: GetTimestamp,UnixTimestamp,ToUnixTimestamp with parseError") {
Seq(true, false).foreach { ansiEnabled =>
Seq("LEGACY", "CORRECTED", "EXCEPTION").foreach { policy =>
withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> policy,
SQLConf.ANSI_ENABLED.key -> ansiEnabled.toString) {
val exprSeq = Seq[Expression](
GetTimestamp(Literal("2020-01-27T20:06:11.847"), Literal("yyyy-MM-dd HH:mm:ss.SSS"),
TimestampType),
GetTimestamp(Literal("Unparseable"), Literal("yyyy-MM-dd HH:mm:ss.SSS"),
TimestampType),
UnixTimestamp(Literal("2020-01-27T20:06:11.847"), Literal("yyyy-MM-dd HH:mm:ss.SSS")),
UnixTimestamp(Literal("Unparseable"), Literal("yyyy-MM-dd HH:mm:ss.SSS")),
ToUnixTimestamp(Literal("2020-01-27T20:06:11.847"), Literal("yyyy-MM-dd HH:mm:ss.SSS")),
ToUnixTimestamp(Literal("Unparseable"), Literal("yyyy-MM-dd HH:mm:ss.SSS"))
)
if (!ansiEnabled) {
exprSeq.foreach(checkEvaluation(_, null))
} else if (policy == "LEGACY") {
exprSeq.foreach(checkExceptionInExpression[ParseException](_, "Unparseable"))
} else {
exprSeq.foreach(
checkExceptionInExpression[DateTimeParseException](_, "could not be parsed"))
}
          // LEGACY works, CORRECTED fails, EXCEPTION throws SparkUpgradeException
val exprSeq2 = Seq[(Expression, Long)](
(GetTimestamp(Literal("2020-01-27T20:06:11.847!!!"),
Literal("yyyy-MM-dd'T'HH:mm:ss.SSS"), TimestampType), 1580184371847000L),
(UnixTimestamp(Literal("2020-01-27T20:06:11.847!!!"),
Literal("yyyy-MM-dd'T'HH:mm:ss.SSS")), 1580184371L),
(ToUnixTimestamp(Literal("2020-01-27T20:06:11.847!!!"),
Literal("yyyy-MM-dd'T'HH:mm:ss.SSS")), 1580184371L)
)
if (policy == "LEGACY") {
exprSeq2.foreach(pair => checkEvaluation(pair._1, pair._2))
} else if (policy == "EXCEPTION") {
exprSeq2.foreach(pair =>
checkExceptionInExpression[SparkUpgradeException](
pair._1,
"You may get a different result due to the upgrading of Spark 3.0"))
} else {
if (ansiEnabled) {
exprSeq2.foreach(pair =>
checkExceptionInExpression[DateTimeParseException](pair._1, "could not be parsed"))
} else {
exprSeq2.foreach(pair => checkEvaluation(pair._1, null))
}
}
}
}
}
}
test("SPARK-34739,SPARK-35889: add a year-month interval to a timestamp") {
val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS", Locale.US)
Seq(TimestampType, TimestampNTZType).foreach { dt =>
for (zid <- outstandingZoneIds) {
val timeZoneId = Option(zid.getId)
sdf.setTimeZone(TimeZone.getTimeZone(zid))
checkEvaluation(
TimestampAddYMInterval(
timestampLiteral("2016-01-29 10:11:12.123", sdf, dt),
Literal(Period.ofMonths(2)),
timeZoneId),
timestampAnswer("2016-03-29 10:11:12.123", sdf, dt))
checkEvaluation(
TimestampAddYMInterval(
Literal.create(null, dt),
Literal(Period.ofMonths(1)),
timeZoneId),
null)
checkEvaluation(
TimestampAddYMInterval(
timestampLiteral("2016-01-29 10:00:00.000", sdf, dt),
Literal.create(null, YearMonthIntervalType()),
timeZoneId),
null)
checkEvaluation(
TimestampAddYMInterval(
Literal.create(null, dt),
Literal.create(null, YearMonthIntervalType()),
timeZoneId),
null)
yearMonthIntervalTypes.foreach { it =>
checkConsistencyBetweenInterpretedAndCodegen(
(ts: Expression, interval: Expression) =>
TimestampAddYMInterval(ts, interval, timeZoneId), dt, it)
}
}
}
}
test("SPARK-34761,SPARK-35889: add a day-time interval to a timestamp") {
val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS", Locale.US)
Seq(TimestampType, TimestampNTZType).foreach { dt =>
for (zid <- outstandingZoneIds) {
val timeZoneId = Option(zid.getId)
sdf.setTimeZone(TimeZone.getTimeZone(zid))
checkEvaluation(
TimeAdd(
timestampLiteral("2021-01-01 00:00:00.123", sdf, dt),
Literal(Duration.ofDays(10).plusMinutes(10).plusMillis(321)),
timeZoneId),
timestampAnswer("2021-01-11 00:10:00.444", sdf, dt))
checkEvaluation(
TimeAdd(
timestampLiteral("2021-01-01 00:10:00.123", sdf, dt),
Literal(Duration.ofDays(-10).minusMinutes(9).minusMillis(120)),
timeZoneId),
timestampAnswer("2020-12-22 00:01:00.003", sdf, dt))
val e = intercept[Exception] {
checkEvaluation(
TimeAdd(
timestampLiteral("2021-01-01 00:00:00.123", sdf, dt),
Literal(Duration.of(Long.MaxValue, ChronoUnit.MICROS)),
timeZoneId),
null)
}.getCause
assert(e.isInstanceOf[ArithmeticException])
assert(e.getMessage.contains("long overflow"))
checkEvaluation(
TimeAdd(
Literal.create(null, dt),
Literal(Duration.ofDays(1)),
timeZoneId),
null)
checkEvaluation(
TimeAdd(
timestampLiteral("2021-01-01 00:00:00.123", sdf, dt),
Literal.create(null, DayTimeIntervalType()),
timeZoneId),
null)
checkEvaluation(
TimeAdd(
Literal.create(null, dt),
Literal.create(null, DayTimeIntervalType()),
timeZoneId),
null)
dayTimeIntervalTypes.foreach { it =>
checkConsistencyBetweenInterpretedAndCodegen((ts: Expression, interval: Expression) =>
TimeAdd(ts, interval, timeZoneId), dt, it)
}
}
}
}
}
|
chuckchen/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/DateExpressionsSuite.scala
|
Scala
|
apache-2.0
| 76,985 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.recorder.render.template
import io.gatling.BaseSpec
class PackageSpec extends BaseSpec {
"protectWithTripleQuotes" should "wrap a String containing double quotes with triple quotes" in {
val string = "foo\\"bar"
string.protect(Format.Scala) shouldBe s"$TripleQuotes$string$TripleQuotes"
}
it should "wrap a String containing backslashes with triple quotes" in {
val string = "foo\\\\bar"
string.protect(Format.Scala) shouldBe s"$TripleQuotes$string$TripleQuotes"
}
it should "otherwise wrap a String with simple quotes" in {
val string = "foobar"
string.protect(Format.Scala) shouldBe s"$SimpleQuotes$string$SimpleQuotes"
}
}
|
gatling/gatling
|
gatling-recorder/src/test/scala/io/gatling/recorder/render/template/PackageSpec.scala
|
Scala
|
apache-2.0
| 1,302 |
package net.kwas.impatient.ch7
object HashMapCopier {
import java.util.{HashMap => JavaHashMap}
import collection.immutable.{HashMap => ScalaHashMap}
def copy[A, B](source: JavaHashMap[A, B]): ScalaHashMap[A, B] = {
val builder = ScalaHashMap.newBuilder[A, B]
val iterator = source.entrySet().iterator()
while (iterator.hasNext()) {
val entry = iterator.next()
builder += ((entry.getKey(), entry.getValue()))
}
builder.result
}
}
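// Usage sketch (added for illustration, not part of the original source file): exercises copy
// with a small java.util.HashMap; the sample keys and values are assumptions.
object HashMapCopierExample {
  def main(args: Array[String]): Unit = {
    val javaMap = new java.util.HashMap[String, Int]()
    javaMap.put("one", 1)
    javaMap.put("two", 2)
    // copy returns an immutable Scala HashMap holding the same entries.
    val scalaMap = HashMapCopier.copy(javaMap)
    println(scalaMap("one") + scalaMap("two")) // prints 3
  }
}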
|
dkwasny/ScalaImpatient
|
src/main/scala/net/kwas/impatient/ch7/HashMapCopier.scala
|
Scala
|
mit
| 475 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.engine.core.testutils.benchmark.batch
import com.bwsw.sj.engine.core.testutils.benchmark.loader.BenchmarkDataSenderConfig
import com.bwsw.sj.engine.core.testutils.benchmark.{Benchmark, BenchmarkFactory}
import com.typesafe.config.Config
/**
* @author Pavel Tomskikh
*/
trait BatchBenchmarkFactory[C <: BenchmarkDataSenderConfig]
extends BenchmarkFactory[BatchBenchmarkParameters, C] {
override def create(config: Config, senderConfig: C): Benchmark[BatchBenchmarkParameters] =
create(new BatchBenchmarkConfig(config), senderConfig)
protected def create(benchmarkConfig: BatchBenchmarkConfig, senderConfig: C): Benchmark[BatchBenchmarkParameters]
}
|
bwsw/sj-platform
|
core/sj-engine-core/src/main/scala/com/bwsw/sj/engine/core/testutils/benchmark/batch/BatchBenchmarkFactory.scala
|
Scala
|
apache-2.0
| 1,494 |
/*
* Copyright 2015 LG CNS.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package scouter.server.core.cache;
import scouter.util.IntSet;
object XLogCache {
val cache = new XLogLoopCache[Array[Byte]](20480);
def put(objHash: Int, time: Int, error: Boolean, record: Array[Byte]) {
cache.put(objHash, time, error, record);
}
def get(last_loop: Long, last_index: Int, time: Int): CacheOut[Array[Byte]] = {
return cache.get(last_loop, last_index, time);
}
def get(objHashSet: IntSet, last_loop: Long, last_index: Int, time: Int): CacheOut[Array[Byte]] = {
return cache.get(objHashSet, last_loop, last_index, time);
}
}
|
jahnaviancha/scouter
|
scouter.server/src/scouter/server/core/cache/XLogCache.scala
|
Scala
|
apache-2.0
| 1,225 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.common
import java.time.{Clock, Duration, Instant}
import akka.event.Logging.{DebugLevel, InfoLevel, LogLevel, WarningLevel}
import akka.http.scaladsl.model.headers.RawHeader
import pureconfig.loadConfigOrThrow
import spray.json._
import org.apache.openwhisk.core.ConfigKeys
import pureconfig._
import org.apache.openwhisk.common.tracing.WhiskTracerProvider
import org.apache.openwhisk.common.WhiskInstants._
import scala.util.Try
/**
* A transaction id for tracking operations in the system that are specific to a request.
* An instance of TransactionId is implicitly received by all logging methods. The actual
* metadata is stored indirectly in the referenced meta object.
*/
case class TransactionId private (meta: TransactionMetadata) extends AnyVal {
def id = meta.id
override def toString = s"#tid_${meta.id}"
def toHeader = RawHeader(TransactionId.generatorConfig.header, meta.id)
/**
* Method to count events.
*
* @param from Reference, where the method was called from.
* @param marker A LogMarkerToken. They are defined in <code>LoggingMarkers</code>.
* @param message An additional message to be written into the log, together with the other information.
* @param logLevel The Loglevel, the message should have. Default is <code>InfoLevel</code>.
*/
def mark(from: AnyRef, marker: LogMarkerToken, message: => String = "", logLevel: LogLevel = DebugLevel)(
implicit logging: Logging) = {
if (TransactionId.metricsLog) {
// marker received with a debug level will be emitted on info level
logging.emit(InfoLevel, this, from, createMessageWithMarker(message, LogMarker(marker, deltaToStart)))
} else {
logging.emit(logLevel, this, from, message)
}
MetricEmitter.emitCounterMetric(marker)
}
/**
* Method to start taking time of an action in the code. It returns a <code>StartMarker</code> which has to be
* passed into the <code>finished</code>-method.
*
* @param from Reference, where the method was called from.
* @param marker A LogMarkerToken. They are defined in <code>LoggingMarkers</code>.
* @param message An additional message to be written into the log, together with the other information.
* @param logLevel The Loglevel, the message should have. Default is <code>InfoLevel</code>.
*
* @return startMarker that has to be passed to the finished or failed method to calculate the time difference.
*/
def started(from: AnyRef, marker: LogMarkerToken, message: => String = "", logLevel: LogLevel = DebugLevel)(
implicit logging: Logging): StartMarker = {
if (TransactionId.metricsLog) {
// marker received with a debug level will be emitted on info level
logging.emit(InfoLevel, this, from, createMessageWithMarker(message, LogMarker(marker, deltaToStart)))
} else {
logging.emit(logLevel, this, from, message)
}
MetricEmitter.emitCounterMetric(marker)
//tracing support
WhiskTracerProvider.tracer.startSpan(marker, this)
StartMarker(Instant.now.inMills, marker)
}
/**
* Method to stop taking time of an action in the code. The time the method used will be written into a log message.
*
* @param from Reference, where the method was called from.
* @param startMarker <code>StartMarker</code> returned by a <code>starting</code> method.
* @param message An additional message to be written into the log, together with the other information.
* @param logLevel The Loglevel, the message should have. Default is <code>InfoLevel</code>.
* @param endTime Manually set the timestamp of the end. By default it is NOW.
*/
def finished(from: AnyRef,
startMarker: StartMarker,
message: => String = "",
logLevel: LogLevel = DebugLevel,
endTime: Instant = Instant.now(Clock.systemUTC))(implicit logging: Logging) = {
val endMarker = startMarker.startMarker.asFinish
val deltaToEnd = deltaToMarker(startMarker, endTime)
if (TransactionId.metricsLog) {
logging.emit(
InfoLevel,
this,
from,
createMessageWithMarker(
if (logLevel <= InfoLevel) message else "",
LogMarker(endMarker, deltaToStart, Some(deltaToEnd))))
} else {
logging.emit(logLevel, this, from, message)
}
MetricEmitter.emitHistogramMetric(endMarker, deltaToEnd)
//tracing support
WhiskTracerProvider.tracer.finishSpan(this)
}
/**
* Method to stop taking time of an action in the code that failed. The time the method used will be written into a log message.
*
* @param from Reference, where the method was called from.
* @param startMarker <code>StartMarker</code> returned by a <code>starting</code> method.
* @param message An additional message to be written into the log, together with the other information.
* @param logLevel The <code>LogLevel</code> the message should have. Default is <code>WarningLevel</code>.
*/
def failed(from: AnyRef, startMarker: StartMarker, message: => String = "", logLevel: LogLevel = WarningLevel)(
implicit logging: Logging) = {
val endMarker = startMarker.startMarker.asError
val deltaToEnd = deltaToMarker(startMarker)
if (TransactionId.metricsLog) {
logging.emit(
logLevel,
this,
from,
createMessageWithMarker(message, LogMarker(endMarker, deltaToStart, Some(deltaToEnd))))
} else {
logging.emit(logLevel, this, from, message)
}
MetricEmitter.emitHistogramMetric(endMarker, deltaToEnd)
MetricEmitter.emitCounterMetric(endMarker)
//tracing support
WhiskTracerProvider.tracer.error(this, message)
}
/**
* Calculates the time between now and the beginning of the transaction.
*/
def deltaToStart = Duration.between(meta.start, Instant.now(Clock.systemUTC)).toMillis
/**
* Calculates the time between now and the startMarker that was returned by <code>starting</code>.
*
* @param startMarker <code>StartMarker</code> returned by a <code>starting</code> method.
* @param endTime Manually set the endtime. By default it is NOW.
*/
def deltaToMarker(startMarker: StartMarker, endTime: Instant = Instant.now(Clock.systemUTC)) =
Duration.between(startMarker.start, endTime).toMillis
/**
* Formats log message to include marker.
*
* @param message: The log message without the marker
* @param marker: The marker to add to the message
*/
private def createMessageWithMarker(message: String, marker: LogMarker): String = s"$message $marker"
}
/**
* The StartMarker which includes the <code>LogMarkerToken</code> and the start-time.
*
* @param start the time when the startMarker was set
* @param startMarker the LogMarkerToken which defines the start event
*/
case class StartMarker(start: Instant, startMarker: LogMarkerToken)
/**
* The transaction metadata encapsulates important properties about a transaction.
*
* @param id the transaction identifier; it is positive for client requests,
* negative for system operation and zero when originator is not known
* @param start the timestamp when the request processing commenced
* @param extraLogging enables logging, if set to true
*/
protected case class TransactionMetadata(id: String, start: Instant, extraLogging: Boolean = false)
case class MetricConfig(prometheusEnabled: Boolean,
kamonEnabled: Boolean,
kamonTagsEnabled: Boolean,
logsEnabled: Boolean)
object TransactionId {
val metricConfig = loadConfigOrThrow[MetricConfig](ConfigKeys.metrics)
// get the metric parameters directly from the environment since WhiskConfig can not be instantiated here
val metricsKamon: Boolean = metricConfig.kamonEnabled
val metricsKamonTags: Boolean = metricConfig.kamonTagsEnabled
val metricsLog: Boolean = metricConfig.logsEnabled
val generatorConfig = loadConfigOrThrow[TransactionGeneratorConfig](ConfigKeys.transactions)
val systemPrefix = "sid_"
val unknown = TransactionId(systemPrefix + "unknown")
val testing = TransactionId(systemPrefix + "testing") // Common id for for unit testing
val invoker = TransactionId(systemPrefix + "invoker") // Invoker startup/shutdown or GC activity
val invokerWarmup = TransactionId(systemPrefix + "invokerWarmup") // Invoker warmup thread that makes stem-cell containers
val invokerNanny = TransactionId(systemPrefix + "invokerNanny") // Invoker nanny thread
val dispatcher = TransactionId(systemPrefix + "dispatcher") // Kafka message dispatcher
val loadbalancer = TransactionId(systemPrefix + "loadbalancer") // Loadbalancer thread
val invokerHealth = TransactionId(systemPrefix + "invokerHealth") // Invoker supervision
val controller = TransactionId(systemPrefix + "controller") // Controller startup
val dbBatcher = TransactionId(systemPrefix + "dbBatcher") // Database batcher
def apply(tid: String, extraLogging: Boolean = false): TransactionId = {
val now = Instant.now(Clock.systemUTC()).inMills
TransactionId(TransactionMetadata(tid, now, extraLogging))
}
implicit val serdes = new RootJsonFormat[TransactionId] {
def write(t: TransactionId) = {
if (t.meta.extraLogging)
JsArray(JsString(t.meta.id), JsNumber(t.meta.start.toEpochMilli), JsBoolean(t.meta.extraLogging))
else
JsArray(JsString(t.meta.id), JsNumber(t.meta.start.toEpochMilli))
}
def read(value: JsValue) =
Try {
value match {
case JsArray(Vector(JsString(id), JsNumber(start))) =>
TransactionId(TransactionMetadata(id, Instant.ofEpochMilli(start.longValue), false))
case JsArray(Vector(JsString(id), JsNumber(start), JsBoolean(extraLogging))) =>
TransactionId(TransactionMetadata(id, Instant.ofEpochMilli(start.longValue), extraLogging))
}
} getOrElse unknown
}
}
case class TransactionGeneratorConfig(header: String) {
val lowerCaseHeader = header.toLowerCase //to cache the lowercase version of the header name
}
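// Usage sketch (added for illustration, not part of the original source file): shows how the
// started/finished/failed markers defined above are intended to bracket a timed operation.
// The helper name `timed` and the shape of the wrapper are assumptions; any LogMarkerToken
// (e.g. one defined in LoggingMarkers) could be passed in.
object TransactionIdUsageSketch {
  def timed[T](from: AnyRef, marker: LogMarkerToken)(op: => T)(
    implicit tid: TransactionId, logging: Logging): T = {
    // Record the start marker; emits a counter metric and starts a tracing span.
    val start = tid.started(from, marker, "operation starting")
    try {
      val result = op
      // Emits the elapsed-time histogram and closes the tracing span.
      tid.finished(from, start, "operation finished")
      result
    } catch {
      case t: Throwable =>
        // Emits the error marker and reports the failure to the tracer.
        tid.failed(from, start, s"operation failed: ${t.getMessage}")
        throw t
    }
  }
}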
|
markusthoemmes/openwhisk
|
common/scala/src/main/scala/org/apache/openwhisk/common/TransactionId.scala
|
Scala
|
apache-2.0
| 10,904 |
package com.feynmanliang.optala
/** The results from a run of an iterative optimization algorithm along with performance diagnostic information.
* @tparam T type of an algorithm's internal state
*/
trait RunResult[T] {
val bestSolution: Solution // best solution found in this algorithm run
val stateTrace: Seq[T] // sequence of states at each iteration
val numObjEval: Long // number of objective function evaluations until termination
val numGradEval: Long // number of gradient evaluations until termination
}
|
feynmanliang/optala
|
src/main/scala/com/feynmanliang/optala/RunResult.scala
|
Scala
|
mit
| 528 |
package org.opennetworkinsight.testutils
import java.util.Date
import org.apache.spark.{SparkConf, SparkContext}
import scala.concurrent.Lock
/**
* THIS CODE WAS COPIED DIRECTLY FROM THE OPEN SOURCE PROJECT TAP (Trusted Analytics Platform)
  * which has an Apache V2.0 license
*/
/**
* Don't use this class directly!! Use the FlatSpec or WordSpec version for your tests
*
* TestingSparkContext supports two basic modes:
*
* 1. shared SparkContext for all tests - this is fast
* 2. starting and stopping SparkContext for every test - this is slow but more independent
*
* You can't have more than one local SparkContext running at the same time.
*/
private[testutils] object TestingSparkContext {
/** lock allows non-Spark tests to still run concurrently */
private val lock = new Lock()
/** global SparkContext that can be re-used between tests */
private lazy val sc: SparkContext = createLocalSparkContext()
/** System property can be used to turn off globalSparkContext easily */
private val useGlobalSparkContext: Boolean = System.getProperty("useGlobalSparkContext", "true").toBoolean
/**
* Should be called from before()
*/
def sparkContext: SparkContext = {
if (useGlobalSparkContext) {
// reuse the global SparkContext
sc
}
else {
// create a new SparkContext each time
lock.acquire()
createLocalSparkContext()
}
}
/**
* Should be called from after()
*/
def cleanUp(): Unit = {
if (!useGlobalSparkContext) {
cleanupSpark()
lock.release()
}
}
private def createLocalSparkContext(
serializer: String = "org.apache.spark.serializer.KryoSerializer",
registrator: String = "org.trustedanalytics.atk.graphbuilder.GraphBuilderKryoRegistrator"): SparkContext = {
// LogUtils.silenceSpark()
System.setProperty("spark.driver.allowMultipleContexts", "true")
val conf = new SparkConf()
.setMaster("local")
.setAppName(this.getClass.getSimpleName + " " + new Date())
//conf.set("spark.serializer", serializer)
//conf.set("spark.kryo.registrator", registrator)
conf.set("spark.sql.shuffle.partitions", "2")
new SparkContext(conf)
}
/**
* Shutdown spark and release the lock
*/
private def cleanupSpark(): Unit = {
try {
if (sc != null) {
sc.stop()
}
}
finally {
// To avoid Akka rebinding to the same port, since it doesn't unbind immediately on shutdown
System.clearProperty("spark.driver.port")
}
}
}
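// Usage sketch (added for illustration, not part of the original source file): a loan-pattern
// wrapper mirroring the before()/after() contract described in the scaladoc above. The helper
// name is an assumption; real suites should keep using the FlatSpec/WordSpec versions this
// object's comment refers to.
private[testutils] object TestingSparkContextUsageSketch {
  def withSparkContext[T](body: SparkContext => T): T = {
    val sc = TestingSparkContext.sparkContext // what before() would call
    try body(sc)
    finally TestingSparkContext.cleanUp() // what after() would call
  }
}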
|
Open-Network-Insight/oni-ml
|
src/test/scala/org/opennetworkinsight/testutils/TestingSparkContext.scala
|
Scala
|
apache-2.0
| 2,617 |
/*
* Copyright (C) 2013 The Mango Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* The code of this project is a port of (or wrapper around) the Guava-libraries.
* See http://code.google.com/p/guava-libraries/
*
* @author Markus Schneider
*/
package org.feijoas.mango.common.collect
import scala.collection.convert.decorateAsScala.iterableAsScalaIterableConverter
import org.feijoas.mango.common.annotations.Beta
import org.feijoas.mango.common.base.Optional.asGuavaOptionalConverter
import org.feijoas.mango.common.base.Optional.asMangoOptionConverter
import org.feijoas.mango.common.base.Preconditions.checkNotNull
import org.feijoas.mango.common.convert.AsJava
import org.feijoas.mango.common.convert.AsScala
import com.google.common.{ collect => cgcc }
/**
* A variant of {@link TreeTraverser} for binary trees, providing additional traversals specific to
* binary trees.
*
* @author Markus Schneider
* @since 0.11 (copied from guava-libraries)
*/
@Beta
trait BinaryTreeTraverser[T] extends TreeTraverser[T] {
/**
* Returns the left child of the specified node, or `Option#isEmpty` if the specified
* node has no left child.
*/
def leftChild: T => Option[T]
/**
* Returns the right child of the specified node, or `Option#isEmpty` if the specified
* node has no right child.
*/
def rightChild: T => Option[T]
/**
* Returns the children of this node, in left-to-right order.
*/
final override def children: T => Iterable[T] = (root: T) => this.asJava.children(root).asScala
/**
* Returns an unmodifiable iterable over the nodes in a tree structure, using in-order
* traversal.
*/
final def inOrderTraversal(root: T): Iterable[T] = this.asJava.inOrderTraversal(root).asScala
}
/** Factory for [[BinaryTreeTraverser]] instances. */
object BinaryTreeTraverser {
/**
* Creates a new `BinaryTreeTraverser` using a function that returns the left child and one that returns the right child
*/
final def apply[T](left: T => Option[T], right: T => Option[T]): BinaryTreeTraverser[T] = new BinaryTreeTraverser[T] {
final override def leftChild = (root: T) => left(root)
final override def rightChild = (root: T) => right(root)
}
/**
* Creates a new `BinaryTreeTraverser` using a function that returns the left child and the right child as a Tuple
*/
final def apply[T](childs: T => (Option[T],Option[T])): BinaryTreeTraverser[T] = new BinaryTreeTraverser[T] {
final override def leftChild = (root: T) => childs(root)._1
final override def rightChild = (root: T) => childs(root)._2
}
/**
* Adds an `asJava` method that wraps a Scala `BinaryTreeTraverser` in
* a Guava `BinaryTreeTraverser`.
*
* The returned Guava `BinaryTreeTraverser` forwards all calls
* to the given Scala `BinaryTreeTraverser`.
*
   * @param traverser the Scala `BinaryTreeTraverser` to wrap in a Guava `BinaryTreeTraverser`
* @return An object with an `asJava` method that returns a Guava `BinaryTreeTraverser`
* view of the argument
*/
implicit final def asGuavaBinaryTreeTraverserConverter[T](traverser: BinaryTreeTraverser[T]): AsJava[cgcc.BinaryTreeTraverser[T]] = {
def convert(traverser: BinaryTreeTraverser[T]): cgcc.BinaryTreeTraverser[T] = traverser match {
case t: AsMangoBinaryTreeTraverser[T] => t.delegate
case _ => AsGuavaBinaryTreeTraverser(traverser)
}
new AsJava(convert(traverser))
}
/**
* Adds an `asScala` method that wraps a Guava `BinaryTreeTraverser` in
* a Scala `BinaryTreeTraverser`.
*
* The returned Scala `BinaryTreeTraverser` forwards all calls
   * to the given Guava `BinaryTreeTraverser`.
   *
   * @param traverser the Guava `BinaryTreeTraverser` to wrap in a Scala `BinaryTreeTraverser`
* @return An object with an `asScala` method that returns a Scala `BinaryTreeTraverser`
* view of the argument
*/
implicit final def asMangoBinaryTreeTraverserConverter[T](traverser: cgcc.BinaryTreeTraverser[T]): AsScala[BinaryTreeTraverser[T]] = {
def convert(traverser: cgcc.BinaryTreeTraverser[T]) = traverser match {
case AsGuavaBinaryTreeTraverser(delegate) => delegate
case _ => AsMangoBinaryTreeTraverser(traverser)
}
new AsScala(convert(traverser))
}
}
/**
* Wraps a Scala `BinaryTreeTraverser` in a Guava `BinaryTreeTraverser`
*/
@SerialVersionUID(1L)
private[mango] case class AsGuavaBinaryTreeTraverser[T](delegate: BinaryTreeTraverser[T]) extends cgcc.BinaryTreeTraverser[T] with Serializable {
checkNotNull(delegate)
final override def leftChild(root: T) = delegate.leftChild(root).asJava
final override def rightChild(root: T) = delegate.rightChild(root).asJava
}
/**
* Wraps a Guava `BinaryTreeTraverser` in a Scala `BinaryTreeTraverser`
*/
@SerialVersionUID(1L)
private[mango] case class AsMangoBinaryTreeTraverser[T](delegate: cgcc.BinaryTreeTraverser[T]) extends BinaryTreeTraverser[T] with Serializable {
checkNotNull(delegate)
final override def leftChild = (root: T) => delegate.leftChild(root).asScala
final override def rightChild = (root: T) => delegate.rightChild(root).asScala
}
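// Usage sketch (added for illustration, not part of the original source file): builds a
// traverser over a tiny tree with the factory above, runs the in-order traversal, and obtains
// the Guava view via the implicit converter. The Node case class is an assumption introduced
// only for this example.
private[mango] object BinaryTreeTraverserUsageSketch {
  final case class Node(value: Int, left: Option[Node] = None, right: Option[Node] = None)
  val traverser: BinaryTreeTraverser[Node] = BinaryTreeTraverser[Node](_.left, _.right)
  // In-order traversal of the tree ((1) 2 (3)) yields the values 1, 2, 3.
  def inOrderValues(root: Node): Seq[Int] =
    traverser.inOrderTraversal(root).map(_.value).toSeq
  // The implicit converter above exposes the same traverser as a Guava BinaryTreeTraverser.
  val guavaView: cgcc.BinaryTreeTraverser[Node] = traverser.asJava
}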
|
feijoas/mango
|
src/main/scala/org/feijoas/mango/common/collect/BinaryTreeTraverser.scala
|
Scala
|
apache-2.0
| 5,666 |
package edu.rice.habanero.benchmarks.apsp
import edu.rice.habanero.actors.{ScalazActor, ScalazActorState, ScalazPool}
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}
import scala.collection.mutable.ListBuffer
/**
*
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> ([email protected])
*/
object ApspScalazActorBenchmark {
def main(args: Array[String]) {
BenchmarkRunner.runBenchmark(args, new ApspScalazActorBenchmark)
}
private final class ApspScalazActorBenchmark extends Benchmark {
def initialize(args: Array[String]) {
ApspConfig.parseArgs(args)
ApspUtils.generateGraph()
}
def printArgInfo() {
ApspConfig.printArgs()
}
def runIteration() {
val graphData = ApspUtils.graphData
val numNodes = ApspConfig.N
val blockSize = ApspConfig.B
val numBlocksInSingleDim: Int = numNodes / blockSize
      // create and automatically start the actors
val blockActors = Array.tabulate[ScalazActor[AnyRef]](numBlocksInSingleDim, numBlocksInSingleDim) {
(i, j) =>
val myBlockId = (i * numBlocksInSingleDim) + j
val apspActor = new ApspFloydWarshallActor(myBlockId, blockSize, numNodes, graphData)
apspActor.start()
apspActor
}
// create the links to the neighbors
for (bi <- 0 until numBlocksInSingleDim) {
for (bj <- 0 until numBlocksInSingleDim) {
val neighbors = new ListBuffer[ScalazActor[AnyRef]]()
// add neighbors in same column
for (r <- 0 until numBlocksInSingleDim) {
if (r != bi) {
neighbors.append(blockActors(r)(bj))
}
}
// add neighbors in same row
for (c <- 0 until numBlocksInSingleDim) {
if (c != bj) {
neighbors.append(blockActors(bi)(c))
}
}
blockActors(bi)(bj).send(ApspNeighborMessage(neighbors))
}
}
// start the computation
for (bi <- 0 until numBlocksInSingleDim) {
for (bj <- 0 until numBlocksInSingleDim) {
blockActors(bi)(bj).send(ApspInitialMessage)
}
}
ScalazActorState.awaitTermination()
}
def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double): Unit = {
if (lastIteration) {
ScalazPool.shutdown()
} else {
ApspUtils.generateGraph()
}
}
}
sealed abstract class ApspMessage
private case object ApspInitialMessage extends ApspMessage
private case class ApspResultMessage(k: Int, myBlockId: Int, initData: Array[Array[Long]]) extends ApspMessage
private case class ApspNeighborMessage(neighbors: ListBuffer[ScalazActor[AnyRef]]) extends ApspMessage
private class ApspFloydWarshallActor(myBlockId: Int, blockSize: Int, graphSize: Int, initGraphData: Array[Array[Long]]) extends ScalazActor[AnyRef] {
private val self = this
private val numBlocksInSingleDim: Int = graphSize / blockSize
private val numNeighbors: Int = 2 * (numBlocksInSingleDim - 1)
final val rowOffset: Int = (myBlockId / numBlocksInSingleDim) * blockSize
final val colOffset: Int = (myBlockId % numBlocksInSingleDim) * blockSize
private val neighbors = new ListBuffer[ScalazActor[AnyRef]]()
private var k = -1
private val neighborDataPerIteration = new java.util.HashMap[Int, Array[Array[Long]]]()
private var receivedNeighbors = false
private var currentIterData = ApspUtils.getBlock(initGraphData, myBlockId)
override def process(msg: AnyRef) {
msg match {
case message: ApspResultMessage =>
if (!receivedNeighbors) {
val msg = "Block-" + myBlockId + " hasn't received neighbors yet!"
println("ERROR: " + msg)
throw new Exception(msg)
}
val haveAllData = storeIterationData(message.k, message.myBlockId, message.initData)
if (haveAllData) {
// received enough data from neighbors, can proceed to do computation for next k
k += 1
performComputation()
notifyNeighbors()
neighborDataPerIteration.clear()
if (k == graphSize - 1) {
// we've completed the computation
exit()
}
}
case ApspInitialMessage =>
notifyNeighbors()
case ApspNeighborMessage(msgNeighbors) =>
receivedNeighbors = true
msgNeighbors.foreach {
loopNeighbor => neighbors.append(loopNeighbor)
}
}
}
private def storeIterationData(iteration: Int, sourceId: Int, dataArray: Array[Array[Long]]): Boolean = {
neighborDataPerIteration.put(sourceId, dataArray)
neighborDataPerIteration.size() == numNeighbors
}
private def performComputation(): Unit = {
val prevIterData = currentIterData
// make modifications on a fresh local data array for this iteration
currentIterData = Array.tabulate[Long](blockSize, blockSize)((i, j) => 0)
for (i <- 0 until blockSize) {
for (j <- 0 until blockSize) {
val gi = rowOffset + i
val gj = colOffset + j
val newIterData = elementAt(gi, k, k - 1, prevIterData) + elementAt(k, gj, k - 1, prevIterData)
currentIterData(i)(j) = scala.math.min(prevIterData(i)(j), newIterData)
}
}
}
private def elementAt(row: Int, col: Int, srcIter: Int, prevIterData: Array[Array[Long]]): Long = {
val destBlockId = ((row / blockSize) * numBlocksInSingleDim) + (col / blockSize)
val localRow = row % blockSize
val localCol = col % blockSize
// println("Accessing block-" + destBlockId + " from block-" + selfActor.myBlockId + " for " + (row, col))
if (destBlockId == myBlockId) {
prevIterData(localRow)(localCol)
} else {
val blockData = neighborDataPerIteration.get(destBlockId)
blockData(localRow)(localCol)
}
}
private def notifyNeighbors(): Unit = {
// send the current result to all other blocks who might need it
// note: this is inefficient version where data is sent to neighbors
// who might not need it for the current value of k
val resultMessage = ApspResultMessage(k, myBlockId, currentIterData)
neighbors.foreach {
loopNeighbor =>
loopNeighbor.send(resultMessage)
}
}
}
}
|
shamsmahmood/savina
|
src/main/scala/edu/rice/habanero/benchmarks/apsp/ApspScalazActorBenchmark.scala
|
Scala
|
gpl-2.0
| 6,440 |
/*
* Copyright (c) 2019. Yuriy Stul
*/
package com.stulsoft.ysps.pmatch
/** Demonstrates usage of type patterns; adding variable to pattern; <i>@</i>
*
* @author Yuriy Stul
*/
object MatchWithAt extends App {
test1()
test2()
def test1(): Unit = {
println("==>test1")
val o = Option("Test")
o match {
case Some(t) => println(s"case Some(t): t = $t") // access to value only
case _ =>
}
o match {
case p@Some(t) => println(s"case p @ Some(t): p = $p, t = $t") // access to object and value
case _ =>
}
o match {
case p@Some("Test") => println(s"case p @ Some(text): p = $p") // access to object
case _ =>
}
}
def test2(): Unit = {
println("==>test2")
val o = SomeClass33("ttttt", "Test")
o match {
case SomeClass33(_, "Test") => println("Found 1")
case _ => println("Did not found 1")
}
o match {
case s@SomeClass33(_, "Test") => println(s"Found 2, s=$s")
case _ => println("Did not found 2")
}
// Compilation error
/*
o match {
case s:SomeClass33(_, "Test") => println(s"Found 3, s=$s")
case _ => println("Did not found 3")
}
*/
o match {
case s: SomeClass33 if s.n2 == "Test" => println(s"Found 4, s=$s")
case _ => println("Did not found 4")
}
}
}
case class SomeClass33(n1: String, n2: String)
|
ysden123/ysps
|
src/main/scala/com/stulsoft/ysps/pmatch/MatchWithAt.scala
|
Scala
|
mit
| 1,411 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import sbt._
import Keys._
import Process._
import scala.xml.{Node, Elem}
import scala.xml.transform.{RewriteRule, RuleTransformer}
object KafkaBuild extends Build {
val buildNumber = SettingKey[String]("build-number", "Build number defaults to $BUILD_NUMBER environment variable")
val releaseName = SettingKey[String]("release-name", "the full name of this release")
val commonSettings = Seq(
organization := "org.apache.kafka",
pomExtra :=
<parent>
<groupId>org.apache</groupId>
<artifactId>apache</artifactId>
<version>10</version>
</parent>
<licenses>
<license>
<name>Apache 2</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>,
scalacOptions ++= Seq("-deprecation", "-unchecked", "-g:none"),
crossScalaVersions := Seq("2.8.0","2.8.2", "2.9.1", "2.9.2"),
scalaVersion := "2.8.0",
version := "0.8.0-beta1",
publishTo := Some("Apache Maven Repo" at "https://repository.apache.org/service/local/staging/deploy/maven2"),
credentials += Credentials(Path.userHome / ".m2" / ".credentials"),
buildNumber := System.getProperty("build.number", ""),
version <<= (buildNumber, version) { (build, version) => if (build == "") version else version + "+" + build},
releaseName <<= (name, version, scalaVersion) {(name, version, scalaVersion) => name + "_" + scalaVersion + "-" + version},
javacOptions ++= Seq("-Xlint:unchecked", "-source", "1.5"),
parallelExecution in Test := false, // Prevent tests from overrunning each other
libraryDependencies ++= Seq(
"log4j" % "log4j" % "1.2.15",
"net.sf.jopt-simple" % "jopt-simple" % "3.2",
"org.slf4j" % "slf4j-simple" % "1.6.4"
),
    // The issue is that, in going from log4j 1.2.14 to 1.2.15, the developers added some features
    // which require dependencies on various sun and javax packages.
ivyXML := <dependencies>
<exclude module="javax"/>
<exclude module="jmxri"/>
<exclude module="jmxtools"/>
<exclude module="mail"/>
<exclude module="jms"/>
<dependency org="org.apache.zookeeper" name="zookeeper" rev="3.3.4">
<exclude org="log4j" module="log4j"/>
<exclude org="jline" module="jline"/>
</dependency>
</dependencies>
)
val hadoopSettings = Seq(
javacOptions ++= Seq("-Xlint:deprecation"),
libraryDependencies ++= Seq(
"org.apache.avro" % "avro" % "1.4.0",
"org.apache.pig" % "pig" % "0.8.0",
"commons-logging" % "commons-logging" % "1.0.4",
"org.codehaus.jackson" % "jackson-core-asl" % "1.5.5",
"org.codehaus.jackson" % "jackson-mapper-asl" % "1.5.5",
"org.apache.hadoop" % "hadoop-core" % "0.20.2"
),
ivyXML :=
<dependencies>
<exclude module="netty"/>
<exclude module="javax"/>
<exclude module="jmxri"/>
<exclude module="jmxtools"/>
<exclude module="mail"/>
<exclude module="jms"/>
<dependency org="org.apache.hadoop" name="hadoop-core" rev="0.20.2">
<exclude org="junit" module="junit"/>
</dependency>
<dependency org="org.apache.pig" name="pig" rev="0.8.0">
<exclude org="junit" module="junit"/>
</dependency>
</dependencies>
)
val restSettings = Seq(
javacOptions ++= Seq("-Xlint:deprecation"),
libraryDependencies ++= Seq(
"org.eclipse.jetty" % "jetty-server" % "8.0.4.v20111024",
"org.eclipse.jetty" % "jetty-servlet" % "8.0.4.v20111024",
"org.mongodb" % "mongo-java-driver" % "2.11.3"
),
ivyXML :=
<dependencies>
<exclude module="javax"/>
<exclude module="jmxri"/>
<exclude module="jmxtools"/>
<exclude module="mail"/>
<exclude module="jms"/>
<dependency org="org.eclipse.jetty" name="jetty-server" rev="8.0.4.v20111024">
</dependency>
<dependency org="org.eclipse.jetty" name="jetty-servlet" rev="8.0.4.v20111024">
</dependency>
<dependency org="org.mongodb" name="mongo-java-driver" rev="2.11.3">
</dependency>
</dependencies>
)
val runRat = TaskKey[Unit]("run-rat-task", "Runs Apache rat on Kafka")
val runRatTask = runRat := {
"bin/run-rat.sh" !
}
val release = TaskKey[Unit]("release", "Creates a deployable release directory file with dependencies, config, and scripts.")
val releaseTask = release <<= ( packageBin in (core, Compile), dependencyClasspath in (core, Runtime), exportedProducts in Compile,
target, releaseName in core ) map { (packageBin, deps, products, target, releaseName) =>
val jarFiles = deps.files.filter(f => !products.files.contains(f) && f.getName.endsWith(".jar"))
val destination = target / "RELEASE" / releaseName
IO.copyFile(packageBin, destination / packageBin.getName)
IO.copy(jarFiles.map { f => (f, destination / "libs" / f.getName) })
IO.copyDirectory(file("config"), destination / "config")
IO.copyDirectory(file("bin"), destination / "bin")
for {file <- (destination / "bin").listFiles} { file.setExecutable(true, true) }
}
val releaseRest = TaskKey[Unit]("release-rest", "Creates a deployable release directory file with dependencies, config, and scripts.")
val releaseRestTask = releaseRest <<= ( packageBin in (rest, Compile), dependencyClasspath in (rest, Runtime), exportedProducts in Compile,
target, releaseName in core ) map { (packageBin, deps, products, target, releaseName) =>
val jarFiles = deps.files.filter(f => !products.files.contains(f) && f.getName.endsWith(".jar"))
val destination = target / "RELEASE" / releaseName
IO.copyFile(packageBin, destination / packageBin.getName)
IO.copy(jarFiles.map { f => (f, destination / "libs" / f.getName) })
IO.copyDirectory(file("config"), destination / "config")
IO.copyDirectory(file("bin"), destination / "bin")
for {file <- (destination / "bin").listFiles} { file.setExecutable(true, true) }
}
val releaseZip = TaskKey[Unit]("release-zip", "Creates a deployable zip file with dependencies, config, and scripts.")
val releaseZipTask = releaseZip <<= (release, target, releaseName in core) map { (release, target, releaseName) =>
val zipPath = target / "RELEASE" / "%s.zip".format(releaseName)
IO.delete(zipPath)
IO.zip((target/"RELEASE" ** releaseName ***) x relativeTo(target/"RELEASE"), zipPath)
}
val releaseTar = TaskKey[Unit]("release-tar", "Creates a deployable tar.gz file with dependencies, config, and scripts.")
val releaseTarTask = releaseTar <<= ( release, target, releaseName in core) map { (release, target, releaseName) =>
Process(Seq("tar", "czf", "%s.tar.gz".format(releaseName), releaseName), target / "RELEASE").! match {
case 0 => ()
case n => sys.error("Failed to run native tar application!")
}
}
lazy val kafka = Project(id = "Kafka", base = file(".")).aggregate(core, examples, contrib, perf).settings((commonSettings ++
runRatTask ++ releaseTask ++ releaseZipTask ++ releaseTarTask ++ releaseRestTask): _*)
lazy val core = Project(id = "core", base = file("core")).settings(commonSettings: _*)
lazy val examples = Project(id = "java-examples", base = file("examples")).settings(commonSettings :_*) dependsOn (core)
lazy val perf = Project(id = "perf", base = file("perf")).settings((Seq(name := "kafka-perf") ++ commonSettings):_*) dependsOn (core)
lazy val contrib = Project(id = "contrib", base = file("contrib")).aggregate(hadoopProducer, hadoopConsumer).settings(commonSettings :_*)
lazy val hadoopProducer = Project(id = "hadoop-producer", base = file("contrib/hadoop-producer")).settings(hadoopSettings ++ commonSettings: _*) dependsOn (core)
lazy val hadoopConsumer = Project(id = "hadoop-consumer", base = file("contrib/hadoop-consumer")).settings(hadoopSettings ++ commonSettings: _*) dependsOn (core)
lazy val rest = Project(id = "rest", base = file("contrib/rest")).settings(commonSettings ++ restSettings: _*) dependsOn (core)
}
|
lakshmi-kannan/kafka-sashafied
|
project/Build.scala
|
Scala
|
apache-2.0
| 9,046 |
package scalariform.lexer
import scala.xml.parsing.TokenTests
import scalariform.lexer.CharConstants.SU
import scalariform.lexer.Tokens._
import scalariform._
class ScalaLexer(
protected val reader: IUnicodeEscapeReader,
protected val forgiveErrors: Boolean = false,
protected val scalaVersion: ScalaVersion = ScalaVersions.DEFAULT
) extends ScalaOnlyLexer with XmlLexer with ModeStack with TokenTests with Iterator[Token] {
import ScalaLexer._
// -- Character buffer ----------------------------------------------------------------------------------------
/**
* Circular buffer of characters yet to be processed (after unicode escaping)
*/
private val charBuffer: Array[Char] = Array.fill(BUFFER_SIZE)(SU)
/**
* A circular buffer of the unicode escape, if any, associated with the corresponding character in charBuffer.
*/
private val unicodeEscapesBuffer: Array[Option[String]] = Array.fill(BUFFER_SIZE)(None)
private var bufferStart = 0
private var bufferEnd = 0
private def charsInBuffer = (BUFFER_SIZE + bufferEnd - bufferStart) & BUFFER_MASK
/**
* Is the current character the result of a unicode escape?
*/
protected def isUnicodeEscape = unicodeEscapesBuffer(bufferStart).isDefined
// ------------------------------------------------------------------------------------------------------------
/**
* Has a Unicode escape occurred somewhere in the current token?
*/
private var seenUnicodeEscape = false
/**
* Start position of this token in the (pre-Unicode escaped) text
*/
private var tokenOffset = 0
/**
* Length so far of this token (before unicode escaping)
*/
private var tokenLength = 0
/**
* The previous character
*/
protected var lastCh: Char = SU
private var tokenText: String = _
private var rawText: String = _
private var stopIndex: Int = 0
protected var builtToken: Token = _
/**
* Number of characters left in the character buffer before the end of file, or -1 if this is yet to be discovered.
*/
private var untilEof = if (reader.isEof) 0 else -1
protected def eof = untilEof == 0
private var eofTokenEmitted = false
/**
* Get the current character.
*/
protected def ch: Char = {
if (bufferEnd == bufferStart)
bufferOneCharacter()
charBuffer(bufferStart)
}
/**
* Get the character at the given lookahead from the current position.
*/
protected def ch(lookahead: Int) = {
for (n ← 1 to lookahead + 1 - charsInBuffer)
bufferOneCharacter()
charBuffer((bufferStart + lookahead) & BUFFER_MASK)
}
private def bufferOneCharacter() {
charBuffer(bufferEnd) = reader.read()
unicodeEscapesBuffer(bufferEnd) = reader.unicodeEscapeOpt
bufferEnd = (bufferEnd + 1) & BUFFER_MASK
if (untilEof == -1 && reader.isEof)
untilEof = charsInBuffer
}
/**
* Accept the current character and advance to the next.
*/
protected def nextChar() {
if (bufferEnd == bufferStart)
bufferOneCharacter()
lastCh = charBuffer(bufferStart)
val unicodeEscapeOpt = unicodeEscapesBuffer(bufferStart)
bufferStart = (bufferStart + 1) & BUFFER_MASK
tokenLength +=
(unicodeEscapeOpt match {
case None ⇒ 1
case Some(s) ⇒ s.length
})
seenUnicodeEscape |= unicodeEscapeOpt.isDefined
if (untilEof > 0)
untilEof -= 1
}
/**
* Mark the end of a token of the given type.
*/
protected def token(tokenType: TokenType) {
// require(tokenType == EOF || tokenLength > 0)
finaliseTokenData()
builtToken = Token(tokenType, tokenText, tokenOffset, rawText)
if (seenUnicodeEscape)
builtToken.containsUnicodeEscape = true
resetTokenData()
}
private def resetTokenData() {
rawText = null
tokenText = null
tokenOffset = stopIndex + 1
tokenLength = 0
seenUnicodeEscape = false
}
private def finaliseTokenData() {
if (tokenText == null) {
stopIndex = math.min(tokenOffset + tokenLength - 1, reader.text.length - 1) // min protects against overeager consumption past EOF
rawText = reader.text.substring(tokenOffset, stopIndex + 1)
tokenText =
if (seenUnicodeEscape)
UnicodeEscapeDecoder.decode(rawText, forgiveErrors)
else
rawText
}
}
private[lexer] def text = reader.text
protected def getTokenText: String = {
finaliseTokenData()
tokenText
}
protected def lookaheadIs(s: String): Boolean =
s.zipWithIndex forall { case (c, index) ⇒ ch(index) == c }
protected def munch(s: String) {
// require(lookaheadIs(s))
for (_ ← 1 to s.length)
nextChar()
}
override def next(): Token = {
if (isXmlMode)
fetchXmlToken()
else if (isScalaMode)
fetchScalaToken()
else if (isStringInterpolationMode) {
fetchStringInterpolationToken()
}
if (builtToken.tokenType == EOF)
eofTokenEmitted = true
builtToken
}
override def hasNext = !eofTokenEmitted
private def fetchStringInterpolationToken() {
if (stringInterpolationMode.interpolationVariable) {
stringInterpolationMode.interpolationVariable = false
do {
nextChar()
} while (ch != SU && Character.isUnicodeIdentifierPart(ch))
val tokenType = Keywords(getTokenText).getOrElse(VARID)
token(tokenType)
} else {
if (stringInterpolationMode.initialSegment) {
stringInterpolationMode.initialSegment = false
if (stringInterpolationMode.multiLine)
munch("\\"\\"\\"")
else
munch("\\"")
}
getStringPart(stringInterpolationMode.multiLine)
}
}
}
object ScalaLexer {
/**
* Convert the given Scala source code into a list of "raw" tokens.
*
* This includes whitespace and comment tokens. No NEWLINE or NEWLINES tokens are inferred. The final token
* will be of type EOF.
*
* @param forgiveErrors -- if true, no exceptions will be thrown when malformed tokens are encountered.
* @param scalaVersion -- the version of Scala to assume as the source type (e.g. "2.9.1"). This can affect the
* interpretation of certain tokens (for example, floating point literals).
*/
@throws(classOf[ScalaLexerException])
def rawTokenise(s: String, forgiveErrors: Boolean = false, scalaVersion: String = ScalaVersions.DEFAULT_VERSION): List[Token] =
createRawLexer(s, forgiveErrors, scalaVersion).toList
/**
* Create a lexer for "raw" tokens.
*
* @see rawTokenise
*/
def createRawLexer(s: String, forgiveErrors: Boolean = false, scalaVersion: String = ScalaVersions.DEFAULT_VERSION): ScalaLexer =
makeRawLexer(s, forgiveErrors, ScalaVersion.parseOrDefault(scalaVersion))
/**
* Convert the given Scala source code into a list of tokens.
*
* NEWLINE or NEWLINES tokens are inferred, and whitespace and comments are absorbed into the token they
* precede. The final token will be of type EOF.
*
* @param forgiveErrors -- if true, no exceptions will be thrown when malformed tokens are encountered.
* @param scalaVersion -- the version of Scala to assume as the source type (e.g. "2.9.1"). This can affect the
* interpretation of certain tokens (for example, floating point literals).
*/
@throws(classOf[ScalaLexerException])
def tokenise(s: String, forgiveErrors: Boolean = false, scalaVersion: String = ScalaVersions.DEFAULT_VERSION): List[Token] = {
val rawLexer = createRawLexer(s, forgiveErrors, scalaVersion)
val lexer = new NewlineInferencer(new WhitespaceAndCommentsGrouper(rawLexer))
lexer.toList
}
private val BUFFER_SIZE = 16 // sufficient lookahead for "</xml:unparsed>" (15 chars)
private val BUFFER_MASK = BUFFER_SIZE - 1
private def makeRawLexer(s: String, forgiveErrors: Boolean, scalaVersion: ScalaVersion): ScalaLexer =
new ScalaLexer(new UnicodeEscapeReader(s, forgiveErrors), forgiveErrors, scalaVersion)
}
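// Usage sketch (added for illustration, not part of the original source file): contrasts the
// two public entry points documented above on a one-line snippet. The printed format is an
// assumption; only rawTokenise, tokenise and Token.tokenType from this project are relied on.
object ScalaLexerUsageSketch {
  def main(args: Array[String]): Unit = {
    val source = "val answer = 42 // the answer"
    // Raw tokens keep whitespace and comment tokens and always end with EOF.
    val raw = ScalaLexer.rawTokenise(source)
    // tokenise absorbs whitespace/comments into following tokens and infers NEWLINE tokens.
    val inferred = ScalaLexer.tokenise(source)
    println("raw:      " + raw.map(_.tokenType).mkString(" "))
    println("inferred: " + inferred.map(_.tokenType).mkString(" "))
  }
}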
|
jkinkead/scalariform
|
scalariform/src/main/scala/scalariform/lexer/ScalaLexer.scala
|
Scala
|
mit
| 7,995 |