code stringlengths 5-1M | repo_name stringlengths 5-109 | path stringlengths 6-208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5-1M |
---|---|---|---|---|---|
trait Expr { type T }
def foo[A](e: Expr { type T = A }) = e match
  case e1: Expr { type T <: Int } =>
    val i: Int = ??? : e1.T
| dotty-staging/dotty | tests/pos/i13820.scala | Scala | apache-2.0 | 132 |
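The dotty test above checks that matching on a refinement type (`Expr { type T <: Int }`) lets the compiler use the bound `e1.T <: Int` inside the case body. A small compile-time sketch, using a hypothetical concrete implementation that is not part of the original test, of a value that satisfies `foo`'s parameter type:

```scala
// Hypothetical companion to the test above: a concrete Expr whose member type T is
// fixed to Int conforms to the refined parameter type Expr { type T = Int }.
object IntExpr extends Expr { type T = Int }

// Type-checks against foo[A]; actually running it would throw, since foo's body uses ???.
def callsFoo: Unit = foo[Int](IntExpr)
```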
/*
* Copyright (c) 2017 sadikovi
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.github.sadikovi.riff
import java.io.ByteArrayInputStream
import java.util.NoSuchElementException
import org.apache.spark.sql.types._
import com.github.sadikovi.riff.io.OutputBuffer
import com.github.sadikovi.testutil.UnitTestSuite
class TypeDescriptionSuite extends UnitTestSuite {
test("assert schema - schema is null") {
val schema: StructType = null
val err = intercept[UnsupportedOperationException] {
new TypeDescription(schema, Array("col1", "col2"))
}
err.getMessage should be (
s"Schema has insufficient number of columns, $schema, expected at least one column")
}
test("assert schema - schema is empty") {
val schema = StructType(Nil)
val err = intercept[UnsupportedOperationException] {
new TypeDescription(schema, Array("col1", "col2"))
}
err.getMessage should be (
s"Schema has insufficient number of columns, $schema, expected at least one column")
}
test("supported types") {
TypeDescription.isSupportedDataType(IntegerType) should be (true)
TypeDescription.isSupportedDataType(LongType) should be (true)
TypeDescription.isSupportedDataType(StringType) should be (true)
TypeDescription.isSupportedDataType(DateType) should be (true)
TypeDescription.isSupportedDataType(TimestampType) should be (true)
TypeDescription.isSupportedDataType(BooleanType) should be (true)
TypeDescription.isSupportedDataType(ShortType) should be (true)
TypeDescription.isSupportedDataType(ByteType) should be (true)
}
test("unsupported types") {
TypeDescription.isSupportedDataType(FloatType) should be (false)
TypeDescription.isSupportedDataType(DoubleType) should be (false)
TypeDescription.isSupportedDataType(NullType) should be (false)
}
test("assert schema - unsupported type") {
val schema = StructType(
StructField("col1", IntegerType) ::
StructField("col2", LongType) ::
StructField("col3", StringType) ::
StructField("col4", NullType) :: Nil)
val err = intercept[UnsupportedOperationException] {
new TypeDescription(schema, Array("col1", "col2"))
}
err.getMessage should be (
"Field StructField(col4,NullType,true) with type NullType is not supported")
}
test("init type description - duplicate column names in indexed fields") {
val schema = StructType(
StructField("col1", IntegerType) ::
StructField("col2", LongType) ::
StructField("col3", StringType) :: Nil)
val err = intercept[IllegalArgumentException] {
new TypeDescription(schema, Array("col1", "col2", "col1"))
}
err.getMessage should be ("Found duplicate index column 'col1' in list [col1, col2, col1]")
}
test("init type description - duplicate indexed fields in schema") {
// col2, which is part of the indexed fields [col1, col2], is duplicated
val schema = StructType(
StructField("col1", IntegerType) ::
StructField("col2", LongType) ::
StructField("col2", IntegerType) ::
StructField("col3", StringType) :: Nil)
val err = intercept[RuntimeException] {
new TypeDescription(schema, Array("col1", "col2"))
}
assert(err.getMessage.contains("Inconsistency of schema with type description"))
}
test("init type description - duplicate data fields in schema") {
// col3, which is not part of the indexed fields [col1, col2], is duplicated
val schema = StructType(
StructField("col1", IntegerType) ::
StructField("col2", LongType) ::
StructField("col3", IntegerType) ::
StructField("col3", StringType) :: Nil)
val err = intercept[RuntimeException] {
new TypeDescription(schema, Array("col1", "col2"))
}
assert(err.getMessage.contains("Inconsistency of schema with type description"))
}
test("init type description - indexed fields as null") {
val schema = StructType(
StructField("col1", IntegerType) ::
StructField("col2", LongType) ::
StructField("col3", StringType) :: Nil)
val td = new TypeDescription(schema, null)
td.size() should be (3)
td.indexFields().isEmpty should be (true)
}
test("init type description - indexed fields as empty array") {
val schema = StructType(
StructField("col1", IntegerType) ::
StructField("col2", LongType) ::
StructField("col3", StringType) :: Nil)
val td = new TypeDescription(schema, Array.empty)
td.size() should be (3)
td.indexFields().isEmpty should be (true)
}
test("init type description with indexed fields - check fields") {
val schema = StructType(
StructField("col1", IntegerType) ::
StructField("col2", LongType) ::
StructField("col3", IntegerType) ::
StructField("col4", StringType) ::
StructField("col5", StringType) :: Nil)
val td = new TypeDescription(schema, Array("col4", "col1"))
td.size() should be (5)
td.indexFields() should be (Array(
new TypeSpec(StructField("col4", StringType), true, 0, 3),
new TypeSpec(StructField("col1", IntegerType), true, 1, 0)
))
td.dataFields() should be (Array(
new TypeSpec(StructField("col2", LongType), false, 2, 1),
new TypeSpec(StructField("col3", IntegerType), false, 3, 2),
new TypeSpec(StructField("col5", StringType), false, 4, 4)
))
td.fields() should be (Array(
new TypeSpec(StructField("col4", StringType), true, 0, 3),
new TypeSpec(StructField("col1", IntegerType), true, 1, 0),
new TypeSpec(StructField("col2", LongType), false, 2, 1),
new TypeSpec(StructField("col3", IntegerType), false, 3, 2),
new TypeSpec(StructField("col5", StringType), false, 4, 4)
))
}
test("init type description with indexed fields only - check fields") {
val schema = StructType(
StructField("col1", IntegerType) ::
StructField("col2", LongType) ::
StructField("col3", StringType) :: Nil)
val td = new TypeDescription(schema, Array("col3", "col1", "col2"))
td.size() should be (3)
td.indexFields() should be (Array(
new TypeSpec(StructField("col3", StringType), true, 0, 2),
new TypeSpec(StructField("col1", IntegerType), true, 1, 0),
new TypeSpec(StructField("col2", LongType), true, 2, 1)
))
td.dataFields() should be (Array.empty)
td.fields() should be (Array(
new TypeSpec(StructField("col3", StringType), true, 0, 2),
new TypeSpec(StructField("col1", IntegerType), true, 1, 0),
new TypeSpec(StructField("col2", LongType), true, 2, 1)
))
}
test("init type description with data fields only - check fields") {
val schema = StructType(
StructField("col1", IntegerType) ::
StructField("col2", LongType) ::
StructField("col3", StringType) :: Nil)
val td = new TypeDescription(schema, null)
td.size() should be (3)
td.indexFields() should be (Array.empty)
td.dataFields() should be (Array(
new TypeSpec(StructField("col1", IntegerType), false, 0, 0),
new TypeSpec(StructField("col2", LongType), false, 1, 1),
new TypeSpec(StructField("col3", StringType), false, 2, 2)
))
td.fields() should be (Array(
new TypeSpec(StructField("col1", IntegerType), false, 0, 0),
new TypeSpec(StructField("col2", LongType), false, 1, 1),
new TypeSpec(StructField("col3", StringType), false, 2, 2)
))
}
test("type description - position by name") {
val schema = StructType(
StructField("col1", IntegerType) ::
StructField("col2", LongType) ::
StructField("col3", IntegerType) ::
StructField("col4", StringType) ::
StructField("col5", StringType) :: Nil)
val td = new TypeDescription(schema, Array("col4", "col1"))
td.position("col4") should be (0)
td.position("col1") should be (1)
td.position("col2") should be (2)
td.position("col3") should be (3)
td.position("col5") should be (4)
val err = intercept[NoSuchElementException] {
td.position("abc")
}
err.getMessage should be ("No such field abc")
}
test("type description - atPosition by ordinal") {
val schema = StructType(
StructField("col1", IntegerType) ::
StructField("col2", LongType) ::
StructField("col3", IntegerType) ::
StructField("col4", StringType) ::
StructField("col5", StringType) :: Nil)
val td = new TypeDescription(schema, Array("col4", "col1"))
td.atPosition(0) should be (new TypeSpec(StructField("col4", StringType), true, 0, 3))
td.atPosition(1) should be (new TypeSpec(StructField("col1", IntegerType), true, 1, 0))
td.atPosition(2) should be (new TypeSpec(StructField("col2", LongType), false, 2, 1))
td.atPosition(3) should be (new TypeSpec(StructField("col3", IntegerType), false, 3, 2))
td.atPosition(4) should be (new TypeSpec(StructField("col5", StringType), false, 4, 4))
}
test("type description - equals 1") {
val schema = StructType(
StructField("col1", IntegerType) ::
StructField("col2", LongType) ::
StructField("col3", IntegerType) :: Nil)
val td1 = new TypeDescription(schema, Array("col1"))
val td2 = new TypeDescription(schema, Array("col1"))
val td3 = new TypeDescription(schema, Array("col1", "col2"))
val td4 = new TypeDescription(schema)
td1.equals(td1) should be (true)
td2.equals(td1) should be (true)
td3.equals(td1) should be (false)
td4.equals(td1) should be (false)
}
test("type description - equals 2") {
val schema = StructType(
StructField("col1", IntegerType) ::
StructField("col2", LongType) ::
StructField("col3", IntegerType) :: Nil)
val td1 = new TypeDescription(schema, Array("col1"))
val td2 = new TypeDescription(schema, Array("col1"))
td2 should be (td1)
}
test("type description - toString") {
val schema = StructType(
StructField("col1", IntegerType) ::
StructField("col2", LongType) ::
StructField("col3", IntegerType) :: Nil)
val td = new TypeDescription(schema, Array("col1"))
td.toString should be ("TypeDescription[" +
"TypeSpec(col1: int, indexed=true, position=0, origPos=0), " +
"TypeSpec(col2: bigint, indexed=false, position=1, origPos=1), " +
"TypeSpec(col3: int, indexed=false, position=2, origPos=2)]")
}
test("write/read type description") {
val schema = StructType(
StructField("col1", IntegerType) ::
StructField("col2", LongType) ::
StructField("col3", IntegerType) ::
StructField("col4", StringType) ::
StructField("col5", StringType) :: Nil)
val td1 = new TypeDescription(schema, Array("col4", "col1"))
val out = new OutputBuffer()
td1.writeTo(out)
val in = new ByteArrayInputStream(out.array())
val td2 = TypeDescription.readFrom(in)
td2.equals(td1) should be (true)
td2.toString should be (td1.toString)
}
test("write/read type description, data fields only") {
val schema = StructType(
StructField("col1", IntegerType) ::
StructField("col2", LongType) ::
StructField("col3", IntegerType) ::
StructField("col4", StringType) ::
StructField("col5", StringType) :: Nil)
val td1 = new TypeDescription(schema)
val out = new OutputBuffer()
td1.writeTo(out)
val in = new ByteArrayInputStream(out.array())
val td2 = TypeDescription.readFrom(in)
td2.equals(td1) should be (true)
td2.toString should be (td1.toString)
}
test("write/read type description, index fields only") {
val schema = StructType(
StructField("col1", IntegerType) ::
StructField("col2", StringType) ::
StructField("col3", StringType) :: Nil)
val td1 = new TypeDescription(schema, Array("col1", "col2", "col3"))
val out = new OutputBuffer()
td1.writeTo(out)
val in = new ByteArrayInputStream(out.array())
val td2 = TypeDescription.readFrom(in)
td2.equals(td1) should be (true)
td2.toString should be (td1.toString)
}
test("convert to struct type") {
val schema = StructType(
StructField("col1", IntegerType) ::
StructField("col2", LongType) ::
StructField("col3", IntegerType) ::
StructField("col4", StringType) ::
StructField("col5", StringType) :: Nil)
val td = new TypeDescription(schema, Array("col4", "col1", "col5"))
td.toStructType() should be (StructType(
StructField("col4", StringType) ::
StructField("col1", IntegerType) ::
StructField("col5", StringType) ::
StructField("col2", LongType) ::
StructField("col3", IntegerType) :: Nil))
}
test("convert to struct type without index fields") {
val schema = StructType(
StructField("col1", IntegerType) ::
StructField("col2", LongType) :: Nil)
val td = new TypeDescription(schema)
td.toStructType() should be (schema)
}
}
| sadikovi/riff | format/src/test/scala/com/github/sadikovi/riff/TypeDescriptionSuite.scala | Scala | mit | 13,951 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.twitter.zipkin.common
import java.nio.ByteBuffer
case class BinaryAnnotation(
key: String,
value: ByteBuffer,
annotationType: AnnotationType,
host: Option[Endpoint]
)
| martindale/zipkin | zipkin-common/src/main/scala/com/twitter/zipkin/common/BinaryAnnotation.scala | Scala | apache-2.0 | 794 |
package repositories.storage.dao.nodes
import com.google.inject.{Inject, Singleton}
import models.storage.nodes.StorageType.RootLoanType
import models.storage.nodes._
import models.storage.nodes.dto.{StorageNodeDto, StorageUnitDto}
import no.uio.musit.MusitResults._
import no.uio.musit.functional.Implicits.futureMonad
import no.uio.musit.functional.MonadTransformers.MusitResultT
import no.uio.musit.models.ObjectTypes.CollectionObjectType
import no.uio.musit.models._
import no.uio.musit.time.Implicits._
import no.uio.musit.time.dateTimeNow
import play.api.Logger
import play.api.db.slick.DatabaseConfigProvider
import repositories.shared.dao.SharedTables
import repositories.storage.dao.StorageTables
import slick.jdbc.GetResult
import scala.concurrent.{ExecutionContext, Future}
// scalastyle:off number.of.methods
/**
 * Data access object for storage nodes. Provides lookups, inserts, updates and tree/path maintenance for storage units.
*/
// TODO: Change public API methods to use MusitResult[A]
@Singleton
class StorageUnitDao @Inject()(
implicit
val dbConfigProvider: DatabaseConfigProvider,
val ec: ExecutionContext
) extends StorageTables
with SharedTables {
import profile.api._
val logger = Logger(classOf[StorageUnitDao])
/**
* Check to see if the node with the provided StorageNodeId exists or not.
*
* @param id
* @return
*/
def exists(mid: MuseumId, id: StorageNodeDatabaseId): Future[MusitResult[Boolean]] = {
val query = storageNodeTable.filter { su =>
su.museumId === mid && su.id === id && su.isDeleted === false
}.exists.result
db.run(query)
.map(found => MusitSuccess(found))
.recover(nonFatal("Checking if node exists caused an exception"))
}
def exists(mid: MuseumId, id: StorageNodeId): Future[MusitResult[Boolean]] = {
val query = storageNodeTable.filter { su =>
su.museumId === mid && su.uuid === id && su.isDeleted === false
}.exists.result
db.run(query)
.map(found => MusitSuccess(found))
.recover(nonFatal("Checking if node exists caused an exception"))
}
/**
* Count of *all* children of this node, irrespective of access rights to
* the children
*/
def numChildren(id: StorageNodeDatabaseId): Future[MusitResult[Int]] = {
db.run(countChildren(id))
.map(MusitSuccess.apply)
.recover(nonFatal(s"An error occurred counting the number of child nodes under $id"))
}
/**
* The number of museum objects directly at the given node.
* To calculate the total number of objects for nodes in the tree,
* use the {{{totalObjectCount}}} method.
*
* @param nodeId StorageNodeId to count objects for.
* @return Future[Int] with the number of objects directly on the provided nodeId
*/
def numObjectsInNode(nodeId: StorageNodeId): Future[MusitResult[Int]] = {
val q = localObjectsTable.filter(_.currentLocationId === nodeId).length
db.run(q.result)
.map(MusitSuccess.apply)
.recover(nonFatal(s"An error occurred counting the number of direct objects in $nodeId"))
}
/**
 * Look up the storage unit with the given database id for the given museum.
*/
def getByDatabaseId(
mid: MuseumId,
id: StorageNodeDatabaseId
): Future[MusitResult[Option[StorageUnit]]] = {
val query = getNonRootByDatabaseIdAction(mid, id)
db.run(query)
.map(res => MusitSuccess(res.map(StorageNodeDto.toStorageUnit)))
.recover(nonFatal(s"Unable to get storage unit with museumId $mid"))
}
def getById(
mid: MuseumId,
id: StorageNodeId
): Future[MusitResult[Option[StorageUnit]]] = {
val query = getNonRootByIdAction(mid, id)
db.run(query)
.map(res => MusitSuccess(res.map(StorageNodeDto.toStorageUnit)))
.recover(nonFatal(s"Unable to get storage unit with museumId $mid"))
}
/**
 * Look up the node with the given database id and return it as a GenericStorageNode.
*/
def getNodeByDatabaseId(
mid: MuseumId,
id: StorageNodeDatabaseId
): Future[MusitResult[Option[GenericStorageNode]]] = {
val query = getNodeByDatabaseIdAction(mid, id)
db.run(query)
.map(_.map(StorageNodeDto.toGenericStorageNode))
.map(MusitSuccess.apply)
.recover(nonFatal(s"Unable to get nodes by id for museumId $mid"))
}
def getNodeById(
mid: MuseumId,
id: StorageNodeId
): Future[MusitResult[Option[GenericStorageNode]]] = {
val query = getNodeByIdAction(mid, id)
db.run(query)
.map(_.map(StorageNodeDto.toGenericStorageNode))
.map(MusitSuccess.apply)
.recover(nonFatal(s"Unable to get nodes by id for museumId $mid"))
}
def getParentsForNodes(
mid: MuseumId,
ids: Seq[StorageNodeId]
): Future[MusitResult[Map[StorageNodeId, Option[StorageNodeId]]]] = {
val q1 = storageNodeTable.filter(n => n.museumId === mid && (n.uuid inSet ids))
val query = for {
(child, parent) <- q1 joinLeft storageNodeTable on (_.isPartOf === _.id)
} yield {
// It's safe to do a get on child.uuid here, because it wouldn't have been
// found if it wasn't set. Besides it's _really_ a required column.
child.uuid -> parent.map(_.uuid)
}
db.run(query.result)
.map(res => MusitSuccess(res.map(r => r._1 -> r._2).toMap))
.recover(nonFatal("Unexpected error occurred fetching parent nodes."))
}
/**
* Fetches the node data for provided database ids
*
* @param mid
* @param ids
* @return
*/
def getNodesByDatabaseIds(
mid: MuseumId,
ids: Seq[StorageNodeDatabaseId]
): Future[MusitResult[Seq[GenericStorageNode]]] = {
val query = storageNodeTable.filter { sn =>
sn.museumId === mid &&
sn.isDeleted === false &&
(sn.id inSet ids)
}.result
db.run(query)
.map(_.map(StorageNodeDto.toGenericStorageNode))
.map(MusitSuccess.apply)
.recover(nonFatal(s"Unable to get nodes by id for museumId $mid"))
}
def getNodesByIds(
mid: MuseumId,
ids: Seq[StorageNodeId]
): Future[MusitResult[Seq[GenericStorageNode]]] = {
val query = storageNodeTable.filter { sn =>
sn.museumId === mid &&
sn.isDeleted === false &&
(sn.uuid inSet ids)
}.result
db.run(query)
.map(_.map(StorageNodeDto.toGenericStorageNode))
.map(MusitSuccess.apply)
.recover(nonFatal(s"Unable to get nodes by id for museumId $mid"))
}
/**
 * Resolve the storage types for the node ids in the given path, optionally limited to the first `limit` ids, ordered by their position in the path.
*/
def getStorageTypesInPath(
mid: MuseumId,
path: NodePath,
limit: Option[Int] = None
): Future[MusitResult[Seq[(StorageNodeDatabaseId, StorageType)]]] = {
val ids = limit.map(l => path.asIdSeq.take(l)).getOrElse(path.asIdSeq)
val query = storageNodeTable.filter { sn =>
sn.museumId === mid &&
sn.id.inSet(ids)
}.map(res => (res.id, res.storageType, res.path)).sortBy(_._3.asc)
db.run(query.result)
.map(_.map(tuple => tuple._1 -> tuple._2))
.map(MusitSuccess.apply)
.recover(nonFatal(s"Unable to get nodes by id for museumId $mid"))
}
/**
* Find all nodes that are of type Root.
*
* @return a Future collection of Root nodes.
*/
def findRootNodes(mid: MuseumId): Future[MusitResult[Seq[RootNode]]] = {
val query = storageNodeTable.filter { root =>
root.museumId === mid &&
root.isDeleted === false &&
root.isPartOf.isEmpty &&
(root.storageType === rootNodeType || root.storageType === rootLoanType)
}.result
db.run(query)
.map(_.map(StorageNodeDto.toRootNode))
.map(MusitSuccess.apply)
.recover(nonFatal(s"Unable to find root nodes for museumId $mid"))
}
/**
* find the Root node with the given StorageNodeId.
*
* @param id StorageNodeId for the Root node.
* @return An Option that contains the Root node if it was found.
*/
def findRootNode(id: StorageNodeDatabaseId): Future[MusitResult[Option[RootNode]]] = {
val query = storageNodeTable.filter { root =>
root.id === id &&
root.isDeleted === false &&
(root.storageType === rootNodeType || root.storageType === rootLoanType)
}.result.headOption
db.run(query).map(mdto => MusitSuccess(mdto.map(StorageNodeDto.toRootNode)))
}
def findRootLoanNodes(
museumId: MuseumId
): Future[MusitResult[Seq[StorageNodeDatabaseId]]] = {
val tpe: StorageType = StorageType.RootLoanType
val query = storageNodeTable.filter { n =>
n.museumId === museumId && n.storageType === tpe
}.map(_.id)
db.run(query.result)
.map(nodes => MusitSuccess(nodes))
.recover(nonFatal(s"Error occurred getting RootLoan nodes for museum $museumId"))
}
private def sortedChildrenQuery(
mid: MuseumId,
id: StorageNodeId,
page: Int,
limit: Int
)(implicit gr: GetResult[StorageUnitDto]) = {
val offset = (page - 1) * limit
sql"""
SELECT sn.STORAGE_NODE_ID, sn.STORAGE_NODE_UUID, sn.STORAGE_NODE_NAME,
sn.AREA, sn.AREA_TO, sn.IS_PART_OF, sn.HEIGHT, sn.HEIGHT_TO,
sn.GROUP_READ, sn.GROUP_WRITE, sn.OLD_BARCODE, sn.NODE_PATH,
sn.IS_DELETED, sn.STORAGE_TYPE, sn.MUSEUM_ID, sn.UPDATED_BY, sn.UPDATED_DATE
FROM
(
SELECT n.STORAGE_NODE_ID FROM MUSARK_STORAGE.STORAGE_NODE n
WHERE n.STORAGE_NODE_UUID=${id.asString}
) p,
MUSARK_STORAGE.STORAGE_NODE sn
WHERE sn.IS_PART_OF=p.STORAGE_NODE_ID
AND sn.MUSEUM_ID=${mid.underlying}
AND sn.IS_DELETED=0
ORDER BY CASE
WHEN sn.STORAGE_TYPE='Organisation' THEN '01'
WHEN sn.STORAGE_TYPE='Building' THEN '02'
WHEN sn.STORAGE_TYPE='Room' THEN '03'
WHEN sn.STORAGE_TYPE='StorageUnit' THEN '04'
ELSE sn.STORAGE_TYPE END ASC, lower(sn.STORAGE_NODE_NAME)
OFFSET ${offset} ROWS FETCH NEXT ${limit} ROWS ONLY
""".as[StorageUnitDto]
}
private def totalChildCountQuery(mid: MuseumId, id: StorageNodeId) = {
sql"""
SELECT COUNT(1) FROM
(
SELECT n.STORAGE_NODE_ID FROM MUSARK_STORAGE.STORAGE_NODE n
WHERE n.STORAGE_NODE_UUID=${id.asString}
) p,
MUSARK_STORAGE.STORAGE_NODE sn
WHERE sn.MUSEUM_ID=${mid.underlying}
AND sn.IS_PART_OF=p.STORAGE_NODE_ID
AND sn.IS_DELETED=0
""".as[Int].head
}
/**
* Retrieve a page result of the children for the given {{{StorageNodeId}}}
*
* @param mid
* @param id
* @param page
* @param limit
* @return
*/
def getChildren(
mid: MuseumId,
id: StorageNodeId,
page: Int,
limit: Int
): Future[MusitResult[PagedResult[GenericStorageNode]]] = {
implicit val tupleToResult = StorageNodeDto.storageUnitTupleGetResult
val totalChildrenQuery = totalChildCountQuery(mid, id)
val sortedQuery = sortedChildrenQuery(mid, id, page, limit)
val matches: Future[MusitResult[Vector[GenericStorageNode]]] = db
.run(sortedQuery)
.map(_.map(StorageNodeDto.toGenericStorageNode))
.map(MusitSuccess.apply)
.recover(nonFatal(s"Unexpected error when fetching children"))
val total: Future[MusitResult[Int]] = db
.run(totalChildrenQuery)
.map(MusitSuccess.apply)
.recover(nonFatal(s"Unexpected error when fetching total children"))
(for {
mat <- MusitResultT(matches)
tot <- MusitResultT(total)
} yield PagedResult(tot, mat)).value
}
def listAllChildrenFor(
museumId: MuseumId,
ids: Seq[StorageNodeDatabaseId]
): Future[MusitResult[Seq[(StorageNodeDatabaseId, String)]]] = {
val q1 = (likePath: String) =>
storageNodeTable.filter { n =>
n.museumId === museumId && (SimpleLiteral[String]("NODE_PATH") like likePath)
}
val query = ids
.map(id => s",${id.underlying},%")
.map(q1)
.reduce((query, queryPart) => query union queryPart)
.map(n => (n.id, n.name))
.sortBy(_._2.asc)
db.run(query.result)
.map { res =>
MusitSuccess(
res.map(r => (r._1, r._2))
)
}
.recover(
nonFatal(
s"Error occurred reading children for RootLoan nodes ${ids.mkString(", ")}"
)
)
}
/**
* Get the StorageType for the given StorageNodeDatabaseId
*/
def getStorageTypeFor(
mid: MuseumId,
id: StorageNodeDatabaseId
): Future[MusitResult[Option[StorageType]]] = {
val query = storageNodeTable.filter { node =>
node.id === id && node.isDeleted === false
}.map(_.storageType).result.headOption
db.run(query).map(MusitSuccess.apply)
}
/**
* Get the StorageNodeDatabaseId and StorageType for the given StorageNodeId
*/
def getStorageTypeFor(
mid: MuseumId,
uuid: StorageNodeId
): Future[MusitResult[Option[(StorageNodeDatabaseId, StorageType)]]] = {
val query = storageNodeTable.filter { n =>
n.museumId === mid && n.uuid === uuid && n.isDeleted === false
}.map(n => n.id -> n.storageType).result.headOption
db.run(query).map(MusitSuccess.apply)
}
/**
* Get the StorageNodeDatabaseId and StorageType for the given old barcode.
*/
def getStorageTypeFor(
mid: MuseumId,
oldBarcode: Long
): Future[MusitResult[Option[(StorageNodeDatabaseId, StorageType)]]] = {
val query = storageNodeTable.filter { n =>
n.museumId === mid && n.oldBarcode === oldBarcode && n.isDeleted === false
}.map(n => n.id -> n.storageType).result.headOption
db.run(query).map(MusitSuccess.apply)
}
/**
 * Insert a new storage unit node for the given museum.
*/
def insert(
mid: MuseumId,
storageUnit: StorageUnit
): Future[MusitResult[StorageNodeDatabaseId]] = {
val dto = StorageNodeDto.fromStorageUnit(mid, storageUnit)
db.run(insertNodeAction(dto))
.map(MusitSuccess.apply)
.recover(nonFatal(s"Unable to insert storage unit for museumId: $mid"))
}
/**
 * Insert a new root node for the given museum.
*/
def insertRoot(
mid: MuseumId,
root: RootNode
): Future[MusitResult[StorageNodeDatabaseId]] = {
logger.debug("Inserting root node...")
val dto = StorageNodeDto.fromRootNode(mid, root).asStorageUnitDto(mid)
db.run(insertNodeAction(dto))
.map(MusitSuccess.apply)
.recover(nonFatal("Unable to insert root"))
}
/**
* Set the path for the Root with the given StorageNodeId.
*
* @param id StorageNodeId of the Root node.
* @param path NodePath to set
* @return An Option containing the updated Root node.
*/
def setRootPath(
id: StorageNodeDatabaseId,
path: NodePath
): Future[MusitResult[Unit]] = {
logger.debug(s"Updating path to $path for root node $id")
db.run(updatePathAction(id, path)).map {
case res: Int if res == 1 =>
MusitSuccess(())
case res: Int =>
val msg = wrongNumUpdatedRows(id, res)
logger.warn(msg)
MusitDbError(msg)
}
}
/**
* Updates the path for all nodes that starts with the "oldPath".
*
* @param id the StorageNodeId to update
* @param path the NodePath to set
* @return MusitResult[Unit]
*/
def setPath(id: StorageNodeDatabaseId, path: NodePath): Future[MusitResult[Unit]] = {
db.run(updatePathAction(id, path)).map {
case res: Int if res == 1 =>
MusitSuccess(())
case res: Int =>
val msg = wrongNumUpdatedRows(id, res)
logger.warn(msg)
MusitDbError(msg)
}
}
/**
* Updates all paths for the subtree of the given StorageNodeId
*
* @param id StorageNodeId
* @param oldPath NodePath representing the old path
* @param newPath NodePath representing the new path
* @return The number of paths updated.
*/
def updateSubTreePath(
id: StorageNodeDatabaseId,
oldPath: NodePath,
newPath: NodePath
): Future[MusitResult[Int]] = {
db.run(updatePathsAction(oldPath, newPath).transactionally)
.map {
case res: Int if res != 0 =>
logger.debug(s"Successfully updated path for $res nodes")
MusitSuccess(res)
case _ =>
val msg = s"Did not update any paths starting with $oldPath"
logger.error(msg)
MusitInternalError(msg)
}
.recover(nonFatal(s"Unexpected error when updating paths for unit $id sub-tree"))
}
def batchUpdateLocation(
nodes: Seq[StorageNode],
newParent: StorageNode
): Future[MusitResult[Unit]] = {
val a1 = DBIO.sequence(nodes.map(n => updatePartOfAction(n.id.get, newParent.id)))
val a2 = DBIO.sequence {
nodes.map(n => updatePathsAction(n.path, newParent.path.appendChild(n.id.get)))
}
db.run(a2.andThen(a1).transactionally)
.map { _ =>
logger.debug(s"Successfully updated node locations for ${nodes.size} nodes")
MusitSuccess(())
}
.recover(
nonFatal(s"Unexpected error when updating location for ${nodes.size} nodes.")
)
}
/**
 * Update the storage unit with the given id. Returns Some(1) if exactly one row was updated, None if no rows matched.
*/
def update(
mid: MuseumId,
id: StorageNodeId,
storageUnit: StorageUnit
): Future[MusitResult[Option[Int]]] = {
val dto = StorageNodeDto.fromStorageUnit(mid, storageUnit, uuid = Some(id))
db.run(updateNodeAction(mid, id, dto)).map {
case res: Int if res == 1 => MusitSuccess(Some(res))
case res: Int if res == 0 => MusitSuccess(None)
case res: Int =>
val msg = wrongNumUpdatedRows(id, res)
logger.warn(msg)
MusitDbError(msg)
}
}
/**
* Find and return the NodePath for the given StorageNodeId.
*
* @param id StorageNodeId to get the NodePath for
* @return NodePath
*/
def getPathByDatabaseId(
mid: MuseumId,
id: StorageNodeDatabaseId
): Future[MusitResult[Option[NodePath]]] = {
db.run(getPathByDatabaseIdAction(mid, id))
.map(MusitSuccess.apply)
.recover(nonFatal(s"Unable to get path for museumId $mid and storage node $id"))
}
def getPathById(
mid: MuseumId,
id: StorageNodeId
): Future[MusitResult[Option[NodePath]]] = {
db.run(getPathByIdAction(mid, id))
.map(MusitSuccess.apply)
.recover(nonFatal(s"Unable to get path for museumId $mid and storage node $id"))
}
/**
 * Mark the node with the given id as deleted, recording who deleted it and when.
*/
def markAsDeleted(
doneBy: ActorId,
mid: MuseumId,
id: StorageNodeId
): Future[MusitResult[Int]] = {
val query = storageNodeTable.filter { su =>
su.uuid === id && su.isDeleted === false && su.museumId === mid
}.map { del =>
(del.isDeleted, del.updatedBy, del.updatedDate)
}.update((true, Some(doneBy), Some(dateTimeNow)))
db.run(query).map {
case res: Int if res == 1 =>
MusitSuccess(res)
case res: Int =>
val msg = wrongNumUpdatedRows(id, res)
logger.warn(msg)
MusitDbError(msg)
}
}
/**
 * Update the parent (isPartOf) reference of the given node.
*/
def updatePartOf(
id: StorageNodeDatabaseId,
partOf: Option[StorageNodeDatabaseId]
): Future[MusitResult[Int]] = {
val query = updatePartOfAction(id, partOf)
db.run(query.transactionally).map {
case res: Int if res == 1 =>
MusitSuccess(res)
case res: Int =>
val msg = wrongNumUpdatedRows(id, res)
logger.warn(msg)
MusitDbError(msg)
}
}
/**
* Given the provided NodePath, fetch all the associated names for each of
* the ID's in the path.
*
* @param nodePath NodePath to find names for
* @return A Seq[NamedPathElement]
*/
def namesForPath(nodePath: NodePath): Future[MusitResult[Seq[NamedPathElement]]] = {
db.run(namesForPathAction(nodePath))
.map(MusitSuccess.apply)
.recover(nonFatal(s"Unable to get node paths for $nodePath"))
}
/**
*
* @param mid
* @param searchString
* @param page
* @param limit
* @return
*/
def getStorageNodeByName(
mid: MuseumId,
searchString: String,
page: Int,
limit: Int
): Future[MusitResult[Seq[GenericStorageNode]]] = {
if (searchString.length > 2) {
val query = getStorageNodeByNameAction(mid, searchString, page, limit)
db.run(query)
.map(_.map(StorageNodeDto.toGenericStorageNode))
.map(MusitSuccess.apply)
.recover(
nonFatal(s"Unable to get node by name '$searchString' for museum: $mid")
)
} else {
Future.successful(MusitSuccess(Seq.empty))
}
}
def currentLocation(
mid: MuseumId,
objectId: ObjectUUID
): Future[MusitResult[Option[(StorageNodeId, NodePath)]]] = {
val findLocalObjectAction = localObjectsTable.filter { lo =>
lo.museumId === mid &&
lo.objectUuid === objectId &&
lo.objectType === CollectionObjectType.name
}.map(_.currentLocationId).result.headOption
val findPathAction = (maybeId: Option[StorageNodeId]) =>
maybeId.map { nodeId =>
storageNodeTable.filter(_.uuid === nodeId).map(_.path).result.headOption
}.getOrElse(DBIO.successful(None))
val query = for {
maybeNodeId <- findLocalObjectAction
maybePath <- findPathAction(maybeNodeId)
} yield maybeNodeId.flatMap(nid => maybePath.map(p => (nid, p)))
db.run(query)
.map(MusitSuccess.apply)
.recover(
nonFatal(s"Error occurred while getting current location for object $objectId")
)
}
}
// scalastyle:on number.of.methods
| MUSIT-Norway/musit | service_backend/app/repositories/storage/dao/nodes/StorageUnitDao.scala | Scala | gpl-2.0 | 21,104 |
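Several methods in the DAO above, for example `getChildren`, run two independent database futures and combine them with `MusitResultT`. A minimal standalone sketch of that combination pattern, assuming only the behaviour visible above (wrapping a `Future[MusitResult[A]]` and unwrapping it with `.value`); it is not taken from the MUSIT code base:

```scala
import no.uio.musit.MusitResults._
import no.uio.musit.functional.Implicits.futureMonad
import no.uio.musit.functional.MonadTransformers.MusitResultT

import scala.concurrent.{ ExecutionContext, Future }

// Combine two independent Future[MusitResult[_]] values; a failure in either short-circuits.
def pageWithTotal(
    rows: Future[MusitResult[Seq[String]]],
    total: Future[MusitResult[Int]]
)(implicit ec: ExecutionContext): Future[MusitResult[(Int, Seq[String])]] = {
  (for {
    r <- MusitResultT(rows)  // lift Future[MusitResult[Seq[String]]]
    t <- MusitResultT(total) // lift Future[MusitResult[Int]]
  } yield (t, r)).value      // back to Future[MusitResult[(Int, Seq[String])]]
}
```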
/*
* Copyright © 2015 Lukas Rosenthaler, Benjamin Geer, Ivan Subotic,
* Tobias Schweizer, André Kilchenmann, and Sepideh Alassi.
* This file is part of Knora.
* Knora is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* Knora is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
* You should have received a copy of the GNU Affero General Public
* License along with Knora. If not, see <http://www.gnu.org/licenses/>.
*/
package org.knora.webapi.e2e.v1
import akka.http.scaladsl.model.{ContentTypes, HttpEntity, StatusCodes}
import com.typesafe.config.ConfigFactory
import org.knora.webapi.messages.v1.store.triplestoremessages.{RdfDataObject, TriplestoreJsonProtocol}
import org.knora.webapi.{E2ESpec, StartupFlags}
import spray.json._
import scala.concurrent.duration._
object StoreRouteV1E2ESpec {
val config = ConfigFactory.parseString(
"""
akka.loglevel = "DEBUG"
akka.stdout-loglevel = "DEBUG"
""".stripMargin)
}
/**
* End-to-End (E2E) test specification for testing the 'v1/store' route.
*
* This spec tests the 'v1/store' route.
*/
class StoreRouteV1E2ESpec extends E2ESpec(StoreRouteV1E2ESpec.config) with TriplestoreJsonProtocol {
/**
* The marshaling to Json is done automatically by spray, hence the import of the 'TriplestoreJsonProtocol'.
* The Json which spray generates looks like this:
*
* [
* {"path": "_test_data/all_data/incunabula-data.ttl", "name": "http://www.knora.org/data/incunabula"},
* {"path": "_test_data/demo_data/images-demo-data.ttl", "name": "http://www.knora.org/data/images"}
* ]
*
* and could have been supplied to the post request instead of the scala object.
*/
private val rdfDataObjects: List[RdfDataObject] = List(
RdfDataObject(path = "_test_data/all_data/incunabula-data.ttl", name = "http://www.knora.org/data/incunabula"),
RdfDataObject(path = "_test_data/demo_data/images-demo-data.ttl", name = "http://www.knora.org/data/images")
)
"The ResetTriplestoreContent Route ('v1/store/ResetTriplestoreContent')" should {
"succeed with resetting if startup flag is set" in {
/**
* This test corresponds to the following curl call:
* curl -H "Content-Type: application/json" -X POST -d '[{"path":"../knora-ontologies/knora-base.ttl","name":"http://www.knora.org/ontology/knora-base"}]' http://localhost:3333/v1/store/ResetTriplestoreContent
*/
StartupFlags.allowResetTriplestoreContentOperationOverHTTP send true
log.debug(s"StartupFlags.allowResetTriplestoreContentOperationOverHTTP = ${StartupFlags.allowResetTriplestoreContentOperationOverHTTP.get}")
val request = Post(baseApiUrl + "/v1/store/ResetTriplestoreContent", HttpEntity(ContentTypes.`application/json`, rdfDataObjects.toJson.compactPrint))
val response = singleAwaitingRequest(request, 300.seconds)
log.debug("==>> " + response.toString)
assert(response.status === StatusCodes.OK)
}
"fail with resetting if startup flag is not set" in {
StartupFlags.allowResetTriplestoreContentOperationOverHTTP send false
//log.debug("==>> before")
val request = Post(baseApiUrl + "/v1/store/ResetTriplestoreContent", HttpEntity(ContentTypes.`application/json`, rdfDataObjects.toJson.compactPrint))
val response = singleAwaitingRequest(request, 300.seconds)
//log.debug("==>> " + response.toString)
assert(response.status === StatusCodes.Forbidden)
}
}
}
| nie-ine/Knora | webapi/src/test/scala/org/knora/webapi/e2e/v1/StoreRouteV1E2ESpec.scala | Scala | agpl-3.0 | 4,005 |
package net.dinkla.lbnn.spark
import com.esotericsoftware.kryo.Kryo
import net.dinkla.lbnn.utils.TextDate
import org.apache.spark.serializer.KryoRegistrator
/**
* Created by dinkla on 19/06/15.
*/
class CustomKryoRegistrator extends KryoRegistrator {
override def registerClasses(kryo: Kryo) {
kryo.register(classOf[TextDate])
kryo.register(classOf[CheckIn])
}
}
| jdinkla/location-based-nearest-neighbours | src/main/scala/net/dinkla/lbnn/spark/CustomKryoRegistrator.scala | Scala | apache-2.0 | 381 |
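The registrator above only declares which classes Kryo should know about. A short sketch of the wiring it assumes, using standard Spark configuration keys rather than code from this repository:

```scala
import org.apache.spark.SparkConf

// Standard Spark Kryo settings; the registrator key points at the class defined above.
val conf = new SparkConf()
  .setAppName("lbnn")
  .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
  .set("spark.kryo.registrator", "net.dinkla.lbnn.spark.CustomKryoRegistrator")
```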
package io.getquill.context.cassandra
import io.getquill.{ CassandraMonixContext, Literal }
package object monix {
lazy val testMonixDB = new CassandraMonixContext(Literal, "testStreamDB") with CassandraTestEntities
}
| getquill/quill | quill-cassandra-monix/src/test/scala/io/getquill/context/cassandra/monix/package.scala | Scala | apache-2.0 | 221 |
/*
* Copyright (c) 2015, Nightfall Group
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package moe.nightfall.instrumentality.animations
import moe.nightfall.instrumentality.PoseBoneTransform
import scala.collection.mutable.HashMap
/**
* Created on 28/07/15.
*/
class PoseAnimation extends Animation {
val hashMap = new HashMap[String, PoseBoneTransform]
def this(from: PoseAnimation) {
this()
from.hashMap.toSeq.foreach { case (key, value) =>
hashMap(key) = new PoseBoneTransform(value)
}
}
def this(a: PoseAnimation, b: PoseAnimation, i: Float) {
this()
// Handle all cases where A contains the key
a.hashMap.toSeq.foreach { case (key, value) =>
hashMap(key) = new PoseBoneTransform(value, if (b.hashMap.contains(key)) b.hashMap(key) else null, i)
}
// Handle the remaining cases where only B contains the key
b.hashMap.toSeq.filter((t: (String, PoseBoneTransform)) => !a.hashMap.contains(t._1)).foreach { case (key, value) =>
hashMap(key) = new PoseBoneTransform(null, value, i)
}
}
override def getBoneTransform(boneName: String) = hashMap get boneName.toLowerCase
}
| Nightfall/Instrumentality | core/src/main/scala/moe/nightfall/instrumentality/animations/PoseAnimation.scala | Scala | bsd-2-clause | 2,458 |
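A brief usage sketch of the interpolating constructor above, which blends two poses bone by bone. The poses and weight here are hypothetical, and filling the bone maps with `PoseBoneTransform` values is left out:

```scala
// Hypothetical poses whose hashMap entries are assumed to be populated elsewhere.
val idle: PoseAnimation = new PoseAnimation()
val wave: PoseAnimation = new PoseAnimation()

// Halfway blend (i = 0.5f): bones present in both poses are interpolated pairwise;
// bones present in only one pose are blended against null, as in the constructor above.
val halfway = new PoseAnimation(idle, wave, 0.5f)
```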
package com.codingkapoor.codingbat
import org.scalatest.{FlatSpec, Matchers}
class RecursionIISpec extends FlatSpec with Matchers {
"""Given an array of ints, method "groupSum"""" should """evaluate if it is possible to choose a group of some of the ints, such that the group sums to the given target?""" in {
RecursionII.groupSum(0, List(2, 4, 8), 10) should equal(true)
RecursionII.groupSum(0, List(2, 4, 8), 14) should equal(true)
RecursionII.groupSum(0, List(2, 4, 8), 9) should equal(false)
RecursionII.groupSum2(0, List(2, 4, 8), 10) should equal(true)
RecursionII.groupSum2(0, List(2, 4, 8), 14) should equal(true)
RecursionII.groupSum2(0, List(2, 4, 8), 9) should equal(false)
}
"""Given an array of ints, method "groupSum6"""" should """evaluate if it is possible to choose a group of some of the ints, beginning at the start index, such that the group sums to the given target? However, with the additional constraint that all 6's must be chosen.""" in {
RecursionII.groupSum6(0, List(5, 6, 2), 8) should equal(true)
RecursionII.groupSum6(0, List(5, 6, 2), 9) should equal(false)
RecursionII.groupSum6(0, List(5, 6, 2), 7) should equal(false)
}
"""Given an array of ints, method "groupNoAdj"""" should """evaluate if it is possible to choose a group of some of the ints, such that the group sums to the given target with this additional constraint: If a value in the array is chosen to be in the group, the value immediately following it in the array must not be chosen.""" in {
RecursionII.groupNoAdj(0, List(2, 5, 10, 4), 12) should equal(true)
RecursionII.groupNoAdj(0, List(2, 5, 10, 4), 14) should equal(false)
RecursionII.groupNoAdj(0, List(2, 5, 10, 4), 7) should equal(false)
}
"""Given an array of ints, method "groupSum5"""" should """evaluate if it is possible to choose a group of some of the ints, such that the group sums to the given target with these additional constraints: all multiples of 5 in the array must be included in the group. If the value immediately following a multiple of 5 is 1, it must not be chosen.""" in {
RecursionII.groupSum5(0, List(2, 5, 10, 4), 19) should equal(true)
RecursionII.groupSum5(0, List(2, 5, 10, 4), 17) should equal(true)
RecursionII.groupSum5(0, List(2, 5, 10, 4), 12) should equal(false)
}
"""Given an array of ints, method "groupSumClump"""" should """evaluate if it is possible to choose a group of some of the ints, such that the group sums to the given target, with this additional constraint: if there are numbers in the array that are adjacent and the identical value, they must either all be chosen, or none of them chosen.""" in {
RecursionII.groupSumClump(0, List(1, 2, 3, 4, 4, 4, 6, 6, 7, 6, 4, 1), 14) should equal(true)
RecursionII.groupSumClump(0, List(2, 4, 8), 10) should equal(true)
RecursionII.groupSumClump(0, List(1, 2, 4, 8, 1), 14) should equal(true)
RecursionII.groupSumClump(0, List(2, 4, 4, 8), 14) should equal(false)
}
"""Given an array of ints, method "splitArray"""" should """evaluate if it is possible to divide the ints into two groups, so that the sums of the two groups are the same.""" in {
RecursionII.splitArray(List(2, 2)) should equal(true)
RecursionII.splitArray(List(2, 3)) should equal(false)
RecursionII.splitArray(List(2, 3, 5)) should equal(true)
}
"""Given an array of ints, method "splitOdd"""" should """evaluate if it is possible to divide the ints into two groups, so that the sum of one group is a multiple of 10, and the sum of the other group is odd. Every int must be in one group or the other.""" in {
RecursionII.splitOdd10(List(5, 5, 5)) should equal(true)
RecursionII.splitOdd10(List(5, 5, 6)) should equal(false)
RecursionII.splitOdd10(List(5, 5, 6, 1)) should equal(true)
}
"""Given an array of ints, method "split53"""" should """evaluate if it is possible to divide the ints into two groups, so that the sum of the two groups is the same, with these constraints: all the values that are multiple of 5 must be in one group, and all the values that are a multiple of 3 (and not a multiple of 5) must be in the other.""" in {
RecursionII.split53(List(1, 1)) should equal(true)
RecursionII.split53(List(1, 1, 1)) should equal(false)
RecursionII.split53(List(2, 4, 2)) should equal(true)
}
}
| codingkapoor/scala-coding-bat | src/test/scala/com/codingkapoor/codingbat/RecursionIISpec.scala | Scala | mit | 4,371 |
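The first spec above describes `groupSum` in prose: at each index the element is either included in the running target or skipped. A minimal sketch of one possible implementation (assumed here, not the repository's actual `RecursionII.groupSum`):

```scala
// Choose-or-skip recursion: succeed when the remaining target reaches exactly zero.
def groupSum(start: Int, nums: List[Int], target: Int): Boolean =
  if (start >= nums.size) target == 0
  else groupSum(start + 1, nums, target - nums(start)) || // include nums(start)
       groupSum(start + 1, nums, target)                  // skip nums(start)

// groupSum(0, List(2, 4, 8), 10) == true; groupSum(0, List(2, 4, 8), 9) == false
```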
package org.nisshiee.toban.controller.api
import org.specs2._
import play.api.test._
import play.api.test.Helpers._
import play.api.Play.current
import play.api.db._
import play.api.libs.json._, Json._
import org.nisshiee.toban.model._
import org.nisshiee.toban.test.TestHelper
class MemberApiControllerTest extends Specification with TestHelper { def is =
"getAll" ^
"when no Members are registered, an empty JSON array is returned" ! e1^
"when Members are registered, all registered Members are returned as a JSON array ordered by id" ! e2^
end
def e1 = {
val result = runningEmptyApplication {
MemberController.getAll("")(FakeRequest())
}
val resultJs = parse(contentAsString(result))
(status(result) must equalTo(OK)) and
(header("Access-Control-Allow-Origin", result) must equalTo(Some("*"))) and
(resultJs.asOpt[List[Member]] must beSome.which(_.isEmpty))
}
def e2 = runningEmptyApplication {
val expectedOpt = DB.withConnection { implicit c =>
for {
t1 <- Member.create("testmember1")
t2 <- Member.create("testmember2")
} yield List(t1, t2)
}
val result = MemberController.getAll("")(FakeRequest())
val resultJs = parse(contentAsString(result))
(expectedOpt must beSome.which(_.size == 2)) and
(status(result) must equalTo(OK)) and
(header("Access-Control-Allow-Origin", result) must equalTo(Some("*"))) and
(resultJs.asOpt[List[Member]] must equalTo(expectedOpt))
}
}
| nisshiee/to-ban | test/controllers/api/MemberApiControllerTest.scala | Scala | mit | 1,675 |
package com.aristocrat.mandrill.requests.Templates
import com.aristocrat.mandrill.requests.MandrillRequest
case class Update(
key: String,
name: String,
fromEmail: String,
fromName: String,
subject: String,
code: String,
text: String,
publish: Boolean,
labels: Seq[String]) extends MandrillRequest
| aristocratic/mandrill | src/main/scala/com/aristocrat/mandrill/requests/Templates/Update.scala | Scala | mit | 336 |
package net.sansa_stack.rdf.spark.model.ds
import net.sansa_stack.rdf.spark.utils._
import org.apache.jena.graph.{ Node, Triple }
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
import org.apache.spark.sql.types.{ StringType, StructField, StructType }
/**
* Spark based implementation of Dataset of triples.
*
* @author Gezim Sejdiu Lorenz Buehmann
*/
object TripleOps {
/**
 * Convert a [[Dataset[Triple]]] into an RDD[Triple].
*
* @param triples Dataset of triples.
* @return a RDD of triples.
*/
def toRDD(triples: Dataset[Triple]): RDD[Triple] =
triples.rdd
/**
* Convert a [[Dataset[Triple]]] into a DataFrame.
*
* @param triples Dataset of triples.
* @return a DataFrame of triples.
*/
def toDF(triples: Dataset[Triple]): DataFrame = {
val spark: SparkSession = SparkSession.builder().getOrCreate()
val schema = SchemaUtils.SQLSchemaDefault
val rowRDD = triples.rdd.map(t =>
Row(
NodeUtils.getNodeValue(t.getSubject),
NodeUtils.getNodeValue(t.getPredicate),
NodeUtils.getNodeValue(t.getObject)))
val df = spark.createDataFrame(rowRDD, schema)
df.createOrReplaceTempView("TRIPLES")
df
}
/**
* Get triples.
*
* @param triples DataFrame of triples.
* @return DataFrame which contains list of the triples.
*/
def getTriples(triples: Dataset[Triple]): Dataset[Triple] =
triples
/**
* Returns an Dataset of triples that match with the given input.
*
* @param triples Dataset of triples
* @param subject the subject
* @param predicate the predicate
* @param object the object
* @return Dataset of triples
*/
def find(triples: Dataset[Triple], subject: Option[Node] = None, predicate: Option[Node] = None, `object`: Option[Node] = None): Dataset[Triple] = {
triples.filter(t =>
(subject == None || t.getSubject.matches(subject.get)) &&
(predicate == None || t.getPredicate.matches(predicate.get)) &&
(`object` == None || t.getObject.matches(`object`.get)))
}
/**
* Returns an Dataset of triples that match with the given input.
*
* @param triples Dataset of triples
* @param triple the triple to be checked
* @return Dataset of triples that match the given input
*/
def find(triples: Dataset[Triple], triple: Triple): Dataset[Triple] = {
find(
triples,
if (triple.getSubject.isVariable) None else Option(triple.getSubject),
if (triple.getPredicate.isVariable) None else Option(triple.getPredicate),
if (triple.getObject.isVariable) None else Option(triple.getObject))
}
/**
* Return the number of triples.
*
* @param triples Dataset of triples
* @return the number of triples
*/
def size(triples: Dataset[Triple]): Long =
triples.count()
/**
* Return the union of this RDF graph and another one.
*
* @param triples Dataset of RDF graph
* @param other the other RDF graph
* @return graph (union of both)
*/
def union(triples: Dataset[Triple], other: Dataset[Triple]): Dataset[Triple] =
triples.union(other)
/**
* Return the union all of RDF graphs.
*
* @param triples Dataset of RDF graph
* @param others sequence of Datasets of other RDF graph
* @return graph (union of all)
*/
def unionAll(triples: Dataset[Triple], others: Seq[Dataset[Triple]]): Dataset[Triple] =
(triples +: others).reduce(_ union _)
/**
* Returns a new RDF graph that contains the intersection
* of the current RDF graph with the given RDF graph.
*
* @param triples Dataset of RDF graph
* @param other the other RDF graph
* @return the intersection of both RDF graphs
*/
def intersection(triples: Dataset[Triple], other: Dataset[Triple]): Dataset[Triple] =
triples.intersect(other)
/**
* Returns a new RDF graph that contains the difference
* between the current RDF graph and the given RDF graph.
*
* @param triples Dataset of RDF graph
* @param other the other RDF graph
* @return the difference of both RDF graphs
*/
def difference(triples: Dataset[Triple], other: Dataset[Triple]): Dataset[Triple] =
triples.except(other)
/**
* Determine whether this RDF graph contains any triples
* with a given (subject, predicate, object) pattern.
*
* @param triples Dataset of triples
* @param subject the subject (None for any)
* @param predicate the predicate (None for any)
* @param object the object (None for any)
* @return true if there exists within this RDF graph
* a triple with (S, P, O) pattern, false otherwise
*/
def contains(triples: Dataset[Triple], subject: Option[Node] = None, predicate: Option[Node] = None, `object`: Option[Node] = None): Boolean = {
find(triples, subject, predicate, `object`).count() > 0
}
/**
* Determine if a triple is present in this RDF graph.
*
* @param triples Dataset of triples
* @param triple the triple to be checked
* @return true if the statement s is in this RDF graph, false otherwise
*/
def contains(triples: Dataset[Triple], triple: Triple): Boolean = {
find(triples, triple).count() > 0
}
/**
* Determine if any of the triples in an RDF graph are also contained in this RDF graph.
*
* @param triples Dataset of triples
* @param other the other RDF graph containing the statements to be tested
* @return true if any of the statements in RDF graph are also contained
* in this RDF graph and false otherwise.
*/
def containsAny(triples: Dataset[Triple], other: Dataset[Triple]): Boolean = {
intersection(triples, other).count() > 0
}
/**
* Determine if all of the statements in an RDF graph are also contained in this RDF graph.
*
* @param triples Dataset of triples
* @param other the other RDF graph containing the statements to be tested
* @return true if all of the statements in RDF graph are also contained
* in this RDF graph and false otherwise.
*/
def containsAll(triples: Dataset[Triple], other: Dataset[Triple]): Boolean = {
difference(other, triples).count() == 0
}
@transient var spark: SparkSession = SparkSession.builder.getOrCreate()
/**
* Add a statement to the current RDF graph.
*
* @param triples Dataset of RDF graph
* @param triple the triple to be added.
* @return new Dataset of triples containing this statement.
*/
def add(triples: Dataset[Triple], triple: Triple): Dataset[Triple] = {
val statement = spark.sparkContext.parallelize(Seq(triple))
import net.sansa_stack.rdf.spark.model._
union(triples, statement.toDS())
}
/**
* Add a list of statements to the current RDF graph.
*
* @param triples Dataset of RDF graph
* @param triple the list of triples to be added.
* @return new Dataset of triples containing this list of statements.
*/
def addAll(triples: Dataset[Triple], triple: Seq[Triple]): Dataset[Triple] = {
val statements = spark.sparkContext.parallelize(triple)
import net.sansa_stack.rdf.spark.model._
union(triples, statements.toDS())
}
/**
* Removes a statement from the current RDF graph.
* The statement with the same subject, predicate and
* object as that supplied will be removed from the model.
*
* @param triples Dataset of RDF graph
* @param triple the statement to be removed.
* @return new Dataset of triples without this statement.
*/
def remove(triples: Dataset[Triple], triple: Triple): Dataset[Triple] = {
val statement = spark.sparkContext.parallelize(Seq(triple))
import net.sansa_stack.rdf.spark.model._
difference(triples, statement.toDS())
}
/**
* Removes all the statements from the current RDF graph.
* The statements with the same subject, predicate and
* object as those supplied will be removed from the model.
*
* @param triples Dataset of RDF graph
* @param triple the list of statements to be removed.
* @return new Dataset of triples without these statements.
*/
def removeAll(triples: Dataset[Triple], triple: Seq[Triple]): Dataset[Triple] = {
val statements = spark.sparkContext.parallelize(triple)
import net.sansa_stack.rdf.spark.model._
difference(triples, statements.toDS())
}
/**
* Write N-Triples from a given Dataset of triples
*
* @param triples Dataset of RDF graph
* @param path path to the file containing N-Triples
*/
def saveAsNTriplesFile(triples: Dataset[Triple], path: String): Unit = {
import net.sansa_stack.rdf.common.io.ntriples.JenaTripleToNTripleString
triples.rdd
.map(new JenaTripleToNTripleString()) // map to N-Triples string
.saveAsTextFile(path)
}
}
| SANSA-Stack/Spark-RDF | sansa-rdf-spark/src/main/scala/net/sansa_stack/rdf/spark/model/ds/TripleOps.scala | Scala | gpl-3.0 | 8,681 |
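A short usage sketch for the `TripleOps` object above. The setup is assumed (a local SparkSession and the `.toDS()` conversion that the object itself imports from `net.sansa_stack.rdf.spark.model`), so treat it as illustrative rather than an official SANSA example:

```scala
import org.apache.jena.graph.{ NodeFactory, Triple }
import org.apache.spark.sql.{ Dataset, SparkSession }

import net.sansa_stack.rdf.spark.model._             // provides the .toDS() conversion used above
import net.sansa_stack.rdf.spark.model.ds.TripleOps

val spark = SparkSession.builder().master("local[*]").appName("triple-ops-demo").getOrCreate()

// A single Jena triple to put into a Dataset.
val t = Triple.create(
  NodeFactory.createURI("http://example.org/s"),
  NodeFactory.createURI("http://example.org/p"),
  NodeFactory.createLiteral("o"))

val triples: Dataset[Triple] = spark.sparkContext.parallelize(Seq(t)).toDS()

TripleOps.size(triples)                               // 1
TripleOps.find(triples, subject = Some(t.getSubject)) // Dataset containing the one matching triple
```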
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.coordinator
import java.util
import kafka.utils.nonthreadsafe
import scala.collection.Map
import org.apache.kafka.common.protocol.Errors
case class MemberSummary(memberId: String,
clientId: String,
clientHost: String,
metadata: Array[Byte],
assignment: Array[Byte])
/**
* Member metadata contains the following metadata:
*
* Heartbeat metadata:
* 1. negotiated heartbeat session timeout
* 2. timestamp of the latest heartbeat
*
* Protocol metadata:
* 1. the list of supported protocols (ordered by preference)
* 2. the metadata associated with each protocol
*
* In addition, it also contains the following state information:
*
* 1. Awaiting rebalance callback: when the group is in the prepare-rebalance state,
* its rebalance callback will be kept in the metadata if the
* member has sent the join group request
* 2. Awaiting sync callback: when the group is in the awaiting-sync state, its sync callback
* is kept in metadata until the leader provides the group assignment
* and the group transitions to stable
*/
@nonthreadsafe
private[coordinator] class MemberMetadata(val memberId: String,
val groupId: String,
val clientId: String,
val clientHost: String,
val rebalanceTimeoutMs: Int,
val sessionTimeoutMs: Int,
val protocolType: String,
var supportedProtocols: List[(String, Array[Byte])]) {
var assignment: Array[Byte] = Array.empty[Byte]
var awaitingJoinCallback: JoinGroupResult => Unit = null
var awaitingSyncCallback: (Array[Byte], Errors) => Unit = null
var latestHeartbeat: Long = -1
var isLeaving: Boolean = false
def protocols = supportedProtocols.map(_._1).toSet
/**
* Get metadata corresponding to the provided protocol.
*/
def metadata(protocol: String): Array[Byte] = {
supportedProtocols.find(_._1 == protocol) match {
case Some((_, metadata)) => metadata
case None =>
throw new IllegalArgumentException("Member does not support protocol")
}
}
/**
* Check if the provided protocol metadata matches the currently stored metadata.
*/
def matches(protocols: List[(String, Array[Byte])]): Boolean = {
if (protocols.size != this.supportedProtocols.size)
return false
for (i <- 0 until protocols.size) {
val p1 = protocols(i)
val p2 = supportedProtocols(i)
if (p1._1 != p2._1 || !util.Arrays.equals(p1._2, p2._2))
return false
}
true
}
def summary(protocol: String): MemberSummary = {
MemberSummary(memberId, clientId, clientHost, metadata(protocol), assignment)
}
def summaryNoMetadata(): MemberSummary = {
MemberSummary(memberId, clientId, clientHost, Array.empty[Byte], Array.empty[Byte])
}
/**
* Vote for one of the potential group protocols. This takes into account the protocol preference as
   * indicated by the order of supported protocols and returns the first one also contained in the set.
*/
def vote(candidates: Set[String]): String = {
supportedProtocols.find({ case (protocol, _) => candidates.contains(protocol)}) match {
case Some((protocol, _)) => protocol
case None =>
throw new IllegalArgumentException("Member does not support any of the candidate protocols")
}
}
override def toString = {
"[%s,%s,%s,%d]".format(memberId, clientId, clientHost, sessionTimeoutMs)
}
}
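/* A minimal sketch of how protocol voting and matching behave, assuming a hypothetical
 * member that supports "range" (preferred) and "roundrobin":
 *
 *   val member = new MemberMetadata("m1", "g1", "c1", "host", 30000, 10000, "consumer",
 *     List("range" -> Array[Byte](1), "roundrobin" -> Array[Byte](2)))
 *   member.vote(Set("roundrobin", "range"))          // "range": first supported protocol in the candidate set
 *   member.matches(List("range" -> Array[Byte](1)))  // false: protocol lists differ in length
 */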
|
ijuma/kafka
|
core/src/main/scala/kafka/coordinator/MemberMetadata.scala
|
Scala
|
apache-2.0
| 4,643 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.calcite
import org.apache.calcite.plan.{Convention, RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rex.RexNode
import java.util
/**
* Sub-class of [[Expand]] that is a relational expression
* which returns multiple rows expanded from one input row.
* This class corresponds to Calcite logical rel.
*/
final class LogicalExpand(
cluster: RelOptCluster,
traits: RelTraitSet,
input: RelNode,
outputRowType: RelDataType,
projects: util.List[util.List[RexNode]],
expandIdIndex: Int)
extends Expand(cluster, traits, input, outputRowType, projects, expandIdIndex) {
override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
new LogicalExpand(cluster, traitSet, inputs.get(0), outputRowType, projects, expandIdIndex)
}
}
object LogicalExpand {
def create(
input: RelNode,
outputRowType: RelDataType,
projects: util.List[util.List[RexNode]],
expandIdIndex: Int): LogicalExpand = {
val traits = input.getCluster.traitSetOf(Convention.NONE)
new LogicalExpand(input.getCluster, traits, input, outputRowType, projects, expandIdIndex)
}
}
|
bowenli86/flink
|
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/calcite/LogicalExpand.scala
|
Scala
|
apache-2.0
| 2,085 |
package org.brijest.storm.engine
package gui
import javax.media.opengl._
import javax.media.opengl.awt.GLCanvas
import javax.media.opengl.glu.GLU
import GL._
import GL2._
import GL2ES1._
import GL2ES2._
import fixedfunc.GLLightingFunc._
import fixedfunc.GLMatrixFunc._
import org.scalagl._
import model._
import collection._
package object iso {
def pngStream(name: String): java.io.InputStream = {
getClass.getResourceAsStream("/iso/" + name + ".png")
}
def confStream(name: String): java.io.InputStream = {
getClass.getResourceAsStream("/iso/" + name + ".conf")
}
object Sprites {
def maxheight = 320
}
def ceilpow2(n: Int) = {
var pow2 = 1
while (n > pow2) {
pow2 = pow2 << 1
}
pow2
}
/* model rendering */
private[iso] def renderCube(x: Float, y: Float, xspan2: Float, yspan2: Float, bottom: Float, top: Float)(implicit gl: GL2) = geometry(GL_TRIANGLE_STRIP) {
val xspan = xspan2 / 2
val yspan = yspan2 / 2
/* top */
v3d(x - xspan, y - yspan, top)
v3d(x - xspan, y + yspan, top)
v3d(x + xspan, y - yspan, top)
v3d(x + xspan, y + yspan, top)
/* sides and bottom */
if (top > 0) {
v3d(x + xspan, y + yspan, bottom)
v3d(x - xspan, y + yspan, top)
v3d(x - xspan, y + yspan, bottom)
v3d(x - xspan, y - yspan, top)
v3d(x - xspan, y - yspan, bottom)
v3d(x + xspan, y - yspan, top)
v3d(x + xspan, y - yspan, bottom)
v3d(x + xspan, y + yspan, bottom)
}
}
private[iso] def renderTriPrism(x: Float, y: Float, xspan2: Float, yspan2: Float, bottom: Float, top: Float)(implicit gl: GL2) = geometry(GL_QUADS) {
val xspan = xspan2 / 2
val yspan = yspan2 / 2
// TODO fix this to render actual triprism
/* top */
v3d(x - xspan, y - yspan, top)
v3d(x - xspan, y + yspan, top)
v3d(x + xspan, y + yspan, top)
v3d(x + xspan, y - yspan, top)
/* sides and bottom */
if (top > 0) {
v3d(x - xspan, y - yspan, top)
v3d(x - xspan, y - yspan, bottom)
v3d(x - xspan, y + yspan, bottom)
v3d(x - xspan, y + yspan, top)
v3d(x + xspan, y - yspan, top)
v3d(x + xspan, y - yspan, bottom)
v3d(x - xspan, y - yspan, bottom)
v3d(x - xspan, y - yspan, top)
v3d(x + xspan, y + yspan, top)
v3d(x + xspan, y + yspan, bottom)
v3d(x + xspan, y - yspan, bottom)
v3d(x + xspan, y - yspan, top)
v3d(x - xspan, y + yspan, top)
v3d(x - xspan, y + yspan, bottom)
v3d(x + xspan, y + yspan, bottom)
v3d(x + xspan, y + yspan, top)
}
}
private[iso] def renderRectangle(xtl: Float, ytl: Float, xbr: Float, ybr: Float, height: Float)(implicit gl: GL2) = geometry(GL_QUADS) {
v3d(xtl, ybr, height)
v3d(xbr, ybr, height)
v3d(xbr, ytl, height)
v3d(xtl, ytl, height)
}
def renderShape(x: Int, y: Int, s: Shape, hgt: Float)(implicit gl: GL2): Unit = s match {
case Shape.Cube(xd, yd, zd, xoff, yoff, zoff) =>
val bottom = hgt + zoff
renderCube(x + xoff, y + yoff, xd, yd, bottom, bottom + zd)
case Shape.TriPrism(xd, yd, zd, xoff, yoff, zoff) =>
val bottom = hgt + zoff
renderTriPrism(x + xoff, y + yoff, xd, yd, bottom, bottom + zd)
case Shape.Composite(subs) =>
for (sub <- subs) renderShape(x, y, sub, hgt)
case Shape.None =>
// do nothing
}
}
|
axel22/scala-2d-game-editor
|
src/main/scala/org/brijest/storm/engine/gui/iso/package.scala
|
Scala
|
bsd-3-clause
| 3,391 |
package repos.jdbc
/** Table Janitor is a process responsible for ensuring that all index tables
  * are kept up to date. It also populates new indexes.
  *
  * It is possible that one of the repo users is running an old binary that does not
  * write to some index on insert. The Table Janitor ensures that everything that
  * is inserted eventually gets indexed.
  *
  * It is assumed that at any point in time there is at most one Table Janitor
  * running over a database.
  *
  * Periodically, the janitor scans for new entries in the main table and indexes
  * them if necessary.
  *
  * Sometimes, the PKs may not appear in consecutive order; for example, a later transaction
  * may complete before a transaction that started earlier. The Janitor will then observe a temporary
  * gap in the pks. Since the Janitor wants to ensure that all entries are indexed, when gaps
  * in the PKs are detected, it retries filling them (for a configurable time period) until
  * they are resolved.
  *
  * The Table Janitor uses a JanitorIndexStatus table that maps each index name to
  * the highest PK p on the main table for which all pk <= p are known to be indexed (that is,
  * all pks up to the first unresolved gap).
 */
import akka.actor.{ActorRef, Actor, ActorLogging}
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import repos.{EntryTableRecord, Repo, SecondaryIndex}
import slick.lifted
import slick.lifted.BaseColumnExtensionMethods
import scala.annotation.tailrec
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext}
import scala.language.existentials
class TableJanitor(readJdbcDb: JdbcDb, writeJdbcDb: JdbcDb, allRepos: Seq[Repo[_, _]],
materializer: akka.stream.Materializer, monitorActor: Option[ActorRef]) extends Actor with ActorLogging {
import TableJanitor._
import context.dispatcher
// We care only about repos that have indexes.
val repos = allRepos.filter(_.allIndexes.nonEmpty)
var janitorStatus = TableJanitorStatus()
def updateStatus(newJanitorStatus: TableJanitorStatus): Unit = {
janitorStatus = newJanitorStatus
monitorActor.foreach(_ ! janitorStatus)
}
/** Build indexes from the last checkpoint to the present.
*
* @return number of index records written.
*/
def catchUp()(implicit ec: scala.concurrent.ExecutionContext): Long = {
implicit def am: Materializer = materializer
val currentTime = System.currentTimeMillis()
val result = repos.map {
repo =>
updateStatus(janitorStatus.copy(current = s"Catching up on ${repo.name}"))
val oldState = janitorStatus.repoState(repo.name)
val newState = catchUpForRepo(
readJdbcDb = readJdbcDb,
writeJdbcDb = writeJdbcDb,
currentTime = currentTime,
statusTable = janitorStatus.statusTable,
repo = repo,
state = oldState,
log = log.info)
updateStatus(
janitorStatus.copy(
repoState = janitorStatus.repoState + (repo.name -> newState)))
newState.indexedAllUpTo - oldState.indexedAllUpTo
}.sum
updateStatus(janitorStatus.copy(current = "Idle"))
result
}
  /** Creates the Janitor's index status table, as well as any missing repos and index tables. */
def setupTables()(implicit ec: scala.concurrent.ExecutionContext) = {
RepoManagement.createMissingRepos(writeJdbcDb, allRepos)
// Ensure that all indexes not in the index status are set to zero.
val currentStatus = loadJanitorIndexStatus(readJdbcDb = readJdbcDb)
for {
repo <- allRepos
index <- repo.allIndexes if (!currentStatus.contains(readJdbcDb.innerIndex(index).ix3TableName))
} {
updateLastPkForTable(writeJdbcDb, index, 0)
}
}
def initJanitor(): Unit = {
log.info("Booting Table Janitor.")
setupTables()
val statusTable = loadJanitorIndexStatus(readJdbcDb = readJdbcDb)
updateStatus(TableJanitorStatus(
repoState = repos.map {
repo =>
val min = repo.allIndexes.map(lookupInStatusTable(readJdbcDb, statusTable, _)).min
repo.name -> State(min, min, Vector.empty)
}.toMap,
statusTable = statusTable,
current = "Initialized"))
log.info("Starting to catch up.")
val updatedRecords = catchUp()
log.info(s"Done catching up. $updatedRecords records updated.")
}
// Boots the Janitor.
override def preStart: Unit = {
self ! Init
}
def receive = {
case Init =>
initJanitor()
case Tick =>
updateStatus(janitorStatus.copy(statusTable = loadJanitorIndexStatus(readJdbcDb = readJdbcDb)))
catchUp()
}
override def postStop: Unit = {
log.info("Stopping Table Janitor.")
}
}
object TableJanitor {
val JANITOR_INDEX_STATUS_TABLE = "janitor_index_status"
case object Tick
case object Init
case class Gap(start: Long, end: Long, observed: Long) {
override def toString: String = s"[$start, $end] at $observed"
}
  /** State of a run.
   *
   * @param indexedAllUpTo the maximal pk such that it and everything before it is known to be indexed.
   * @param maxSeen the last pk observed.
   * @param gaps set of pk ranges between indexedAllUpTo and maxSeen that have not been seen.
   */
case class State(indexedAllUpTo: Long, maxSeen: Long, gaps: Vector[Gap])
case class TableJanitorStatus(
repoState: Map[String, State] = Map.empty,
statusTable: StatusTable = Map.empty,
current: String = "Initializing")
val FORGET_MISSING_AFTER_MS = 600 * 1000
val GROUP_LIMIT = 5000
def processGroup[Id, M](state: State, items: Seq[EntryTableRecord[Id, M]]): State = {
require(state.maxSeen == state.indexedAllUpTo || state.gaps.nonEmpty)
require(state.gaps.isEmpty || state.gaps.head.start == state.indexedAllUpTo + 1)
require(state.gaps.isEmpty || state.gaps.last.end < state.maxSeen)
val tmpState = items.foldLeft(state) {
case (s, item) =>
assert(item.pk > s.maxSeen)
if (item.pk == s.maxSeen + 1)
s.copy(maxSeen = item.pk)
else
s.copy(maxSeen = item.pk, gaps = s.gaps :+
Gap(s.maxSeen + 1, item.pk - 1, item.timestamp))
}
if (tmpState.gaps.isEmpty) State(tmpState.maxSeen, tmpState.maxSeen, Vector.empty)
else tmpState.copy(indexedAllUpTo = tmpState.gaps.head.start - 1)
}
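  /* A worked example of the gap bookkeeping above, assuming a state that is fully indexed
   * up to pk 10 and a batch of items containing pks 11, 12 and 15:
   *
   *   processGroup(State(10, 10, Vector.empty), items)
   *     // == State(indexedAllUpTo = 12, maxSeen = 15,
   *     //          gaps = Vector(Gap(13, 14, <timestamp of the pk-15 entry>)))
   */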
private[repos] def nextEntries[Id, M, R](readJdbcDb: JdbcDb, writeJdbcDb: JdbcDb, repo: Repo[Id, M], initialState: State, indexItems: (State, Seq[EntryTableRecord[Id, M]]) => Unit)(implicit am: Materializer, ec: ExecutionContext): State = {
Await.result(
Source.fromPublisher(readJdbcDb.stream(repo.getEntries(initialState.maxSeen)))
.grouped(GROUP_LIMIT).runFold(initialState) { (state, items) =>
val newState = processGroup(state, items)
indexItems(newState, items)
newState
}, Duration.Inf)
}
private[repos] def fillGaps[Id, M](gaps: Seq[Gap], items: Set[Long]): Seq[Gap] = {
@tailrec
def inner(original: Gap, current: Long, acc: List[Gap]): List[Gap] = {
if (current > original.end) acc
else if (!items.contains(current)) acc match {
case (g@Gap(start, end, _)) :: gs if current == end + 1 =>
// extending current gap
inner(original, current + 1, g.copy(end = current) :: gs)
case gs =>
// new gap
inner(original, current + 1, Gap(current, current, original.observed) :: gs)
} else inner(original, current + 1, acc)
}
gaps.flatMap(g => inner(g, g.start, Nil).reverseIterator)
}
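  /* A worked example of how fetched pks shrink and split gaps, with hypothetical values:
   *
   *   fillGaps(Seq(Gap(13, 16, t)), items = Set(14L))
   *     // == Seq(Gap(13, 13, t), Gap(15, 16, t)): pk 14 was found, the rest remains open
   */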
private[repos] def fetchGaps[Id, M](jdbcDb: JdbcDb, repo: Repo[Id, M], gaps: Seq[Gap],
indexItems: Seq[EntryTableRecord[Id, M]] => Unit)(implicit am: Materializer): Seq[Gap] =
if (gaps.isEmpty) gaps else {
import jdbcDb.profile.api._
def inBetween(g: Gap)(pk: BaseColumnExtensionMethods[Long]) = if (g.start == g.end)
(pk === g.start) else pk.between(g.start, g.end)
val allGaps = gaps.tail.foldLeft(inBetween(gaps.head) _) {
case (filter, gap) =>
pk: BaseColumnExtensionMethods[Long] => filter(pk) || inBetween(gap)(pk)
}
val q = jdbcDb.innerRepo(repo).entryTable.filter(e => allGaps(e.pk)).sortBy(_.pk).result
Await.result(
Source.fromPublisher(jdbcDb.db.stream(q)).grouped(5000).runFold(gaps) {
(gaps, items) =>
indexItems(items)
fillGaps(gaps, items.map(_.pk).toSet)
}, Duration.Inf)
}
type IndexableRecord[Id, M] = ((Id, M), Long)
private[repos] def ensureIndexed[Id, M, R](jdbcDb: JdbcDb, indexTable: SecondaryIndex[Id, M, R],
entries: Seq[IndexableRecord[Id, M]],
log: String => Unit = _ => ()) = {
import jdbcDb.profile.api._
val inner = jdbcDb.innerIndex(indexTable)
val pksToIndex: Seq[Long] = entries.map(_._2)
val alreadyIndexedPk: Set[Long] =
jdbcDb.jc.blockingWrapper(
inner.indexTable.asInstanceOf[lifted.TableQuery[JdbcDb#Ix3Table[Id, R]]]
.filter(_.parentPk inSet pksToIndex).map(_.parentPk).result).toSet
val unindexedEntries = entries.filterNot(e => alreadyIndexedPk.contains(e._2))
if (unindexedEntries.nonEmpty) {
jdbcDb.jc.blockingWrapper(inner.buildInsertAction(unindexedEntries))
log(s"Repo ${indexTable.repo.name}: indexed ${unindexedEntries.size} entries into ${indexTable.name}")
}
}
private type StatusTable = Map[String, Long]
private def lookupInStatusTable(readJdbcDb: JdbcDb, s: StatusTable, index: SecondaryIndex[_, _, _]): Long = {
s(readJdbcDb.innerIndex(index).ix3TableName)
}
private def discardExpiredGaps(repoName: String, gaps: Seq[Gap], currentTime: Long, logger: String => Unit): Seq[Gap] = {
gaps.filterNot {
g =>
val shouldExpire = g.observed + FORGET_MISSING_AFTER_MS < currentTime
if (shouldExpire) {
logger(s"Repo ${repoName}: discarding expired gap $g")
}
shouldExpire
}
}
// Loads the Janitor index status table to memory.
private[repos] def loadJanitorIndexStatus(readJdbcDb: JdbcDb)(implicit ec: scala.concurrent.ExecutionContext): StatusTable = {
import readJdbcDb.jc.profile.api._
readJdbcDb.jc.blockingWrapper(
readJdbcDb.jc.JanitorIndexStatus.map({
r => r.indexTableName -> r.lastPk
}).result).toMap
}
private def updateLastPkForTable(jdbcDb: JdbcDb, index: SecondaryIndex[_, _, _], lastPk: Long) = {
jdbcDb.jc.JanitorIndexStatus.updateLastPkForIndex(jdbcDb.innerIndex(index).ix3TableName, lastPk)
}
private[repos] def catchUpForRepo[Id, M](readJdbcDb: JdbcDb,
writeJdbcDb: JdbcDb,
currentTime: Long,
statusTable: StatusTable,
repo: Repo[Id, M],
state: State,
log: String => Unit = println)(implicit materializer: Materializer, ec: ExecutionContext): State = {
def indexItems(items: Seq[EntryTableRecord[Id, M]]): Unit = {
val indexableItems: Seq[IndexableRecord[Id, M]] = items.map(i => ((i.id, i.entry), i.pk))
repo.allIndexes.foreach {
index =>
val indexCompleteUpTo = lookupInStatusTable(readJdbcDb, statusTable, index)
if (items.last.pk > indexCompleteUpTo) {
TableJanitor.ensureIndexed(writeJdbcDb, index,
indexableItems.filter(_._2 > indexCompleteUpTo), log)
}
}
}
val withoutDiscardedGaps: Seq[Gap] = discardExpiredGaps(repo.name, state.gaps, currentTime, log)
val updatedGaps: Seq[Gap] = TableJanitor.fetchGaps(jdbcDb = readJdbcDb, repo, withoutDiscardedGaps,
indexItems)(materializer)
val stateWithUpdatedGaps = state.copy(
indexedAllUpTo = if (updatedGaps.isEmpty) state.maxSeen else (updatedGaps.head.start - 1),
gaps = updatedGaps.toVector)
val newState = TableJanitor.nextEntries(readJdbcDb, writeJdbcDb, repo, stateWithUpdatedGaps,
{ (tmpState, items: Seq[EntryTableRecord[Id, M]]) =>
indexItems(items)
repo.allIndexes.foreach {
index =>
if (tmpState.indexedAllUpTo > lookupInStatusTable(readJdbcDb, statusTable, index)) {
updateLastPkForTable(jdbcDb = writeJdbcDb, index, tmpState.indexedAllUpTo)
}
}
})
if (newState.indexedAllUpTo != state.indexedAllUpTo) {
repo.allIndexes.foreach {
index =>
if (newState.indexedAllUpTo > lookupInStatusTable(readJdbcDb, statusTable, index)) {
updateLastPkForTable(jdbcDb = writeJdbcDb, index, newState.indexedAllUpTo)
}
}
}
{
val currentGapSet = newState.gaps.toSet
val previousGapSet = withoutDiscardedGaps.toSet
val deletedGaps = previousGapSet -- currentGapSet
val newGaps = currentGapSet -- previousGapSet
if (deletedGaps.nonEmpty) {
log(s"Repo ${repo.name}: closed gaps ${deletedGaps.mkString(", ")}")
}
if (newGaps.nonEmpty) {
log(s"Repo ${repo.name}: detected new gaps ${newGaps.mkString(", ")}")
}
}
newState
}
}
|
trueaccord/repos
|
src/main/scala/repos/jdbc/TableJanitor.scala
|
Scala
|
apache-2.0
| 13,355 |
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package write_model
import java.util.UUID
import cqrs.command.{EventBus, AggregateRootFactory}
import write_model.AccountCommands.CreateAccountCommand
import write_model.AccountEvents.AccountCreated
object AccountFactory extends AggregateRootFactory[Account]{
override def handleCommand(id: UUID, eventBus: EventBus): CommandHandler = {
case CreateAccountCommand => eventBus.publish(AccountCreated(id))
}
override def applyEvent: EventHandler = {
case AccountCreated(id) => Account(id, 0)
}
}
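/* A minimal sketch of the command/event round trip defined above; the UUID and event-bus
 * wiring are hypothetical:
 *
 *   val id = UUID.randomUUID()
 *   // handleCommand(id, eventBus) maps CreateAccountCommand to eventBus.publish(AccountCreated(id)),
 *   // and applyEvent maps AccountCreated(id) to the initial state Account(id, 0).
 */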
|
cqrs-endeavour/cqrs-framework-sample
|
src/main/scala/write_model/AccountFactory.scala
|
Scala
|
mpl-2.0
| 718 |
package com.twitter.finagle.exp
import com.twitter.finagle._
import com.twitter.finagle.client.{StackClient, StdStackClient, DefaultPool}
import com.twitter.finagle.exp.mysql._
import com.twitter.finagle.exp.mysql.transport.{MysqlTransporter, Packet}
import com.twitter.finagle.param.ProtocolLibrary
import com.twitter.finagle.tracing._
import com.twitter.finagle.transport.Transport
import com.twitter.util.Duration
/**
* Supplements a [[com.twitter.finagle.Client]] with convenient
* builder methods for constructing a mysql client.
*/
trait MysqlRichClient { self: com.twitter.finagle.Client[Request, Result] =>
/**
* Creates a new `RichClient` connected to the logical
* destination described by `dest` with the assigned
* `label`. The `label` is used to scope client stats.
*/
def newRichClient(dest: Name, label: String): mysql.Client with mysql.Transactions =
mysql.Client(newClient(dest, label))
/**
* Creates a new `RichClient` connected to the logical
* destination described by `dest`.
*/
def newRichClient(dest: String): mysql.Client with mysql.Transactions =
mysql.Client(newClient(dest))
}
object MySqlClientTracingFilter {
object Stackable extends Stack.Module1[param.Label, ServiceFactory[Request, Result]] {
val role = ClientTracingFilter.role
val description = "Add MySql client specific annotations to the trace"
def make(_label: param.Label, next: ServiceFactory[Request, Result]) = {
val param.Label(label) = _label
// TODO(jeff): should be able to get this directly from ClientTracingFilter
val annotations = new AnnotatingTracingFilter[Request, Result](
label, Annotation.ClientSend(), Annotation.ClientRecv())
annotations andThen TracingFilter andThen next
}
}
object TracingFilter extends SimpleFilter[Request, Result] {
def apply(request: Request, service: Service[Request, Result]) = {
if (Trace.isActivelyTracing) {
request match {
case QueryRequest(sqlStatement) => Trace.recordBinary("mysql.query", sqlStatement)
case PrepareRequest(sqlStatement) => Trace.recordBinary("mysql.prepare", sqlStatement)
// TODO: save the prepared statement and put it in the executed request trace
case ExecuteRequest(id, _, _, _) => Trace.recordBinary("mysql.execute", id)
case _ => Trace.record("mysql." + request.getClass.getSimpleName.replace("$", ""))
}
}
service(request)
}
}
}
/**
* @example {{{
* val client = Mysql.client
* .withCredentials("<username>", "<password>")
* .withDatabase("<db>")
* .newRichClient("inet!localhost:3306")
* }}}
*/
object Mysql extends com.twitter.finagle.Client[Request, Result] with MysqlRichClient {
/**
* Implements a mysql client in terms of a
* [[com.twitter.finagle.StackClient]]. The client inherits a wealth
* of features from finagle including connection pooling and load
* balancing.
*
* Additionally, this class provides methods for constructing a rich
* client which exposes a rich mysql api.
*/
case class Client(
stack: Stack[ServiceFactory[Request, Result]] = StackClient.newStack
.replace(ClientTracingFilter.role, MySqlClientTracingFilter.Stackable),
params: Stack.Params = StackClient.defaultParams + DefaultPool.Param(
low = 0, high = 1, bufferSize = 0,
idleTime = Duration.Top,
maxWaiters = Int.MaxValue) +
ProtocolLibrary("mysql")
) extends StdStackClient[Request, Result, Client] with MysqlRichClient {
protected def copy1(
stack: Stack[ServiceFactory[Request, Result]] = this.stack,
params: Stack.Params = this.params
): Client = copy(stack, params)
protected type In = Packet
protected type Out = Packet
protected def newTransporter() = MysqlTransporter(params)
protected def newDispatcher(transport: Transport[Packet, Packet]): Service[Request, Result] =
mysql.ClientDispatcher(transport, Handshake(params))
/**
* The credentials to use when authenticating a new session.
*/
def withCredentials(u: String, p: String): Client =
configured(Handshake.Credentials(Option(u), Option(p)))
/**
* Database to use when this client establishes a new session.
*/
def withDatabase(db: String): Client =
configured(Handshake.Database(Option(db)))
/**
* The default character set used when establishing
* a new session.
*/
def withCharset(charset: Short): Client =
configured(Handshake.Charset(charset))
}
val client = Client()
def newClient(dest: Name, label: String): ServiceFactory[Request, Result] =
client.newClient(dest, label)
def newService(dest: Name, label: String): Service[Request, Result] =
client.newService(dest, label)
/**
* The credentials to use when authenticating a new session.
*/
@deprecated("Use client.withCredentials", "6.22.0")
def withCredentials(u: String, p: String): Client =
client.configured(Handshake.Credentials(Option(u), Option(p)))
/**
* Database to use when this client establishes a new session.
*/
@deprecated("Use client.withDatabase", "6.22.0")
def withDatabase(db: String): Client =
client.configured(Handshake.Database(Option(db)))
/**
* The default character set used when establishing
* a new session.
*/
@deprecated("Use client.withCharset", "6.22.0")
def withCharset(charset: Short): Client =
client.configured(Handshake.Charset(charset))
/**
* A client configured with parameter p.
*/
@deprecated("Use client.configured", "6.22.0")
def configured[P: Stack.Param](p: P): Client =
client.configured(p)
}
|
travisbrown/finagle
|
finagle-mysql/src/main/scala/com/twitter/finagle/Mysql.scala
|
Scala
|
apache-2.0
| 5,706 |
final class X
final class Y
object Test1 {
trait Test {
type Type
// This is testing that both permutations of the types in a &
// are taken into account by the intersection test
val i: Bar[Y & Type] = 1 // error
}
type Bar[A] = A match {
case X & Y => String
case Y => Int
}
}
object Test2 {
trait Wizzle[L <: Int with Singleton] {
type Bar[A] = A match {
case 0 => String
case L => Int
}
// This is testing that we don't make wrong assumptions about Singleton
def right(fa: Bar[L]): Int = fa // error
}
trait Wazzlo[L <: Int with AnyVal] {
type Bar[A] = A match {
case 0 => String
case L => Int
}
// This is testing that we don't make wrong assumptions about AnyVal
def right(fa: Bar[L]): Int = fa // error
}
trait Wuzzlu[L <: String with AnyRef] {
type Bar[A] = A match {
case "" => String
case L => Int
}
// This is testing that we don't make wrong assumptions about AnyRef
def right(fa: Bar[L]): Int = fa // error
}
}
object Test3 {
type Bar[A] = A match {
case X => String
case Y => Int
}
trait XX {
type Foo
val a: Bar[X & Foo] = "hello"
val b: Bar[Y & Foo] = 1 // error
def apply(fa: Bar[X & Foo]): Bar[Y & Foo]
def boom: Int = apply(a) // error
}
trait YY extends XX {
type Foo = X & Y
def apply(fa: Bar[X & Foo]): Bar[Y & Foo] = fa
}
}
object Test4 {
type Bar[A] = A match {
case X => String
case Y => Int
}
trait XX {
type Foo
type FooAlias = Foo
val a: Bar[X & FooAlias] = "hello"
val b: Bar[Y & FooAlias] = 1 // error
def apply(fa: Bar[X & FooAlias]): Bar[Y & FooAlias]
def boom: Int = apply(a) // error
}
trait YY extends XX {
type Foo = X & Y
def apply(fa: Bar[X & FooAlias]): Bar[Y & FooAlias] = fa
}
}
|
som-snytt/dotty
|
tests/neg/6314.scala
|
Scala
|
apache-2.0
| 1,870 |
package org.scalamu
import org.scalamu.core.api._
package object core {
def die(failure: RemoteProcessFailure): Nothing = die(failure.exitCode)
def die(exitValue: Int): Nothing = sys.exit(exitValue)
}
|
sugakandrey/scalamu
|
core/src/main/scala/org/scalamu/core/package.scala
|
Scala
|
gpl-3.0
| 222 |
package pipelines
import org.scalatest.Suite
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.SparkContext
// TODO: delete this file and use the version from Spark once SPARK-750 is fixed.
/** Manages a local `sc` {@link SparkContext} variable, correctly stopping it after each test. */
trait LocalSparkContext extends BeforeAndAfterEach { self: Suite =>
@transient var sc: SparkContext = _
override def afterEach() {
resetSparkContext()
super.afterEach()
}
def resetSparkContext() = {
if (sc != null) {
LocalSparkContext.stop(sc)
sc = null
}
}
}
object LocalSparkContext {
def stop(sc: SparkContext) {
sc.stop()
// To avoid Akka rebinding to the same port, since it doesn't unbind immediately on shutdown
System.clearProperty("spark.driver.port")
}
/** Runs `f` by passing in `sc` and ensures that `sc` is stopped. */
def withSpark[T](sc: SparkContext)(f: SparkContext => T) = {
try {
f(sc)
} finally {
stop(sc)
}
}
}
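/* A minimal usage sketch, assuming a local master and a throwaway app name:
 *
 *   LocalSparkContext.withSpark(new SparkContext("local", "test")) { sc =>
 *     sc.parallelize(1 to 10).count()
 *   } // the context is stopped even if the body throws
 */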
|
concretevitamin/keystone
|
src/test/scala/pipelines/LocalSparkContext.scala
|
Scala
|
apache-2.0
| 1,029 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package params
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
import org.jetbrains.plugins.scala.lang.parser.util.ParserUtils
/**
* @author Alexander Podkhalyuzin
* Date: 06.03.2008
*/
/*
* TypeParamClause ::= '[' VariantTypeParam {',' VariantTypeParam} ']'
*/
object TypeParamClause extends TypeParamClause {
override protected def typeParam = TypeParam
}
trait TypeParamClause {
protected def typeParam: TypeParam
def parse(builder: ScalaPsiBuilder): Boolean = {
val typeMarker = builder.mark
builder.getTokenType match {
case ScalaTokenTypes.tLSQBRACKET =>
builder.advanceLexer() //Ate [
builder.disableNewlines()
case _ =>
typeMarker.drop()
return false
}
if (!typeParam.parse(builder, mayHaveVariance = true)) {
builder error ScalaBundle.message("wrong.parameter")
}
while (builder.getTokenType == ScalaTokenTypes.tCOMMA && !ParserUtils.eatTrailingComma(builder, ScalaTokenTypes.tRSQBRACKET)) {
      builder.advanceLexer() //Ate ,
if (!typeParam.parse(builder, mayHaveVariance = true)) {
builder error ScalaBundle.message("wrong.parameter")
}
}
builder.getTokenType match {
case ScalaTokenTypes.tRSQBRACKET =>
builder.advanceLexer() //Ate ]
case _ =>
builder error ScalaBundle.message("rsqbracket.expected")
}
builder.restoreNewlinesState()
typeMarker.done(ScalaElementTypes.TYPE_PARAM_CLAUSE)
true
}
}
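/* Examples of source fragments matched by the grammar rule above (illustrative only):
 *
 *   [T]                   // single type parameter
 *   [+A, B]               // variance annotation on the first parameter
 *   [-K, +V <: AnyRef]    // variance annotations with an upper bound
 */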
|
loskutov/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/parser/parsing/params/TypeParamClause.scala
|
Scala
|
apache-2.0
| 1,641 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming.continuous
import scala.util.control.NonFatal
import org.apache.spark.SparkException
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.streaming.StreamExecution
import org.apache.spark.sql.sources.v2.writer.streaming.StreamingWriteSupport
/**
* The physical plan for writing data into a continuous processing [[StreamingWriteSupport]].
*/
case class WriteToContinuousDataSourceExec(writeSupport: StreamingWriteSupport, query: SparkPlan)
extends SparkPlan with Logging {
override def children: Seq[SparkPlan] = Seq(query)
override def output: Seq[Attribute] = Nil
override protected def doExecute(): RDD[InternalRow] = {
val writerFactory = writeSupport.createStreamingWriterFactory()
val rdd = new ContinuousWriteRDD(query.execute(), writerFactory)
logInfo(s"Start processing data source write support: $writeSupport. " +
s"The input RDD has ${rdd.partitions.length} partitions.")
EpochCoordinatorRef.get(
sparkContext.getLocalProperty(ContinuousExecution.EPOCH_COORDINATOR_ID_KEY),
sparkContext.env)
.askSync[Unit](SetWriterPartitions(rdd.getNumPartitions))
try {
// Force the RDD to run so continuous processing starts; no data is actually being collected
// to the driver, as ContinuousWriteRDD outputs nothing.
rdd.collect()
} catch {
case _: InterruptedException =>
// Interruption is how continuous queries are ended, so accept and ignore the exception.
case cause: Throwable =>
cause match {
// Do not wrap interruption exceptions that will be handled by streaming specially.
case _ if StreamExecution.isInterruptionException(cause, sparkContext) => throw cause
// Only wrap non fatal exceptions.
case NonFatal(e) => throw new SparkException("Writing job aborted.", e)
case _ => throw cause
}
}
sparkContext.emptyRDD
}
}
|
michalsenkyr/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/continuous/WriteToContinuousDataSourceExec.scala
|
Scala
|
apache-2.0
| 2,977 |
/*
* The MIT License
*
* Copyright (c) 2011 Vladimir Kirichenko <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package butter4s.net
/**
* @author Vladimir Kirichenko <[email protected]>
*/
class Url
|
vladimirk/butter4s
|
http/src/butter4s/net/Url.scala
|
Scala
|
mit
| 1,299 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher
import java.util
import org.neo4j.test.ImpermanentGraphDatabase
import scala.collection.JavaConverters._
trait RunWithConfigTestSupport {
def runWithConfig(m: (String, String)*)(run: ExecutionEngine => Unit) = {
val config: util.Map[String, String] = m.toMap.asJava
val graph = new ImpermanentGraphDatabase(config)
try {
val engine = new ExecutionEngine(graph)
run(engine)
} finally {
graph.shutdown()
}
}
}
|
HuangLS/neo4j
|
community/cypher/cypher/src/test/scala/org/neo4j/cypher/RunWithConfigTestSupport.scala
|
Scala
|
apache-2.0
| 1,275 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.commands.expressions
import org.neo4j.cypher.internal.compiler.v2_3._
import org.neo4j.cypher.internal.compiler.v2_3.helpers.CollectionSupport
import org.neo4j.cypher.internal.compiler.v2_3.symbols.SymbolTable
import pipes.QueryState
import org.neo4j.cypher.internal.frontend.v2_3.symbols._
case class ReduceFunction(collection: Expression, id: String, expression: Expression, acc: String, init: Expression)
extends NullInNullOutExpression(collection) with CollectionSupport {
def compute(value: Any, m: ExecutionContext)(implicit state: QueryState) = {
val initMap = m.newWith(acc -> init(m))
val computedMap = makeTraversable(value).foldLeft(initMap) { (accMap, k) => {
val innerMap = accMap.newWith(id -> k)
innerMap.newWith(acc -> expression(innerMap))
}
}
computedMap(acc)
}
def rewrite(f: (Expression) => Expression) =
f(ReduceFunction(collection.rewrite(f), id, expression.rewrite(f), acc, init.rewrite(f)))
def arguments: Seq[Expression] = Seq(collection, init)
override def children = Seq(collection, expression, init)
def identifierDependencies(expectedType: CypherType) = AnyType
def calculateType(symbols: SymbolTable) = {
val iteratorType = collection.evaluateType(CTCollection(CTAny), symbols).legacyIteratedType
var innerSymbols = symbols.add(acc, init.evaluateType(CTAny, symbols))
innerSymbols = innerSymbols.add(id, iteratorType)
    // return the expression's type as the end result for reduce
expression.evaluateType(CTAny, innerSymbols)
}
def symbolTableDependencies = (collection.symbolTableDependencies ++ expression.symbolTableDependencies ++ init.symbolTableDependencies) - id - acc
}
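/* A worked example of the fold performed by compute(), using a hypothetical Cypher expression:
 *
 *   reduce(acc = 0, x IN [1, 2, 3] | acc + x)
 *
 * The context is seeded with acc -> 0, then for each element the inner expression is evaluated
 * with id (x) and acc rebound, yielding 6.
 */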
|
HuangLS/neo4j
|
community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/commands/expressions/ReduceFunction.scala
|
Scala
|
apache-2.0
| 2,536 |
package suzaku.ui
trait ComponentBlueprint extends Blueprint {
def create: Component[_ <: ComponentBlueprint, _]
def sameAs(that: this.type): Boolean = equals(that)
}
|
suzaku-io/suzaku
|
core-app/shared/src/main/scala/suzaku/ui/ComponentBlueprint.scala
|
Scala
|
apache-2.0
| 173 |
/*
* Copyright 2001-2009 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.junit.junit4helpers
import org.scalatest._
import org.scalatest.junit.JUnitSuite
import org.junit.Test
import org.junit.Ignore
// This needs to be top level, not nested, because JUnit3 can't instantiate it
// to run each test in its own instance if it is nested (no no-arg constructor).
class TestWasCalledSuite extends JUnitSuite {
@Test def doThis() { TestWasCalledSuite.theDoThisCalled = true }
@Test def doThat() { TestWasCalledSuite.theDoThatCalled = true }
}
object TestWasCalledSuite {
def reinitialize() {
theDoThisCalled = false
theDoThatCalled = false
}
// Had to pull these out of the class, because JUnit makes a separate
// instance for each test
var theDoThisCalled = false
var theDoThatCalled = false
}
class TestWithNonUnitMethod extends JUnitSuite {
@Test def doThis() {}
@Test def doThat() {}
// JUnit will not discover or run this, because its return type
// is not Unit
@Test def doTheOtherThing(): String = "hi"
}
class TestWithMethodNamedTest extends JUnitSuite {
@Test def doThis() {}
@Test def doThat() {}
// JUnit will discover and run this method:
@Test def doIt() {}
}
class ASuite extends JUnitSuite {
@Test def doThis() = ()
@Test def doThat(info: Informer) = ()
}
class BSuite extends JUnitSuite {
@Ignore
@Test def doThis() = ()
@Test def doThat(info: Informer) = ()
}
class CSuite extends JUnitSuite {
@FastAsLight
@Test def doThis() = ()
@Test def doThat(info: Informer) = ()
}
class DSuite extends JUnitSuite {
@FastAsLight
@SlowAsMolasses
@Test def doThis() = ()
@SlowAsMolasses
@Test def doThat(info: Informer) = ()
@Test def doTheOtherThing(info: Informer) = ()
@Test def doOne() = ()
@Test def doTwo() = ()
@Test def doIt() = ()
@Test def doFour(): String = "hi" // JUnit will not run these two because they don't
@Test def doFive(): Int = 5 // have result type Unit.
}
class ESuite extends JUnitSuite {
@FastAsLight
@SlowAsMolasses
@Test def doThis() = ()
@SlowAsMolasses
@Test def doThat(info: Informer) = ()
@Ignore
@Test def doTheOtherThing(info: Informer) = ()
}
class ShouldFailSuite extends JUnitSuite {
@Test def doThrowsAssertionError() { throw new AssertionError }
@Test def doThrowsPlainOldError() { throw new Error }
@Test def doThrowsThrowable() { throw new Throwable }
}
|
yyuu/scalatest
|
src/test/scala/org/scalatest/junit/junit4helpers/TestWasCalledSuite.scala
|
Scala
|
apache-2.0
| 2,964 |
import enumeratum.values.{LongEnum, LongEnumEntry}
import pl.iterators.kebs.json.{KebsEnumFormats, KebsSpray}
import spray.json._
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
class SprayValueEnumJsonFormatTests extends AnyFunSuite with Matchers {
sealed abstract class LongGreeting(val value: Long) extends LongEnumEntry
object LongGreeting extends LongEnum[LongGreeting] {
val values = findValues
case object Hello extends LongGreeting(0L)
case object GoodBye extends LongGreeting(1L)
case object Hi extends LongGreeting(2L)
case object Bye extends LongGreeting(3L)
}
import LongGreeting._
object KebsProtocol extends DefaultJsonProtocol with KebsSpray with KebsEnumFormats
test("value enum JsonFormat") {
import KebsProtocol._
val jf = implicitly[JsonFormat[LongGreeting]]
jf.read(JsNumber(0)) shouldBe Hello
jf.read(JsNumber(1)) shouldBe GoodBye
jf.write(Hello) shouldBe JsNumber(0)
jf.write(GoodBye) shouldBe JsNumber(1)
}
test("value enum deserialization error") {
import KebsProtocol._
val jf = implicitly[JsonFormat[LongGreeting]]
the[DeserializationException] thrownBy jf.read(JsNumber(4)) should have message "4 is not a member of 0, 1, 2, 3"
}
}
|
theiterators/kebs
|
spray-json/src/test/scala/SprayValueEnumJsonFormatTests.scala
|
Scala
|
mit
| 1,291 |
package com.daodecode.scalaj.collection.test
import java.util
import java.util.Arrays.asList
import scala.collection.immutable.{Seq => ImSeq}
import scala.collection.mutable.{Buffer => MBuffer, Map => MMap, Set => MSet}
import scala.reflect.ClassTag
import com.daodecode.scalaj.collection._
import org.scalatest.{Matchers, WordSpec}
class SimpleSConvertersTest extends WordSpec with Matchers with JListBuilder with JSetBuilder with JMapBuilder {
"JListConverters" should {
def acceptBufferOf[A](sb: MBuffer[A]) = sb
def checkMutableBuffer[JL <: JList[Int]: ClassTag](): Unit = {
val jList: JL = listOf(2)
jList should be(JList(2))
jList.deepAsScala += 5
jList should be(JList(2, 5))
}
"convert lists of primitives properly" in {
"acceptBufferOf[Byte](asList[JByte](jb(1), jb(2), jb(3)))" shouldNot compile
"acceptBufferOf[Byte](asList[JByte](jb(1), jb(2), jb(3)).asScala)" shouldNot compile
acceptBufferOf[Byte](asList[JByte](jb(1), jb(2), jb(3)).deepAsScala)
acceptBufferOf[Short](asList[JShort](js(1), js(2), js(3)).deepAsScala)
acceptBufferOf[Int](asList[JInt](1, 2, 3).deepAsScala)
acceptBufferOf[Long](asList[JLong](1L, 2L, 3L).deepAsScala)
acceptBufferOf[Float](asList[JFloat](1f, 2f, 3f).deepAsScala)
acceptBufferOf[Double](asList[JDouble](1d, 2d, 3d).deepAsScala)
acceptBufferOf[Char](asList[JChar]('a', 'b').deepAsScala)
acceptBufferOf[Boolean](asList[JBoolean](true, false).deepAsScala)
}
"convert lists of non-primitives properly" in {
case class Boo(i: Int)
"acceptBufferOf[Boo](asList(Boo(3), Boo(5)))" shouldNot compile
acceptBufferOf[Boo](asList(Boo(3), Boo(5)).asScala)
acceptBufferOf[Boo](asList(Boo(3), Boo(5)).deepAsScala)
}
"allow custom converters" in {
implicit val intToString = SConverter[JInt, String](_ + 1.toString)
val asScala: MBuffer[String] = asList[JInt](1, 2, 3).deepAsScala
asScala(0) should be("11")
asScala(1) should be("21")
asScala(2) should be("31")
}
"support all standard JList subclasses" in {
acceptBufferOf(new util.ArrayList[JLong]().deepAsScala)
acceptBufferOf(new util.LinkedList[JLong]().deepAsScala)
}
"keep lists mutable" in {
checkMutableBuffer[util.ArrayList[Int]]()
checkMutableBuffer[util.LinkedList[Int]]()
}
"return same java list with primitives and self conversions" in {
{
val javaList = new util.ArrayList[JLong]()
javaList.deepAsScala.asJava should be theSameInstanceAs javaList
}
{
val javaList = new util.LinkedList[JLong]()
javaList.deepAsScala.asJava should be theSameInstanceAs javaList
}
{
class A
val javaList = new util.LinkedList[A]()
javaList.deepAsScala.asJava should be theSameInstanceAs javaList
}
}
"return immutable seq if asked" in {
import com.daodecode.scalaj.collection.immutable._
asList[JLong](1L, 2L, 3L).deepAsScalaImmutable: ImSeq[Long]
}
}
"ArrayConverters" should {
def acceptArrayOf[A](ar: Array[A]) = ar
"convert arrays of primitives properly" in {
acceptArrayOf[Byte](Array[JByte](jb(1), jb(2), jb(3)).deepAsScala)
acceptArrayOf[Short](Array[JShort](js(1), js(2), js(3)).deepAsScala)
acceptArrayOf[Int](Array[JInt](1, 2, 3).deepAsScala)
acceptArrayOf[Long](Array[JLong](1L, 2L, 3L).deepAsScala)
acceptArrayOf[Float](Array[JFloat](1f, 2f, 3f).deepAsScala)
acceptArrayOf[Double](Array[JDouble](1d, 2d, 3d).deepAsScala)
acceptArrayOf[Char](Array[JChar]('a', 'b').deepAsScala)
acceptArrayOf[Boolean](Array[JBoolean](true, false).deepAsScala)
}
"convert arrays of non-primitives properly" in {
case class Boo(i: Int)
acceptArrayOf[Boo](Array(Boo(3), Boo(5)).deepAsScala)
}
"allow custom converters" in {
implicit val intToString = SConverter[Int, String](_ + 1.toString)
val asScala: Array[String] = Array(1, 2, 3).deepAsScala
asScala(0) should be("11")
asScala(1) should be("21")
asScala(2) should be("31")
}
"return same array with primitives and self conversions" in {
{
val javaArray = Array[Int](1)
javaArray.deepAsScala should be theSameInstanceAs javaArray
}
{
val javaArray = Array[Char]('s')
javaArray.deepAsScala should be theSameInstanceAs javaArray
}
{
val javaArray = Array[Double](12d)
javaArray.deepAsScala should be theSameInstanceAs javaArray
}
{
class A
val javaArray = Array[A](new A)
javaArray.deepAsScala should be theSameInstanceAs javaArray
}
}
}
"JSetConverters" should {
def acceptMSetOf[A](ms: MSet[A]) = ms
def checkMutableSet[JS <: JSet[Int]: ClassTag](): Unit = {
val mSet: JS = setOf(2)
mSet should be(JSet(2))
mSet.deepAsScala += 5
mSet should be(JSet(2, 5))
}
"convert sets of primitives properly" in {
acceptMSetOf[Byte](JSet[JByte](jb(1), jb(2), jb(3)).deepAsScala)
acceptMSetOf[Short](JSet[JShort](js(1), js(2), js(3)).deepAsScala)
acceptMSetOf[Int](JSet(1, 2, 3).deepAsScala)
acceptMSetOf[Long](JSet(1L, 2L, 3L).deepAsScala)
acceptMSetOf[Float](JSet(1f, 2f, 3f).deepAsScala)
acceptMSetOf[Double](JSet(1d, 2d, 3d).deepAsScala)
acceptMSetOf[Char](JSet('a', 'b').deepAsScala)
acceptMSetOf[Boolean](JSet(true, false).deepAsScala)
}
"convert sets of non-primitives properly" in {
case class Boo(i: Int)
acceptMSetOf[Boo](JSet(Boo(3), Boo(5)).asScala)
acceptMSetOf[Boo](JSet(Boo(3), Boo(5)).deepAsScala)
}
"allow custom converters" in {
implicit val intToString = SConverter[Int, String](_ + 1.toString)
val asScala: MSet[String] = JSet(1, 2, 3).deepAsScala
asScala should contain("11")
asScala should contain("21")
asScala should contain("31")
}
"support all standard JSet subclasses" in {
acceptMSetOf(new util.HashSet[JInt]().deepAsScala)
acceptMSetOf(new util.TreeSet[JInt]().deepAsScala)
acceptMSetOf(new util.LinkedHashSet[JInt]().deepAsScala)
}
"keep sets mutable" in {
checkMutableSet[util.HashSet[Int]]()
checkMutableSet[util.TreeSet[Int]]()
checkMutableSet[util.LinkedHashSet[Int]]()
}
"return same java set with primitives and self conversions" in {
{
val javaSet = setOf[JInt, util.HashSet[JInt]](1, 2)
javaSet.deepAsScala.asJava should be theSameInstanceAs javaSet
}
{
val javaSet = setOf[JInt, util.TreeSet[JInt]](1, 2)
javaSet.deepAsScala.asJava should be theSameInstanceAs javaSet
}
{
val javaSet = setOf[JInt, util.LinkedHashSet[JInt]](1, 2)
javaSet.deepAsScala.asJava should be theSameInstanceAs javaSet
}
{
class A
val javaSet = setOf[A, util.LinkedHashSet[A]](new A)
javaSet.deepAsScala.asJava should be theSameInstanceAs javaSet
}
}
"return immutable set if asked" in {
import com.daodecode.scalaj.collection.immutable._
JSet[JLong](1L, 2L, 3L).deepAsScalaImmutable: Set[Long]
}
}
"JMapConverters" should {
def acceptMMapOf[A, B](sm: MMap[A, B]) = sm
def checkMutableMap[JM <: JMap[JInt, String]: ClassTag](): Unit = {
val jMap = mapOf[JInt, String, JM](ji(2) -> "two")
jMap should be(JMap(2 -> "two"))
jMap.deepAsScala update (5, "five")
jMap should be(JMap(2 -> "two", 5 -> "five"))
}
"convert maps of primitives properly" in {
acceptMMapOf[Byte, Int](JMap[JByte, JInt](jb(1) -> 2).deepAsScala)
acceptMMapOf[Short, Long](JMap[JShort, JLong](js(1) -> 2L).deepAsScala)
acceptMMapOf[Float, Double](JMap[JFloat, JDouble]((1f: JFloat) -> (2d: JDouble)).deepAsScala)
acceptMMapOf[Boolean, Char](JMap[JBoolean, JChar]((true: JBoolean) -> ('t': JChar)).deepAsScala)
}
"convert maps of non-primitives properly" in {
case class Boo(i: Int)
acceptMMapOf[Boo, String](JMap(Boo(3) -> "3", Boo(5) -> "5").asScala)
acceptMMapOf[Boo, String](JMap(Boo(3) -> "3", Boo(5) -> "5").deepAsScala)
}
"allow custom converters" in {
implicit val intToString = SConverter[Int, String](_ + 1.toString)
val asScala: MMap[String, String] = JMap[String, Int]("one" -> 1, "two" -> 2, "three" -> 3).deepAsScala
asScala("one") should be("11")
asScala("two") should be("21")
asScala("three") should be("31")
}
"support all standard Map subclasses" in {
acceptMMapOf(new util.HashMap[JInt, String]().deepAsScala)
acceptMMapOf(new util.IdentityHashMap[JInt, String]().deepAsScala)
acceptMMapOf(new util.LinkedHashMap[JInt, String]().deepAsScala)
acceptMMapOf(new util.TreeMap[JInt, String]().deepAsScala)
acceptMMapOf(new util.WeakHashMap[JInt, String]().deepAsScala)
}
"keep mutable maps mutable" in {
checkMutableMap[util.HashMap[JInt, String]]()
checkMutableMap[util.IdentityHashMap[JInt, String]]()
checkMutableMap[util.LinkedHashMap[JInt, String]]()
checkMutableMap[util.TreeMap[JInt, String]]()
checkMutableMap[util.WeakHashMap[JInt, String]]()
}
"return same mutable scala map with primitives and self conversions" in {
{
val javaMap = mapOf[JInt, String, util.HashMap[JInt, String]](ji(1) -> "one")
javaMap.deepAsScala.asJava should be theSameInstanceAs javaMap
}
{
val javaMap = mapOf[JInt, String, util.IdentityHashMap[JInt, String]](ji(1) -> "one")
javaMap.deepAsScala.asJava should be theSameInstanceAs javaMap
}
{
val javaMap = mapOf[JInt, String, util.LinkedHashMap[JInt, String]](ji(1) -> "one")
javaMap.deepAsScala.asJava should be theSameInstanceAs javaMap
}
{
val javaMap = mapOf[JInt, String, util.TreeMap[JInt, String]](ji(1) -> "one")
javaMap.deepAsScala.asJava should be theSameInstanceAs javaMap
}
{
val javaMap = mapOf[JInt, String, util.WeakHashMap[JInt, String]](ji(1) -> "one")
javaMap.deepAsScala.asJava should be theSameInstanceAs javaMap
}
{
class A
val javaMap = mapOf[A, String, util.HashMap[A, String]](new A -> "a")
javaMap.deepAsScala.asJava should be theSameInstanceAs javaMap
}
}
"return immutable map if asked" in {
import com.daodecode.scalaj.collection.immutable._
JMap[String, JDouble]("1" -> 1.0d).deepAsScalaImmutable: Map[String, Double]
}
}
}
|
jozic/scalaj
|
scalaj-collection/src/test/scala_2.13-/com/daodecode/scalaj/collection/test/SimpleSConvertersTest.scala
|
Scala
|
bsd-3-clause
| 10,704 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.shuffle.ShuffleMemoryManager
/**
* A [[ShuffleMemoryManager]] that can be controlled to run out of memory.
 * The amount of memory available at runtime can be controlled.
*/
class TestShuffleMemoryManager extends ShuffleMemoryManager(Long.MaxValue, 4 * 1024 * 1024) {
  // Flag indicating whether to simulate an out-of-memory condition
private var oom = false
override def tryToAcquire(numBytes: Long): Long = {
if (oom) {
oom = false
0
} else {
      // Uncomment the following to trace memory allocations.
// println(s"tryToAcquire $numBytes in " +
// Thread.currentThread().getStackTrace.mkString("", "\n -", ""))
val acquired = super.tryToAcquire(numBytes)
acquired
}
}
override def release(numBytes: Long): Unit = {
    // Uncomment the following to trace memory releases.
// println(s"release $numBytes in " +
// Thread.currentThread().getStackTrace.mkString("", "\n -", ""))
super.release(numBytes)
}
def markAsOutOfMemory(): Unit = {
oom = true
}
}
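/* A minimal usage sketch of the controllable manager above:
 *
 *   val manager = new TestShuffleMemoryManager
 *   manager.tryToAcquire(1024)   // delegates to the real ShuffleMemoryManager
 *   manager.markAsOutOfMemory()
 *   manager.tryToAcquire(1024)   // returns 0 once, simulating memory exhaustion
 */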
|
tophua/spark1.52
|
sql/core/src/test/scala/org/apache/spark/sql/execution/TestShuffleMemoryManager.scala
|
Scala
|
apache-2.0
| 1,920 |
/*
* Copyright 2017-2018 47 Degrees, LLC. <http://www.47deg.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package freestyle.free
import cats.Monad
import _root_.doobie.{ConnectionIO, Transactor}
object doobie {
@free sealed trait DoobieM {
def transact[A](f: ConnectionIO[A]): FS[A]
}
trait Implicits {
implicit def freeStyleDoobieHandler[M[_]: Monad](
implicit xa: Transactor[M]): DoobieM.Handler[M] =
new DoobieM.Handler[M] {
def transact[A](fa: ConnectionIO[A]): M[A] = xa.trans.apply(fa)
}
implicit def freeSLiftDoobie[F[_]: DoobieM]: FreeSLift[F, ConnectionIO] =
new FreeSLift[F, ConnectionIO] {
def liftFSPar[A](cio: ConnectionIO[A]): FreeS.Par[F, A] = DoobieM[F].transact(cio)
}
}
object implicits extends Implicits
}
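/* A minimal sketch of lifting a ConnectionIO into a free program via the algebra above;
 * the method name and query value are hypothetical:
 *
 *   def fetchOne[F[_]: DoobieM](q: ConnectionIO[Int]): FreeS.Par[F, Int] =
 *     DoobieM[F].transact(q)
 *   // interpreting the program requires an implicit Transactor[M] via freeStyleDoobieHandler
 */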
|
frees-io/freestyle
|
modules/integrations/doobie/src/main/scala/free/doobie.scala
|
Scala
|
apache-2.0
| 1,324 |
import scala.util.TupledFunction
object Test {
def main(args: Array[String]): Unit = {
type T
type R
summon[TupledFunction[Nothing, ((T, T, T)) => R]] // error
summon[TupledFunction[Any, ((T, T, T)) => R]] // error
summon[TupledFunction[Tuple1[Int], ((T, T, T)) => R]] // error
    summon[TupledFunction[(T, T, T) => R, Nothing]] // error
summon[TupledFunction[(T, T, T) => R, Any]] // error
summon[TupledFunction[((T, T, T)) => R, Tuple1[Int]]] // error
summon[TupledFunction[() => R, () => R]] // error
summon[TupledFunction[() => Unit, () => Unit]] // error
summon[TupledFunction[(T, T, T) => R, () => R]] // error
summon[TupledFunction[(T, T, T) => R, (T, T) => R]] // error
summon[TupledFunction[(T, T, T) => R, ((T, T, T)) ?=> R]] // error
summon[TupledFunction[(T, T, T) => R, ((T, T, T)) ?=> R]] // error
}
}
|
dotty-staging/dotty
|
tests/disabled/neg/tupled-function-instances.scala
|
Scala
|
apache-2.0
| 878 |
/*
* Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see http://www.gnu.org/licenses/agpl.html.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package generated.scala
class UnsupervisedTrainingSet[T: Manifest](val _data: DenseMatrix[T])
|
tesendic/Relite
|
src/generated/scala/UnsupervisedTrainingSet.scala
|
Scala
|
agpl-3.0
| 1,084 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Expression, Generator}
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types.{IntegerType, StructType}
class GeneratorFunctionSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("stack") {
val df = spark.range(1)
    // An empty DataFrame suppresses result generation
checkAnswer(spark.emptyDataFrame.selectExpr("stack(1, 1, 2, 3)"), Nil)
// Rows & columns
checkAnswer(df.selectExpr("stack(1, 1, 2, 3)"), Row(1, 2, 3) :: Nil)
checkAnswer(df.selectExpr("stack(2, 1, 2, 3)"), Row(1, 2) :: Row(3, null) :: Nil)
checkAnswer(df.selectExpr("stack(3, 1, 2, 3)"), Row(1) :: Row(2) :: Row(3) :: Nil)
checkAnswer(df.selectExpr("stack(4, 1, 2, 3)"), Row(1) :: Row(2) :: Row(3) :: Row(null) :: Nil)
// Various column types
checkAnswer(df.selectExpr("stack(3, 1, 1.1, 'a', 2, 2.2, 'b', 3, 3.3, 'c')"),
Row(1, 1.1, "a") :: Row(2, 2.2, "b") :: Row(3, 3.3, "c") :: Nil)
// Null values
checkAnswer(df.selectExpr("stack(3, 1, 1.1, null, 2, null, 'b', null, 3.3, 'c')"),
Row(1, 1.1, null) :: Row(2, null, "b") :: Row(null, 3.3, "c") :: Nil)
// Repeat generation at every input row
checkAnswer(spark.range(2).selectExpr("stack(2, 1, 2, 3)"),
Row(1, 2) :: Row(3, null) :: Row(1, 2) :: Row(3, null) :: Nil)
// The first argument must be a positive constant integer.
val m = intercept[AnalysisException] {
df.selectExpr("stack(1.1, 1, 2, 3)")
}.getMessage
assert(m.contains("The number of rows must be a positive constant integer."))
val m2 = intercept[AnalysisException] {
df.selectExpr("stack(-1, 1, 2, 3)")
}.getMessage
assert(m2.contains("The number of rows must be a positive constant integer."))
// The data for the same column should have the same type.
val m3 = intercept[AnalysisException] {
df.selectExpr("stack(2, 1, '2.2')")
}.getMessage
assert(m3.contains("data type mismatch: Argument 1 (int) != Argument 2 (string)"))
// stack on column data
val df2 = Seq((2, 1, 2, 3)).toDF("n", "a", "b", "c")
checkAnswer(df2.selectExpr("stack(2, a, b, c)"), Row(1, 2) :: Row(3, null) :: Nil)
val m4 = intercept[AnalysisException] {
df2.selectExpr("stack(n, a, b, c)")
}.getMessage
assert(m4.contains("The number of rows must be a positive constant integer."))
val df3 = Seq((2, 1, 2.0)).toDF("n", "a", "b")
val m5 = intercept[AnalysisException] {
df3.selectExpr("stack(2, a, b)")
}.getMessage
assert(m5.contains("data type mismatch: Argument 1 (int) != Argument 2 (double)"))
}
test("single explode") {
val df = Seq((1, Seq(1, 2, 3))).toDF("a", "intList")
checkAnswer(
df.select(explode('intList)),
Row(1) :: Row(2) :: Row(3) :: Nil)
}
test("single explode_outer") {
val df = Seq((1, Seq(1, 2, 3)), (2, Seq())).toDF("a", "intList")
checkAnswer(
df.select(explode_outer('intList)),
Row(1) :: Row(2) :: Row(3) :: Row(null) :: Nil)
}
test("single posexplode") {
val df = Seq((1, Seq(1, 2, 3))).toDF("a", "intList")
checkAnswer(
df.select(posexplode('intList)),
Row(0, 1) :: Row(1, 2) :: Row(2, 3) :: Nil)
}
test("single posexplode_outer") {
val df = Seq((1, Seq(1, 2, 3)), (2, Seq())).toDF("a", "intList")
checkAnswer(
df.select(posexplode_outer('intList)),
Row(0, 1) :: Row(1, 2) :: Row(2, 3) :: Row(null, null) :: Nil)
}
test("explode and other columns") {
val df = Seq((1, Seq(1, 2, 3))).toDF("a", "intList")
checkAnswer(
df.select($"a", explode('intList)),
Row(1, 1) ::
Row(1, 2) ::
Row(1, 3) :: Nil)
checkAnswer(
df.select($"*", explode('intList)),
Row(1, Seq(1, 2, 3), 1) ::
Row(1, Seq(1, 2, 3), 2) ::
Row(1, Seq(1, 2, 3), 3) :: Nil)
}
test("explode_outer and other columns") {
val df = Seq((1, Seq(1, 2, 3)), (2, Seq())).toDF("a", "intList")
checkAnswer(
df.select($"a", explode_outer('intList)),
Row(1, 1) ::
Row(1, 2) ::
Row(1, 3) ::
Row(2, null) ::
Nil)
checkAnswer(
df.select($"*", explode_outer('intList)),
Row(1, Seq(1, 2, 3), 1) ::
Row(1, Seq(1, 2, 3), 2) ::
Row(1, Seq(1, 2, 3), 3) ::
Row(2, Seq(), null) ::
Nil)
}
test("aliased explode") {
val df = Seq((1, Seq(1, 2, 3))).toDF("a", "intList")
checkAnswer(
df.select(explode('intList).as('int)).select('int),
Row(1) :: Row(2) :: Row(3) :: Nil)
checkAnswer(
df.select(explode('intList).as('int)).select(sum('int)),
Row(6) :: Nil)
}
test("aliased explode_outer") {
val df = Seq((1, Seq(1, 2, 3)), (2, Seq())).toDF("a", "intList")
checkAnswer(
df.select(explode_outer('intList).as('int)).select('int),
Row(1) :: Row(2) :: Row(3) :: Row(null) :: Nil)
checkAnswer(
df.select(explode('intList).as('int)).select(sum('int)),
Row(6) :: Nil)
}
test("explode on map") {
val df = Seq((1, Map("a" -> "b"))).toDF("a", "map")
checkAnswer(
df.select(explode('map)),
Row("a", "b"))
}
test("explode_outer on map") {
val df = Seq((1, Map("a" -> "b")), (2, Map[String, String]()),
(3, Map("c" -> "d"))).toDF("a", "map")
checkAnswer(
df.select(explode_outer('map)),
Row("a", "b") :: Row(null, null) :: Row("c", "d") :: Nil)
}
test("explode on map with aliases") {
val df = Seq((1, Map("a" -> "b"))).toDF("a", "map")
checkAnswer(
df.select(explode('map).as("key1" :: "value1" :: Nil)).select("key1", "value1"),
Row("a", "b"))
}
test("explode_outer on map with aliases") {
val df = Seq((3, None), (1, Some(Map("a" -> "b")))).toDF("a", "map")
checkAnswer(
df.select(explode_outer('map).as("key1" :: "value1" :: Nil)).select("key1", "value1"),
Row("a", "b") :: Row(null, null) :: Nil)
}
test("self join explode") {
val df = Seq((1, Seq(1, 2, 3))).toDF("a", "intList")
val exploded = df.select(explode('intList).as('i))
checkAnswer(
exploded.join(exploded, exploded("i") === exploded("i")).agg(count("*")),
Row(3) :: Nil)
}
test("inline raises exception on array of null type") {
val m = intercept[AnalysisException] {
spark.range(2).selectExpr("inline(array())")
}.getMessage
assert(m.contains("data type mismatch"))
}
test("inline with empty table") {
checkAnswer(
spark.range(0).selectExpr("inline(array(struct(10, 100)))"),
Nil)
}
test("inline on literal") {
checkAnswer(
spark.range(2).selectExpr("inline(array(struct(10, 100), struct(20, 200), struct(30, 300)))"),
Row(10, 100) :: Row(20, 200) :: Row(30, 300) ::
Row(10, 100) :: Row(20, 200) :: Row(30, 300) :: Nil)
}
test("inline on column") {
val df = Seq((1, 2)).toDF("a", "b")
checkAnswer(
df.selectExpr("inline(array(struct(a), struct(a)))"),
Row(1) :: Row(1) :: Nil)
checkAnswer(
df.selectExpr("inline(array(struct(a, b), struct(a, b)))"),
Row(1, 2) :: Row(1, 2) :: Nil)
    // Spark thinks [struct<a:int>, struct<b:int>] is heterogeneous due to the name difference.
val m = intercept[AnalysisException] {
df.selectExpr("inline(array(struct(a), struct(b)))")
}.getMessage
assert(m.contains("data type mismatch"))
checkAnswer(
df.selectExpr("inline(array(struct(a), named_struct('a', b)))"),
Row(1) :: Row(2) :: Nil)
    // Spark thinks [struct<a:int>, struct<col1:int>] is heterogeneous due to the name difference.
val m2 = intercept[AnalysisException] {
df.selectExpr("inline(array(struct(a), struct(2)))")
}.getMessage
assert(m2.contains("data type mismatch"))
checkAnswer(
df.selectExpr("inline(array(struct(a), named_struct('a', 2)))"),
Row(1) :: Row(2) :: Nil)
checkAnswer(
df.selectExpr("struct(a)").selectExpr("inline(array(*))"),
Row(1) :: Nil)
checkAnswer(
df.selectExpr("array(struct(a), named_struct('a', b))").selectExpr("inline(*)"),
Row(1) :: Row(2) :: Nil)
}
test("inline_outer") {
val df = Seq((1, "2"), (3, "4"), (5, "6")).toDF("col1", "col2")
val df2 = df.select(when('col1 === 1, null).otherwise(array(struct('col1, 'col2))).as("col1"))
checkAnswer(
df2.selectExpr("inline(col1)"),
Row(3, "4") :: Row(5, "6") :: Nil
)
checkAnswer(
df2.selectExpr("inline_outer(col1)"),
Row(null, null) :: Row(3, "4") :: Row(5, "6") :: Nil
)
}
test("SPARK-14986: Outer lateral view with empty generate expression") {
checkAnswer(
sql("select nil from values 1 lateral view outer explode(array()) n as nil"),
Row(null) :: Nil
)
}
test("outer explode()") {
checkAnswer(
sql("select * from values 1, 2 lateral view outer explode(array()) a as b"),
Row(1, null) :: Row(2, null) :: Nil)
}
test("outer generator()") {
spark.sessionState.functionRegistry
.createOrReplaceTempFunction("empty_gen", _ => EmptyGenerator())
checkAnswer(
sql("select * from values 1, 2 lateral view outer empty_gen() a as b"),
Row(1, null) :: Row(2, null) :: Nil)
}
}
case class EmptyGenerator() extends Generator {
override def children: Seq[Expression] = Nil
override def elementSchema: StructType = new StructType().add("id", IntegerType)
override def eval(input: InternalRow): TraversableOnce[InternalRow] = Seq.empty
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val iteratorClass = classOf[Iterator[_]].getName
ev.copy(code = s"$iteratorClass<InternalRow> ${ev.value} = $iteratorClass$$.MODULE$$.empty();")
}
}
|
brad-kaiser/spark
|
sql/core/src/test/scala/org/apache/spark/sql/GeneratorFunctionSuite.scala
|
Scala
|
apache-2.0
| 10,773 |
package core.modules
import com.google.inject.AbstractModule
import com.sksamuel.elastic4s.{ElasticClient, ElasticsearchClientUri}
import models.SiteSchema
import org.elasticsearch.common.settings.ImmutableSettings
import play.api.{Logger, Environment, Configuration}
import com.sksamuel.elastic4s.ElasticDsl._
import core.util.FutureUtil._
import javax.inject._
import scala.concurrent.{Future, ExecutionContext}
import scala.util.Success
class PlayModule (environment: Environment, config: Configuration) extends AbstractModule {
def configure = {
val remote_url = config.getString("elasticsearch.remote.url").get
val cluster_name = config.getString("elasticsearch.cluster").get
val settings = ImmutableSettings.settingsBuilder()
.put("cluster.name", cluster_name)
.build()
val client = ElasticClient.remote(settings, ElasticsearchClientUri(remote_url))
bind(classOf[ElasticClient]).toInstance(client)
bind(classOf[SearchMappingsFoo]).to(classOf[SearchMappings]).asEagerSingleton()
}
}
trait SearchMappingsFoo
@Singleton
class SearchMappings @Inject() (client: ElasticClient, config: Configuration)(implicit context: ExecutionContext) extends SearchMappingsFoo {
//todo: it's confusing that we're reusing this as the index name
val cluster_name = config.getString("elasticsearch.cluster").get
client.execute {
index.exists(cluster_name)
}.flatMap { response =>
if (response.isExists) {
Future.sequence(SiteSchema.schema.byType.map { kvp =>
client.execute {
put mapping cluster_name / kvp._1 as kvp._2.elasticFields(Set())
}
}).andThen {
case Success(_) => Logger.info("Updated elasticsearch mappings")
}
} else {
client.execute {
create index cluster_name mappings (SiteSchema.schema.byType.map(kvp => kvp._1 as kvp._2.elasticFields(Set())).toSeq: _*)
}.andThen {
case Success(_) => Logger.info("Created elasticsearch index")
}
}
}.andThen(logFailure("PlayModule::ensureESIndex"))
}
|
moatra/stagert
|
app/core/modules/PlayModule.scala
|
Scala
|
mit
| 2,045 |
// diversity-maximization: Diversity maximization in Streaming and MapReduce
// Copyright (C) 2016 Matteo Ceccarello <[email protected]>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package it.unipd.dei.diversity
import org.scalameter.api._
import scala.util.Random
object FarthestHeuristicBench extends Bench.OfflineReport {
val randomGen = new Random()
val distance: (Point, Point) => Double = Distance.euclidean
val sets: Gen[Array[Point]] = for {
size <- Gen.range("size")(100, 500, 100)
} yield Array.ofDim[Point](size).map{_ => Point.random(10, randomGen)}
val ks: Gen[Int] = Gen.range("k")(10, 90, 10)
val params: Gen[(Array[Point], Int)] = for {
points <- sets
k <- ks
} yield (points, k)
performance of "gmm" in {
measure method "runIdiomatic" in {
using(params) in { case (points, k) =>
FarthestPointHeuristic.runIdiomatic(points, k, distance)
}
}
measure method "runSlow" in {
using(params) in { case (points, k) =>
FarthestPointHeuristic.runSlow(points, k, 0, distance)
}
}
measure method "run" in {
using(params) in { case (points, k) =>
FarthestPointHeuristic.run(points, k, 0, distance)
}
}
}
}
|
Cecca/diversity-maximization
|
core/src/bench/scala/it/unipd/dei/diversity/FarthestHeuristicBench.scala
|
Scala
|
gpl-3.0
| 1,848 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.k8s
import java.util.Locale
import scala.collection.JavaConverters._
import io.fabric8.kubernetes.api.model.ContainerStateTerminated
import io.fabric8.kubernetes.api.model.Pod
import org.apache.spark.deploy.k8s.Constants._
import org.apache.spark.internal.Logging
/**
* An immutable view of the current executor pods that are running in the cluster.
*/
private[spark] case class ExecutorPodsSnapshot(
executorPods: Map[Long, ExecutorPodState],
fullSnapshotTs: Long) {
import ExecutorPodsSnapshot._
def withUpdate(updatedPod: Pod): ExecutorPodsSnapshot = {
val newExecutorPods = executorPods ++ toStatesByExecutorId(Seq(updatedPod))
new ExecutorPodsSnapshot(newExecutorPods, fullSnapshotTs)
}
}
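// Illustrative usage sketch (not part of the original file): build a minimal executor pod
// with fabric8's PodBuilder and fold it into an empty snapshot via withUpdate. The pod
// name and the executor id label value below are hypothetical.
private[spark] object ExecutorPodsSnapshotUsageExample {
  def example(): ExecutorPodsSnapshot = {
    val pod = new io.fabric8.kubernetes.api.model.PodBuilder()
      .withNewMetadata()
        .withName("spark-exec-example")
        .addToLabels(SPARK_EXECUTOR_ID_LABEL, "1")
      .endMetadata()
      .withNewStatus()
        .withPhase("Running")
      .endStatus()
      .build()
    // Start from the empty snapshot and apply the update; the pod is keyed by its executor id.
    ExecutorPodsSnapshot().withUpdate(pod)
  }
}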
object ExecutorPodsSnapshot extends Logging {
private var shouldCheckAllContainers: Boolean = _
private var sparkContainerName: String = DEFAULT_EXECUTOR_CONTAINER_NAME
def apply(executorPods: Seq[Pod], fullSnapshotTs: Long): ExecutorPodsSnapshot = {
ExecutorPodsSnapshot(toStatesByExecutorId(executorPods), fullSnapshotTs)
}
def apply(): ExecutorPodsSnapshot = ExecutorPodsSnapshot(Map.empty[Long, ExecutorPodState], 0)
def setShouldCheckAllContainers(watchAllContainers: Boolean): Unit = {
shouldCheckAllContainers = watchAllContainers
}
def setSparkContainerName(containerName: String): Unit = {
sparkContainerName = containerName
}
private def toStatesByExecutorId(executorPods: Seq[Pod]): Map[Long, ExecutorPodState] = {
executorPods.flatMap { pod =>
pod.getMetadata.getLabels.get(SPARK_EXECUTOR_ID_LABEL) match {
case "EXECID" | null =>
// The exec label has not yet been assigned
None
case id =>
// We have a "real" id label
Some((id.toLong, toState(pod)))
}
}.toMap
}
private def toState(pod: Pod): ExecutorPodState = {
if (isDeleted(pod)) {
PodDeleted(pod)
} else {
val phase = pod.getStatus.getPhase.toLowerCase(Locale.ROOT)
phase match {
case "pending" =>
PodPending(pod)
case "running" =>
// If we're checking all containers look for any non-zero exits
if (shouldCheckAllContainers &&
"Never" == pod.getSpec.getRestartPolicy &&
pod.getStatus.getContainerStatuses.stream
.map[ContainerStateTerminated](cs => cs.getState.getTerminated)
.anyMatch(t => t != null && t.getExitCode != 0)) {
PodFailed(pod)
} else {
// Otherwise look for the Spark container and get the exit code if present.
val sparkContainerExitCode = pod.getStatus.getContainerStatuses.asScala
.find(_.getName() == sparkContainerName).flatMap(x => Option(x.getState))
.flatMap(x => Option(x.getTerminated)).flatMap(x => Option(x.getExitCode))
.map(_.toInt)
sparkContainerExitCode match {
case Some(t) =>
t match {
case 0 =>
PodSucceeded(pod)
case _ =>
PodFailed(pod)
}
// No exit code means we are running.
case _ =>
PodRunning(pod)
}
}
case "failed" =>
PodFailed(pod)
case "succeeded" =>
PodSucceeded(pod)
case "terminating" =>
PodTerminating(pod)
case _ =>
logWarning(s"Received unknown phase $phase for executor pod with name" +
s" ${pod.getMetadata.getName} in namespace ${pod.getMetadata.getNamespace}")
PodUnknown(pod)
}
}
}
private def isDeleted(pod: Pod): Boolean = {
(pod.getMetadata.getDeletionTimestamp != null &&
(
pod.getStatus == null ||
pod.getStatus.getPhase == null ||
(pod.getStatus.getPhase.toLowerCase(Locale.ROOT) != "terminating" &&
pod.getStatus.getPhase.toLowerCase(Locale.ROOT) != "running")
))
}
}
|
ueshin/apache-spark
|
resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsSnapshot.scala
|
Scala
|
apache-2.0
| 4,863 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.consumer
import java.net.InetAddress
import java.util.UUID
import java.util.concurrent._
import java.util.concurrent.atomic._
import java.util.concurrent.locks.ReentrantLock
import com.yammer.metrics.core.Gauge
import kafka.api._
import kafka.client.ClientUtils
import kafka.cluster._
import kafka.common._
import kafka.javaapi.consumer.ConsumerRebalanceListener
import kafka.metrics._
import kafka.network.BlockingChannel
import kafka.serializer._
import kafka.utils.CoreUtils.inLock
import kafka.utils.ZkUtils._
import kafka.utils._
import org.I0Itec.zkclient.exception.ZkNodeExistsException
import org.I0Itec.zkclient.{IZkChildListener, IZkDataListener, IZkStateListener}
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.security.JaasUtils
import org.apache.kafka.common.utils.Time
import org.apache.zookeeper.Watcher.Event.KeeperState
import scala.collection._
import scala.collection.JavaConverters._
/**
* This class handles the consumers interaction with zookeeper
*
* Directories:
* 1. Consumer id registry:
* /consumers/[group_id]/ids/[consumer_id] -> topic1,...topicN
* A consumer has a unique consumer id within a consumer group. A consumer registers its id as an ephemeral znode
* and puts all topics that it subscribes to as the value of the znode. The znode is deleted when the client is gone.
* A consumer subscribes to event changes of the consumer id registry within its group.
*
* The consumer id is picked up from configuration, instead of the sequential id assigned by ZK. Generated sequential
* ids are hard to recover during temporary connection loss to ZK, since it's difficult for the client to figure out
* whether the creation of a sequential znode has succeeded or not. More details can be found at
* (http://wiki.apache.org/hadoop/ZooKeeper/ErrorHandling)
*
* 2. Broker node registry:
* /brokers/[0...N] --> { "host" : "host:port",
* "topics" : {"topic1": ["partition1" ... "partitionN"], ...,
* "topicN": ["partition1" ... "partitionN"] } }
 * This is a list of all present broker nodes. A unique logical node id is configured on each broker node. A broker
* node registers itself on start-up and creates a znode with the logical node id under /brokers. The value of the znode
* is a JSON String that contains (1) the host name and the port the broker is listening to, (2) a list of topics that
* the broker serves, (3) a list of logical partitions assigned to each topic on the broker.
* A consumer subscribes to event changes of the broker node registry.
*
* 3. Partition owner registry:
* /consumers/[group_id]/owner/[topic]/[broker_id-partition_id] --> consumer_node_id
 * This stores the mapping between broker partitions and consumers. Each partition is owned by a unique consumer
* within a consumer group. The mapping is reestablished after each rebalancing.
*
* 4. Consumer offset tracking:
* /consumers/[group_id]/offsets/[topic]/[broker_id-partition_id] --> offset_counter_value
* Each consumer tracks the offset of the latest message consumed for each partition.
*
*/
private[kafka] object ZookeeperConsumerConnector {
val shutdownCommand: FetchedDataChunk = new FetchedDataChunk(null, null, -1L)
}
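// Illustrative sketch (not part of the original file): how the ZooKeeper layout documented
// above maps onto the ZKGroupDirs / ZKGroupTopicDirs helpers used throughout this class.
// The expected-path comments are assumptions based on that documented layout.
private[kafka] object ZookeeperConsumerPathsExample {
  def consumerPaths(groupId: String, topic: String, partition: Int): Seq[String] = {
    val groupDirs = new ZKGroupDirs(groupId)
    val topicDirs = new ZKGroupTopicDirs(groupId, topic)
    Seq(
      groupDirs.consumerRegistryDir,                 // /consumers/[group_id]/ids
      topicDirs.consumerOffsetDir + "/" + partition, // /consumers/[group_id]/offsets/[topic]/[partition]
      topicDirs.consumerOwnerDir + "/" + partition   // /consumers/[group_id]/owner/[topic]/[partition]
    )
  }
}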
private[kafka] class ZookeeperConsumerConnector(val config: ConsumerConfig,
val enableFetcher: Boolean) // for testing only
extends ConsumerConnector with Logging with KafkaMetricsGroup {
private val isShuttingDown = new AtomicBoolean(false)
private val rebalanceLock = new Object
private var fetcher: Option[ConsumerFetcherManager] = None
private var zkUtils: ZkUtils = null
private var topicRegistry = new Pool[String, Pool[Int, PartitionTopicInfo]]
private val checkpointedZkOffsets = new Pool[TopicAndPartition, Long]
private val topicThreadIdAndQueues = new Pool[(String, ConsumerThreadId), BlockingQueue[FetchedDataChunk]]
private val scheduler = new KafkaScheduler(threads = 1, threadNamePrefix = "kafka-consumer-scheduler-")
private val messageStreamCreated = new AtomicBoolean(false)
private var sessionExpirationListener: ZKSessionExpireListener = null
private var topicPartitionChangeListener: ZKTopicPartitionChangeListener = null
private var loadBalancerListener: ZKRebalancerListener = null
private var offsetsChannel: BlockingChannel = null
private val offsetsChannelLock = new Object
private var wildcardTopicWatcher: ZookeeperTopicEventWatcher = null
private var consumerRebalanceListener: ConsumerRebalanceListener = null
// useful for tracking migration of consumers to store offsets in kafka
private val kafkaCommitMeter = newMeter("KafkaCommitsPerSec", "commits", TimeUnit.SECONDS, Map("clientId" -> config.clientId))
private val zkCommitMeter = newMeter("ZooKeeperCommitsPerSec", "commits", TimeUnit.SECONDS, Map("clientId" -> config.clientId))
private val rebalanceTimer = new KafkaTimer(newTimer("RebalanceRateAndTime", TimeUnit.MILLISECONDS, TimeUnit.SECONDS, Map("clientId" -> config.clientId)))
newGauge(
"yammer-metrics-count",
new Gauge[Int] {
def value = {
com.yammer.metrics.Metrics.defaultRegistry().allMetrics().size()
}
}
)
val consumerIdString = {
var consumerUuid : String = null
config.consumerId match {
case Some(consumerId) // for testing only
=> consumerUuid = consumerId
case None // generate unique consumerId automatically
=> val uuid = UUID.randomUUID()
consumerUuid = "%s-%d-%s".format(
InetAddress.getLocalHost.getHostName, System.currentTimeMillis,
uuid.getMostSignificantBits().toHexString.substring(0,8))
}
config.groupId + "_" + consumerUuid
}
this.logIdent = "[" + consumerIdString + "], "
connectZk()
createFetcher()
ensureOffsetManagerConnected()
if (config.autoCommitEnable) {
scheduler.startup
info("starting auto committer every " + config.autoCommitIntervalMs + " ms")
scheduler.schedule("kafka-consumer-autocommit",
autoCommit,
delay = config.autoCommitIntervalMs,
period = config.autoCommitIntervalMs,
unit = TimeUnit.MILLISECONDS)
}
KafkaMetricsReporter.startReporters(config.props)
AppInfo.registerInfo()
def this(config: ConsumerConfig) = this(config, true)
def createMessageStreams(topicCountMap: Map[String,Int]): Map[String, List[KafkaStream[Array[Byte],Array[Byte]]]] =
createMessageStreams(topicCountMap, new DefaultDecoder(), new DefaultDecoder())
def createMessageStreams[K,V](topicCountMap: Map[String,Int], keyDecoder: Decoder[K], valueDecoder: Decoder[V])
: Map[String, List[KafkaStream[K,V]]] = {
if (messageStreamCreated.getAndSet(true))
throw new MessageStreamsExistException(this.getClass.getSimpleName +
" can create message streams at most once",null)
consume(topicCountMap, keyDecoder, valueDecoder)
}
def createMessageStreamsByFilter[K,V](topicFilter: TopicFilter,
numStreams: Int,
keyDecoder: Decoder[K] = new DefaultDecoder(),
valueDecoder: Decoder[V] = new DefaultDecoder()) = {
val wildcardStreamsHandler = new WildcardStreamsHandler[K,V](topicFilter, numStreams, keyDecoder, valueDecoder)
wildcardStreamsHandler.streams
}
def setConsumerRebalanceListener(listener: ConsumerRebalanceListener) {
if (messageStreamCreated.get())
throw new MessageStreamsExistException(this.getClass.getSimpleName +
" can only set consumer rebalance listener before creating streams",null)
consumerRebalanceListener = listener
}
private def createFetcher() {
if (enableFetcher)
fetcher = Some(new ConsumerFetcherManager(consumerIdString, config, zkUtils))
}
private def connectZk() {
info("Connecting to zookeeper instance at " + config.zkConnect)
zkUtils = ZkUtils(config.zkConnect,
config.zkSessionTimeoutMs,
config.zkConnectionTimeoutMs,
JaasUtils.isZkSecurityEnabled())
}
// Blocks until the offset manager is located and a channel is established to it.
private def ensureOffsetManagerConnected() {
if (config.offsetsStorage == "kafka") {
if (offsetsChannel == null || !offsetsChannel.isConnected)
offsetsChannel = ClientUtils.channelToOffsetManager(config.groupId, zkUtils,
config.offsetsChannelSocketTimeoutMs, config.offsetsChannelBackoffMs)
debug("Connected to offset manager %s:%d.".format(offsetsChannel.host, offsetsChannel.port))
}
}
def shutdown() {
val canShutdown = isShuttingDown.compareAndSet(false, true)
if (canShutdown) {
info("ZKConsumerConnector shutting down")
val startTime = System.nanoTime()
KafkaMetricsGroup.removeAllConsumerMetrics(config.clientId)
if (wildcardTopicWatcher != null)
wildcardTopicWatcher.shutdown()
rebalanceLock synchronized {
try {
if (config.autoCommitEnable)
scheduler.shutdown()
fetcher match {
case Some(f) => f.stopConnections
case None =>
}
sendShutdownToAllQueues()
if (config.autoCommitEnable)
commitOffsets(true)
if (zkUtils != null) {
zkUtils.close()
zkUtils = null
}
if (offsetsChannel != null) offsetsChannel.disconnect()
} catch {
case e: Throwable =>
fatal("error during consumer connector shutdown", e)
}
info("ZKConsumerConnector shutdown completed in " + (System.nanoTime() - startTime) / 1000000 + " ms")
}
}
}
def consume[K, V](topicCountMap: scala.collection.Map[String,Int], keyDecoder: Decoder[K], valueDecoder: Decoder[V])
: Map[String,List[KafkaStream[K,V]]] = {
debug("entering consume ")
if (topicCountMap == null)
throw new RuntimeException("topicCountMap is null")
val topicCount = TopicCount.constructTopicCount(consumerIdString, topicCountMap)
val topicThreadIds = topicCount.getConsumerThreadIdsPerTopic
// make a list of (queue,stream) pairs, one pair for each threadId
val queuesAndStreams = topicThreadIds.values.map(threadIdSet =>
threadIdSet.map(_ => {
val queue = new LinkedBlockingQueue[FetchedDataChunk](config.queuedMaxMessages)
val stream = new KafkaStream[K,V](
queue, config.consumerTimeoutMs, keyDecoder, valueDecoder, config.clientId)
(queue, stream)
})
).flatten.toList
val dirs = new ZKGroupDirs(config.groupId)
registerConsumerInZK(dirs, consumerIdString, topicCount)
reinitializeConsumer(topicCount, queuesAndStreams)
loadBalancerListener.kafkaMessageAndMetadataStreams.asInstanceOf[Map[String, List[KafkaStream[K,V]]]]
}
// this API is used by unit tests only
def getTopicRegistry: Pool[String, Pool[Int, PartitionTopicInfo]] = topicRegistry
private def registerConsumerInZK(dirs: ZKGroupDirs, consumerIdString: String, topicCount: TopicCount) {
info("begin registering consumer " + consumerIdString + " in ZK")
val timestamp = Time.SYSTEM.milliseconds.toString
val consumerRegistrationInfo = Json.encode(Map("version" -> 1, "subscription" -> topicCount.getTopicCountMap, "pattern" -> topicCount.pattern,
"timestamp" -> timestamp))
val zkWatchedEphemeral = new ZKCheckedEphemeral(dirs.
consumerRegistryDir + "/" + consumerIdString,
consumerRegistrationInfo,
zkUtils.zkConnection.getZookeeper,
false)
zkWatchedEphemeral.create()
info("end registering consumer " + consumerIdString + " in ZK")
}
private def sendShutdownToAllQueues() = {
for (queue <- topicThreadIdAndQueues.values.toSet[BlockingQueue[FetchedDataChunk]]) {
debug("Clearing up queue")
queue.clear()
queue.put(ZookeeperConsumerConnector.shutdownCommand)
debug("Cleared queue and sent shutdown command")
}
}
def autoCommit() {
trace("auto committing")
try {
commitOffsets(isAutoCommit = false)
}
catch {
case t: Throwable =>
// log it and let it go
error("exception during autoCommit: ", t)
}
}
def commitOffsetToZooKeeper(topicPartition: TopicAndPartition, offset: Long) {
if (checkpointedZkOffsets.get(topicPartition) != offset) {
val topicDirs = new ZKGroupTopicDirs(config.groupId, topicPartition.topic)
zkUtils.updatePersistentPath(topicDirs.consumerOffsetDir + "/" + topicPartition.partition, offset.toString)
checkpointedZkOffsets.put(topicPartition, offset)
zkCommitMeter.mark()
}
}
/**
* KAFKA-1743: This method added for backward compatibility.
*/
def commitOffsets { commitOffsets(true) }
def commitOffsets(isAutoCommit: Boolean) {
val offsetsToCommit =
immutable.Map(topicRegistry.values.flatMap { partitionTopicInfos =>
partitionTopicInfos.values.map { info =>
TopicAndPartition(info.topic, info.partitionId) -> OffsetAndMetadata(info.getConsumeOffset())
}
}.toSeq: _*)
commitOffsets(offsetsToCommit, isAutoCommit)
}
def commitOffsets(offsetsToCommit: immutable.Map[TopicAndPartition, OffsetAndMetadata], isAutoCommit: Boolean) {
trace("OffsetMap: %s".format(offsetsToCommit))
var retriesRemaining = 1 + (if (isAutoCommit) 0 else config.offsetsCommitMaxRetries) // no retries for commits from auto-commit
var done = false
while (!done) {
val committed = offsetsChannelLock synchronized {
// committed when we receive either no error codes or only MetadataTooLarge errors
if (offsetsToCommit.size > 0) {
if (config.offsetsStorage == "zookeeper") {
offsetsToCommit.foreach { case (topicAndPartition, offsetAndMetadata) =>
commitOffsetToZooKeeper(topicAndPartition, offsetAndMetadata.offset)
}
true
} else {
val offsetCommitRequest = OffsetCommitRequest(config.groupId, offsetsToCommit, clientId = config.clientId)
ensureOffsetManagerConnected()
try {
kafkaCommitMeter.mark(offsetsToCommit.size)
offsetsChannel.send(offsetCommitRequest)
val offsetCommitResponse = OffsetCommitResponse.readFrom(offsetsChannel.receive().payload())
trace("Offset commit response: %s.".format(offsetCommitResponse))
val (commitFailed, retryableIfFailed, shouldRefreshCoordinator, errorCount) = {
offsetCommitResponse.commitStatus.foldLeft(false, false, false, 0) { case (folded, (topicPartition, error)) =>
if (error == Errors.NONE && config.dualCommitEnabled) {
val offset = offsetsToCommit(topicPartition).offset
commitOffsetToZooKeeper(topicPartition, offset)
}
(folded._1 || // update commitFailed
error != Errors.NONE,
folded._2 || // update retryableIfFailed - (only metadata too large is not retryable)
(error != Errors.NONE && error != Errors.OFFSET_METADATA_TOO_LARGE),
folded._3 || // update shouldRefreshCoordinator
error == Errors.NOT_COORDINATOR_FOR_GROUP ||
error == Errors.GROUP_COORDINATOR_NOT_AVAILABLE,
// update error count
folded._4 + (if (error != Errors.NONE) 1 else 0))
}
}
debug(errorCount + " errors in offset commit response.")
if (shouldRefreshCoordinator) {
debug("Could not commit offsets (because offset coordinator has moved or is unavailable).")
offsetsChannel.disconnect()
}
if (commitFailed && retryableIfFailed)
false
else
true
}
catch {
case t: Throwable =>
error("Error while committing offsets.", t)
offsetsChannel.disconnect()
false
}
}
} else {
debug("No updates to offsets since last commit.")
true
}
}
done = {
retriesRemaining -= 1
retriesRemaining == 0 || committed
}
if (!done) {
debug("Retrying offset commit in %d ms".format(config.offsetsChannelBackoffMs))
Thread.sleep(config.offsetsChannelBackoffMs)
}
}
}
private def fetchOffsetFromZooKeeper(topicPartition: TopicAndPartition) = {
val dirs = new ZKGroupTopicDirs(config.groupId, topicPartition.topic)
val offsetString = zkUtils.readDataMaybeNull(dirs.consumerOffsetDir + "/" + topicPartition.partition)._1
offsetString match {
case Some(offsetStr) => (topicPartition, OffsetMetadataAndError(offsetStr.toLong))
case None => (topicPartition, OffsetMetadataAndError.NoOffset)
}
}
private def fetchOffsets(partitions: Seq[TopicAndPartition]) = {
if (partitions.isEmpty)
Some(OffsetFetchResponse(Map.empty))
else if (config.offsetsStorage == "zookeeper") {
val offsets = partitions.map(fetchOffsetFromZooKeeper)
Some(OffsetFetchResponse(immutable.Map(offsets:_*)))
} else {
val offsetFetchRequest = OffsetFetchRequest(groupId = config.groupId, requestInfo = partitions, clientId = config.clientId)
var offsetFetchResponseOpt: Option[OffsetFetchResponse] = None
while (!isShuttingDown.get && !offsetFetchResponseOpt.isDefined) {
offsetFetchResponseOpt = offsetsChannelLock synchronized {
ensureOffsetManagerConnected()
try {
offsetsChannel.send(offsetFetchRequest)
val offsetFetchResponse = OffsetFetchResponse.readFrom(offsetsChannel.receive().payload())
trace("Offset fetch response: %s.".format(offsetFetchResponse))
val (leaderChanged, loadInProgress) =
offsetFetchResponse.requestInfo.values.foldLeft(false, false) { case (folded, offsetMetadataAndError) =>
(folded._1 || (offsetMetadataAndError.error == Errors.NOT_COORDINATOR_FOR_GROUP),
folded._2 || (offsetMetadataAndError.error == Errors.GROUP_LOAD_IN_PROGRESS))
}
if (leaderChanged) {
offsetsChannel.disconnect()
debug("Could not fetch offsets (because offset manager has moved).")
None // retry
}
else if (loadInProgress) {
debug("Could not fetch offsets (because offset cache is being loaded).")
None // retry
}
else {
if (config.dualCommitEnabled) {
// if dual-commit is enabled (i.e., if a consumer group is migrating offsets to kafka), then pick the
// maximum between offsets in zookeeper and kafka.
val kafkaOffsets = offsetFetchResponse.requestInfo
val mostRecentOffsets = kafkaOffsets.map { case (topicPartition, kafkaOffset) =>
val zkOffset = fetchOffsetFromZooKeeper(topicPartition)._2.offset
val mostRecentOffset = zkOffset.max(kafkaOffset.offset)
(topicPartition, OffsetMetadataAndError(mostRecentOffset, kafkaOffset.metadata, Errors.NONE))
}
Some(OffsetFetchResponse(mostRecentOffsets))
}
else
Some(offsetFetchResponse)
}
}
catch {
case e: Exception =>
warn("Error while fetching offsets from %s:%d. Possible cause: %s".format(offsetsChannel.host, offsetsChannel.port, e.getMessage))
offsetsChannel.disconnect()
None // retry
}
}
if (offsetFetchResponseOpt.isEmpty) {
debug("Retrying offset fetch in %d ms".format(config.offsetsChannelBackoffMs))
Thread.sleep(config.offsetsChannelBackoffMs)
}
}
offsetFetchResponseOpt
}
}
class ZKSessionExpireListener(val dirs: ZKGroupDirs,
val consumerIdString: String,
val topicCount: TopicCount,
val loadBalancerListener: ZKRebalancerListener)
extends IZkStateListener {
@throws[Exception]
def handleStateChanged(state: KeeperState) {
// do nothing, since zkclient will do reconnect for us.
}
/**
* Called after the zookeeper session has expired and a new session has been created. You would have to re-create
* any ephemeral nodes here.
*
* @throws Exception
* On any error.
*/
@throws[Exception]
def handleNewSession() {
/**
* When we get a SessionExpired event, we lost all ephemeral nodes and zkclient has reestablished a
* connection for us. We need to release the ownership of the current consumer and re-register this
* consumer in the consumer registry and trigger a rebalance.
*/
info("ZK expired; release old broker parition ownership; re-register consumer " + consumerIdString)
loadBalancerListener.resetState()
registerConsumerInZK(dirs, consumerIdString, topicCount)
// explicitly trigger load balancing for this consumer
loadBalancerListener.syncedRebalance()
// There is no need to resubscribe to child and state changes.
// The child change watchers will be set inside rebalance when we read the children list.
}
override def handleSessionEstablishmentError(error: Throwable): Unit = {
fatal("Could not establish session with zookeeper", error)
}
}
class ZKTopicPartitionChangeListener(val loadBalancerListener: ZKRebalancerListener)
extends IZkDataListener {
def handleDataChange(dataPath : String, data: Object) {
try {
info("Topic info for path " + dataPath + " changed to " + data.toString + ", triggering rebalance")
// queue up the rebalance event
loadBalancerListener.rebalanceEventTriggered()
// There is no need to re-subscribe the watcher since it will be automatically
// re-registered upon firing of this event by zkClient
} catch {
case e: Throwable => error("Error while handling topic partition change for data path " + dataPath, e )
}
}
@throws[Exception]
def handleDataDeleted(dataPath : String) {
// TODO: This need to be implemented when we support delete topic
warn("Topic for path " + dataPath + " gets deleted, which should not happen at this time")
}
}
class ZKRebalancerListener(val group: String, val consumerIdString: String,
val kafkaMessageAndMetadataStreams: mutable.Map[String,List[KafkaStream[_,_]]])
extends IZkChildListener {
private val partitionAssignor = PartitionAssignor.createInstance(config.partitionAssignmentStrategy)
private var isWatcherTriggered = false
private val lock = new ReentrantLock
private val cond = lock.newCondition()
@volatile private var allTopicsOwnedPartitionsCount = 0
newGauge("OwnedPartitionsCount",
new Gauge[Int] {
def value() = allTopicsOwnedPartitionsCount
},
Map("clientId" -> config.clientId, "groupId" -> config.groupId))
private def ownedPartitionsCountMetricTags(topic: String) = Map("clientId" -> config.clientId, "groupId" -> config.groupId, "topic" -> topic)
private val watcherExecutorThread = new Thread(consumerIdString + "_watcher_executor") {
override def run() {
info("starting watcher executor thread for consumer " + consumerIdString)
var doRebalance = false
while (!isShuttingDown.get) {
try {
lock.lock()
try {
if (!isWatcherTriggered)
cond.await(1000, TimeUnit.MILLISECONDS) // wake up periodically so that it can check the shutdown flag
} finally {
doRebalance = isWatcherTriggered
isWatcherTriggered = false
lock.unlock()
}
if (doRebalance)
syncedRebalance
} catch {
case t: Throwable => error("error during syncedRebalance", t)
}
}
info("stopping watcher executor thread for consumer " + consumerIdString)
}
}
watcherExecutorThread.start()
@throws[Exception]
def handleChildChange(parentPath : String, curChilds : java.util.List[String]) {
rebalanceEventTriggered()
}
def rebalanceEventTriggered() {
inLock(lock) {
isWatcherTriggered = true
cond.signalAll()
}
}
private def deletePartitionOwnershipFromZK(topic: String, partition: Int) {
val topicDirs = new ZKGroupTopicDirs(group, topic)
val znode = topicDirs.consumerOwnerDir + "/" + partition
zkUtils.deletePath(znode)
debug("Consumer " + consumerIdString + " releasing " + znode)
}
private def releasePartitionOwnership(localTopicRegistry: Pool[String, Pool[Int, PartitionTopicInfo]])= {
info("Releasing partition ownership")
for ((topic, infos) <- localTopicRegistry) {
for(partition <- infos.keys) {
deletePartitionOwnershipFromZK(topic, partition)
}
removeMetric("OwnedPartitionsCount", ownedPartitionsCountMetricTags(topic))
localTopicRegistry.remove(topic)
}
allTopicsOwnedPartitionsCount = 0
}
def resetState() {
topicRegistry.clear
}
def syncedRebalance() {
rebalanceLock synchronized {
rebalanceTimer.time {
for (i <- 0 until config.rebalanceMaxRetries) {
if(isShuttingDown.get()) {
return
}
info("begin rebalancing consumer " + consumerIdString + " try #" + i)
var done = false
var cluster: Cluster = null
try {
cluster = zkUtils.getCluster()
done = rebalance(cluster)
} catch {
case e: Throwable =>
/** occasionally, we may hit a ZK exception because the ZK state is changing while we are iterating.
* For example, a ZK node can disappear between the time we get all children and the time we try to get
* the value of a child. Just let this go since another rebalance will be triggered.
**/
info("exception during rebalance ", e)
}
info("end rebalancing consumer " + consumerIdString + " try #" + i)
if (done) {
return
} else {
/* Here the cache is at a risk of being stale. To take future rebalancing decisions correctly, we should
* clear the cache */
info("Rebalancing attempt failed. Clearing the cache before the next rebalancing operation is triggered")
}
// stop all fetchers and clear all the queues to avoid data duplication
closeFetchersForQueues(cluster, kafkaMessageAndMetadataStreams, topicThreadIdAndQueues.map(q => q._2))
Thread.sleep(config.rebalanceBackoffMs)
}
}
}
throw new ConsumerRebalanceFailedException(consumerIdString + " can't rebalance after " + config.rebalanceMaxRetries +" retries")
}
private def rebalance(cluster: Cluster): Boolean = {
val myTopicThreadIdsMap = TopicCount.constructTopicCount(
group, consumerIdString, zkUtils, config.excludeInternalTopics).getConsumerThreadIdsPerTopic
val brokers = zkUtils.getAllBrokersInCluster()
if (brokers.size == 0) {
// This can happen in a rare case when there are no brokers available in the cluster when the consumer is started.
        // We log a warning and register for child changes on /brokers/ids so that rebalance can be triggered when the brokers
// are up.
warn("no brokers found when trying to rebalance.")
zkUtils.zkClient.subscribeChildChanges(BrokerIdsPath, loadBalancerListener)
true
}
else {
/**
* fetchers must be stopped to avoid data duplication, since if the current
* rebalancing attempt fails, the partitions that are released could be owned by another consumer.
* But if we don't stop the fetchers first, this consumer would continue returning data for released
* partitions in parallel. So, not stopping the fetchers leads to duplicate data.
*/
closeFetchers(cluster, kafkaMessageAndMetadataStreams, myTopicThreadIdsMap)
if (consumerRebalanceListener != null) {
info("Invoking rebalance listener before relasing partition ownerships.")
consumerRebalanceListener.beforeReleasingPartitions(
if (topicRegistry.size == 0)
new java.util.HashMap[String, java.util.Set[java.lang.Integer]]
else
topicRegistry.map(topics =>
topics._1 -> topics._2.keys // note this is incorrect, see KAFKA-2284
).toMap.asJava.asInstanceOf[java.util.Map[String, java.util.Set[java.lang.Integer]]]
)
}
releasePartitionOwnership(topicRegistry)
val assignmentContext = new AssignmentContext(group, consumerIdString, config.excludeInternalTopics, zkUtils)
val globalPartitionAssignment = partitionAssignor.assign(assignmentContext)
val partitionAssignment = globalPartitionAssignment.get(assignmentContext.consumerId)
val currentTopicRegistry = new Pool[String, Pool[Int, PartitionTopicInfo]](
valueFactory = Some((_: String) => new Pool[Int, PartitionTopicInfo]))
// fetch current offsets for all topic-partitions
val topicPartitions = partitionAssignment.keySet.toSeq
val offsetFetchResponseOpt = fetchOffsets(topicPartitions)
if (isShuttingDown.get || !offsetFetchResponseOpt.isDefined)
false
else {
val offsetFetchResponse = offsetFetchResponseOpt.get
topicPartitions.foreach(topicAndPartition => {
val (topic, partition) = topicAndPartition.asTuple
val offset = offsetFetchResponse.requestInfo(topicAndPartition).offset
val threadId = partitionAssignment(topicAndPartition)
addPartitionTopicInfo(currentTopicRegistry, partition, topic, offset, threadId)
})
/**
* move the partition ownership here, since that can be used to indicate a truly successful re-balancing attempt
* A rebalancing attempt is completed successfully only after the fetchers have been started correctly
*/
if(reflectPartitionOwnershipDecision(partitionAssignment)) {
allTopicsOwnedPartitionsCount = partitionAssignment.size
partitionAssignment.view.groupBy { case (topicPartition, _) => topicPartition.topic }
.foreach { case (topic, partitionThreadPairs) =>
newGauge("OwnedPartitionsCount",
new Gauge[Int] {
def value() = partitionThreadPairs.size
},
ownedPartitionsCountMetricTags(topic))
}
topicRegistry = currentTopicRegistry
// Invoke beforeStartingFetchers callback if the consumerRebalanceListener is set.
if (consumerRebalanceListener != null) {
info("Invoking rebalance listener before starting fetchers.")
// Partition assignor returns the global partition assignment organized as a map of [TopicPartition, ThreadId]
// per consumer, and we need to re-organize it to a map of [Partition, ThreadId] per topic before passing
// to the rebalance callback.
val partitionAssginmentGroupByTopic = globalPartitionAssignment.values.flatten.groupBy[String] {
case (topicPartition, _) => topicPartition.topic
}
val partitionAssigmentMapForCallback = partitionAssginmentGroupByTopic.map({
case (topic, partitionOwnerShips) =>
val partitionOwnershipForTopicScalaMap = partitionOwnerShips.map({
case (topicAndPartition, consumerThreadId) =>
(topicAndPartition.partition: Integer) -> consumerThreadId
}).toMap
topic -> partitionOwnershipForTopicScalaMap.asJava
})
consumerRebalanceListener.beforeStartingFetchers(
consumerIdString,
partitionAssigmentMapForCallback.asJava
)
}
updateFetcher(cluster)
true
} else {
false
}
}
}
}
private def closeFetchersForQueues(cluster: Cluster,
messageStreams: Map[String,List[KafkaStream[_,_]]],
queuesToBeCleared: Iterable[BlockingQueue[FetchedDataChunk]]) {
val allPartitionInfos = topicRegistry.values.map(p => p.values).flatten
fetcher match {
case Some(f) =>
f.stopConnections
clearFetcherQueues(allPartitionInfos, cluster, queuesToBeCleared, messageStreams)
/**
* here, we need to commit offsets before stopping the consumer from returning any more messages
* from the current data chunk. Since partition ownership is not yet released, this commit offsets
* call will ensure that the offsets committed now will be used by the next consumer thread owning the partition
* for the current data chunk. Since the fetchers are already shutdown and this is the last chunk to be iterated
* by the consumer, there will be no more messages returned by this iterator until the rebalancing finishes
* successfully and the fetchers restart to fetch more data chunks
**/
if (config.autoCommitEnable) {
info("Committing all offsets after clearing the fetcher queues")
commitOffsets(true)
}
case None =>
}
}
private def clearFetcherQueues(topicInfos: Iterable[PartitionTopicInfo], cluster: Cluster,
queuesTobeCleared: Iterable[BlockingQueue[FetchedDataChunk]],
messageStreams: Map[String,List[KafkaStream[_,_]]]) {
// Clear all but the currently iterated upon chunk in the consumer thread's queue
queuesTobeCleared.foreach(_.clear)
info("Cleared all relevant queues for this fetcher")
// Also clear the currently iterated upon chunk in the consumer threads
if(messageStreams != null)
messageStreams.foreach(_._2.foreach(s => s.clear()))
info("Cleared the data chunks in all the consumer message iterators")
}
private def closeFetchers(cluster: Cluster, messageStreams: Map[String,List[KafkaStream[_,_]]],
relevantTopicThreadIdsMap: Map[String, Set[ConsumerThreadId]]) {
// only clear the fetcher queues for certain topic partitions that *might* no longer be served by this consumer
// after this rebalancing attempt
val queuesTobeCleared = topicThreadIdAndQueues.filter(q => relevantTopicThreadIdsMap.contains(q._1._1)).map(q => q._2)
closeFetchersForQueues(cluster, messageStreams, queuesTobeCleared)
}
private def updateFetcher(cluster: Cluster) {
// update partitions for fetcher
var allPartitionInfos : List[PartitionTopicInfo] = Nil
for (partitionInfos <- topicRegistry.values)
for (partition <- partitionInfos.values)
allPartitionInfos ::= partition
info("Consumer " + consumerIdString + " selected partitions : " +
allPartitionInfos.sortWith((s,t) => s.partitionId < t.partitionId).map(_.toString).mkString(","))
fetcher match {
case Some(f) =>
f.startConnections(allPartitionInfos, cluster)
case None =>
}
}
private def reflectPartitionOwnershipDecision(partitionAssignment: Map[TopicAndPartition, ConsumerThreadId]): Boolean = {
var successfullyOwnedPartitions : List[(String, Int)] = Nil
val partitionOwnershipSuccessful = partitionAssignment.map { partitionOwner =>
val topic = partitionOwner._1.topic
val partition = partitionOwner._1.partition
val consumerThreadId = partitionOwner._2
val partitionOwnerPath = zkUtils.getConsumerPartitionOwnerPath(group, topic, partition)
try {
zkUtils.createEphemeralPathExpectConflict(partitionOwnerPath, consumerThreadId.toString)
info(consumerThreadId + " successfully owned partition " + partition + " for topic " + topic)
successfullyOwnedPartitions ::= (topic, partition)
true
} catch {
case _: ZkNodeExistsException =>
// The node hasn't been deleted by the original owner. So wait a bit and retry.
info("waiting for the partition ownership to be deleted: " + partition + " for topic " + topic)
false
}
}
val hasPartitionOwnershipFailed = partitionOwnershipSuccessful.foldLeft(0)((sum, decision) => sum + (if(decision) 0 else 1))
/* even if one of the partition ownership attempt has failed, return false */
if(hasPartitionOwnershipFailed > 0) {
// remove all paths that we have owned in ZK
successfullyOwnedPartitions.foreach(topicAndPartition => deletePartitionOwnershipFromZK(topicAndPartition._1, topicAndPartition._2))
false
}
else true
}
private def addPartitionTopicInfo(currentTopicRegistry: Pool[String, Pool[Int, PartitionTopicInfo]],
partition: Int, topic: String,
offset: Long, consumerThreadId: ConsumerThreadId) {
val partTopicInfoMap = currentTopicRegistry.getAndMaybePut(topic)
val queue = topicThreadIdAndQueues.get((topic, consumerThreadId))
val consumedOffset = new AtomicLong(offset)
val fetchedOffset = new AtomicLong(offset)
val partTopicInfo = new PartitionTopicInfo(topic,
partition,
queue,
consumedOffset,
fetchedOffset,
new AtomicInteger(config.fetchMessageMaxBytes),
config.clientId)
partTopicInfoMap.put(partition, partTopicInfo)
debug(partTopicInfo + " selected new offset " + offset)
checkpointedZkOffsets.put(TopicAndPartition(topic, partition), offset)
}
}
private def reinitializeConsumer[K,V](
topicCount: TopicCount,
queuesAndStreams: List[(LinkedBlockingQueue[FetchedDataChunk],KafkaStream[K,V])]) {
val dirs = new ZKGroupDirs(config.groupId)
// listener to consumer and partition changes
if (loadBalancerListener == null) {
val topicStreamsMap = new mutable.HashMap[String,List[KafkaStream[K,V]]]
loadBalancerListener = new ZKRebalancerListener(
config.groupId, consumerIdString, topicStreamsMap.asInstanceOf[scala.collection.mutable.Map[String, List[KafkaStream[_,_]]]])
}
// create listener for session expired event if not exist yet
if (sessionExpirationListener == null)
sessionExpirationListener = new ZKSessionExpireListener(
dirs, consumerIdString, topicCount, loadBalancerListener)
// create listener for topic partition change event if not exist yet
if (topicPartitionChangeListener == null)
topicPartitionChangeListener = new ZKTopicPartitionChangeListener(loadBalancerListener)
val topicStreamsMap = loadBalancerListener.kafkaMessageAndMetadataStreams
// map of {topic -> Set(thread-1, thread-2, ...)}
val consumerThreadIdsPerTopic: Map[String, Set[ConsumerThreadId]] =
topicCount.getConsumerThreadIdsPerTopic
val allQueuesAndStreams = topicCount match {
case _: WildcardTopicCount =>
/*
* Wild-card consumption streams share the same queues, so we need to
* duplicate the list for the subsequent zip operation.
*/
(1 to consumerThreadIdsPerTopic.keySet.size).flatMap(_ => queuesAndStreams).toList
case _: StaticTopicCount =>
queuesAndStreams
}
val topicThreadIds = consumerThreadIdsPerTopic.map { case (topic, threadIds) =>
threadIds.map((topic, _))
}.flatten
require(topicThreadIds.size == allQueuesAndStreams.size,
"Mismatch between thread ID count (%d) and queue count (%d)"
.format(topicThreadIds.size, allQueuesAndStreams.size))
val threadQueueStreamPairs = topicThreadIds.zip(allQueuesAndStreams)
threadQueueStreamPairs.foreach(e => {
val topicThreadId = e._1
val q = e._2._1
topicThreadIdAndQueues.put(topicThreadId, q)
debug("Adding topicThreadId %s and queue %s to topicThreadIdAndQueues data structure".format(topicThreadId, q.toString))
newGauge(
"FetchQueueSize",
new Gauge[Int] {
def value = q.size
},
Map("clientId" -> config.clientId,
"topic" -> topicThreadId._1,
"threadId" -> topicThreadId._2.threadId.toString)
)
})
val groupedByTopic = threadQueueStreamPairs.groupBy(_._1._1)
groupedByTopic.foreach(e => {
val topic = e._1
val streams = e._2.map(_._2._2).toList
topicStreamsMap += (topic -> streams)
debug("adding topic %s and %d streams to map.".format(topic, streams.size))
})
// listener to consumer and partition changes
zkUtils.zkClient.subscribeStateChanges(sessionExpirationListener)
zkUtils.zkClient.subscribeChildChanges(dirs.consumerRegistryDir, loadBalancerListener)
topicStreamsMap.foreach { topicAndStreams =>
// register on broker partition path changes
val topicPath = BrokerTopicsPath + "/" + topicAndStreams._1
zkUtils.zkClient.subscribeDataChanges(topicPath, topicPartitionChangeListener)
}
// explicitly trigger load balancing for this consumer
loadBalancerListener.syncedRebalance()
}
class WildcardStreamsHandler[K,V](topicFilter: TopicFilter,
numStreams: Int,
keyDecoder: Decoder[K],
valueDecoder: Decoder[V])
extends TopicEventHandler[String] {
if (messageStreamCreated.getAndSet(true))
throw new RuntimeException("Each consumer connector can create " +
"message streams by filter at most once.")
private val wildcardQueuesAndStreams = (1 to numStreams)
.map(_ => {
val queue = new LinkedBlockingQueue[FetchedDataChunk](config.queuedMaxMessages)
val stream = new KafkaStream[K,V](queue,
config.consumerTimeoutMs,
keyDecoder,
valueDecoder,
config.clientId)
(queue, stream)
}).toList
// bootstrap with existing topics
private var wildcardTopics =
zkUtils.getChildrenParentMayNotExist(BrokerTopicsPath)
.filter(topic => topicFilter.isTopicAllowed(topic, config.excludeInternalTopics))
private val wildcardTopicCount = TopicCount.constructTopicCount(
consumerIdString, topicFilter, numStreams, zkUtils, config.excludeInternalTopics)
val dirs = new ZKGroupDirs(config.groupId)
registerConsumerInZK(dirs, consumerIdString, wildcardTopicCount)
reinitializeConsumer(wildcardTopicCount, wildcardQueuesAndStreams)
/*
* Topic events will trigger subsequent synced rebalances.
*/
info("Creating topic event watcher for topics " + topicFilter)
wildcardTopicWatcher = new ZookeeperTopicEventWatcher(zkUtils, this)
def handleTopicEvent(allTopics: Seq[String]) {
debug("Handling topic event")
val updatedTopics = allTopics.filter(topic => topicFilter.isTopicAllowed(topic, config.excludeInternalTopics))
val addedTopics = updatedTopics filterNot (wildcardTopics contains)
if (addedTopics.nonEmpty)
info("Topic event: added topics = %s"
.format(addedTopics))
/*
* TODO: Deleted topics are interesting (and will not be a concern until
* 0.8 release). We may need to remove these topics from the rebalance
* listener's map in reinitializeConsumer.
*/
val deletedTopics = wildcardTopics filterNot (updatedTopics contains)
if (deletedTopics.nonEmpty)
info("Topic event: deleted topics = %s"
.format(deletedTopics))
wildcardTopics = updatedTopics
info("Topics to consume = %s".format(wildcardTopics))
if (addedTopics.nonEmpty || deletedTopics.nonEmpty)
reinitializeConsumer(wildcardTopicCount, wildcardQueuesAndStreams)
}
def streams: Seq[KafkaStream[K,V]] =
wildcardQueuesAndStreams.map(_._2)
}
}
|
ijuma/kafka
|
core/src/main/scala/kafka/consumer/ZookeeperConsumerConnector.scala
|
Scala
|
apache-2.0
| 46,619 |
package com.typesafe.sbt.packager.archetypes
package jlink
import scala.sys.process.{BasicIO, Process, ProcessBuilder}
import sbt._
import sbt.Keys._
import com.typesafe.sbt.SbtNativePackager.{Debian, Universal}
import com.typesafe.sbt.packager.Keys.{bundledJvmLocation, packageName}
import com.typesafe.sbt.packager.Compat._
import com.typesafe.sbt.packager.archetypes.jlink._
import com.typesafe.sbt.packager.archetypes.scripts.BashStartScriptKeys
import com.typesafe.sbt.packager.universal.UniversalPlugin
/**
* == Jlink Application ==
*
* This class contains the default settings for creating and deploying an
* application as a runtime image using the standard `jlink` utility.
*
* == Configuration ==
*
* This plugin adds new settings to configure your packaged application.
*
* @example Enable this plugin in your `build.sbt` with
*
* {{{
* enablePlugins(JlinkPlugin)
* }}}
*/
object JlinkPlugin extends AutoPlugin {
object autoImport extends JlinkKeys {
val JlinkIgnore = JlinkPlugin.Ignore
}
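  // Illustrative sketch (not part of the original plugin): a custom "should ignore"
  // predicate of the shape consumed by jlinkIgnoreMissingDependency below; it is applied
  // to the (dependent, dependee) package pairs reported by jdeps. The package name is
  // hypothetical. In a build.sbt, after enablePlugins(JlinkPlugin), one would write:
  //   jlinkIgnoreMissingDependency := { case ("com.example.internal", _) => true; case _ => false }
  private val exampleIgnoreMissingDependency: ((String, String)) => Boolean = {
    case ("com.example.internal", _) => true // ignore anything this hypothetical package depends on
    case _                           => false
  }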
import autoImport._
override def requires = JavaAppPackaging
override lazy val projectSettings: Seq[Setting[_]] = Seq(
target in jlinkBuildImage := target.value / "jlink" / "output",
jlinkBundledJvmLocation := "jre",
bundledJvmLocation := Some(jlinkBundledJvmLocation.value),
jlinkIgnoreMissingDependency :=
(jlinkIgnoreMissingDependency ?? JlinkIgnore.nothing).value,
// Don't use `fullClasspath in Compile` directly - this way we can inject
// custom classpath elements for the scan.
fullClasspath in jlinkBuildImage := (fullClasspath in Compile).value,
jlinkModules := (jlinkModules ?? Nil).value,
jlinkModules ++= {
val log = streams.value.log
val javaHome0 = javaHome.in(jlinkBuildImage).value.getOrElse(defaultJavaHome)
val run = runJavaTool(javaHome0, log) _
val paths = fullClasspath.in(jlinkBuildImage).value.map(_.data.getPath)
val shouldIgnore = jlinkIgnoreMissingDependency.value
// We can find the java toolchain version by parsing the `release` file. This
// only works for Java 9+, but so does this whole plugin.
// Alternatives:
// - Parsing `java -version` output - the format is not standardized, so there
// are a lot of weird incompatibilities.
// - Parsing `java -XshowSettings:properties` output - the format is nicer,
// but the command itself is subject to change without notice.
val releaseFile = javaHome0 / "release"
val javaVersion = IO
.readLines(releaseFile)
.collectFirst {
case javaVersionPattern(feature) => feature
}
.getOrElse(sys.error("JAVA_VERSION not found in ${releaseFile.getAbsolutePath}"))
// Jdeps has a few convenient options (like --print-module-deps), but those
// are not flexible enough - we need to parse the full output.
val jdepsOutput = runForOutput(run("jdeps", "--multi-release" +: javaVersion +: "-R" +: paths), log)
val deps = jdepsOutput.linesIterator
// There are headers in some of the lines - ignore those.
.flatMap(PackageDependency.parse(_).iterator)
.toSeq
// Check that we don't have any dangling dependencies that were not
// explicitly ignored.
val missingDeps = deps
.collect {
case PackageDependency(dependent, dependee, PackageDependency.NotFound) =>
(dependent, dependee)
}
.filterNot(shouldIgnore)
.distinct
.sorted
if (missingDeps.nonEmpty) {
log.error(
"Dependee packages not found in classpath. You can use jlinkIgnoreMissingDependency to silence these."
)
missingDeps.foreach {
case (a, b) =>
log.error(s" $a -> $b")
}
sys.error("Missing package dependencies")
}
val detectedModuleDeps = deps.collect {
case PackageDependency(_, _, PackageDependency.Module(module)) =>
module
}.toSet
// Some JakartaEE artifacts use `java.*` module names, even though
// they are not a part of the platform anymore.
// https://github.com/eclipse-ee4j/ee4j/issues/34
// This requires special handling on our part when deciding if the module
// is a part of the platform or not.
// At least the new modules shouldn't be doing this...
val knownJakartaJavaModules = Set("java.xml.bind", "java.xml.soap", "java.ws.rs")
val filteredModuleDeps = detectedModuleDeps
.filter { m =>
m.startsWith("jdk.") || m.startsWith("java.")
}
.filterNot(knownJakartaJavaModules.contains)
// We always want `java.base`, and `jlink` requires at least one module.
(filteredModuleDeps + "java.base").toSeq
},
// No external modules by default: see #1247.
jlinkModulePath := (jlinkModulePath ?? Nil).value,
jlinkOptions := (jlinkOptions ?? Nil).value,
jlinkOptions ++= {
val modules = jlinkModules.value
if (modules.isEmpty) {
sys.error("jlinkModules is empty")
}
JlinkOptions(
addModules = modules,
output = Some(target.in(jlinkBuildImage).value),
modulePath = jlinkModulePath.value
)
},
jlinkBuildImage := {
val log = streams.value.log
val javaHome0 = javaHome.in(jlinkBuildImage).value.getOrElse(defaultJavaHome)
val run = runJavaTool(javaHome0, log) _
val outDir = target.in(jlinkBuildImage).value
IO.delete(outDir)
runForOutput(run("jlink", jlinkOptions.value), log)
outDir
},
mappings in jlinkBuildImage := {
val prefix = jlinkBundledJvmLocation.value
// make sure the prefix has a terminating slash
val prefix0 = if (prefix.isEmpty) prefix else (prefix + "/")
findFiles(jlinkBuildImage.value).map {
case (file, string) => (file, prefix0 + string)
}
},
mappings in Universal ++= mappings.in(jlinkBuildImage).value
)
// Extracts java version from a release file line (`JAVA_VERSION` property):
// - if the feature version is 1, yield the minor version number (e.g. 1.9.0 -> 9);
// - otherwise yield the major version number (e.g. 11.0.3 -> 11).
  private[jlink] val javaVersionPattern = """JAVA_VERSION="(?:1\.)?(\d+).*?"""".r
// TODO: deduplicate with UniversalPlugin and DebianPlugin
/** Finds all files in a directory. */
private def findFiles(dir: File): Seq[(File, String)] =
((PathFinder(dir) ** AllPassFilter) --- dir)
.pair(file => IO.relativize(dir, file))
private lazy val defaultJavaHome: File =
file(sys.props.getOrElse("java.home", sys.error("no java.home")))
private def runJavaTool(jvm: File, log: Logger)(exeName: String, args: Seq[String]): ProcessBuilder = {
val exe = (jvm / "bin" / exeName).getAbsolutePath
log.info("Running: " + (exe +: args).mkString(" "))
Process(exe, args)
}
// Like `ProcessBuilder.!!`, but this logs the output in case of a non-zero
// exit code. We need this since some Java tools write their errors to stdout.
// This uses `scala.sys.process.ProcessLogger` instead of the SBT `Logger`
// to make it a drop-in replacement for `ProcessBuilder.!!`.
private def runForOutput(builder: ProcessBuilder, log: scala.sys.process.ProcessLogger): String = {
val buffer = new StringBuffer
val code = builder.run(BasicIO(false, buffer, Some(log))).exitValue()
if (code == 0) buffer.toString
else {
log.out(buffer.toString)
scala.sys.error("Nonzero exit value: " + code)
}
}
private object JlinkOptions {
def apply(addModules: Seq[String], output: Option[File], modulePath: Seq[File]): Seq[String] =
option("--output", output) ++
list("--add-modules", addModules, ",") ++
list("--module-path", modulePath, ":")
private def option[A](arg: String, value: Option[A]): Seq[String] =
value.toSeq.flatMap(a => Seq(arg, a.toString))
private def list[A](arg: String, values: Seq[A], separator: String): Seq[String] =
if (values.nonEmpty) Seq(arg, values.mkString(separator)) else Nil
}
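  // Illustrative sketch (not part of the original source): with hypothetical inputs
  //   JlinkOptions(addModules = Seq("java.base", "java.logging"),
  //                output = Some(file("target/jlink/output")),
  //                modulePath = Nil)
  // the helper yields Seq("--output", "target/jlink/output",
  // "--add-modules", "java.base,java.logging"), i.e. the argument list handed
  // to the `jlink` invocation in jlinkBuildImage above.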
// Jdeps output row
private final case class PackageDependency(dependent: String, dependee: String, source: PackageDependency.Source)
private final object PackageDependency {
sealed trait Source
object Source {
def parse(s: String): Source = s match {
case "not found" => NotFound
// We have no foolproof way to separate jars from modules here, so
// we have to do something flaky.
case name
if name.toLowerCase.endsWith(".jar") ||
!name.contains('.') ||
name.contains(' ') =>
JarOrDir(name)
case name => Module(name)
}
}
case object NotFound extends Source
final case class Module(name: String) extends Source
final case class JarOrDir(name: String) extends Source
// Examples of package dependencies in jdeps output (whitespace may vary,
// but there will always be some leading whitespace):
// Dependency on a package(java.lang) in a module (java.base):
// foo.bar -> java.lang java.base
// Dependency on a package (scala.collection) in a JAR
// (scala-library-2.12.8.jar):
// foo.bar -> scala.collection scala-library-2.12.8.jar
// Dependency on a package (foo.baz) in a class directory (classes):
// foo.bar -> foo.baz classes
// Missing dependency on a package (qux.quux):
// foo.bar -> qux.quux not found
// There are also jar/directory/module-level dependencies, but we are
// not interested in those:
// foo.jar -> scala-library-2.12.8.jar
// classes -> java.base
// foo.jar -> not found
private val pattern = """^\\s+([^\\s]+)\\s+->\\s+([^\\s]+)\\s+([^\\s].*?)\\s*$""".r
def parse(s: String): Option[PackageDependency] = s match {
case pattern(dependent, dependee, source) =>
Some(PackageDependency(dependent, dependee, Source.parse(source)))
case _ => None
}
}
object Ignore {
val nothing: ((String, String)) => Boolean = Function.const(false)
val everything: ((String, String)) => Boolean = Function.const(true)
def only(dependencies: (String, String)*): ((String, String)) => Boolean = dependencies.toSet.contains
/** This matches pairs by their respective ''package'' prefixes. This means that `"foo.bar"`
* matches `"foo.bar"`, `"foo.bar.baz"`, but not `"foo.barqux"`. Empty
* string matches anything.
*/
def byPackagePrefix(prefixPairs: (String, String)*): ((String, String)) => Boolean = {
case (a, b) =>
prefixPairs.exists {
case (prefixA, prefixB) =>
packagePrefixMatches(prefixA, a) && packagePrefixMatches(prefixB, b)
}
}
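    // Usage sketch (illustrative; the package names are made up):
    //   jlinkIgnoreMissingDependency := JlinkIgnore.byPackagePrefix(
    //     "foo.bar" -> "com.example.optional"
    //   )
    // silences missing-dependency errors from any package under foo.bar to any
    // package under com.example.optional, following the prefix rules above.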
private def packagePrefixMatches(prefix: String, s: String): Boolean =
prefix.isEmpty ||
s == prefix ||
s.startsWith(prefix + ".")
}
}
|
Sciss/sbt-native-packager
|
src/main/scala/com/typesafe/sbt/packager/archetypes/jlink/JlinkPlugin.scala
|
Scala
|
bsd-2-clause
| 10,985 |
package concrete
import org.scalatest.FlatSpec
import org.scalatest.Matchers
import org.scalacheck.Gen
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
final class IntDomainTest extends FlatSpec with Matchers with ScalaCheckPropertyChecks {
"IntDomain" should "find next/prev" in {
val b = IntDomain.ofSeq(1, 2, 7, 8)
b.prev(3) shouldBe 2
b.prev(2) shouldBe 1
b.next(2) shouldBe 7
b.prev(9) shouldBe 8
a[NoSuchElementException] should be thrownBy b.next(9)
a[NoSuchElementException] should be thrownBy b.prev(1)
a[NoSuchElementException] should be thrownBy b.prev(0)
}
it should "behave as sets" in {
forAll(Gen.containerOf[Set, Int](Gen.choose(-10000, 10000))) { s =>
val s2 = s.foldLeft[IntDomain](EmptyIntDomain)(_ | _)
s2.view should contain theSameElementsAs s.view
}
forAll(Gen.containerOf[Set, Int](Gen.choose(0, 63))) { s =>
val s2 = s.foldLeft[IntDomain](EmptyIntDomain)(_ | _)
s2.view should contain theSameElementsAs s.view
}
}
it should "enumerate" in {
IntDomain.ofSeq(1, 2, 7, 8).view should contain theSameElementsAs Seq(1, 2, 7, 8)
}
it should "detect presence" in {
val domain = IntDomain.ofSeq(0, 1)
assert(domain.contains(0))
val d2 = domain - 0
assert(!d2.contains(0))
assert(domain.contains(0))
}
it should "split" in {
val domain = IntDomain.ofSeq(3, 4, 5, 7)
domain.removeTo(4).head shouldBe 5
domain.removeFrom(6).last shouldBe 5
domain.removeFrom(0) shouldBe empty
domain.removeFrom(3) shouldBe empty
domain.removeTo(7) shouldBe empty
domain.removeTo(8) shouldBe empty
domain.removeFrom(-1) shouldBe empty
domain.removeUntil(7).view should contain theSameElementsAs Seq(7)
domain.removeUntil(400) shouldBe empty
domain.removeFrom(400) shouldBe domain
}
it should "convert bit vector to interval" in {
IntDomain.ofSeq(5, 6, 7) shouldBe an[IntervalDomain]
}
it should "compute unions from intervals" in {
val d1 = IntDomain.ofInterval(5, 10)
d1.view should contain theSameElementsAs (5 to 10)
val u1 = d1 | IntDomain.ofSeq(12, 15)
u1.view should contain theSameElementsAs (5 to 10) ++ Seq(12, 15)
u1 shouldBe a[BitVectorDomain]
val u2 = d1 | IntDomain.ofInterval(12, 15)
u2.view should contain theSameElementsAs (5 to 10) ++ (12 to 15)
u2 shouldBe a[BitVectorDomain]
val u3 = d1 | IntDomain.ofInterval(10, 15)
u3.view should contain theSameElementsAs (5 to 15)
u3 shouldBe an[IntervalDomain]
val u4 = d1 | IntDomain.ofInterval(8, 10)
u4.view should contain theSameElementsAs d1.view
u4 shouldBe an[IntervalDomain]
val u5 = d1 | IntDomain.ofInterval(11, 15)
u5.view should contain theSameElementsAs (5 to 15)
u5 shouldBe an[IntervalDomain]
val u6 = d1 | Singleton(15)
u6.view should contain theSameElementsAs (5 to 10) ++ Seq(15)
u6 shouldBe a[BitVectorDomain]
val u7 = d1 | Singleton(11)
u7.view should contain theSameElementsAs (5 to 11)
u7 shouldBe an[IntervalDomain]
d1 | Singleton(10) should be theSameInstanceAs d1
d1 | EmptyIntDomain should be theSameInstanceAs d1
}
it should "compute intersections from intervals" in {
val d1 = IntDomain.ofInterval(5, 10)
d1 & IntDomain.ofSeq(7, 15) shouldBe Singleton(7)
d1 & IntDomain.ofSeq(12, 15) shouldBe EmptyIntDomain
d1 & IntDomain.ofInterval(12, 15) shouldBe EmptyIntDomain
d1 & IntDomain.ofInterval(10, 15) shouldBe Singleton(10)
val i1 = d1 & IntDomain.ofInterval(7, 15)
i1.view should contain theSameElementsAs (7 to 10)
i1 shouldBe an[IntervalDomain]
d1 & IntDomain.ofInterval(0, 10) shouldBe d1
d1 & IntDomain.ofSeq(3, 5, 6, 7, 8, 9, 10) shouldBe d1
}
it should "compute unions from bit vectors" in {
val d1 = IntDomain.ofSeq(5, 7, 10)
val u1 = d1 | IntDomain.ofSeq(12, 15)
u1.view should contain theSameElementsAs Seq(5, 7, 10) ++ Seq(12, 15)
u1 shouldBe a[BitVectorDomain]
val u2 = d1 | IntDomain.ofInterval(10, 15)
u2.view should contain theSameElementsAs Seq(5, 7) ++ (10 to 15)
u2 shouldBe a[BitVectorDomain]
val u3 = d1 | IntDomain.ofInterval(5, 15)
u3.view should contain theSameElementsAs (5 to 15)
u3 shouldBe an[IntervalDomain]
val u6 = d1 | Singleton(15)
u6.view should contain theSameElementsAs Seq(5, 7, 10, 15)
u6 shouldBe a[BitVectorDomain]
d1 | Singleton(10) should be theSameInstanceAs d1
d1 | EmptyIntDomain should be theSameInstanceAs d1
}
it should "compute intersections from bit vectors" in {
val d1 = IntDomain.ofSeq(5, 7, 10)
d1 & IntDomain.ofSeq(7, 15) shouldBe Singleton(7)
d1 & IntDomain.ofSeq(12, 15) shouldBe EmptyIntDomain
d1 & IntDomain.ofInterval(12, 15) shouldBe EmptyIntDomain
d1 & IntDomain.ofInterval(10, 15) shouldBe Singleton(10)
val i1 = d1 & IntDomain.ofInterval(7, 15)
i1.view should contain theSameElementsAs Seq(7, 10)
i1 shouldBe a[BitVectorDomain]
d1 & IntDomain.ofInterval(0, 10) shouldBe d1
d1 & IntDomain.ofSeq(5, 7, 10) shouldBe d1
}
it should "compute unions from singletons" in {
val d1 = Singleton(10)
d1 | Singleton(10) should be theSameInstanceAs d1
d1 | EmptyIntDomain should be theSameInstanceAs d1
val u1 = d1 | Singleton(11)
u1.view should contain theSameElementsAs (10 to 11)
u1 shouldBe an[IntervalDomain]
val u2 = d1 | Singleton(12)
u2.view should contain theSameElementsAs Seq(10, 12)
u2 shouldBe a[BitVectorDomain]
}
}
|
concrete-cp/concrete
|
src/test/scala/concrete/IntDomainTest.scala
|
Scala
|
lgpl-2.1
| 5,586 |
package ca.uqam.euler.nicolas
import scala.collection.immutable.SortedMap
object Problem031 {
object CoinMap {
val coins = List(200, 100, 50, 20, 10, 5, 2, 1)
val empty = CoinMap(coins.map(_ -> 0).toMap)
}
case class CoinMap(m: Map[Int, Int]) {
val sum = m.map(e => e._1 * e._2).sum
def add(coin: Int) = CoinMap(m.updated(coin, m(coin) + 1))
}
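  // Quick illustration (not in the original): CoinMap.empty.add(2).add(1).sum == 3,
  // i.e. `sum` is the total value in pence of the coins counted so far.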
import CoinMap._
def combinations(
candidates: Set[CoinMap],
found: Set[CoinMap],
target: Int): Set[CoinMap] = {
val newFound = found ++ candidates.filter(_.sum == target)
val smallers = candidates.filter(_.sum < target)
if (smallers.isEmpty)
newFound
else {
val newCandidates = for (s <- smallers; c <- coins) yield s.add(c)
combinations(newCandidates, newFound, target)
}
}
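  // The recursion above is a breadth-first expansion: every candidate still below
  // the target is extended with each coin denomination, candidates that hit the
  // target exactly are accumulated in `found`, and duplicates collapse because
  // CoinMap is a case class stored in a Set.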
def main(args: Array[String]) = Answer {
// Answer : 73682
// Note: not very efficient -- there should be a better, simpler way - see forum
val xs = combinations(Set(empty), Set[CoinMap](), 200)
    println(xs.mkString("\n"))
xs.size
}
}
|
nicolaspayette/project-euler
|
src/main/scala/ca/uqam/euler/nicolas/Problem031.scala
|
Scala
|
mit
| 1,064 |
package dotty.communitybuild
import java.nio.file.Paths
import java.nio.file.Path
import java.nio.file.Files
import scala.sys.process._
import CommunityBuildRunner.run
object Main:
private def generateDocs(project: CommunityProject): Seq[Path] =
val name = project.project
try
project.doc()
val pathsOut = s"find community-projects/$name/ -name 'scaladoc.version'".!!
pathsOut.linesIterator.map(Paths.get(_).getParent).toList
catch
case e: Exception =>
e.printStackTrace()
Nil
def withProjects[T](names: Seq[String], opName: String)(op: CommunityProject => T): Seq[T] =
val missing = names.filterNot(projectMap.contains)
if missing.nonEmpty then
val allNames = allProjects.map(_.project).mkString(", ")
println(s"Missing projects: ${missing.mkString(", ")}. All projects: $allNames")
sys.exit(1)
val (failed, completed) = names.flatMap(projectMap.apply).partitionMap( o =>
try
Right(op(o))
catch case e: Throwable =>
e.printStackTrace()
Left(o)
)
if failed.nonEmpty then
println(s"$opName failed for ${failed.mkString(", ")}")
sys.exit(1)
completed
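  // Usage sketch (illustrative; the project name is made up):
  //   withProjects(Seq("munit"), "Building")(_.build())
  // runs the op on each named project, prints any failures and exits non-zero
  // if a name is unknown or the op throws.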
/** Allows running various commands on community build projects. */
def main(args: Array[String]): Unit =
args.toList match
case "publish" :: names if names.nonEmpty =>
withProjects(names, "Publishing")(_.publish())
case "build" :: names if names.nonEmpty =>
withProjects(names, "Build")(_.build())
case "doc" :: "all" :: destStr :: Nil =>
val dest = Paths.get(destStr)
Seq("rm", "-rf", destStr).!
Files.createDirectory(dest)
val (toRun, ignored) =
allProjects.partition( p =>
p.docCommand != null
&& (!p.requiresExperimental || p.compilerSupportExperimental)
)
val paths = toRun.map { project =>
val name = project.project
val projectDest = dest.resolve(name)
val projectRoot = Paths.get(s"community-projects/$name")
println(s"generating docs for $name into $projectDest")
val generatedDocs = generateDocs(project)
if !Files.exists(projectDest) && generatedDocs.nonEmpty then
Files.createDirectory(projectDest)
val docsFiles = generatedDocs.map { docsPath =>
val destFileName =
docsPath.subpath(2, docsPath.getNameCount).toString.replace('/', '_')
Seq("cp", "-r", docsPath.toString, projectDest.resolve(destFileName).toString).!
destFileName
}
name -> docsFiles
}
val (failed, withDocs) = paths.partition{ case (_, paths) => paths.isEmpty }
val indexFile = withDocs.map { case (name, paths) =>
paths.map(p => s"""<a href="$name/$p/index.html">$p</a></br>\\n""")
.mkString(s"<h1>$name</h1>","\\n", "\\n")
}.mkString("<html><body>\\n", "\\n", "\\n</html></body>")
Files.write(dest.resolve("index.html"), indexFile.getBytes)
if ignored.nonEmpty then
println(s"Ignored project without doc command: ${ignored.map(_.project)}")
if failed.nonEmpty then
println(s"Documentation not found for ${failed.map(_._1).mkString(", ")}")
sys.exit(1)
case "doc" :: names if names.nonEmpty =>
val failed = withProjects(names, "Documenting"){ p =>
val docsRoots = generateDocs(p)
println(docsRoots)
if docsRoots.nonEmpty then println(s"Docs for $p generated in $docsRoots")
if docsRoots.isEmpty then Some(p.project) else None
}.flatten
if failed.nonEmpty then
println(s"Documentation not found for ${failed.mkString(", ")}")
sys.exit(1)
case "run" :: names if names.nonEmpty =>
given CommunityBuildRunner()
withProjects(names, "Running")(_.run())
case args =>
println("USAGE: <COMMAND> <PROJECT NAME>")
println("COMMAND is one of: publish, build, doc, doc all, run")
println("Available projects are:")
allProjects.foreach { k =>
println(s"\\t${k.project}")
}
sys.exit(1)
|
lampepfl/dotty
|
community-build/src/scala/dotty/communitybuild/Main.scala
|
Scala
|
apache-2.0
| 4,225 |
package test
import org.specs2.mutable.Specification
import java.io.IOException
import java.net.InetSocketAddress
import java.lang.reflect.Proxy
import org.apache.avro.specific.SpecificData
import org.apache.avro.ipc.netty.NettyServer
import org.apache.avro.ipc.netty.NettyTransceiver
import org.apache.avro.ipc.Server
import org.apache.avro.ipc.specific.SpecificRequestor
import org.apache.avro.ipc.specific.SpecificResponder
import example.proto.Mail
import example.proto.Message
class SpecificRPCTest extends Specification {
skipAll // RPC tests fail on Linux (Ubuntu 16.04), solution unknown
// adapted from https://github.com/phunt/avro-rpc-quickstart
"A case class " should {
"serialize and deserialize correctly via rpc" in {
class MailImpl extends Mail {
// in this simple example just return details of the message
def send(message: Message): String = {
System.out.println("Sending message")
val response: String = message.body
response.toString
}
}
System.out.println("Starting server")
// usually this would be another app, but for simplicity
val protocol = Mail.PROTOCOL
val responder = new SpecificResponder(protocol, new MailImpl())
val server = new NettyServer(responder, new InetSocketAddress(65111))
System.out.println("Server started")
val client = new NettyTransceiver(new InetSocketAddress(65111))
// client code - attach to the server and send a message
val requestor = new SpecificRequestor(protocol, client, SpecificData.get)
val mailProxy: Mail = Proxy.newProxyInstance(
SpecificData.get.getClassLoader,
Array(classOf[Mail]),
requestor).asInstanceOf[Mail]
val message = new Message("avro_user", "pat", "hello_world")
System.out.println("Calling proxy.send with message: " + message.toString)
System.out.println("Result: " + mailProxy.send(message).toString)
val received: String = mailProxy.send(message).toString
// cleanup
client.close
server.close
System.out.println("Server stopped")
received === message.body
}
}
}
|
julianpeeters/sbt-avrohugger
|
src/sbt-test/avrohugger/SpecificVectorSerializationTests/src/test/scala/specific/SpecificRPCTest.scala
|
Scala
|
apache-2.0
| 2,218 |
/* Copyright 2009-2016 EPFL, Lausanne */
object CaseObject1 {
abstract sealed class A
case class B(size: Int) extends A
case object C extends A
def foo(): A = {
C
}
def foo1(a: A): A = a match {
case C => a
case B(s) => a
}
def foo2(a: A): A = a match {
case c @ C => c
case B(s) => a
}
}
|
regb/leon
|
src/test/resources/regression/verification/purescala/valid/CaseObject1.scala
|
Scala
|
gpl-3.0
| 334 |
package sql
import scala.virtualization.lms.common._
trait QueryAST {
type Table
type Schema = Vector[String]
// relational algebra ops
sealed abstract class Operator
case class Scan(name: Table, schema: Schema, delim: Char, extSchema: Boolean) extends Operator
case class PrintCSV(parent: Operator) extends Operator
case class Project(outSchema: Schema, inSchema: Schema, parent: Operator) extends Operator
case class Filter(pred: Predicate, parent: Operator) extends Operator
case class Join(parent1: Operator, parent2: Operator) extends Operator
case class Group(keys: Schema, agg: Schema, parent: Operator) extends Operator
case class HashJoin(parent1: Operator, parent2: Operator) extends Operator
  // filter predicates
sealed abstract class Predicate
case class Eq(a: Ref, b: Ref) extends Predicate
sealed abstract class Ref
case class Field(name: String) extends Ref
case class Value(x: Any) extends Ref
// some smart constructors
def Schema(schema: String*): Schema = schema.toVector
def Scan(tableName: String): Scan = Scan(tableName, None, None)
def Scan(tableName: String, schema: Option[Schema], delim: Option[Char]): Scan
}
|
RomanTsegelskyi/lms-truffle
|
src/main/scala/sql/QueryAST.scala
|
Scala
|
gpl-2.0
| 1,189 |
package jinesra.vkMessageHistory
import scalax.file.Path
object IO {
def readFromFile(path: String) = {
Path.fromString(path).lines(includeTerminator = true).
dropWhile { _.isEmpty }.
takeWhile { _.nonEmpty }.
fold("")((wholeString, newLine) => wholeString ++ newLine)
}
def writeToFile(offset: Int, content: String) =
{
val p = Path.fromString("/home/arsenij/chatik/chatik_" + offset.toString).createFile(failIfExists = false)
p.write(content)
}
}
|
arsenij-solovjev/vkMessageHistory
|
src/main/scala/jinesra/vkMessageHistory/IO.scala
|
Scala
|
apache-2.0
| 504 |
package lila.event
import org.joda.time.DateTime
import play.api.data._
import play.api.data.Forms._
import play.api.i18n.Lang
import lila.common.Form.stringIn
import lila.common.Form.UTCDate._
import lila.i18n.LangList
import lila.user.User
object EventForm {
object icon {
val default = ""
val broadcast = "broadcast.icon"
val choices = List(
default -> "Microphone",
"lichess.event.png" -> "Lichess",
"trophy.event.png" -> "Trophy",
broadcast -> "Broadcast",
"offerspill.logo.png" -> "Offerspill"
)
}
val form = Form(
mapping(
"title" -> text(minLength = 3, maxLength = 40),
"headline" -> text(minLength = 5, maxLength = 30),
"description" -> optional(text(minLength = 5, maxLength = 4000)),
"homepageHours" -> bigDecimal(10, 2).verifying(d => d >= 0 && d <= 24),
"url" -> nonEmptyText,
"lang" -> text.verifying(l => LangList.allChoices.exists(_._1 == l)),
"enabled" -> boolean,
"startsAt" -> utcDate,
"finishesAt" -> utcDate,
"hostedBy" -> optional {
lila.user.UserForm.historicalUsernameField
.transform[User.ID](_.toLowerCase, identity)
},
"icon" -> stringIn(icon.choices),
"countdown" -> boolean
)(Data.apply)(Data.unapply)
) fill Data(
title = "",
headline = "",
description = none,
homepageHours = 0,
url = "",
lang = lila.i18n.enLang.code,
enabled = true,
startsAt = DateTime.now,
finishesAt = DateTime.now,
countdown = true
)
case class Data(
title: String,
headline: String,
description: Option[String],
homepageHours: BigDecimal,
url: String,
lang: String,
enabled: Boolean,
startsAt: DateTime,
finishesAt: DateTime,
hostedBy: Option[User.ID] = None,
icon: String = "",
countdown: Boolean
) {
def update(event: Event, by: User) =
event.copy(
title = title,
headline = headline,
description = description,
homepageHours = homepageHours.toDouble,
url = url,
lang = Lang(lang),
enabled = enabled,
startsAt = startsAt,
finishesAt = finishesAt,
hostedBy = hostedBy,
icon = icon.some.filter(_.nonEmpty),
countdown = countdown,
updatedAt = DateTime.now.some,
updatedBy = Event.UserId(by.id).some
)
def make(userId: String) =
Event(
_id = Event.makeId,
title = title,
headline = headline,
description = description,
homepageHours = homepageHours.toDouble,
url = url,
lang = Lang(lang),
enabled = enabled,
startsAt = startsAt,
finishesAt = finishesAt,
createdBy = Event.UserId(userId),
createdAt = DateTime.now,
updatedAt = none,
updatedBy = none,
hostedBy = hostedBy,
icon = icon.some.filter(_.nonEmpty),
countdown = countdown
)
}
object Data {
def make(event: Event) =
Data(
title = event.title,
headline = event.headline,
description = event.description,
homepageHours = event.homepageHours,
url = event.url,
lang = event.lang.code,
enabled = event.enabled,
startsAt = event.startsAt,
finishesAt = event.finishesAt,
hostedBy = event.hostedBy,
icon = ~event.icon,
countdown = event.countdown
)
}
}
|
luanlv/lila
|
modules/event/src/main/EventForm.scala
|
Scala
|
mit
| 3,574 |
package se.meldrum.machine.http.routes
import akka.http.scaladsl.server.Directives._
import se.meldrum.machine.dao.TaskDao
import se.meldrum.machine.db.models.Task
import slick.driver.PostgresDriver.api._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}
class TaskRoute(implicit db: Database, implicit val ec: ExecutionContext) {
private val dao = new TaskDao()
import se.meldrum.machine.http.JsonSupport._
val route =
pathPrefix("task") {
pathPrefix(IntNumber) { id =>
get {
complete(dao.getTasks(id))
}
}~
path("create") {
post {
entity(as[Task]) { task =>
complete(createTask(task))
}
}
}
}
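  // Illustrative requests this route accepts (paths relative to the server root):
  //   GET  /task/42     -> completes with dao.getTasks(42) rendered as JSON
  //   POST /task/create -> expects a Task JSON body and answers with a status string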
private def createTask(t: Task): Future[String] = {
val result = dao.create(t).map {
case Success(t) => "Created task"
case Failure(e) => e.getMessage
}
result
}
}
|
Max-Meldrum/machine
|
src/main/scala/se/meldrum/machine/http/routes/TaskRoute.scala
|
Scala
|
apache-2.0
| 947 |
/*
* Artificial Intelligence for Humans
* Volume 2: Nature Inspired Algorithms
* Java Version
* http://www.aifh.org
* http://www.jeffheaton.com
*
* Code repository:
* https://github.com/jeffheaton/aifh
*
* Copyright 2014 by Jeff Heaton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For more information on Heaton Research copyrights, licenses
* and trademarks visit:
* http://www.heatonresearch.com/copyright
*/
package com.heatonresearch.aifh.genetic.crossover
import com.heatonresearch.aifh.AIFHError
import com.heatonresearch.aifh.evolutionary.genome.Genome
import com.heatonresearch.aifh.evolutionary.opp.EvolutionaryOperator
import com.heatonresearch.aifh.evolutionary.train.EvolutionaryAlgorithm
import com.heatonresearch.aifh.genetic.genome.IntegerArrayGenome
import com.heatonresearch.aifh.randomize.GenerateRandom
/**
* A simple cross over where genes are simply "spliced". Genes are not allowed
* to repeat. This method only works with IntegerArrayGenome.
*/
object SpliceNoRepeat {
/**
* Get a list of the genes that have not been taken before. This is useful
* if you do not wish the same gene to appear more than once in a
* genome.
*
* @param source The pool of genes to select from.
* @param taken An array of the taken genes.
* @return Those genes in source that are not taken.
*/
private def getNotTaken(source: IntegerArrayGenome, taken: java.util.Set[Integer]): Int = {
for (trial <- source.getData) {
if (!taken.contains(trial)) {
taken.add(trial)
return trial
}
}
throw new AIFHError("Ran out of integers to select.")
}
}
/**
* Construct a splice crossover.
*
* @param cutLength The cut length.
*/
class SpliceNoRepeat(owner: EvolutionaryAlgorithm,cutLength: Int) extends EvolutionaryOperator(owner,2,2) {
override def performOperation(rnd: GenerateRandom, parents: Array[Genome], parentIndex: Int,
offspring: Array[Genome], offspringIndex: Int) {
val mother = parents(parentIndex).asInstanceOf[IntegerArrayGenome]
val father = parents(parentIndex + 1).asInstanceOf[IntegerArrayGenome]
val offspring1 = owner.population.genomeFactory.factor.asInstanceOf[IntegerArrayGenome]
val offspring2 = owner.population.genomeFactory.factor.asInstanceOf[IntegerArrayGenome]
offspring(offspringIndex) = offspring1
offspring(offspringIndex + 1) = offspring2
val geneLength = mother.size
val cutPoint1 = rnd.nextInt(geneLength - this.cutLength)
val cutPoint2 = cutPoint1 + this.cutLength
val taken1 = new java.util.HashSet[Integer]
val taken2 = new java.util.HashSet[Integer]
for(i <- 0 until geneLength) {
if (!((i < cutPoint1) || (i > cutPoint2))) {
offspring1.copy(father, i, i)
offspring2.copy(mother, i, i)
taken1.add(father.getData(i))
taken2.add(mother.getData(i))
}
}
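    // Outside the cut region, each remaining position is filled with the first
    // gene of the other parent that has not been taken yet (see getNotTaken),
    // so no gene repeats within an offspring.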
for(i <- 0 until geneLength) {
if ((i < cutPoint1) || (i > cutPoint2)) {
offspring1.getData(i) = SpliceNoRepeat.getNotTaken(mother, taken1)
offspring2.getData(i) = SpliceNoRepeat.getNotTaken(father, taken2)
}
}
}
}
|
PeterLauris/aifh
|
vol2/vol2-scala-examples/src/main/scala/com/heatonresearch/aifh/genetic/crossover/SpliceNoRepeat.scala
|
Scala
|
apache-2.0
| 3,678 |
/**
* Copyright (C) 2011 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.event.events
import org.orbeon.oxf.xforms.event.XFormsEvents._
import org.orbeon.oxf.xforms.event.{XFormsEvent, XFormsEventTarget}
import org.orbeon.oxf.xforms.event.XFormsEvent._
class XXFormsSetindexEvent(target: XFormsEventTarget, properties: PropertyGetter)
extends XFormsEvent(XXFORMS_SETINDEX, target, properties, bubbles = true, cancelable = false) {
def this(target: XFormsEventTarget, index: Int) =
this(target, Map("index" -> Option(index)))
def index = property[Int]("index").get
}
|
orbeon/orbeon-forms
|
xforms-runtime/shared/src/main/scala/org/orbeon/oxf/xforms/event/events/XXFormsSetindexEvent.scala
|
Scala
|
lgpl-2.1
| 1,191 |
package edu.uchicago.cs.encsel.dataset.feature
import java.io.File
import edu.uchicago.cs.encsel.dataset.column.Column
import edu.uchicago.cs.encsel.model.DataType
import org.junit.Assert.assertEquals
import org.junit.Test
class AvgRunLengthTest {
@Test
def testExtract: Unit = {
val col = new Column(null, -1, "", DataType.INTEGER)
col.colFile = new File("src/test/resource/test_col_avgrl.data").toURI
val features = AvgRunLength.extract(col).toArray
assertEquals(1, features.length)
assertEquals("AvgRunLength", features(0).featureType)
assertEquals("value", features(0).name)
assertEquals(1.9, features(0).value, 0.001)
}
}
|
harperjiang/enc-selector
|
src/test/scala/edu/uchicago/cs/encsel/dataset/feature/AvgRunLengthTest.scala
|
Scala
|
apache-2.0
| 667 |
/*
* Copyright (c) 2014-2016
* nonblocking.at gmbh [http://www.nonblocking.at]
*
* This file is part of Cliwix.
*
* Cliwix is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package at.nonblocking.cliwix.core.util
import java.{util => jutil}
import at.nonblocking.cliwix.core.Cliwix
import com.typesafe.scalalogging.slf4j.LazyLogging
import org.mapdb.DBMaker
import scala.annotation.tailrec
import scala.util.Random
/**
* Collections created by this factory have a limited RAM usage
*/
sealed trait ResourceAwareCollectionFactory {
val listSizeThreshold = 100000
def createMap[K, V](sizeHint: Long = listSizeThreshold * 2): jutil.Map[K, V];
}
private[core] class ResourceAwareCollectionFactoryImpl extends ResourceAwareCollectionFactory with LazyLogging {
val availableRamMB = Runtime.getRuntime.maxMemory() / 1024 / 1024
logger.info(s"Available RAM: $availableRamMB MB")
def createMap[K, V](sizeHint: Long) = {
if (sizeHint < listSizeThreshold) {
new jutil.HashMap
} else {
val db = if (availableRamMB < 2048) {
DBMaker
.newTempFileDB()
.transactionDisable()
.deleteFilesAfterClose()
.closeOnJvmShutdown()
.make()
} else {
DBMaker
.newTempFileDB()
.transactionDisable()
.mmapFileEnablePartial()
.deleteFilesAfterClose()
.closeOnJvmShutdown()
.make()
}
db.getHashMap(randomString)
}
}
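  // In short: maps expected to stay small live on the JVM heap, while larger ones
  // are backed by a MapDB temp file (memory-mapped only when roughly 2 GB of heap
  // or more is available), which keeps RAM usage bounded for very large imports.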
def randomString: String = {
@tailrec
def randomStringTailRecursive(n: Int, list: List[Char]): List[Char] = {
if (n == 1) Random.nextPrintableChar :: list
else randomStringTailRecursive(n - 1, Random.nextPrintableChar :: list)
}
randomStringTailRecursive(10, Nil).mkString
}
}
class ResourceAwareCollectionFactoryDummyImpl extends ResourceAwareCollectionFactory {
def createMap[K, V](sizeHint: Long) = new jutil.HashMap
}
|
nonblocking/cliwix
|
cliwix-core/src/main/scala/at/nonblocking/cliwix/core/util/ResourceAwareCollectionFactory.scala
|
Scala
|
agpl-3.0
| 2,548 |
/**
* Copyright 2009 Jorge Ortiz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**/
package com.github.nscala_time.time
import org.joda.time._
import com.github.nscala_time.PimpedType
class RichLocalDateTime(val underlying: LocalDateTime) extends Super with PimpedType[LocalDateTime] {
def -(duration: ReadableDuration): LocalDateTime = underlying.minus(duration)
def -(period: ReadablePeriod): LocalDateTime = underlying.minus(period)
def -(builder: DurationBuilder): LocalDateTime = underlying.minus(builder.underlying)
def +(duration: ReadableDuration): LocalDateTime = underlying.plus(duration)
def +(period: ReadablePeriod): LocalDateTime = underlying.plus(period)
def +(builder: DurationBuilder): LocalDateTime = underlying.plus(builder.underlying)
def second: LocalDateTime.Property = underlying.secondOfMinute
def minute: LocalDateTime.Property = underlying.minuteOfHour
def hour: LocalDateTime.Property = underlying.hourOfDay
def day: LocalDateTime.Property = underlying.dayOfMonth
def week: LocalDateTime.Property = underlying.weekOfWeekyear
def month: LocalDateTime.Property = underlying.monthOfYear
def year: LocalDateTime.Property = underlying.year
def century: LocalDateTime.Property = underlying.centuryOfEra
def era: LocalDateTime.Property = underlying.era
def withSecond(second: Int) = underlying.withSecondOfMinute(second)
def withMinute(minute: Int) = underlying.withMinuteOfHour(minute)
def withHour(hour: Int) = underlying.withHourOfDay(hour)
def withDay(day: Int) = underlying.withDayOfMonth(day)
def withWeek(week: Int) = underlying.withWeekOfWeekyear(week)
def withMonth(month: Int) = underlying.withMonthOfYear(month)
def withYear(year: Int) = underlying.withYear(year)
def withCentury(century: Int) = underlying.withCenturyOfEra(century)
def withEra(era: Int) = underlying.withEra(era)
}
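// Usage sketch (illustrative; assumes the usual nscala-time implicits are in scope):
//   val dt = LocalDateTime.now()
//   dt + 2.hours - 15.minutes      // arithmetic via the +/- operators above
//   dt.withHour(9).withMinute(0)   // field updates via the with* helpers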
|
mamdouhweb/nscala-time
|
src/main/scala/com/github/nscala_time/time/RichLocalDateTime.scala
|
Scala
|
apache-2.0
| 2,402 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.plugins.dependencies
import java.net.URL
import java.lang.{ ClassLoader => JClassLoader }
/**
* Created by mariu on 2016-07-16.
*/
object ClassLoaderHelper {
def URLClassLoader(urls: Seq[URL], parent: JClassLoader) = {
new scala.reflect.internal.util.ScalaClassLoader.URLClassLoader(urls, parent)
}
}
|
chipsenkbeil/incubator-toree
|
plugins/src/test/scala-2.11/org/apache/toree/plugins/dependencies/ClassLoaderHelper.scala
|
Scala
|
apache-2.0
| 1,155 |
/*
* Copyright (C) 2013 Alcatel-Lucent.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Licensed to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package molecule
package benchmarks.comparison
package molecule.executors
import java.util.concurrent.ExecutorService
import platform.Executor
import platform.executors.TrampolineExecutor
import platform.{ ThreadFactory, MoleculeThread }
/**
* Executor used by the Flow Parallel Scheduler
*/
final class TrampolineTPExecutorLog(pool: ExecutorService, group: ThreadGroup) extends Executor {
/**
* One task queue per kernel threads. A kernel thread will submit a task to the
* thread pool only if there are more than one task in its local task queue.
*/
private final val context = new ThreadLocal[TrampolineTask]() {
override protected def initialValue() = null
}
private[this] final class TrampolineTask( final var nextTask: Runnable) extends Runnable {
def run() = {
// When we reach here, the next task is null
context.set(this)
while (nextTask != null) {
val task = nextTask
nextTask = null
task.run()
}
}
}
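  // Trampolining in a nutshell: a task scheduled from a molecule kernel thread is
  // stashed in that thread's local `nextTask` slot and executed by the loop above,
  // unless a task is already pending, in which case the pending one is handed off
  // to the pool. The submitCount/bounceCount counters in the companion object
  // record how often each path is taken.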
def execute(task: Runnable) {
//println(Thread.currentThread())
//println(Thread.currentThread().getThreadGroup() + "==" + group)
// it is necessary to compare the marker trait because some frameworks like swing
// copy the thread group of the thread that started it...
try {
val thread = Thread.currentThread()
if ((thread.getThreadGroup() eq group) && thread.isInstanceOf[MoleculeThread]) {
val trampoline = context.get()
if (trampoline.nextTask != null) {
//println(Thread.currentThread + ":FAIL")
TrampolineTPExecutorLog.submitCount.getAndIncrement()
pool.submit(new TrampolineTask(trampoline.nextTask))
} else {
TrampolineTPExecutorLog.bounceCount.getAndIncrement()
}
trampoline.nextTask = task
} else {
TrampolineTPExecutorLog.submitCount.getAndIncrement()
pool.submit(new TrampolineTask(task))
}
} catch {
case t: java.util.concurrent.RejectedExecutionException =>
// stdin is never gracefully shutdown and may submit a last key event
// to this pool, which has been shutdown.
if (Thread.currentThread.getThreadGroup().getName() != "stdin")
throw t
}
}
def shutdownNow() =
pool.shutdownNow()
/**
* execute shutdown task.
*/
def shutdown() =
pool.shutdown()
}
object TrampolineTPExecutorLog {
import java.util.concurrent.atomic.AtomicInteger
val submitCount = new AtomicInteger(0)
val bounceCount = new AtomicInteger(0)
def reset() = {
submitCount.set(0)
bounceCount.set(0)
}
import java.util.concurrent.{ TimeUnit, LinkedBlockingQueue, ThreadPoolExecutor }
// def apply(tf:ThreadFactory, nbThreads:Int):TrampolineTPExecutorLog =
// new TrampolineTPExecutorLog(new ThreadPoolExecutor(nbThreads, nbThreads,
// 0L, TimeUnit.MILLISECONDS,
// new LinkedBlockingQueue[Runnable](),
// tf), tf.group)
def apply(tf: ThreadFactory, nbThreads: Int): TrampolineTPExecutorLog = {
val tp = new ThreadPoolExecutor(nbThreads, nbThreads,
30L, TimeUnit.SECONDS,
new LinkedBlockingQueue[Runnable](),
tf)
tp.allowCoreThreadTimeOut(true)
new TrampolineTPExecutorLog(tp, tf.group)
}
}
|
molecule-labs/molecule
|
molecule-benchmarks/src/main/scala/molecule/benchmarks/comparison/molecule/executors/TrampolineTPExecutorLog.scala
|
Scala
|
apache-2.0
| 4,088 |
package org.jetbrains.plugins.scala
package codeInspection
package typeChecking
import com.intellij.codeInspection.LocalInspectionTool
import com.intellij.testFramework.EditorTestUtil.{SELECTION_END_TAG => END, SELECTION_START_TAG => START}
/**
* Nikolay.Tropin
* 9/26/13
*/
abstract class ComparingUnrelatedTypesInspectionTest extends ScalaInspectionTestBase {
override protected val classOfInspection: Class[_ <: LocalInspectionTool] =
classOf[ComparingUnrelatedTypesInspection]
}
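// Each TestN class below pins the expected inspection message via `description`
// and feeds a code fragment whose ${START}...$END selection must either be
// flagged (checkTextHasError) or stay clean (checkTextHasNoErrors).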
class Test1 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "Short", "Int")
def testWeakConformance(): Unit = checkTextHasNoErrors(
s"""val a = 0
|val b: Short = 1
|${START}b == a$END
""".stripMargin
)
}
class Test2 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "Double", "Int")
def testWeakConformance(): Unit = checkTextHasNoErrors(
s"""val a = 0
|val b = 1.0
|${START}b != a$END
""".stripMargin
)
}
class Test3 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "Double", "Byte")
def testWeakConformance(): Unit = checkTextHasNoErrors(
s"""val a = 0.0
|val b: Byte = 100
|${START}a == b$END
""".stripMargin
)
}
class Test4 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "Int", "Double")
def testWeakConformance(): Unit = checkTextHasNoErrors(
s"${START}1 == 1.0$END"
)
}
class Test5 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "Int", "Boolean")
def testValueType(): Unit = checkTextHasError(
s"""val a = true
|val b = 1
|${START}b == a$END
""".stripMargin
)
def testInstanceOf(): Unit = checkTextHasError(
s"${START}1.isInstanceOf[Boolean]$END"
)
}
class Test6 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "Boolean", "Double")
def testValueType(): Unit = checkTextHasError(
s"""val a = true
|val b = 0.0
|${START}a != b$END
""".stripMargin
)
}
class Test7 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "Boolean", "Int")
def testValueType(): Unit = checkTextHasError(
s"${START}true != 0$END"
)
}
class Test8 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "Array[Char]", "String")
def testString(): Unit = checkTextHasError(
s"""val a = "a"
|val b = Array('a')
|${START}b == a$END
""".stripMargin
)
}
class Test9 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "String", "Int")
def testString(): Unit = checkTextHasError(
s"""val a = "0"
|val b = 0
|${START}a == b$END
""".stripMargin
)
}
class Test10 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "String", "Char")
def testString(): Unit = checkTextHasError(
s"""val s = "s"
|${START}s == 's'$END
""".stripMargin
)
}
class Test11 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "CharSequence", "String")
def testString(): Unit = checkTextHasNoErrors(
s"""val a = "a"
|val b: CharSequence = null
|${START}b != a$END
""".stripMargin
)
}
class Test12 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "scala.collection.Iterable", "scala.collection.List")
def testInheritors(): Unit = checkTextHasNoErrors(
s"""val a = scala.collection.Iterable(1)
|val b = List(0)
|${START}b == a$END
""".stripMargin
)
}
class Test13 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "A", "B")
def testInheritors(): Unit = checkTextHasNoErrors(
s"""case class A(i: Int)
|final class B extends A(1)
|val a: A = A(0)
|val b: B = new B
|${START}a == b$END
""".stripMargin
)
def testFinal(): Unit = checkTextHasError(
s"""final class A extends Serializable
|final class B extends Serializable
|val a: A = new A
|val b: B = new B
|${START}a == b$END
""".stripMargin
)
def testInstanceOf(): Unit = checkTextHasError(
s"""final class A extends Serializable
|final class B extends Serializable
|val a: A = new A
|${START}a.isInstanceOf[B]$END
"""
)
def testTraits(): Unit = checkTextHasNoErrors(
s"""trait A
|trait B
|val a: A = _
|val b: B = _
|${START}a == b$END
""".stripMargin
)
}
class Test14 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "B", "A")
def testInheritors(): Unit = checkTextHasNoErrors(
s"""trait A
|object B extends A
|${START}B.isInstanceOf[A]$END
""".stripMargin
)
}
class Test15 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "A", "B.type")
def testObject(): Unit = checkTextHasNoErrors(
s"""trait A
|object B extends A
|val a: A = _
|${START}a == B$END
""".stripMargin
)
def testObject2(): Unit = checkTextHasNoErrors(
s"""trait A
|object B extends A
|class C extends A
|val c: A = new C
|${START}c != B$END
""".stripMargin
)
}
class Test16 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "C", "B.type")
def testObject(): Unit = checkTextHasError(
s"""trait A
|object B extends A
|class C extends A
|val c = new C
|${START}c == B$END
""".stripMargin
)
}
class Test17 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "Int", "java.lang.Integer")
def testBoxedTypes(): Unit = checkTextHasNoErrors(
"""val i = new java.lang.Integer(0)
|i == 100
""".stripMargin
)
}
class Test18 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "Boolean", "java.lang.Boolean")
def testBoxedTypes(): Unit = checkTextHasNoErrors(
"""val b = new java.lang.Boolean(false)
|b equals true
""".stripMargin
)
}
class Test19 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "java.lang.Integer", "Null")
def testBoxedTypes(): Unit = checkTextHasNoErrors(
"def test(i: Integer) = if (i == null) \\"foo\\" else \\"bar\\""
)
}
class Test20 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "Seq[Int]", "List[_]")
def testExistential(): Unit = checkTextHasNoErrors(
"Seq(1).isInstanceOf[List[_])"
)
}
class Test21 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "Some[Int]", "List[_]")
def testExistential(): Unit = checkTextHasError(
s"${START}Some(1).isInstanceOf[List[_]]$END"
)
}
class Test22 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "Some[_]", "Some[Int]")
def testExistential(): Unit = checkTextHasNoErrors(
"def foo(x: Some[_]) { x == Some(1) }"
)
}
class Test23 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "Some[_]", "Seq[Int]")
def testExistential(): Unit = checkTextHasError(
s"def foo(x: Some[_]) { ${START}x == Seq(1)$END }"
)
}
class Test24 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "BigInt", "Int")
def testNumeric(): Unit = checkTextHasNoErrors(
"BigInt(1) == 1"
)
}
class Test25 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "BigInt", "Long")
def testNumeric(): Unit = checkTextHasNoErrors(
"BigInt(1) == 1L"
)
}
class Test26 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "BigInt", "java.lang.Integer")
def testNumeric(): Unit = checkTextHasNoErrors(
"BigInt(1) == new java.lang.Integer(1)"
)
}
class Test27 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "BigInt", "Boolean")
def testNumeric(): Unit = checkTextHasError(
s"${START}BigInt(1) == true$END"
)
}
class Test28 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "BigInt", "String")
def testNumeric(): Unit = checkTextHasError(
s"${START}BigInt(1) == 1.toString$END"
)
}
class Test29 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "A.Coord", "Int")
def testTypeAlias(): Unit = checkTextHasNoErrors(
"""
|object A {
| type Coord = Float
| def isZero(n: Coord): Boolean = {
| n == 0
| }
|}
""".stripMargin
)
def testTypeAlias2(): Unit = checkTextHasError(
s"""
|object A {
| type Coord = String
| def isZero(n: Coord): Boolean = {
| ${START}n == 0$END
| }
|}
""".stripMargin
)
def testTypeAlias3(): Unit = checkTextHasNoErrors(
"""
|trait A {
| type Coord
|
| def isZero(n: Coord): Boolean = {
| n == 0
| }
|}
""".stripMargin
)
}
class Test30 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "Dummy", "Int")
def testOverridenMethods(): Unit = checkTextHasNoErrors(
"""
|case class Dummy(v: Int) {
| def ==(value: Int): String = v + " == " + value
| def !=(value: Int): Boolean = v != value
|}
|
|object Test {
| val a: String = Dummy(5) == 10
| val b: Boolean = Dummy(5) != 10
|}
""".stripMargin
)
def testOverridenMethods2(): Unit = checkTextHasError(
s"""
|case class Dummy(v: Int) {
| def ==(value: Int): String = v + " == " + value
| def !=(value: Int): Boolean = v != value
|}
|
|object Test {
| val b: Boolean = ${START}Dummy(5) eq 10$END
|}
""".stripMargin
)
def testOverridenEquals(): Unit = checkTextHasError(
s"""
|case class Dummy(v: Int) {
| override def equals(other: Any): Boolean = other match {
| case Dummy(o) => o == v
| case _ => false
| }
|}
|
|object Test {
| val b: Boolean = ${START}Dummy(5) equals 10$END
|}
""".stripMargin
)
def testOverridenEquals2(): Unit = checkTextHasError(
s"""
|case class Dummy(v: Int) {
| override def equals(other: Any): Boolean = other match {
| case Dummy(o) => o == v
| case _ => false
| }
|}
|
|object Test {
| val b: Boolean = ${START}Dummy(5) == 10$END
|}
""".stripMargin)
}
class Test31 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "FooBinder", "String")
def testOverridenWithImplicitParam(): Unit = checkTextHasError(
s"""
|class Store(val foo: Int, val bar: String)
|trait Binder[T] {
| def get(implicit store: Store): T
| def ==(other: Binder[T])(implicit store: Store) = get == other.get
| def ==(other: T)(implicit store: Store) = get == other
|}
|class FooBinder extends Binder[Int] {
| def get(implicit store: Store) = store.foo
|}
|class BarBinder extends Binder[String] {
| def get(implicit store: Store) = store.bar
|}
|
|val fooBinder = new FooBinder
|val barBinder = new BarBinder
|
|{
| implicit val store = new Store(12, ":)")
| (fooBinder == 12, fooBinder == 3, ${START}fooBinder == ":)"$END, barBinder == ":)") // (true, false, false, true)
|}
""".stripMargin
)
}
class Test32 extends ComparingUnrelatedTypesInspectionTest {
override protected val description: String =
InspectionBundle.message("comparing.unrelated.types.hint", "abc.Dummy", "cde.Dummy")
def testSameNameTypes(): Unit = checkTextHasError(
s"""
|package abc {
| class Dummy
|}
|
|package cde {
| class Dummy
|}
|
|object Test {
| val d1 = new abc.Dummy
| val d2 = new cde.Dummy
| ${START}d1 == d2$END
|}
""".stripMargin
)
}
|
loskutov/intellij-scala
|
test/org/jetbrains/plugins/scala/codeInspection/typeChecking/ComparingUnrelatedTypesInspectionTest.scala
|
Scala
|
apache-2.0
| 14,697 |
/**
* (c) Copyright 2014 WibiData, Inc.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kiji.express.flow
import scala.collection.JavaConverters.seqAsJavaListConverter
import scala.collection.mutable.Buffer
import com.twitter.scalding.Args
import com.twitter.scalding.JobTest
import com.twitter.scalding.TextLine
import com.twitter.scalding.Tsv
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.kiji.express.KijiSuite
import org.kiji.express.flow.EntityId.HashedEntityId
import org.kiji.express.flow.util.ResourceUtil
import org.kiji.express.flow.util.TestingResourceUtil
import org.kiji.schema.EntityIdFactory
import org.kiji.schema.KijiTable
import org.kiji.schema.KijiURI
import org.kiji.schema.layout.KijiTableLayout
import org.kiji.schema.layout.KijiTableLayouts
/**
* Unit tests for [[org.kiji.express.flow.EntityId]].
*/
@RunWith(classOf[JUnitRunner])
class EntityIdSuite extends KijiSuite {
import org.kiji.express.flow.EntityIdSuite._
/** Table layout with formatted entity IDs to use for tests. */
val formattedEntityIdLayout: KijiTableLayout =
TestingResourceUtil.layout(KijiTableLayouts.FORMATTED_RKF)
// Create a table to use for testing
val formattedTableUri: KijiURI =
ResourceUtil.doAndRelease(makeTestKijiTable(formattedEntityIdLayout)) { table: KijiTable =>
table.getURI
}
/** Table layout with hashed entity IDs to use for tests. */
val hashedEntityIdLayout: KijiTableLayout =
TestingResourceUtil.layout(KijiTableLayouts.HASHED_FORMATTED_RKF)
// Create a table to use for testing
val hashedTableUri: KijiURI =
ResourceUtil.doAndRelease(makeTestKijiTable(hashedEntityIdLayout)) { table: KijiTable =>
table.getURI
}
val configuration: Configuration = HBaseConfiguration.create()
val formattedEidFactory = EntityIdFactory.getFactory(formattedEntityIdLayout)
val hashedEidFactory = EntityIdFactory.getFactory(hashedEntityIdLayout)
// ------- "Unit tests" for comparisons and creation. -------
test("Create an Express EntityId from a Kiji EntityId and vice versa in a formatted table.") {
val expressEid = EntityId("test", "1", "2", 1, 7L)
val kijiEid = expressEid.toJavaEntityId(formattedEidFactory)
val expected: java.util.List[AnyRef] =
Seq[AnyRef]("test", "1", "2", 1: java.lang.Integer, 7L: java.lang.Long).asJava
assert(expected === kijiEid.getComponents)
val recreate = EntityId.fromJavaEntityId(kijiEid)
assert(expressEid === recreate)
assert(recreate(0) === "test")
}
test("Create an Express EntityId from a Kiji EntityId and vice versa in a hashed table.") {
val origKijiEid = hashedEidFactory.getEntityId("test")
val expressEid = HashedEntityId(origKijiEid.getHBaseRowKey)
val expressToKijiEid = expressEid.toJavaEntityId(hashedEidFactory)
val recreate = EntityId.fromJavaEntityId(expressToKijiEid)
assert(recreate.components.equals(List(origKijiEid.getHBaseRowKey)))
}
test("Creating an EntityId from a Hashed table fails if there is more than one component.") {
val eid: EntityId = EntityId("one", 2)
val exception = intercept[org.kiji.schema.EntityIdException] {
eid.toJavaEntityId(hashedEidFactory)
}
assert(exception.getMessage.contains("Too many components"))
}
test("Test equality between two EntityIds.") {
val eidComponents1: EntityId = EntityId("test", 1)
val eidComponents2: EntityId = EntityId("test", 1)
assert(eidComponents1 === eidComponents2)
assert(eidComponents2 === eidComponents1)
}
test("Test comparison between two EntityIds.") {
val eidComponents1: EntityId = EntityId("test", 2)
val eidComponents2: EntityId = EntityId("test", 3)
assert(eidComponents2 > eidComponents1)
assert(eidComponents1 < eidComponents2)
}
test("Test comparison between two EntityIds with different lengths.") {
val eidComponents1: EntityId = EntityId("test", 2)
val eidComponents2: EntityId = EntityId("test", 2, 1)
assert(eidComponents2 > eidComponents1)
assert(eidComponents1 < eidComponents2)
}
test("Test comparison between two EntityIds with different formats fails.") {
val eidComponents1: EntityId = EntityId("test", 2)
val eidComponents2: EntityId = EntityId("test", 2L)
val exception = intercept[EntityIdFormatMismatchException] {
eidComponents1 < eidComponents2
}
// Exception message should be something like:
// Mismatched Formats: Components: [java.lang.String,java.lang.Integer] and Components:
// [java.lang.String,java.lang.Long] do not match.
assert(exception.getMessage.contains("String"))
assert(exception.getMessage.contains("Integer"))
assert(exception.getMessage.contains("Long"))
}
// ------- "integration tests" for joins. -------
/** Simple table layout to use for tests. The row keys are hashed. */
val simpleLayout: KijiTableLayout =
TestingResourceUtil.layout(KijiTableLayouts.SIMPLE_TWO_COLUMNS)
/** Table layout using Avro schemas to use for tests. The row keys are formatted. */
val avroLayout: KijiTableLayout = TestingResourceUtil.layout("layout/avro-types.json")
test("Runs a job that joins two pipes, on user-created EntityIds.") {
// Create main input.
val mainInput: List[(String, String)] = List(
("0", "0row"),
("1", "1row"),
("2", "2row"))
// Create input from side data.
val sideInput: List[(String, String)] = List(("0", "0row"), ("1", "2row"))
// Validate output.
def validateTest(outputBuffer: Buffer[Tuple1[String]]): Unit = {
assert(outputBuffer.size === 2)
}
// Create the JobTest for this test.
val jobTest = JobTest(new JoinUserEntityIdsJob(_))
.arg("input", "mainInputFile")
.arg("side-input", "sideInputFile")
.arg("output", "outputFile")
.source(TextLine("mainInputFile"), mainInput)
.source(TextLine("sideInputFile"), sideInput)
.sink(Tsv("outputFile"))(validateTest)
// Run the test in local mode.
jobTest.run.finish
// Run the test in hadoop mode.
jobTest.runHadoop.finish
}
test("Runs a job that joins two pipes, on user-created and from a table (formatted) EntityIds.") {
// URI of the Kiji table to use.
val uri: String = ResourceUtil.doAndRelease(makeTestKijiTable(avroLayout)) { table: KijiTable =>
table.getURI.toString
}
// Create input from Kiji table.
val joinKijiInput: List[(EntityId, Seq[FlowCell[String]])] = List(
(EntityId("0row"), mapSlice("animals", ("0column", 0L, "0 dogs"))),
(EntityId("1row"), mapSlice("animals", ("0column", 0L, "1 cat"))),
(EntityId("2row"), mapSlice("animals", ("0column", 0L, "2 fish"))))
// Create input from side data.
val sideInput: List[(String, String)] = List(("0", "0row"), ("1", "2row"))
// Validate output.
def validateTest(outputBuffer: Buffer[Tuple1[String]]): Unit = {
assert(outputBuffer.size === 2)
}
// Create the JobTest for this test.
val jobTest = JobTest(new JoinUserAndFormattedFromTableJob(_))
.arg("input", uri)
.arg("side-input", "sideInputFile")
.arg("output", "outputFile")
.source(KijiInput.builder
.withTableURI(uri)
.withColumns("animals" -> 'animals)
.build, joinKijiInput)
.source(TextLine("sideInputFile"), sideInput)
.sink(Tsv("outputFile"))(validateTest)
// Run the test in local mode.
jobTest.run.finish
// Run the test in hadoop mode.
jobTest.runHadoop.finish
}
test("Runs a job that joins two pipes, on EntityIds from a table (hashed), in local mode.") {
// URI of the hashed Kiji table to use.
val uri: String =
ResourceUtil.doAndRelease(makeTestKijiTable(simpleLayout)) { table: KijiTable =>
table.getURI.toString
}
// Create input from hashed Kiji table.
val joinInput1: List[(EntityId, Seq[FlowCell[String]])] = List(
(EntityId("0row"), slice("family:column1", (0L, "0 dogs"))),
(EntityId("1row"), slice("family:column1", (0L, "1 cat"))),
(EntityId("2row"), slice("family:column1", (0L, "2 fish"))))
// Create input from hashed Kiji table.
val joinInput2: List[(EntityId, Seq[FlowCell[String]])] = List(
(EntityId("0row"), slice("family:column2", (0L, "0 boop"))),
(EntityId("2row"), slice("family:column2", (1L, "1 cat")))
)
// Validate output.
def validateTest(outputBuffer: Buffer[Tuple1[String]]): Unit = {
assert(outputBuffer.size === 2)
}
// Create the JobTest for this test.
val jobTest = JobTest(new JoinHashedEntityIdsJob(_))
.arg("input1", uri)
.arg("input2", uri)
.arg("output", "outputFile")
.source(KijiInput.builder
.withTableURI(uri)
.withColumns("family:column1" -> 'animals)
.build, joinInput1)
.source(KijiInput.builder
.withTableURI(uri)
.withColumns("family:column2" -> 'slice)
.build, joinInput2)
.sink(Tsv("outputFile"))(validateTest)
// Run the test in local mode.
jobTest.run.finish
}
test("Runs a job that joins two pipes, on EntityIds from a table (hashed), in hadoop mode.") {
// URI of the hashed Kiji table to use.
val uri: String =
ResourceUtil.doAndRelease(makeTestKijiTable(simpleLayout)) { table: KijiTable =>
table.getURI.toString
}
// Create input from hashed Kiji table.
val joinInput1: List[(EntityId, Seq[FlowCell[String]])] = List(
(EntityId("0row"), slice("family:column1", (0L, "0 dogs"))),
(EntityId("1row"), slice("family:column1", (0L, "1 cat"))),
(EntityId("2row"), slice("family:column1", (0L, "2 fish"))))
// Create input from hashed Kiji table.
val joinInput2: List[(EntityId, Seq[FlowCell[String]])] = List(
(EntityId("0row"), slice("family:column2", (0L, "0 boop"))),
(EntityId("2row"), slice("family:column2", (0L, "2 beep"))))
// Validate output.
def validateTest(outputBuffer: Buffer[Tuple1[String]]): Unit = {
assert(outputBuffer.size === 2)
}
// Create the JobTest for this test.
val jobTest = JobTest(new JoinHashedEntityIdsJob(_))
.arg("input1", uri)
.arg("input2", uri)
.arg("output", "outputFile")
.source(KijiInput.builder
.withTableURI(uri)
.withColumns("family:column1" -> 'animals)
.build, joinInput1)
.source(KijiInput.builder
.withTableURI(uri)
.withColumns("family:column2" -> 'slice)
.build, joinInput2)
.sink(Tsv("outputFile"))(validateTest)
// Run the test in hadoop mode.
jobTest.runHadoop.finish
}
test("A job that joins two pipes, on EntityIds from a table (formatted) in local mode.") {
// URI of a formatted Kiji table to use.
val uri: String = ResourceUtil.doAndRelease(makeTestKijiTable(avroLayout)) { table: KijiTable =>
table.getURI.toString
}
// Create input from formatted Kiji table.
val joinInput1: List[(EntityId, Seq[FlowCell[Int]])] = List(
(EntityId("0row"), mapSlice("searches", ("0column", 0L, 0))),
(EntityId("2row"), mapSlice("searches", ("0column", 0L, 2))))
// Create input from formatted Kiji table.
val joinInput2: List[(EntityId, Seq[FlowCell[String]])] = List(
(EntityId("0row"), mapSlice("animals", ("0column", 0L, "0 dogs"))),
(EntityId("1row"), mapSlice("animals", ("0column", 0L, "1 cat"))),
(EntityId("2row"), mapSlice("animals", ("0column", 0L, "2 fish"))))
// Validate output.
def validateTest(outputBuffer: Buffer[Tuple1[String]]): Unit = {
assert(outputBuffer.size === 2)
}
// Create the JobTest for this test.
val jobTest = JobTest(new JoinFormattedEntityIdsJob(_))
.arg("input1", uri)
.arg("input2", uri)
.arg("output", "outputFile")
.source(KijiInput.builder
.withTableURI(uri)
.withColumns("searches" -> 'searches)
.build, joinInput1)
.source(KijiInput.builder
.withTableURI(uri)
.withColumns("animals" -> 'animals)
.build, joinInput2)
.sink(Tsv("outputFile"))(validateTest)
// Run the test in local mode.
jobTest.run.finish
}
test("A job that joins two pipes, on EntityIds from a table (formatted) in hadoop mode.") {
// URI of a formatted Kiji table to use.
val uri: String = ResourceUtil.doAndRelease(makeTestKijiTable(avroLayout)) { table: KijiTable =>
table.getURI.toString
}
// Create input from formatted Kiji table.
val joinInput1: List[(EntityId, Seq[FlowCell[Int]])] = List(
(EntityId("0row"), mapSlice("searches", ("0column", 0L, 0))),
(EntityId("2row"), mapSlice("searches", ("0column", 0L, 2))))
// Create input from formatted Kiji table.
val joinInput2: List[(EntityId, Seq[FlowCell[String]])] = List(
(EntityId("0row"), mapSlice("animals", ("0column", 0L, "0 dogs"))),
(EntityId("1row"), mapSlice("animals", ("0column", 0L, "1 cat"))),
(EntityId("2row"), mapSlice("animals", ("0column", 0L, "2 fish"))))
// Validate output.
def validateTest(outputBuffer: Buffer[Tuple1[String]]): Unit = {
assert(outputBuffer.size === 2)
}
// Create the JobTest for this test.
val jobTest = JobTest(new JoinFormattedEntityIdsJob(_))
.arg("input1", uri)
.arg("input2", uri)
.arg("output", "outputFile")
.source(KijiInput.builder
.withTableURI(uri)
.withColumns("searches" -> 'searches)
.build, joinInput1)
.source(KijiInput.builder
.withTableURI(uri)
.withColumns("animals" -> 'animals)
.build, joinInput2)
.sink(Tsv("outputFile"))(validateTest)
// Run the test in hadoop mode.
jobTest.runHadoop.finish
}
}
/** Companion object for EntityIdSuite. Contains test jobs. */
object EntityIdSuite {
/**
* A job that tests joining two pipes, on user-constructed EntityIds.
*
   * @param args to the job. Three arguments are expected: "input" and "side-input", which
   *     specify paths to text files of entity id strings, and "output", which specifies the
   *     path to the output text file.
*/
class JoinUserEntityIdsJob(args: Args) extends KijiJob(args) {
val sidePipe = TextLine(args("side-input"))
.read
.map('line -> 'entityId) { line: String => EntityId(line) }
.project('entityId)
TextLine(args("input"))
.map('line -> 'entityId) { line: String => EntityId(line) }
.joinWithSmaller('entityId -> 'entityId, sidePipe)
.write(Tsv(args("output")))
}
/**
* A job that tests joining two pipes, one with a user-constructed EntityId and one with
* a formatted EntityId from a Kiji table.
*
   * @param args to the job. Three arguments are expected: "input", which specifies the URI to a
   *     Kiji table, "side-input", which specifies the path to a text file, and "output", which
   *     specifies the path to the output text file.
*/
class JoinUserAndFormattedFromTableJob(args: Args) extends KijiJob(args) {
val sidePipe = TextLine(args("side-input"))
.read
.map('line -> 'entityId) { line: String => EntityId(line) }
.project('entityId)
KijiInput.builder
.withTableURI(args("input"))
.withColumns("animals" -> 'animals)
.build
.map('animals -> 'terms) { animals: Seq[FlowCell[CharSequence]] => animals.toString }
.joinWithSmaller('entityId -> 'entityId, sidePipe)
.write(Tsv(args("output")))
}
/**
* A job that tests joining two pipes, one with a user-constructed EntityId and one with
* a hashed EntityId from a Kiji table.
*
   * @param args to the job. Three arguments are expected: "input", which specifies the URI to a
   *     Kiji table, "side-input", which specifies the path to a text file, and "output", which
   *     specifies the path to the output text file.
*/
class JoinUserAndHashedFromTableJob(args: Args) extends KijiJob(args) {
val sidePipe = TextLine(args("side-input"))
.read
.map('line -> 'entityId) { line: String => EntityId(line) }
.project('entityId)
KijiInput.builder
.withTableURI(args("input"))
.withColumns("family:column1" -> 'slice)
.build
.map('slice -> 'terms) { slice: Seq[FlowCell[CharSequence]] => slice.head.datum.toString }
.joinWithSmaller('entityId -> 'entityId, sidePipe)
.write(Tsv(args("output")))
}
/**
* A job that tests joining two pipes, on EntityIds from a table with row key format HASHED.
*
   * @param args to the job. Three arguments are expected: "input1" and "input2", which specify
   *     URIs of Kiji tables, and "output", which specifies the path to the output text file.
*/
class JoinHashedEntityIdsJob(args: Args) extends KijiJob(args) {
val pipe1 = KijiInput.builder
.withTableURI(args("input1"))
.withColumns("family:column1" -> 'animals)
.build
        .map('animals -> 'animal) {
          slice: Seq[FlowCell[CharSequence]] => slice.head.datum.toString
        }
KijiInput.builder
.withTableURI(args("input2"))
.withColumns("family:column2" -> 'slice)
.build
        .map('slice -> 'terms) { slice: Seq[FlowCell[CharSequence]] => slice.head.datum.toString }
.joinWithSmaller('entityId -> 'entityId, pipe1)
.write(Tsv(args("output")))
}
/**
* A job that tests joining two pipes, on EntityIds from a table with row key format formatted.
*
   * @param args to the job. Three arguments are expected: "input1" and "input2", which specify
   *     URIs of Kiji tables, and "output", which specifies the path to the output text file.
*/
class JoinFormattedEntityIdsJob(args: Args) extends KijiJob(args) {
val pipe1 = KijiInput.builder
.withTableURI(args("input1"))
.withColumns("searches" -> 'searches)
.build
        .map('searches -> 'term) { slice: Seq[FlowCell[Int]] => slice.head.datum }
KijiInput.builder
.withTableURI(args("input2"))
.withColumns("animals" -> 'animals)
.build
.map('animals -> 'animal) {
slice: Seq[FlowCell[CharSequence]] => slice.head.datum.toString
}
.joinWithSmaller('entityId -> 'entityId, pipe1)
.write(Tsv(args("output")))
}
}
|
kijiproject/kiji-express
|
kiji-express/src/test/scala/org/kiji/express/flow/EntityIdSuite.scala
|
Scala
|
apache-2.0
| 19,093 |
trait D {
trait Manifest {
class Entry
}
val M: Manifest
def m: M.Entry = ???
}
object D1 extends D {
object M extends Manifest
}
object D2 extends D {
val M: Manifest = ???
}
object Hello {
def main(args: Array[String]) {
// 2.10.3 - ok
// 2.11.0-M7 - type mismatch; found : Seq[DB1.MANIFEST.Entry]
// required: Seq[DB1.MANIFEST.Entry]
val t1: D1.M.Entry = D1.m
// 2.10.3 - ok
// 2.11.0-M7 - ok
val t2: D2.M.Entry = D2.m
}
}
|
loskutov/intellij-scala
|
testdata/scalacTests/pos/t8054.scala
|
Scala
|
apache-2.0
| 481 |
package com.outr.arango.collection
import com.outr.arango.core.ArangoDBCollection
import com.outr.arango.{CollectionType, Document, DocumentModel, Graph}
import fabric.Value
class DocumentCollection[D <: Document[D]](protected[arango] val graph: Graph,
protected[arango] val arangoCollection: ArangoDBCollection,
val model: DocumentModel[D],
val `type`: CollectionType) extends WritableCollection[D] {
override def dbName: String = graph.databaseName
override def name: String = arangoCollection.name
override lazy val query: DocumentCollectionQuery[D] = new DocumentCollectionQuery[D](this)
override protected def beforeStorage(value: Value): Value = model.allMutations.foldLeft(value)((v, m) => m.store(v))
override protected def afterRetrieval(value: Value): Value = model.allMutations.foldLeft(value)((v, m) => m.retrieve(v))
}
|
outr/scarango
|
driver/src/main/scala/com/outr/arango/collection/DocumentCollection.scala
|
Scala
|
mit
| 974 |
package org.atnos.eff.syntax
import org.atnos.eff._
import org.atnos.eff.concurrent.Scheduler
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
trait future {
implicit final def toFutureOps[R, A](e: Eff[R, A]): FutureOps[R, A] = new FutureOps[R, A](e)
}
object future extends future
final class FutureOps[R, A](private val e: Eff[R, A]) extends AnyVal {
def futureAttempt(implicit future: TimedFuture /= R): Eff[R, Throwable Either A] =
FutureInterpretation.futureAttempt(e)
def futureMemo(key: AnyRef, cache: Cache)(implicit future: TimedFuture /= R): Eff[R, A] =
FutureInterpretation.futureMemo(key, cache, e)
def runAsync(implicit scheduler: Scheduler, exc: ExecutionContext, m: Member.Aux[TimedFuture, R, NoFx]): Future[A] =
FutureInterpretation.runAsync(e)
def runAsyncOn(executorServices: ExecutorServices)(implicit m: Member.Aux[TimedFuture, R, NoFx]): Future[A] =
FutureInterpretation.runAsyncOn(executorServices)(e)
def runSequentialOn(executorServices: ExecutorServices)(implicit m: Member.Aux[TimedFuture, R, NoFx]): Future[A] =
FutureInterpretation.runSequentialOn(executorServices)(e)
def runSequential(implicit scheduler: Scheduler, exc: ExecutionContext, m: Member.Aux[TimedFuture, R, NoFx]): Future[A] =
FutureInterpretation.runSequential(e)
def retryUntil(condition: A => Boolean, durations: List[FiniteDuration])(implicit future: TimedFuture |= R): Eff[R, A] =
FutureCreation.retryUntil(e, condition, durations)
}
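// Illustrative usage (editor's sketch; the effect stack `S` and the value `program` are assumed,
// not defined in this file): with these ops in scope via `import org.atnos.eff.syntax.future._`,
// a TimedFuture program can be attempted and then run, e.g.
//   val attempted: Eff[S, Throwable Either A] = program.futureAttempt
//   val ran: Future[A] = program.runAsync // requires an implicit Scheduler and ExecutionContext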
|
etorreborre/eff-cats
|
shared/src/main/scala/org/atnos/eff/syntax/future.scala
|
Scala
|
mit
| 1,541 |
package com.github.cuzfrog.webdriver
import org.openqa.selenium.{WebDriver, WebElement}
private[webdriver] sealed trait Container {
val driver: Driver
}
private[webdriver] case class DriverContainer(driver: Driver, seleniumDriver: WebDriver) extends Container {
val elements = scala.collection.mutable.ArrayBuffer.empty[Long]
}
private[webdriver] case class ElementContainer(element: Element, seleniumElement: WebElement) extends Container {
val driver = element.driver
}
private[webdriver] case class WindowContainer(window: Window, seleniumDriver: WebDriver) extends Container {
val driver = window.driver
}
|
cuzfrog/WebDriverServ
|
server/src/main/scala/com/github/cuzfrog/webdriver/Container.scala
|
Scala
|
apache-2.0
| 619 |
package org.machine.engine.graph.commands
import reflect.runtime.universe._
// import scala.collection._
// import scala.collection.generic._
import scala.collection.mutable.{ArrayBuffer, ListBuffer, Map}
import org.machine.engine.exceptions._
import org.machine.engine.graph.nodes.PropertyDefinitions
object GraphCommandOptions{
def apply():GraphCommandOptions = new GraphCommandOptions()
}
//http://daily-scala.blogspot.com/2010/04/creating-custom-traversable.html
class GraphCommandOptions{
private var graphValue = Map.empty[String, AnyVal]
private var graphObjects = Map.empty[String, AnyRef]
def fieldValues = this.graphValue
def fieldObjects = this.graphObjects
def optionValues = this.graphValue
def optionObjects = this.graphObjects
def addField(fieldName: String, fieldValue: Any):GraphCommandOptions = {
fieldValue match {
case x: Boolean => graphValue.+=(fieldName -> fieldValue.asInstanceOf[Boolean])
case x: Byte => graphValue.+=(fieldName -> fieldValue.asInstanceOf[Byte])
case x: Short => graphValue.+=(fieldName -> fieldValue.asInstanceOf[Short])
case x: Int => graphValue.+=(fieldName -> fieldValue.asInstanceOf[Int])
case x: Long => graphValue.+=(fieldName -> fieldValue.asInstanceOf[Long])
case x: Float => graphValue.+=(fieldName -> fieldValue.asInstanceOf[Float])
case x: Double => graphValue.+=(fieldName -> fieldValue.asInstanceOf[Double])
case x: Char => graphValue.+=(fieldName -> fieldValue.asInstanceOf[Char])
case x: String => graphObjects.+=(fieldName -> fieldValue.asInstanceOf[String])
case x: PropertyDefinitions => graphObjects.+=(fieldName -> fieldValue.asInstanceOf[PropertyDefinitions])
case _ => throw new InternalErrorException("GraphCommandOptions.addField: Unsupported type.")
}
this
}
def addOption(optionName:String, optionValue: Any):GraphCommandOptions = {
addField(optionName, optionValue)
}
def field[T: TypeTag](name: String):T = option[T](name)
def option[T: TypeTag](name: String):T = {
val map = name match {
case v if typeOf[T] <:< typeOf[AnyVal] => graphValue
case r if typeOf[T] <:< typeOf[AnyRef] => graphObjects
case r if typeOf[T] <:< typeOf[Boolean] => graphValue
case x if typeOf[T] <:< typeOf[Byte] => graphValue
case x if typeOf[T] <:< typeOf[Short] => graphValue
case x if typeOf[T] <:< typeOf[Int] => graphValue
case x if typeOf[T] <:< typeOf[Long] => graphValue
case x if typeOf[T] <:< typeOf[Float] => graphValue
case x if typeOf[T] <:< typeOf[Double] => graphValue
case x if typeOf[T] <:< typeOf[Char] => graphValue
case _ => throw new InternalErrorException("GraphCommandOptions.option: Unsupported type.")
}
return map.get(name).getOrElse(throw new InternalErrorException("Could not find: %s".format(name))).asInstanceOf[T]
}
def contains(name: String):Boolean = {
return graphValue.contains(name) || graphObjects.contains(name)
}
def reset:Unit = {
graphValue = Map.empty[String, AnyVal]
graphObjects = Map.empty[String, AnyRef]
}
//Create a Java Map
def toJavaMap:java.util.HashMap[java.lang.String, Object] = {
val map = new java.util.HashMap[java.lang.String, Object]()
loadValues(map)
graphObjects.foreach(t => map.put(t._1, t._2))
return map
}
private def loadValues(javaMap: java.util.HashMap[java.lang.String, Object]):Unit = {
graphValue.foreach(option => {
val fieldName = option._1
val fieldValue = option._2
fieldValue match {
case x: Boolean => javaMap.put(fieldName, fieldValue.asInstanceOf[java.lang.Boolean])
case x: Byte => javaMap.put(fieldName, fieldValue.asInstanceOf[java.lang.Byte])
case x: Short => javaMap.put(fieldName, fieldValue.asInstanceOf[java.lang.Short])
case x: Int => javaMap.put(fieldName, fieldValue.asInstanceOf[java.lang.Integer])
case x: Long => javaMap.put(fieldName, fieldValue.asInstanceOf[java.lang.Long])
case x: Float => javaMap.put(fieldName, fieldValue.asInstanceOf[java.lang.Float])
case x: Double => javaMap.put(fieldName, fieldValue.asInstanceOf[java.lang.Double])
case x: Char => javaMap.put(fieldName, fieldValue.asInstanceOf[java.lang.Character])
case _ => throw new InternalErrorException("GraphCommandOptions: Unhandled type.")
}
})
}
def toMap:scala.collection.immutable.Map[String, Any] = {
val map = Map.empty[String, Any]
graphValue.foreach(t => map.put(t._1, t._2))
graphObjects.foreach(t => map.put(t._1, t._2))
return map.toMap
}
def foreach(f: ((String, Any)) => Unit): Unit = {
graphValue.foreach(f)
graphObjects.foreach(f)
}
def keys: List[String] = {
val items = ArrayBuffer.empty[String]
graphValue.keys.foreach(key => items += key)
graphObjects.keys.foreach(key => items += key)
return items.toList
}
override def toString:String = {
import scala.collection.mutable.StringBuilder
val sb = StringBuilder.newBuilder
    this.foreach { case (k, v) => sb ++= s"$k -> $v\n" }
return sb.toString
}
}
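// Editor's illustrative sketch (not part of the original engine API): shows how fields are
// dispatched to the AnyVal/AnyRef maps above and read back with an explicit type parameter.
// The field names used here are made up for the example.
object GraphCommandOptionsUsageExample {
  def example(): Unit = {
    val options = GraphCommandOptions()
      .addField("name", "node-a") // String -> stored in the AnyRef map
      .addField("weight", 42)     // Int    -> stored in the AnyVal map
    val name: String = options.field[String]("name")
    val weight: Int = options.option[Int]("weight")
    require(options.contains("name") && name == "node-a" && weight == 42)
  }
}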
|
sholloway/graph-engine
|
src/main/scala/org/machine/engine/graph/commands/GraphCommandOptions.scala
|
Scala
|
mit
| 5,152 |
package app.board.utils
import com.typesafe.config.ConfigFactory
trait Config {
private val config = ConfigFactory.load()
private val httpConfig = config.getConfig("http")
private val databaseConfig = config.getConfig("database")
val httpHost = httpConfig.getString("interface")
val httpPort = httpConfig.getInt("port")
val jdbcUrl = databaseConfig.getString("url")
val dbUser = databaseConfig.getString("user")
val dbPassword = databaseConfig.getString("password")
}
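// The keys read above imply an application.conf shaped roughly like this (values are
// illustrative, not taken from the project):
//
//   http {
//     interface = "0.0.0.0"
//     port = 9000
//   }
//   database {
//     url = "jdbc:postgresql://localhost:5432/board"
//     user = "postgres"
//     password = "secret"
//   }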
|
hwshim0810/scalable-board
|
src/main/scala/app/board/utils/Config.scala
|
Scala
|
gpl-3.0
| 488 |
/*
* Copyright (C) 2011 Mikhail Vorozhtsov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.mvv.sawoko
class JoinThreadDeadlockException extends RuntimeException {
override def getMessage = "Thread tried to join itself"
}
trait ForkAsyncExecutor[T[+_]] extends AsyncExecutor {
def registerFork[A](
pid: Pid, body: Async[this.type, A],
callback: SimpleCallback[T[A]]): Option[SimpleResult[T[A]]]
def registerCurrentTid(pid: Pid): SimpleResult[T[Any]]
}
final class ForkAsyncOp[-X <: AsyncExecutor, T[+_], A](
body: Async[X, A])
extends AsyncOp[ForkAsyncExecutor[T] with X, T[A]] {
def register(ep: EP, callback: Callback) =
ep.executor.registerFork(ep.pid, body, callback)
}
final class CurrentTidOp[T[+_]]
extends AsyncOp[ForkAsyncExecutor[T], T[Any]] {
def register(ep: EP, callback: Callback) =
Some(ep.executor.registerCurrentTid(ep.pid))
}
trait JoinThreadWaitCap[T[+_]] extends WaitCap
final case class JoinThread[T[+_], A](tid: T[A]) extends WaitOp {
type Cap = JoinThreadWaitCap[T]
type Result = AsyncResult[Nothing, A]
}
trait ForkOps[T[+_]] {
import AsyncOps._
import WaitOps._
@inline
def fork[X <: AsyncExecutor, A](body: => Async[X, A]) =
exec(new ForkAsyncOp[X, T, A](guard(body)))
@inline
def currentThread =
exec(new CurrentTidOp[T])
@inline
def joinThread[T[+_], A](tid: T[A]) =
waitOne(JoinThread(tid))
@inline
def joinThread[T[+_], A](tid: T[A], timeout: Timeout) =
waitOne(JoinThread(tid), timeout)
}
|
mvv/sawoko
|
src/Fork.scala
|
Scala
|
apache-2.0
| 2,074 |
/*
* Copyright 2015 Dmitriy Yefremov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scala.net.yefremov.sleipnir.test.map
import com.linkedin.data.ByteString
import net.yefremov.sleipnir.test.CustomPoint
import scala.net.yefremov.sleipnir.test.{SimpleEnum, SleipnirSpec}
class MapTest extends SleipnirSpec {
"Map types" should {
"support custom names through typerefs" in {
val map = Map("key" -> SimpleRecordValue)
val wrapper = CustomNamedMap(map)
wrapper.map must beEqualTo(map)
val wrapperFromJson = checkSerialization(wrapper, """{"key":{"field":"string value"}}""")
wrapperFromJson.map must beEqualTo(map)
}
"support primitive values" in {
val map = Map("key" -> StringValue)
val record = MapPrimitiveRecord(map)
record.mapField must beEqualTo(map)
val recordFromJson = checkSerialization(record, """{"mapField":{"key":"string value"}}""")
recordFromJson.mapField must beEqualTo(map)
}
"support complex values" in {
val map = Map("key" -> SimpleRecordValue)
val record = MapComplexRecord(map)
record.mapField must beEqualTo(map)
val recordFromJson = checkSerialization(record, """{"mapField":{"key":{"field":"string value"}}}""")
recordFromJson.mapField must beEqualTo(map)
}
"support enum values" in {
val map = Map("key" -> SimpleEnum.Foo)
val record = MapEnumRecord(map)
record.mapField must beEqualTo(map)
val recordFromJson = checkSerialization(record, """{"mapField":{"key":"Foo"}}""")
recordFromJson.mapField must beEqualTo(map)
}
"support unknown enum values" in {
val mapEnumRecordSchema = MapEnumRecord(Map()).schema()
val recordFromJson = fromJson[MapEnumRecord]("""{"mapField":{"key":"Baz"}}""", mapEnumRecordSchema)
recordFromJson.mapField must beEqualTo(Map("key" -> SimpleEnum.$Unknown))
}
"support bytes values" in {
val map = Map("key" -> ByteString.copy(Array[Byte](100)))
val record = MapBytesRecord(map)
record.mapField must beEqualTo(map)
val recordFromJson = checkSerialization(record, """{"mapField":{"key":"d"}}""")
recordFromJson.mapField must beEqualTo(map)
}
"support array values" in {
val map = Map("key" -> Seq(SimpleRecordValue))
val wrapper = MapOfArrays(map)
wrapper.map must beEqualTo(map)
val wrapperFromJson = checkSerialization(wrapper, """{"key":[{"field":"string value"}]}""")
wrapperFromJson.map must beEqualTo(map)
}
"support map values" in {
val map = Map("key" -> Map("key" -> SimpleRecordValue))
val wrapper = MapOfMaps(map)
wrapper.map must beEqualTo(map)
val wrapperFromJson = checkSerialization(wrapper, """{"key":{"key":{"field":"string value"}}}""")
wrapperFromJson.map must beEqualTo(map)
}
"support custom java bindings" in {
val customPointMap = Map("key" -> new CustomPoint(1, 2))
val wrapper = MapCustomPoint(customPointMap)
wrapper.map must beEqualTo(customPointMap)
val wrapperFromJson = checkSerialization(wrapper, """{"key":"1,2"}""")
wrapperFromJson.map must beEqualTo(customPointMap)
}
}
}
|
dmitriy-yefremov/sleipnir
|
sample-data/src/test/scala/scala/net/yefremov/sleipnir/test/map/MapTest.scala
|
Scala
|
apache-2.0
| 3,757 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogdebugger.ui.fieldvisualizations.scalar
import cogx.runtime.debugger.ProbedField
import libcog._
import cogdebugger.ui.fieldvisualizations.{UnsupportedDimensionException, EventDrivenViewer}
import scala.swing.Panel
import org.jfree.chart.plot.PlotOrientation
import org.jfree.data.xy.{XYSeries, DefaultTableXYDataset}
/*
* Created with IntelliJ IDEA.
* User: gonztobi
* Date: 10/8/13
* Time: 3:14 PM
*/
/** A visualization for ScalarFields of two dimensions or less that presents
* the field as a scatter plot of column index vs value. For 2D fields, rows
* are differentiated by using a different set of colors/symbols for each row.
*
* An example of a 2x10 2D scalar field containing values in [0, 1] viewed as
* a scatter plot would look something like this:
*
* {{{
* 1.0 | x
* | o
* | o x o
* | x o
* value | x x o x x
* | o x o o
* | x o o x
* |
* 0 +------------------------------
* 0 1 2 3 4 5 6 7 8 9
* column
* +-----------------------------+
* | x = row 0 o = row 1 |
* +-----------------------------+
* }}}
*
* @param fieldType A FieldType describing the shape of the field to be
* visualized.
*/
class ScatterPlot(fieldType: FieldType)
extends Panel
with EventDrivenViewer {
def this(target: ProbedField) = this(target.fieldType)
private def fieldShape = fieldType.fieldShape
val (rows, cols) = fieldShape.dimensions match {
case 0 => (1, 1)
case 1 => (1, fieldShape(0))
case 2 => (fieldShape(0), fieldShape(1))
case x => throw new UnsupportedDimensionException(x)
}
private val dataset = new DefaultTableXYDataset()
for (row <- 0 until rows) {
val series = new XYSeries(s"Row $row", false, false)
dataset.addSeries(series)
}
val chart = org.jfree.chart.ChartFactory.createScatterPlot(
null, // chart title
"column", // x-axis label
"value", // y-acis label
dataset,
PlotOrientation.VERTICAL,
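    // remaining flags: legend (only shown for multi-row fields), tooltips, urls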
rows > 1, false, false
)
peer.add(new org.jfree.chart.ChartPanel(chart))
/** Updates the visualization based on the contents of `data`. */
def update(src: AnyRef, data: AbstractFieldMemory, simTime: Long): Unit = {
data match {
case sfr: ScalarFieldReader => update(sfr)
case x => throw new Exception("Expecting a ScalarFieldReader; got: "+x)
}
}
def update(data: ScalarFieldReader) {
data.fieldShape.dimensions match {
case 0 =>
val f = data.read()
dataset.getSeries(0).addOrUpdate(0, f)
case 1 =>
val series = dataset.getSeries(0)
series.setNotify(false)
for (c <- 0 until cols) series.addOrUpdate(c, data.read(c))
series.setNotify(true)
series.fireSeriesChanged()
case 2 =>
for (r <- 0 until rows) {
val series = dataset.getSeries(r)
series.setNotify(false)
for (c <- 0 until cols) series.addOrUpdate(c, data.read(r, c))
series.setNotify(true)
series.fireSeriesChanged()
}
// Is there a fireDatasetChanged method somewhere? We manage to avoid
// refreshing/redrawing the scatter plot after each individual element
// update by making use of setNotify and fireSeriesChanged, but we'd
// really like to only fire an event after updating the entire dataset.
case x => throw new UnsupportedDimensionException(x)
}
}
}
|
hpe-cct/cct-core
|
src/main/scala/cogdebugger/ui/fieldvisualizations/scalar/ScatterPlot.scala
|
Scala
|
apache-2.0
| 4,313 |
package io.iohk.ethereum.jsonrpc
import akka.actor.ActorSystem
import akka.testkit.{TestKit, TestProbe}
import io.iohk.ethereum._
import io.iohk.ethereum.blockchain.sync.regular.RegularSync.NewCheckpoint
import io.iohk.ethereum.consensus.Consensus
import io.iohk.ethereum.consensus.ethash.EthashConfig
import io.iohk.ethereum.consensus.ethash.MinerResponses.MiningOrdered
import io.iohk.ethereum.consensus.ethash.MockedMinerProtocol.MineBlocks
import io.iohk.ethereum.crypto.ECDSASignature
import io.iohk.ethereum.domain._
import io.iohk.ethereum.jsonrpc.QAService._
import io.iohk.ethereum.nodebuilder.BlockchainConfigBuilder
import org.scalamock.scalatest.AsyncMockFactory
import scala.concurrent.Future
class QAServiceSpec
extends TestKit(ActorSystem("QAServiceSpec_ActorSystem"))
with FlatSpecBase
with WithActorSystemShutDown
with SpecFixtures
with ByteGenerators
with AsyncMockFactory {
"QAService" should "send msg to miner and return miner's response" in testCaseM { fixture =>
import fixture._
(testConsensus.sendMiner _)
.expects(mineBlocksMsg)
.returning(Future.successful(MiningOrdered))
.atLeastOnce()
qaService.mineBlocks(mineBlocksReq).map(_ shouldBe Right(MineBlocksResponse(MiningOrdered)))
}
it should "send msg to miner and return InternalError in case of problems" in testCaseM { fixture =>
import fixture._
(testConsensus.sendMiner _)
.expects(mineBlocksMsg)
.returning(Future.failed(new ClassCastException("error")))
.atLeastOnce()
qaService.mineBlocks(mineBlocksReq).map(_ shouldBe Left(JsonRpcError.InternalError))
}
it should "generate checkpoint for block with given blockHash and send it to sync" in customTestCaseM(
new Fixture with CheckpointsGenerationFixture
) { fixture =>
import fixture._
val result = qaService.generateCheckpoint(req)
result.map { r =>
syncController.expectMsg(NewCheckpoint(block.hash, signatures))
r shouldBe Right(GenerateCheckpointResponse(checkpoint))
}
}
it should "generate checkpoint for best block when no block hash given and send it to sync" in customTestCaseM(
new Fixture with CheckpointsGenerationFixture
) { fixture =>
import fixture._
val reqWithoutBlockHash = req.copy(blockHash = None)
(blockchain.getBestBlock _)
.expects()
.returning(block)
.once()
val result: ServiceResponse[GenerateCheckpointResponse] =
qaService.generateCheckpoint(reqWithoutBlockHash)
result.map { r =>
syncController.expectMsg(NewCheckpoint(block.hash, signatures))
r shouldBe Right(GenerateCheckpointResponse(checkpoint))
}
}
it should "return federation public keys when requesting federation members info" in testCaseM { fixture =>
import fixture._
val result: ServiceResponse[GetFederationMembersInfoResponse] =
qaService.getFederationMembersInfo(GetFederationMembersInfoRequest())
result.map(_ shouldBe Right(GetFederationMembersInfoResponse(blockchainConfig.checkpointPubKeys.toList)))
}
class Fixture extends BlockchainConfigBuilder {
protected trait TestConsensus extends Consensus {
override type Config = EthashConfig
}
lazy val testConsensus: TestConsensus = mock[TestConsensus]
lazy val blockchain = mock[BlockchainImpl]
lazy val syncController = TestProbe()
lazy val qaService = new QAService(
testConsensus,
blockchain,
blockchainConfig,
syncController.ref
)
lazy val mineBlocksReq = MineBlocksRequest(1, true, None)
lazy val mineBlocksMsg =
MineBlocks(mineBlocksReq.numBlocks, mineBlocksReq.withTransactions, mineBlocksReq.parentBlock)
val fakeChainId: Byte = 42.toByte
}
trait CheckpointsGenerationFixture {
val block = Fixtures.Blocks.ValidBlock.block
val privateKeys = seqByteStringOfNItemsOfLengthMGen(3, 32).sample.get
val signatures = privateKeys.map(ECDSASignature.sign(block.hash, _))
val checkpoint = Checkpoint(signatures)
val req = GenerateCheckpointRequest(privateKeys, Some(block.hash))
}
def createFixture(): Fixture = new Fixture
}
|
input-output-hk/etc-client
|
src/test/scala/io/iohk/ethereum/jsonrpc/QAServiceSpec.scala
|
Scala
|
mit
| 4,155 |
package io.escalante.quickstarts.lift.jpa
import org.jboss.logging.Logger
/**
* Logging interface.
*
* @author Galder Zamarreño
* @since 1.0
*/
trait Log {
private lazy val log = Logger.getLogger(getClass.getPackage.getName)
def error(t: Throwable, msg: => String) {
log.errorf(t, msg)
}
}
|
escalante/escalante-quickstart
|
library-lift-jpa/src/main/scala/io/escalante/quickstarts/lift/jpa/Log.scala
|
Scala
|
apache-2.0
| 316 |
package com.delprks.productservicesprototype.client
import java.sql.Timestamp
import com.delprks.productservicesprototype.config.Config
import com.delprks.productservicesprototype.domain.{Offer, OfferEvent, Status}
import org.joda.time.DateTime
import slick.driver.PostgresDriver.api._
import scala.language.implicitConversions
import scala.concurrent.{ExecutionContext, Future}
import slick.jdbc.GetResult
case class OfferQueryResult(
id: Int,
userId: Int,
title: String,
description: String,
headline: Option[String],
condition: String,
availableFrom: Timestamp,
availableTo: Timestamp,
startingPrice: Int,
currency: String,
category: String,
status: String
)
case class CreateOfferProps(
userId: Int,
title: String,
description: String,
headline: Option[String],
condition: String,
availableFrom: Timestamp,
availableTo: Timestamp,
startingPrice: Int,
status: String,
currency: String,
category: String
)
class OfferClient(database: Database)
(implicit val executionContext: ExecutionContext) extends Config {
private val EmptyQuery: String = ""
implicit val offerQueryResult: AnyRef with GetResult[OfferQueryResult] = GetResult { result =>
OfferQueryResult(
id = result.nextInt(),
userId = result.nextInt(),
title = result.nextString(),
description = result.nextString(),
headline = result.nextStringOption(),
condition = result.nextString(),
availableFrom = result.nextTimestamp(),
availableTo = result.nextTimestamp(),
startingPrice = result.nextInt(),
currency = result.nextString(),
category = result.nextString(),
status = result.nextString()
)
}
def offersQuery(offset: Int, limit: Int, filter: OfferFilter): DBIO[Seq[OfferQueryResult]] = {
sql"""
SELECT
id,
user_id,
title,
description,
headline,
condition,
available_from,
available_to,
starting_price,
currency,
category,
status
FROM public.offer
#${useFilters(filter)}
OFFSET $offset
LIMIT $limit
""".as[OfferQueryResult]
}
def offerQuery(offerId: Int): DBIO[Option[OfferQueryResult]] = {
sql"""
SELECT
id,
user_id,
title,
description,
headline,
condition,
available_from,
available_to,
starting_price,
currency,
category,
status
FROM public.offer
WHERE id = $offerId
LIMIT 1
""".as[OfferQueryResult].headOption
}
def offersCountQuery(filter: OfferFilter): DBIO[Int] = {
sql"""
SELECT COUNT(*)
FROM public.offer
#${useFilters(filter)}
""".as[Int].head
}
def useFilters(filter: OfferFilter): String = {
val filters: List[String] = List(
useStatusFilter(filter),
useUserIdFilter(filter)
)
val filterQueries = filters.filter(_.nonEmpty)
if (filterQueries.nonEmpty) {
filterQueries mkString("WHERE ", " AND ", "")
} else {
EmptyQuery
}
}
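  // For example, a filter with status Some(Status.Available) and userId Some(7) yields:
  //   WHERE available_from < now() AND available_to > now() AND status = 'available' AND user_id = 7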
private def useStatusFilter(filter: OfferFilter): String = filter.status match {
case Some(status) if status == Status.Available => "available_from < now() AND available_to > now() AND status = 'available'"
case Some(status) if status == Status.Pending => "available_from > now() AND status = 'available'"
case Some(status) if status == Status.Expired => "available_to < now() AND status = 'available'"
case Some(status) if status == Status.Cancelled => "status = 'cancelled'"
case _ => "status = 'available'"
}
private def useUserIdFilter(filter: OfferFilter) = filter.userId match {
case Some(userId) => s"user_id = ${userId}"
case _ => EmptyQuery
}
def createOfferQuery(offer: CreateOfferProps): DBIO[Int] = {
sqlu"""
INSERT INTO public.offer (
user_id,
title,
description,
headline,
condition,
available_from,
available_to,
starting_price,
currency,
category,
status
) VALUES (
${offer.userId},
${offer.title},
${offer.description},
${offer.headline},
${offer.condition},
${offer.availableFrom},
${offer.availableTo},
${offer.startingPrice},
${offer.currency},
${offer.category},
${offer.status}
)
"""
}
def updateOfferStatusQuery(offerId: Int, offerStatus: String): DBIO[Int] = {
sqlu"""
UPDATE public.offer SET status = $offerStatus WHERE id = $offerId
"""
}
def offers(offset: Int, limit: Int, filter: OfferFilter = OfferFilter()): Future[Seq[Offer]] = {
for {
offersQueryResult <- database run offersQuery(offset, limit, filter)
offers = offersQueryResult map OfferMapper.mapOffer
} yield offers
}
def create(offer: OfferEvent): Future[Int] = {
implicit def str2Timestamp(date: String): Timestamp = {
val dateTime = new DateTime(date).getMillis
new Timestamp(dateTime)
}
val offerQuery = CreateOfferProps(
userId = offer.userId,
title = offer.title,
description = offer.description,
headline = offer.headline,
condition = offer.condition,
availableFrom = offer.availableFrom,
availableTo = offer.availableTo,
startingPrice = offer.startingPrice,
status = Status.Available.toString,
currency = offer.currency,
category = offer.category
)
database run createOfferQuery(offerQuery)
}
def updateStatus(offerId: Int, offerStatus: String): Future[Int] = {
database run updateOfferStatusQuery(offerId, offerStatus)
}
def offer(offerId: Int): Future[Option[Offer]] = {
for {
offerQueryResult <- database run offerQuery(offerId)
offer = offerQueryResult map OfferMapper.mapOffer
} yield offer
}
def offersCount(filter: OfferFilter = OfferFilter()): Future[Int] = database run offersCountQuery(filter)
protected def toSqlStringSet(items: Seq[String]): String = {
s"('${items.mkString("','")}')"
}
}
|
delprks/product-services-prototype
|
src/main/scala/com/delprks/productservicesprototype/client/OfferClient.scala
|
Scala
|
mit
| 6,221 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io._
import java.net.URI
import java.nio.charset.StandardCharsets
import scala.collection.mutable.{ArrayBuffer, Map}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, FSDataOutputStream, Path}
import org.apache.hadoop.fs.permission.FsPermission
import org.json4s.JsonAST.JValue
import org.json4s.jackson.JsonMethods._
import org.apache.spark.{SPARK_VERSION, SparkConf}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.executor.ExecutorMetrics
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.io.CompressionCodec
import org.apache.spark.util.{JsonProtocol, Utils}
/**
* A SparkListener that logs events to persistent storage.
*
* Event logging is specified by the following configurable parameters:
* spark.eventLog.enabled - Whether event logging is enabled.
* spark.eventLog.logBlockUpdates.enabled - Whether to log block updates
* spark.eventLog.compress - Whether to compress logged events
* spark.eventLog.overwrite - Whether to overwrite any existing files.
* spark.eventLog.dir - Path to the directory in which events are logged.
* spark.eventLog.buffer.kb - Buffer size to use when writing to output streams
* spark.eventLog.logStageExecutorMetrics.enabled - Whether to log stage executor metrics
*/
private[spark] class EventLoggingListener(
appId: String,
appAttemptId : Option[String],
logBaseDir: URI,
sparkConf: SparkConf,
hadoopConf: Configuration)
extends SparkListener with Logging {
import EventLoggingListener._
def this(appId: String, appAttemptId : Option[String], logBaseDir: URI, sparkConf: SparkConf) =
this(appId, appAttemptId, logBaseDir, sparkConf,
SparkHadoopUtil.get.newConfiguration(sparkConf))
private val shouldCompress = sparkConf.get(EVENT_LOG_COMPRESS)
private val shouldOverwrite = sparkConf.get(EVENT_LOG_OVERWRITE)
private val shouldLogBlockUpdates = sparkConf.get(EVENT_LOG_BLOCK_UPDATES)
private val shouldAllowECLogs = sparkConf.get(EVENT_LOG_ALLOW_EC)
private val shouldLogStageExecutorMetrics = sparkConf.get(EVENT_LOG_STAGE_EXECUTOR_METRICS)
private val testing = sparkConf.get(EVENT_LOG_TESTING)
private val outputBufferSize = sparkConf.get(EVENT_LOG_OUTPUT_BUFFER_SIZE).toInt
private val fileSystem = Utils.getHadoopFileSystem(logBaseDir, hadoopConf)
private val compressionCodec =
if (shouldCompress) {
Some(CompressionCodec.createCodec(sparkConf))
} else {
None
}
private val compressionCodecName = compressionCodec.map { c =>
CompressionCodec.getShortName(c.getClass.getName)
}
// Only defined if the file system scheme is not local
private var hadoopDataStream: Option[FSDataOutputStream] = None
private var writer: Option[PrintWriter] = None
// For testing. Keep track of all JSON serialized events that have been logged.
private[scheduler] val loggedEvents = new ArrayBuffer[JValue]
// Visible for tests only.
private[scheduler] val logPath = getLogPath(logBaseDir, appId, appAttemptId, compressionCodecName)
// map of (stageId, stageAttempt), to peak executor metrics for the stage
private val liveStageExecutorMetrics = Map.empty[(Int, Int), Map[String, ExecutorMetrics]]
/**
* Creates the log file in the configured log directory.
*/
def start() {
if (!fileSystem.getFileStatus(new Path(logBaseDir)).isDirectory) {
throw new IllegalArgumentException(s"Log directory $logBaseDir is not a directory.")
}
val workingPath = logPath + IN_PROGRESS
val path = new Path(workingPath)
val uri = path.toUri
val defaultFs = FileSystem.getDefaultUri(hadoopConf).getScheme
val isDefaultLocal = defaultFs == null || defaultFs == "file"
if (shouldOverwrite && fileSystem.delete(path, true)) {
logWarning(s"Event log $path already exists. Overwriting...")
}
/* The Hadoop LocalFileSystem (r1.0.4) has known issues with syncing (HADOOP-7844).
* Therefore, for local files, use FileOutputStream instead. */
val dstream =
if ((isDefaultLocal && uri.getScheme == null) || uri.getScheme == "file") {
new FileOutputStream(uri.getPath)
} else {
hadoopDataStream = Some(if (shouldAllowECLogs) {
fileSystem.create(path)
} else {
SparkHadoopUtil.createNonECFile(fileSystem, path)
})
hadoopDataStream.get
}
try {
val cstream = compressionCodec.map(_.compressedOutputStream(dstream)).getOrElse(dstream)
val bstream = new BufferedOutputStream(cstream, outputBufferSize)
EventLoggingListener.initEventLog(bstream, testing, loggedEvents)
fileSystem.setPermission(path, LOG_FILE_PERMISSIONS)
writer = Some(new PrintWriter(bstream))
logInfo("Logging events to %s".format(logPath))
} catch {
case e: Exception =>
dstream.close()
throw e
}
}
/** Log the event as JSON. */
private def logEvent(event: SparkListenerEvent, flushLogger: Boolean = false) {
val eventJson = JsonProtocol.sparkEventToJson(event)
// scalastyle:off println
writer.foreach(_.println(compact(render(eventJson))))
// scalastyle:on println
if (flushLogger) {
writer.foreach(_.flush())
hadoopDataStream.foreach(_.hflush())
}
if (testing) {
loggedEvents += eventJson
}
}
// Events that do not trigger a flush
override def onStageSubmitted(event: SparkListenerStageSubmitted): Unit = {
logEvent(event)
if (shouldLogStageExecutorMetrics) {
// record the peak metrics for the new stage
liveStageExecutorMetrics.put((event.stageInfo.stageId, event.stageInfo.attemptNumber()),
Map.empty[String, ExecutorMetrics])
}
}
override def onTaskStart(event: SparkListenerTaskStart): Unit = logEvent(event)
override def onTaskGettingResult(event: SparkListenerTaskGettingResult): Unit = logEvent(event)
override def onTaskEnd(event: SparkListenerTaskEnd): Unit = logEvent(event)
override def onEnvironmentUpdate(event: SparkListenerEnvironmentUpdate): Unit = {
logEvent(redactEvent(event))
}
// Events that trigger a flush
override def onStageCompleted(event: SparkListenerStageCompleted): Unit = {
if (shouldLogStageExecutorMetrics) {
// clear out any previous attempts, that did not have a stage completed event
val prevAttemptId = event.stageInfo.attemptNumber() - 1
for (attemptId <- 0 to prevAttemptId) {
liveStageExecutorMetrics.remove((event.stageInfo.stageId, attemptId))
}
// log the peak executor metrics for the stage, for each live executor,
// whether or not the executor is running tasks for the stage
val executorOpt = liveStageExecutorMetrics.remove(
(event.stageInfo.stageId, event.stageInfo.attemptNumber()))
executorOpt.foreach { execMap =>
execMap.foreach { case (executorId, peakExecutorMetrics) =>
logEvent(new SparkListenerStageExecutorMetrics(executorId, event.stageInfo.stageId,
event.stageInfo.attemptNumber(), peakExecutorMetrics))
}
}
}
// log stage completed event
logEvent(event, flushLogger = true)
}
override def onJobStart(event: SparkListenerJobStart): Unit = logEvent(event, flushLogger = true)
override def onJobEnd(event: SparkListenerJobEnd): Unit = logEvent(event, flushLogger = true)
override def onBlockManagerAdded(event: SparkListenerBlockManagerAdded): Unit = {
logEvent(event, flushLogger = true)
}
override def onBlockManagerRemoved(event: SparkListenerBlockManagerRemoved): Unit = {
logEvent(event, flushLogger = true)
}
override def onUnpersistRDD(event: SparkListenerUnpersistRDD): Unit = {
logEvent(event, flushLogger = true)
}
override def onApplicationStart(event: SparkListenerApplicationStart): Unit = {
logEvent(event, flushLogger = true)
}
override def onApplicationEnd(event: SparkListenerApplicationEnd): Unit = {
logEvent(event, flushLogger = true)
}
override def onExecutorAdded(event: SparkListenerExecutorAdded): Unit = {
logEvent(event, flushLogger = true)
}
override def onExecutorRemoved(event: SparkListenerExecutorRemoved): Unit = {
logEvent(event, flushLogger = true)
}
override def onExecutorBlacklisted(event: SparkListenerExecutorBlacklisted): Unit = {
logEvent(event, flushLogger = true)
}
override def onExecutorBlacklistedForStage(
event: SparkListenerExecutorBlacklistedForStage): Unit = {
logEvent(event, flushLogger = true)
}
override def onNodeBlacklistedForStage(event: SparkListenerNodeBlacklistedForStage): Unit = {
logEvent(event, flushLogger = true)
}
override def onExecutorUnblacklisted(event: SparkListenerExecutorUnblacklisted): Unit = {
logEvent(event, flushLogger = true)
}
override def onNodeBlacklisted(event: SparkListenerNodeBlacklisted): Unit = {
logEvent(event, flushLogger = true)
}
override def onNodeUnblacklisted(event: SparkListenerNodeUnblacklisted): Unit = {
logEvent(event, flushLogger = true)
}
override def onBlockUpdated(event: SparkListenerBlockUpdated): Unit = {
if (shouldLogBlockUpdates) {
logEvent(event, flushLogger = true)
}
}
override def onExecutorMetricsUpdate(event: SparkListenerExecutorMetricsUpdate): Unit = {
if (shouldLogStageExecutorMetrics) {
// For the active stages, record any new peak values for the memory metrics for the executor
event.executorUpdates.foreach { executorUpdates =>
liveStageExecutorMetrics.values.foreach { peakExecutorMetrics =>
val peakMetrics = peakExecutorMetrics.getOrElseUpdate(
event.execId, new ExecutorMetrics())
peakMetrics.compareAndUpdatePeakValues(executorUpdates)
}
}
}
}
override def onOtherEvent(event: SparkListenerEvent): Unit = {
if (event.logEvent) {
logEvent(event, flushLogger = true)
}
}
/**
* Stop logging events. The event log file will be renamed so that it loses the
* ".inprogress" suffix.
*/
def stop(): Unit = {
writer.foreach(_.close())
val target = new Path(logPath)
if (fileSystem.exists(target)) {
if (shouldOverwrite) {
logWarning(s"Event log $target already exists. Overwriting...")
if (!fileSystem.delete(target, true)) {
logWarning(s"Error deleting $target")
}
} else {
throw new IOException("Target log file already exists (%s)".format(logPath))
}
}
fileSystem.rename(new Path(logPath + IN_PROGRESS), target)
// touch file to ensure modtime is current across those filesystems where rename()
    // does not set it, and which support setTimes(); it's a no-op on most object stores
try {
fileSystem.setTimes(target, System.currentTimeMillis(), -1)
} catch {
case e: Exception => logDebug(s"failed to set time of $target", e)
}
}
private[spark] def redactEvent(
event: SparkListenerEnvironmentUpdate): SparkListenerEnvironmentUpdate = {
// environmentDetails maps a string descriptor to a set of properties
// Similar to:
// "JVM Information" -> jvmInformation,
// "Spark Properties" -> sparkProperties,
// ...
// where jvmInformation, sparkProperties, etc. are sequence of tuples.
// We go through the various of properties and redact sensitive information from them.
val redactedProps = event.environmentDetails.map{ case (name, props) =>
name -> Utils.redact(sparkConf, props)
}
SparkListenerEnvironmentUpdate(redactedProps)
}
}
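/**
 * Editor's illustrative sketch (not part of the original listener): a SparkConf enabling the
 * event-logging parameters documented in the class comment above. The values are examples only.
 */
private[spark] object EventLoggingExampleConf {
  def apply(): SparkConf = new SparkConf()
    .set("spark.eventLog.enabled", "true")
    .set("spark.eventLog.dir", EventLoggingListener.DEFAULT_LOG_DIR)
    .set("spark.eventLog.compress", "true")
    .set("spark.eventLog.logBlockUpdates.enabled", "true")
}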
private[spark] object EventLoggingListener extends Logging {
// Suffix applied to the names of files still being written by applications.
val IN_PROGRESS = ".inprogress"
val DEFAULT_LOG_DIR = "/tmp/spark-events"
private val LOG_FILE_PERMISSIONS = new FsPermission(Integer.parseInt("770", 8).toShort)
// A cache for compression codecs to avoid creating the same codec many times
private val codecMap = Map.empty[String, CompressionCodec]
/**
* Write metadata about an event log to the given stream.
* The metadata is encoded in the first line of the event log as JSON.
*
* @param logStream Raw output stream to the event log file.
*/
def initEventLog(
logStream: OutputStream,
testing: Boolean,
loggedEvents: ArrayBuffer[JValue]): Unit = {
val metadata = SparkListenerLogStart(SPARK_VERSION)
val eventJson = JsonProtocol.logStartToJson(metadata)
    val metadataJson = compact(eventJson) + "\n"
logStream.write(metadataJson.getBytes(StandardCharsets.UTF_8))
if (testing && loggedEvents != null) {
loggedEvents += eventJson
}
}
/**
* Return a file-system-safe path to the log file for the given application.
*
* Note that because we currently only create a single log file for each application,
* we must encode all the information needed to parse this event log in the file name
* instead of within the file itself. Otherwise, if the file is compressed, for instance,
* we won't know which codec to use to decompress the metadata needed to open the file in
* the first place.
*
* The log file name will identify the compression codec used for the contents, if any.
* For example, app_123 for an uncompressed log, app_123.lzf for an LZF-compressed log.
*
* @param logBaseDir Directory where the log file will be written.
* @param appId A unique app ID.
* @param appAttemptId A unique attempt id of appId. May be the empty string.
* @param compressionCodecName Name to identify the codec used to compress the contents
* of the log, or None if compression is not enabled.
* @return A path which consists of file-system-safe characters.
*/
def getLogPath(
logBaseDir: URI,
appId: String,
appAttemptId: Option[String],
compressionCodecName: Option[String] = None): String = {
val base = new Path(logBaseDir).toString.stripSuffix("/") + "/" + Utils.sanitizeDirName(appId)
val codec = compressionCodecName.map("." + _).getOrElse("")
if (appAttemptId.isDefined) {
base + "_" + Utils.sanitizeDirName(appAttemptId.get) + codec
} else {
base + codec
}
}
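  // Illustrative sketch (not part of the original source); the base dir, app id and
  // attempt below are assumptions, shown only to make the naming scheme concrete:
  //   getLogPath(new URI("hdfs://nn/spark-events"), "app_123", Some("1"), Some("lzf"))
  //     // => "hdfs://nn/spark-events/app_123_1.lzf"
  //   getLogPath(new URI("hdfs://nn/spark-events"), "app_123", None)
  //     // => "hdfs://nn/spark-events/app_123"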
/**
* Opens an event log file and returns an input stream that contains the event data.
*
* @return input stream that holds one JSON record per line.
*/
def openEventLog(log: Path, fs: FileSystem): InputStream = {
val in = new BufferedInputStream(fs.open(log))
try {
val codec = codecName(log).map { c =>
codecMap.getOrElseUpdate(c, CompressionCodec.createCodec(new SparkConf, c))
}
codec.map(_.compressedContinuousInputStream(in)).getOrElse(in)
} catch {
case e: Throwable =>
in.close()
throw e
}
}
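  // Illustrative usage sketch (not part of the original source); `fs` and `logPath`
  // are assumptions:
  //   val in = openEventLog(new Path(logPath), fs)
  //   try {
  //     scala.io.Source.fromInputStream(in).getLines().foreach(println) // one JSON event per line
  //   } finally {
  //     in.close()
  //   }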
def codecName(log: Path): Option[String] = {
// Compression codec is encoded as an extension, e.g. app_123.lzf
// Since we sanitize the app ID to not include periods, it is safe to split on it
val logName = log.getName.stripSuffix(IN_PROGRESS)
    logName.split("\\.").tail.lastOption
}
}
|
aosagie/spark
|
core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala
|
Scala
|
apache-2.0
| 16,057 |
package im.tox.antox.callbacks
import android.content.{SharedPreferences, Context}
import android.net.ConnectivityManager
import android.preference.PreferenceManager
import im.tox.antox.data.{State, AntoxDB}
import im.tox.antox.tox.{MessageHelper, Reactive, ToxSingleton}
import im.tox.antox.utils.{ConnectionTypeChangeListener, ConnectionManager}
import im.tox.tox4j.core.callbacks.FriendConnectionStatusCallback
import im.tox.tox4j.core.enums.ToxConnection
import scala.collection.JavaConversions._
object AntoxOnConnectionStatusCallback {
private val TAG = "im.tox.antox.TAG"
}
class AntoxOnConnectionStatusCallback(private var ctx: Context) extends FriendConnectionStatusCallback {
private val preferences = PreferenceManager.getDefaultSharedPreferences(ctx)
private var preferencesListener: SharedPreferences.OnSharedPreferenceChangeListener = _
def setAllStatusNone(): Unit = {
if (!ToxSingleton.isToxConnected(preferences, ctx)) {
for (friend <- ToxSingleton.getAntoxFriendList.all()) {
friendConnectionStatus(friend.getFriendNumber, ToxConnection.NONE)
}
}
}
ConnectionManager.addConnectionTypeChangeListener(new ConnectionTypeChangeListener {
override def connectionTypeChange(connectionType: Int): Unit = {
setAllStatusNone()
}
})
preferencesListener = new SharedPreferences.OnSharedPreferenceChangeListener() {
override def onSharedPreferenceChanged(prefs: SharedPreferences, key: String): Unit = {
key match {
case "wifi_only" =>
setAllStatusNone()
case _ =>
}
}
}
preferences.registerOnSharedPreferenceChangeListener(preferencesListener)
override def friendConnectionStatus(friendNumber: Int, connectionStatus: ToxConnection): Unit = {
val online = connectionStatus != ToxConnection.NONE
val db = new AntoxDB(ctx)
val friendKey = ToxSingleton.getAntoxFriend(friendNumber).get.getKey
db.updateUserOnline(friendKey, online)
ToxSingleton.getAntoxFriend(friendNumber).get.setOnline(online)
if (online) {
MessageHelper.sendUnsentMessages(ctx)
State.transfers.updateSelfAvatar(ctx)
} else {
ToxSingleton.typingMap.put(friendKey, false)
Reactive.typing.onNext(true)
}
ToxSingleton.updateFriendsList(ctx)
ToxSingleton.updateMessages(ctx)
db.close()
}
}
|
Ansa89/Antox
|
app/src/main/scala/im/tox/antox/callbacks/AntoxOnConnectionStatusCallback.scala
|
Scala
|
gpl-3.0
| 2,353 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.benchmark
import java.io.File
import java.text.SimpleDateFormat
import java.util
import java.util.Date
import java.util.concurrent.{Callable, Executors, Future, TimeUnit}
import scala.util.Random
import org.apache.spark.sql.{CarbonEnv, DataFrame, Row, SaveMode, SparkSession}
import org.apache.carbondata.core.constants.{CarbonCommonConstants, CarbonVersionConstants}
import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil}
import org.apache.carbondata.spark.util.DataGenerator
// scalastyle:off println
/**
* Test concurrent query performance of CarbonData
*
* This benchmark will print out some information:
* 1.Environment information
* 2.Parameters information
* 3.concurrent query performance result using parquet format
* 4.concurrent query performance result using CarbonData format
*
 * By default this benchmark runs in local mode;
 * users can set 'runInLocal' to false to run it on a cluster
 * and change variables as needed, for example:
*
 * spark-submit \
 * --class org.apache.carbondata.benchmark.ConcurrentQueryBenchmark \
 * --master yarn \
 * --deploy-mode client \
 * --driver-memory 16g \
 * --executor-cores 4 \
 * --executor-memory 24g \
 * --num-executors 3 \
 * concurrencyTest.jar \
* totalNum threadNum taskNum resultIsEmpty runInLocal generateFile
* deleteFile openSearchMode storeLocation
* details in initParameters method of this benchmark
*/
object ConcurrentQueryBenchmark {
  // number of rows of data to generate
var totalNum = 10 * 1000 * 1000
  // the size of the thread pool
var threadNum = 16
  // number of Spark SQL query tasks
var taskNum = 100
  // whether to keep the query result empty; if true, result rows are not collected
var resultIsEmpty = true
// the store path of task details
var path: String = "/tmp/carbondata"
// whether run in local or cluster
var runInLocal = true
// whether generate new file
var generateFile = true
// whether delete file
var deleteFile = true
// carbon store location
var storeLocation = "/tmp"
val cardinalityId = 100 * 1000 * 1000
val cardinalityCity = 6
def parquetTableName: String = "Num" + totalNum + "_" + "comparetest_parquet"
def orcTableName: String = "Num" + totalNum + "_" + "comparetest_orc"
def carbonTableName(version: String): String =
"Num" + totalNum + "_" + s"comparetest_carbonV$version"
  // performance test queries; they are designed to test various data access types
val r = new Random()
lazy val tmpId = r.nextInt(cardinalityId) % totalNum
lazy val tmpCity = "city" + (r.nextInt(cardinalityCity) % totalNum)
// different query SQL
lazy val queries: Array[Query] = Array(
Query(
"select * from $table" + s" where id = '$tmpId' ",
"filter scan",
"filter on high card dimension"
)
, Query(
"select id from $table" + s" where id = '$tmpId' ",
"filter scan",
"filter on high card dimension"
),
Query(
"select city from $table" + s" where id = '$tmpId' ",
"filter scan",
"filter on high card dimension"
),
Query(
"select * from $table" + s" where city = '$tmpCity' limit 100",
"filter scan",
"filter on low card dimension, medium result set, fetch all columns"
),
Query(
"select city from $table" + s" where city = '$tmpCity' limit 100",
"filter scan",
"filter on low card dimension"
),
Query(
"select id from $table" + s" where city = '$tmpCity' limit 100",
"filter scan",
"filter on low card dimension"
),
Query(
"select country, sum(m1) from $table group by country",
"aggregate",
"group by on big data, on medium card column, medium result set,"
),
Query(
"select country, sum(m1) from $table" +
s" where id = '$tmpId' group by country",
"aggregate",
"group by on big data, on medium card column, medium result set,"
),
Query(
"select t1.country, sum(t1.m1) from $table t1 join $table t2"
+ s" on t1.id = t2.id where t1.id = '$tmpId' group by t1.country",
"aggregate",
"group by on big data, on medium card column, medium result set,"
),
Query(
"select t2.country, sum(t2.m1) " +
"from $table t1 join $table t2 join $table t3 " +
"join $table t4 join $table t5 join $table t6 join $table t7 " +
s"on t1.id=t2.id and t1.id=t3.id and t1.id=t4.id " +
s"and t1.id=t5.id and t1.id=t6.id and " +
s"t1.id=t7.id " +
s" where t2.id = '$tmpId' " +
s" group by t2.country",
"aggregate",
"group by on big data, on medium card column, medium result set,"
)
)
/**
* generate parquet format table
*
* @param spark SparkSession
* @param input DataFrame
* @param table table name
* @return the time of generating parquet format table
*/
private def generateParquetTable(spark: SparkSession, input: DataFrame, table: String)
: Double = time {
// partitioned by last 1 digit of id column
val dfWithPartition = input.withColumn("partitionCol", input.col("id").%(10))
dfWithPartition.write
.partitionBy("partitionCol")
.mode(SaveMode.Overwrite)
.parquet(table)
}
/**
* generate ORC format table
*
* @param spark SparkSession
* @param input DataFrame
* @param table table name
* @return the time of generating ORC format table
*/
private def generateOrcTable(spark: SparkSession, input: DataFrame, table: String): Double =
time {
// partitioned by last 1 digit of id column
input.write
.mode(SaveMode.Overwrite)
.orc(table)
}
/**
* generate carbon format table
*
* @param spark SparkSession
* @param input DataFrame
* @param tableName table name
* @return the time of generating carbon format table
*/
private def generateCarbonTable(spark: SparkSession, input: DataFrame, tableName: String)
: Double = {
CarbonProperties.getInstance().addProperty(
CarbonCommonConstants.CARBON_DATA_FILE_VERSION,
"3"
)
spark.sql(s"drop table if exists $tableName")
time {
input.write
.format("carbondata")
.option("tableName", tableName)
.option("tempCSV", "false")
.option("table_blocksize", "32")
.mode(SaveMode.Overwrite)
.save()
}
}
/**
* load data into parquet, carbonV2, carbonV3
*
* @param spark SparkSession
* @param table1 table1 name
* @param table2 table2 name
*/
def prepareTable(spark: SparkSession, table1: String, table2: String): Unit = {
val df = if (generateFile) {
DataGenerator.generateDataFrame(spark, totalNum).cache
} else {
null
}
val table1Time = time {
if (table1.endsWith("parquet")) {
if (generateFile) {
generateParquetTable(spark, df, storeLocation + "/" + table1)
}
spark.read.parquet(storeLocation + "/" + table1).createOrReplaceTempView(table1)
} else if (table1.endsWith("orc")) {
if (generateFile) {
generateOrcTable(spark, df, table1)
spark.read.orc(table1).createOrReplaceTempView(table1)
}
} else {
sys.error("invalid table: " + table1)
}
}
println(s"$table1 completed, time: $table1Time sec")
val table2Time: Double = if (generateFile) {
generateCarbonTable(spark, df, table2)
} else {
0.0
}
println(s"$table2 completed, time: $table2Time sec")
if (null != df) {
df.unpersist()
}
}
/**
* Run all queries for the specified table
*
* @param spark SparkSession
* @param tableName table name
*/
private def runQueries(spark: SparkSession, tableName: String): Unit = {
println()
println(s"Start running queries for $tableName...")
    println(
      "Min: min time" +
        "\tMax: max time" +
        "\t90%: 90% time" +
        "\t99%: 99% time" +
        "\tAvg: average time" +
        "\tCount: number of result" +
        "\tQuery X: running different query sql" +
        "\tResult: show it when ResultIsEmpty is false" +
        "\tTotal execute time: total runtime")
queries.zipWithIndex.map { case (query, index) =>
val sqlText = query.sqlText.replace("$table", tableName)
val executorService = Executors.newFixedThreadPool(threadNum)
val tasks = new java.util.ArrayList[Callable[Results]]()
val tasksStartTime = System.nanoTime()
for (num <- 1 to taskNum) {
tasks.add(new QueryTask(spark, sqlText))
}
val results = executorService.invokeAll(tasks)
executorService.shutdown()
executorService.awaitTermination(600, TimeUnit.SECONDS)
val tasksEndTime = System.nanoTime()
val sql = s"Query ${index + 1}: $sqlText "
printResults(results, sql, tasksStartTime)
val taskTime = (tasksEndTime - tasksStartTime).toDouble / (1000 * 1000 * 1000)
println("Total execute time: " + taskTime.formatted("%.3f") + " s")
val timeString = new SimpleDateFormat("yyyyMMddHHmmssSSS").format(new Date())
writeResults(spark, results, sql, tasksStartTime,
path + s"/${tableName}_query${index + 1}_$timeString")
}
}
/**
* save the result for subsequent analysis
*
* @param spark SparkSession
* @param results Results
* @param sql query sql
* @param start tasks start time
* @param filePath write file path
*/
def writeResults(
spark: SparkSession,
results: java.util.List[Future[Results]],
sql: String = "",
start: Long,
filePath: String): Unit = {
val timeArray = new Array[(Double, Double, Double)](results.size())
for (i <- 0 until results.size()) {
timeArray(i) =
((results.get(i).get().startTime - start) / (1000.0 * 1000),
(results.get(i).get().endTime - start) / (1000.0 * 1000),
(results.get(i).get().endTime - results.get(i).get().startTime) / (1000.0 * 1000))
}
val timeArraySorted = timeArray.sortBy(x => x._1)
val timeArrayString = timeArraySorted.map { e =>
      e._1.formatted("%.3f") + ",\t" + e._2.formatted("%.3f") + ",\t" + e._3.formatted("%.3f")
}
    val saveArray = Array(sql, "startTime, endTime, runtime, measured in milliseconds",
s"${timeArrayString.length}")
.union(timeArrayString)
val rdd = spark.sparkContext.parallelize(saveArray, 1)
rdd.saveAsTextFile(filePath)
}
/**
* print out results
*
* @param results Results
* @param sql query sql
* @param tasksStartTime tasks start time
*/
def printResults(results: util.List[Future[Results]], sql: String = "", tasksStartTime: Long) {
val timeArray = new Array[Double](results.size())
val sqlResult = results.get(0).get().sqlResult
for (i <- 0 until results.size()) {
results.get(i).get()
}
for (i <- 0 until results.size()) {
timeArray(i) = results.get(i).get().time
}
val sortTimeArray = timeArray.sorted
    // the time by which 90 percent of the queries have finished (90th percentile)
val time90 = ((sortTimeArray.length) * 0.9).toInt - 1
    // the time by which 99 percent of the queries have finished (99th percentile)
val time99 = ((sortTimeArray.length) * 0.99).toInt - 1
    print(
      "Min: " + sortTimeArray.head.formatted("%.3f") + " s," +
        "\tMax: " + sortTimeArray.last.formatted("%.3f") + " s," +
        "\t90%: " + sortTimeArray(time90).formatted("%.3f") + " s," +
        "\t99%: " + sortTimeArray(time99).formatted("%.3f") + " s," +
        "\tAvg: " + (timeArray.sum / timeArray.length).formatted("%.3f") + " s," +
        "\t\tCount: " + results.get(0).get.count +
        "\t\t\t\t" + sql +
        "\t" + sqlResult.mkString(",") + "\t")
}
/**
* save result after finishing each task/thread
*
   * @param time time each task took to execute the query SQL, in seconds
   * @param sqlResult query sql result
   * @param count result count
   * @param startTime task start time, in nanoseconds
   * @param endTime task end time, in nanoseconds
*/
case class Results(
time: Double,
sqlResult: Array[Row],
count: Int,
startTime: Long,
endTime: Long)
class QueryTask(spark: SparkSession, query: String)
extends Callable[Results] with Serializable {
override def call(): Results = {
var result: Array[Row] = null
val startTime = System.nanoTime()
val rt = time {
result = spark.sql(query).collect()
}
val endTime = System.nanoTime()
if (resultIsEmpty) {
Results(rt, Array.empty[Row], count = result.length, startTime, endTime)
} else {
Results(rt, result, count = result.length, startTime, endTime)
}
}
}
/**
* run testcases and print comparison result
*
* @param spark SparkSession
* @param table1 table1 name
* @param table2 table2 name
*/
def runTest(spark: SparkSession, table1: String, table2: String): Unit = {
// run queries on parquet and carbon
runQueries(spark, table1)
// do GC and sleep for some time before running next table
System.gc()
Thread.sleep(1000)
System.gc()
Thread.sleep(1000)
runQueries(spark, table2)
}
/**
   * Measure the running time of a block of code
   *
   * @param code the code block to execute
   * @return the elapsed time, in seconds
*/
def time(code: => Unit): Double = {
val start = System.currentTimeMillis()
code
    // return time in seconds
(System.currentTimeMillis() - start).toDouble / 1000
}
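  // Illustrative sketch (not part of the original source): measuring an arbitrary block.
  //   val seconds = time { Thread.sleep(100) }  // roughly 0.1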
/**
* init parameters
*
* @param arr parameters
*/
def initParameters(arr: Array[String]): Unit = {
if (arr.length > 0) {
totalNum = arr(0).toInt
}
if (arr.length > 1) {
threadNum = arr(1).toInt
}
if (arr.length > 2) {
taskNum = arr(2).toInt
}
if (arr.length > 3) {
resultIsEmpty = if (arr(3).equalsIgnoreCase("true")) {
true
} else if (arr(3).equalsIgnoreCase("false")) {
false
} else {
throw new Exception("error parameter, should be true or false")
}
}
if (arr.length > 4) {
path = arr(4)
}
if (arr.length > 5) {
runInLocal = if (arr(5).equalsIgnoreCase("true")) {
val rootPath = new File(this.getClass.getResource("/").getPath
+ "../../../..").getCanonicalPath
storeLocation = s"$rootPath/examples/spark/target/store"
true
} else if (arr(5).equalsIgnoreCase("false")) {
false
} else {
throw new Exception("error parameter, should be true or false")
}
}
if (arr.length > 6) {
generateFile = if (arr(6).equalsIgnoreCase("true")) {
true
} else if (arr(6).equalsIgnoreCase("false")) {
false
} else {
throw new Exception("error parameter, should be true or false")
}
}
if (arr.length > 7) {
deleteFile = if (arr(7).equalsIgnoreCase("true")) {
true
} else if (arr(7).equalsIgnoreCase("false")) {
false
} else {
throw new Exception("error parameter, should be true or false")
}
}
if (arr.length > 8) {
storeLocation = arr(8)
}
}
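  // Illustrative sketch (not part of the original source): the positional arguments are
  // consumed in the order implemented above, e.g.
  //   initParameters(Array("10000000", "16", "100", "true", "/tmp/carbondata",
  //     "true", "true", "true", "/tmp"))
  //   // totalNum, threadNum, taskNum, resultIsEmpty, path, runInLocal,
  //   // generateFile, deleteFile, storeLocation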
/**
* main method of this benchmark
*
* @param args parameters
*/
def main(args: Array[String]): Unit = {
CarbonProperties.getInstance()
.addProperty("carbon.enable.vector.reader", "true")
.addProperty("enable.unsafe.sort", "true")
.addProperty("carbon.blockletgroup.size.in.mb", "32")
.addProperty(CarbonCommonConstants.ENABLE_UNSAFE_COLUMN_PAGE, "false")
.addProperty(CarbonCommonConstants.ENABLE_UNSAFE_IN_QUERY_EXECUTION, "false")
import org.apache.spark.sql.CarbonUtils._
// 1. initParameters
initParameters(args)
val table1 = parquetTableName
val table2 = carbonTableName("3")
    val parameters = "totalNum: " + totalNum +
      "\tthreadNum: " + threadNum +
      "\ttaskNum: " + taskNum +
      "\tresultIsEmpty: " + resultIsEmpty +
      "\tfile path: " + path +
      "\trunInLocal: " + runInLocal +
      "\tgenerateFile: " + generateFile +
      "\tdeleteFile: " + deleteFile +
      "\tstoreLocation: " + storeLocation
val spark = if (runInLocal) {
SparkSession
.builder()
.appName(parameters)
.master("local[8]")
.enableHiveSupport()
.config("spark.sql.extensions", "org.apache.spark.sql.CarbonExtensions")
.getOrCreate()
} else {
SparkSession
.builder()
.appName(parameters)
.enableHiveSupport()
.config("spark.sql.extensions", "org.apache.spark.sql.CarbonExtensions")
.getOrCreate()
}
CarbonEnv.getInstance(spark)
spark.sparkContext.setLogLevel("ERROR")
    println("\nEnvironment information:")
val env = Array(
"spark.master",
"spark.driver.cores",
"spark.driver.memory",
"spark.executor.cores",
"spark.executor.memory",
"spark.executor.instances")
env.foreach { each =>
      println(each + ":\t" + spark.conf.get(each, "default value") + "\t")
}
    println("SPARK_VERSION:" + spark.version + "\t")
    println("CARBONDATA_VERSION:" + CarbonVersionConstants.CARBONDATA_VERSION + "\t")
    println("\nParameters information:")
println(parameters)
// 2. prepareTable
prepareTable(spark, table1, table2)
// 3. runTest
runTest(spark, table1, table2)
if (deleteFile) {
CarbonUtil.deleteFoldersAndFiles(new File(table1))
spark.sql(s"drop table $table2")
}
spark.close()
}
}
// scalastyle:on println
|
jackylk/incubator-carbondata
|
examples/spark/src/main/scala/org/apache/carbondata/benchmark/ConcurrentQueryBenchmark.scala
|
Scala
|
apache-2.0
| 18,374 |
package io.zengin.telegrambot.types
import java.io.{FileInputStream, InputStream, File => JFile}
trait InputFile {
val name: String
val mimeType: String = "application/octet-stream"
val bytes: Array[Byte]
}
object InputFile {
def apply(filePath: String): InputFile = apply(new JFile(filePath))
def apply(file: JFile): InputFile = {
apply(file.getName, new FileInputStream(file))
}
def apply(fileName: String, inputStream: InputStream): InputFile = new InputFile {
val name = fileName
val bytes = Iterator.continually(inputStream.read()) takeWhile (-1 !=) map (_.toByte) toArray
}
}
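// Illustrative usage sketch (not part of the original source); the file path is an assumption:
//   val photo: InputFile = InputFile("/tmp/avatar.png")
//   photo.name     // "avatar.png"
//   photo.mimeType // "application/octet-stream" (the trait's default)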
|
hzengin/telegrambot
|
src/main/scala/io/zengin/telegrambot/types/InputFile.scala
|
Scala
|
mit
| 616 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.frontend.v2_3.ast
import org.neo4j.cypher.internal.frontend.v2_3.ast.Expression.SemanticContext
import org.neo4j.cypher.internal.frontend.v2_3.test_helpers.CypherFunSuite
import org.neo4j.cypher.internal.frontend.v2_3.{DummyPosition, SemanticError, SemanticState}
class HexIntegerLiteralTest extends CypherFunSuite {
test("correctly parses hexadecimal numbers") {
assert(SignedHexIntegerLiteral("0x22")(DummyPosition(0)).value === 0x22)
assert(SignedHexIntegerLiteral("0x0")(DummyPosition(0)).value === 0)
assert(SignedHexIntegerLiteral("0xffFF")(DummyPosition(0)).value === 0xffff)
assert(SignedHexIntegerLiteral("-0x9abc")(DummyPosition(0)).value === -0x9abc)
}
test("throws error for invalid hexadecimal numbers") {
assertSemanticError("0x12g3", "invalid literal number")
assertSemanticError("0x", "invalid literal number")
assertSemanticError("0x33Y23", "invalid literal number")
assertSemanticError("-0x12g3", "invalid literal number")
}
test("throws error for too large hexadecimal numbers") {
assertSemanticError("0xfffffffffffffffff", "integer is too large")
}
private def assertSemanticError(stringValue: String, errorMessage: String) {
val literal = SignedHexIntegerLiteral(stringValue)(DummyPosition(4))
val result = literal.semanticCheck(SemanticContext.Simple)(SemanticState.clean)
assert(result.errors === Vector(SemanticError(errorMessage, DummyPosition(4))))
}
}
|
HuangLS/neo4j
|
community/cypher/frontend-2.3/src/test/scala/org/neo4j/cypher/internal/frontend/v2_3/ast/HexIntegerLiteralTest.scala
|
Scala
|
apache-2.0
| 2,278 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.schedulers
import java.util.concurrent.TimeUnit
import minitest.SimpleTestSuite
import monix.execution.ExecutionModel.AlwaysAsyncExecution
import monix.execution.Scheduler
import monix.execution.misc.Local
import scala.concurrent.Future
import scala.util.control.NonFatal
object TracingSchedulerServiceSuite extends SimpleTestSuite {
testAsync("captures locals in actual async execution") {
val service = TracingSchedulerService(Scheduler.singleThread("test"))
val f1 = {
implicit val ec = service
val local1 = Local(0)
val local2 = Local(0)
local2 := 100
val ref = local1.bind(100)(Future(local1.get + local2.get))
local1 := 999
local2 := 999
ref
}
import Scheduler.Implicits.global
val f2 = service.awaitTermination(100, TimeUnit.HOURS, global)
val ff = f1.map { r =>
try {
assert(!service.isShutdown, "!service.isShutdown")
assert(!service.isTerminated)
assertEquals(r, 200)
service.shutdown()
} catch {
case NonFatal(e) if !service.isShutdown =>
service.shutdown()
throw e
}
}
for (_ <- ff; _ <- f2) yield {
assert(service.isTerminated, "service.isTerminated")
assert(service.isShutdown, "service.isShutdown")
}
}
test("executionModel") {
val ec: SchedulerService = Scheduler.singleThread("test")
val traced = TracingSchedulerService(ec)
try {
assertEquals(traced.executionModel, ec.executionModel)
val traced2 = traced.withExecutionModel(AlwaysAsyncExecution)
assertEquals(traced2.executionModel, AlwaysAsyncExecution)
} finally {
traced.shutdown()
}
}
}
|
Wogan/monix
|
monix-execution/jvm/src/test/scala/monix/execution/schedulers/TracingSchedulerServiceSuite.scala
|
Scala
|
apache-2.0
| 2,394 |
package org.bitcoins.testkit.node.fixture
import org.bitcoins.node.{NeutrinoNode, Node, SpvNode}
import org.bitcoins.rpc.client.common.BitcoindRpcClient
/** Gives us a fixture that has an SPV node connected to the bitcoind instance */
trait NodeConnectedWithBitcoind {
def node: Node
def bitcoind: BitcoindRpcClient
}
case class SpvNodeConnectedWithBitcoind(
node: SpvNode,
bitcoind: BitcoindRpcClient)
extends NodeConnectedWithBitcoind
case class NeutrinoNodeConnectedWithBitcoind(
node: NeutrinoNode,
bitcoind: BitcoindRpcClient)
extends NodeConnectedWithBitcoind
|
bitcoin-s/bitcoin-s-core
|
testkit/src/main/scala/org/bitcoins/testkit/node/fixture/NodeConnectedWithBitcoind.scala
|
Scala
|
mit
| 597 |
package com.twitter.finagle.exp
import com.twitter.conversions.time._
import com.twitter.finagle.{Service, SimpleFilter, NoStacktrace}
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.util.{Future, Return, Throw, Duration, Timer}
/**
* Issue a backup request after `delay` time has elapsed. This is
* useful for curtailing tail latencies in distributed systems.
*
* '''Note:''' Care must be taken to ensure that application of this
* filter preserves semantics since a request may be issued twice
 * (i.e. requests must be idempotent).
*/
class BackupRequestFilter[Req, Rep](delay: Duration, timer: Timer, statsReceiver: StatsReceiver)
extends SimpleFilter[Req, Rep]
{
assert(delay >= 0.seconds)
private[this] val backups = statsReceiver.scope("backup")
private[this] val timeouts = backups.counter("timeouts")
private[this] val won = backups.counter("won")
private[this] val lost = backups.counter("lost")
def apply(req: Req, service: Service[Req, Rep]): Future[Rep] = {
val backup = timer.doLater(delay) {
timeouts.incr()
service(req)
} flatten
Future.select(Seq(service(req), backup)) flatMap {
case (Return(res), Seq(other)) =>
if (other eq backup) won.incr() else lost.incr()
other.raise(BackupRequestLost)
Future.value(res)
case (Throw(_), Seq(other)) => other
}
}
}
object BackupRequestLost extends Exception with NoStacktrace
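// Illustrative usage sketch (not part of the original source): wrapping a service so a
// backup request fires if the primary has not answered within 50 ms. The wrapped service,
// the timer and the 50.milliseconds delay are assumptions, not values defined in this file.
private object BackupRequestFilterUsage {
  import com.twitter.finagle.stats.NullStatsReceiver

  def withBackup[Req, Rep](service: Service[Req, Rep], timer: Timer): Service[Req, Rep] = {
    // requests must be idempotent, since the backup may cause them to be issued twice
    val filter = new BackupRequestFilter[Req, Rep](50.milliseconds, timer, NullStatsReceiver)
    filter.andThen(service)
  }
}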
|
foursquare/finagle
|
finagle-core/src/main/scala/com/twitter/finagle/exp/BackupRequestFilter.scala
|
Scala
|
apache-2.0
| 1,434 |
package com.twitter.inject.thrift
import com.twitter.conversions.DurationOps._
import com.twitter.finagle.http.Status.Ok
import com.twitter.finatra.http.{EmbeddedHttpServer, HttpTest}
import com.twitter.finatra.thrift.EmbeddedThriftServer
import com.twitter.greeter.thriftscala.Greeter
import com.twitter.inject.server.FeatureTest
import com.twitter.inject.thrift.DoEverythingReqRepThriftMethodBuilderClientModuleFeatureTest._
import com.twitter.inject.thrift.integration.reqrepserviceperendpoint.{
ReqRepServicePerEndpointHttpController,
GreeterReqRepThriftMethodBuilderClientModule,
GreeterThriftService
}
import com.twitter.inject.thrift.integration.{TestHttpServer, TestThriftServer}
import com.twitter.util.Duration
import com.twitter.util.tunable.Tunable
object DoEverythingReqRepThriftMethodBuilderClientModuleFeatureTest {
case class HelloHeaders(empty: Boolean)
case class HelloResponse(value: String, headers: HelloHeaders)
}
class DoEverythingReqRepThriftMethodBuilderClientModuleFeatureTest
extends FeatureTest
with HttpTest {
private val requestHeaderKey = "com.twitter.greeter.test.header"
private val httpServiceClientId = "http-service"
private val perRequestTimeoutTunable: Tunable[Duration] =
Tunable.mutable("per-request", 50.millis)
private val greeterThriftServer = new EmbeddedThriftServer(
twitterServer = new TestThriftServer(
new GreeterThriftService(httpServiceClientId, requestHeaderKey).toThriftService),
disableTestLogging = true
)
override val server = new EmbeddedHttpServer(
twitterServer = new TestHttpServer[ReqRepServicePerEndpointHttpController](
"rrspe-server",
new GreeterReqRepThriftMethodBuilderClientModule(requestHeaderKey, perRequestTimeoutTunable)),
args = Seq(
s"-thrift.clientId=$httpServiceClientId",
resolverMap("greeter-thrift-service" -> greeterThriftServer.thriftHostAndPort)
)
)
override def afterAll(): Unit = {
greeterThriftServer.close()
super.afterAll()
}
test("Greeter.ReqRepServicePerEndpoint is available from the injector") {
server.injector.instance[Greeter.ReqRepServicePerEndpoint] should not be null
}
test("Greeter.MethodPerEndpoint is available from the injector") {
server.injector.instance[Greeter.MethodPerEndpoint] should not be null
}
test("Say hi") {
// fails more times (3) than the MethodBuilder number of retries (2).
intercept[Exception] {
server.httpGet(path = "/hi?name=Bob", andExpect = Ok, withBody = "Hi Bob")
}
// per-method -- all the requests in this test were to the same method
/* assert counters added by ThriftServicePerEndpoint#statsFilter */
server.inMemoryStats.counters.assert("clnt/greeter-thrift-client/Greeter/hi/requests", 3)
server.inMemoryStats.counters.assert("clnt/greeter-thrift-client/Greeter/hi/success", 1)
server.inMemoryStats.counters.assert("clnt/greeter-thrift-client/Greeter/hi/failures", 2)
/* assert MethodBuilder stats exist */
server.inMemoryStats.stats
.get("clnt/greeter-thrift-client/hi/logical/request_latency_ms") should not be None
server.inMemoryStats.stats.assert("clnt/greeter-thrift-client/hi/retries", Seq(2.0f))
/* assert MethodBuilder counters */
server.inMemoryStats.counters.assert("clnt/greeter-thrift-client/hi/logical/requests", 1)
server.inMemoryStats.counters.assert("clnt/greeter-thrift-client/hi/logical/success", 1)
}
test("Say hello") {
val response =
server.httpGetJson[HelloResponse](path = "/hello?name=Bob", andExpect = Ok)
assert(response.value == "Hello Bob")
assert(!response.headers.empty)
// per-method -- all the requests in this test were to the same method
/* assert counters added by ThriftServicePerEndpoint#statsFilter */
server.inMemoryStats.counters.assert("clnt/greeter-thrift-client/Greeter/hello/requests", 3)
server.inMemoryStats.counters.assert("clnt/greeter-thrift-client/Greeter/hello/success", 1)
server.inMemoryStats.counters.assert("clnt/greeter-thrift-client/Greeter/hello/failures", 2)
/* assert MethodBuilder stats exist */
server.inMemoryStats.stats
.get("clnt/greeter-thrift-client/hello/logical/request_latency_ms") should not be None
server.inMemoryStats.stats.assert("clnt/greeter-thrift-client/hello/retries", Seq(2.0f))
/* assert MethodBuilder counters */
server.inMemoryStats.counters.assert("clnt/greeter-thrift-client/hello/logical/requests", 1)
server.inMemoryStats.counters.assert("clnt/greeter-thrift-client/hello/logical/success", 1)
}
test("Say bye") {
server.httpGet(
path = "/bye?name=Bob&age=18",
andExpect = Ok,
withBody = "Bye Bob of 18 years!"
)
// per-method -- all the requests in this test were to the same method
/* assert counters added by ThriftServicePerEndpoint#statsFilter */
server.inMemoryStats.counters.assert("clnt/greeter-thrift-client/Greeter/bye/requests", 3)
server.inMemoryStats.counters.assert("clnt/greeter-thrift-client/Greeter/bye/success", 1)
server.inMemoryStats.counters.assert("clnt/greeter-thrift-client/Greeter/bye/failures", 2)
/* assert MethodBuilder stats exist */
server.inMemoryStats.stats
.get("clnt/greeter-thrift-client/bye/logical/request_latency_ms") should not be None
server.inMemoryStats.stats.assert("clnt/greeter-thrift-client/bye/retries", Seq(2.0f))
/* assert MethodBuilder counters */
server.inMemoryStats.counters.assert("clnt/greeter-thrift-client/bye/logical/requests", 1)
server.inMemoryStats.counters.assert("clnt/greeter-thrift-client/bye/logical/success", 1)
}
}
|
twitter/finatra
|
inject/inject-thrift-client/src/test/scala/com/twitter/inject/thrift/DoEverythingReqRepThriftMethodBuilderClientModuleFeatureTest.scala
|
Scala
|
apache-2.0
| 5,657 |
package io.vamp.pulse
import akka.actor.Actor
import io.vamp.common.ClassMapper
import io.vamp.common.akka.IoC
import io.vamp.common.vitals.{ InfoRequest, StatsRequest }
import io.vamp.model.resolver.NamespaceValueResolver
import io.vamp.pulse.Percolator.{ GetPercolator, RegisterPercolator, UnregisterPercolator }
import io.vamp.pulse.notification._
class NatsPulseActorMapper extends ClassMapper {
val name = "nats"
val clazz: Class[_] = classOf[NatsPulseActor]
}
object NatsPulseActor {
val config: String = PulseActor.config
}
/**
 * The NATS Pulse Actor forwards published events to both NATS and Elasticsearch,
 * and forwards all other supported messages to Elasticsearch only.
*/
class NatsPulseActor extends NamespaceValueResolver with PulseActor {
import PulseActor._
def receive: Actor.Receive = {
case InfoRequest ⇒ IoC.actorFor[PulseActorSupport].forward(InfoRequest)
case StatsRequest ⇒ IoC.actorFor[PulseActorSupport].forward(StatsRequest)
case Publish(event, publishEventValue) ⇒
IoC.actorFor[PulseActorSupport].forward(Publish(event, publishEventValue))
IoC.actorFor[PulseActorPublisher].forward(Publish(event, publishEventValue))
case Query(envelope) ⇒ IoC.actorFor[PulseActorSupport].forward(Query(envelope))
case GetPercolator(name) ⇒ IoC.actorFor[PulseActorSupport].forward(GetPercolator(name))
case RegisterPercolator(name, tags, kind, message) ⇒ IoC.actorFor[PulseActorSupport].forward(RegisterPercolator(name, tags, kind, message))
case UnregisterPercolator(name) ⇒ IoC.actorFor[PulseActorSupport].forward(UnregisterPercolator(name))
case any ⇒ unsupported(UnsupportedPulseRequest(any))
}
}
|
magneticio/vamp
|
nats/src/main/scala/io/vamp/pulse/NatsPulseActor.scala
|
Scala
|
apache-2.0
| 1,677 |
/*
* This file is part of Evo2DSim.
*
* Evo2DSim is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Evo2DSim is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Evo2DSim. If not, see <http://www.gnu.org/licenses/>.
*/
package org.vastness.evo2dsim.core.environment.mixins.foodSources
import org.vastness.evo2dsim.core.simulator.food.StaticFoodSource
import org.vastness.evo2dsim.core.gui.Color
trait BlueTestSource extends FoodSources {
def foodSources = List(
new StaticFoodSource(color = Color.BLUE, max = 20, reward = 0, foodRadius, activationRange, smellRange)
)
}
|
vchuravy/Evo2DSim
|
core/src/main/scala/org/vastness/evo2dsim/core/environment/mixins/foodSources/BlueTestSource.scala
|
Scala
|
mit
| 1,061 |
package sclack.ui
import swing._
import scala.swing._
import swing.event._
import javax.swing.border.EmptyBorder
import sclack.ui.factories.{TextFieldFactory, TextAreaFactory}
/**
* The Credits dialog to respect the artists (and ego whoring :D)
* @author Simon Symeonidis
*/
class Credits extends Dialog {
val tff = TextFieldFactory
val taf = TextAreaFactory
val programmingLabel = new Label{ text = "Programming: " }
val authorField = tff.disabledTextField("Simon (psyomn) Symeonidis")
val thanksLabel = new Label{ text = "Thanks to: " }
  val thanksArea = taf.disabledTextArea(
    "Jerom for the fantasy tileset:\n"
    + " http://opengameart.org/sites/default/files/tileset_16x16_Jerom_CC-BY-SA-3.0_1.png&nid=17136\n\n"
    + "Gwes for the NES style tileset:\n"
+ " http://opengameart.org/content/16x16-dungeon-tiles-nes-remake")
val back = new Button{ text = "Back" }
val components : Array[Component] =
Array[Component](programmingLabel, authorField, thanksLabel,
thanksArea, back)
title = "Credits"
modal = true
preferredSize = new Dimension(700,400)
maximumSize = new Dimension(700,400)
minimumSize = new Dimension(700,400)
contents = new BoxPanel(Orientation.Vertical) {
contents ++= components
border = new EmptyBorder(10,10,10,10)
}
listenTo(back)
reactions += {
case ButtonClicked(b) =>
dispose
}
centerOnScreen()
open()
}
|
psyomn/sclack
|
src/main/scala/ui/Credits.scala
|
Scala
|
gpl-3.0
| 1,484 |
package com.twitter.finagle.http
import com.twitter.finagle.CancelledRequestException
import com.twitter.finagle.Failure
import com.twitter.finagle.context.Contexts
import com.twitter.finagle.context.RemoteInfo
import com.twitter.finagle.transport.Transport
import com.twitter.finagle.util.DefaultTimer
import com.twitter.logging.Level
import com.twitter.logging.Logger
import com.twitter.util._
import java.util.concurrent.atomic.AtomicReference
private[finagle] object GenStreamingSerialServerDispatcher {
private val logger = Logger.get()
// Note: this is a slightly different Eof than the finagle-core version, but I don't think it matters
private val Eof = Future.exception(Failure("EOF"))
private val cancelled = new CancelledRequestException
private sealed trait DispatchState
private case object Idle extends DispatchState
private case object Running extends DispatchState
private case object Closing extends DispatchState
}
/**
* A generic version of
* [[com.twitter.finagle.dispatch.SerialServerDispatcher SerialServerDispatcher]],
* allowing the implementor to furnish custom dispatchers & handlers.
*/
private[finagle] abstract class GenStreamingSerialServerDispatcher[Req, Rep, In, Out](
trans: StreamTransport[In, Out])
extends Closable {
def this(trans: Transport[In, Out]) = this(new IdentityStreamTransport(trans))
import GenStreamingSerialServerDispatcher._
private[this] val state = new AtomicReference[DispatchState](Idle)
/**
* Dispatches a request. The first argument is the request. The second
* argument `eos` (end-of-stream promise) must be fulfilled when the request
* is complete.
*
* For non-streaming requests, `eos.setDone()` should be called immediately,
* since the entire request is present. For streaming requests,
* `eos.setDone()` must be called at the end of stream (in HTTP, this is on
* receipt of last chunk). Refer to the implementation in
* [[com.twitter.finagle.http.codec.HttpServerDispatcher]].
*/
protected def dispatch(req: Out): Future[Rep]
protected def handle(rep: Rep): Future[Unit]
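  // Illustrative sketch (not part of the original source): a minimal non-streaming
  // implementation could simply delegate to an underlying service, roughly
  //   protected def dispatch(req: Out): Future[Rep] = service(req)
  // where `service` is an assumption, not a member of this class, and the end-of-stream
  // future carried in `Multi` is completed immediately by the transport.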
/**
* Only the dispatch loop can make state transitions to Idle and Running but close
* operations can transition the state to Closing. If the loop finds that the state
* has been transitioned from Idle -> Closing, it is the closer's job to close the
* transport. If the loops finds that the state has transitioned from Running -> Closing,
* it has been given a chance to drain the last connection and will ensure that the
* transport is closed.
*/
private[this] def loop(): Future[Unit] = {
trans
.read()
.flatMap(dispatchAndHandleFn)
.transform(continueLoopFn)
}
private[this] val handleFn: Rep => Future[Unit] = handle(_)
// Dispatches and handles a message from the transport or closes down if necessary
private[this] val dispatchAndHandleFn: Multi[Out] => Future[Unit] = {
case Multi(req, eos) =>
if (state.compareAndSet(Idle, Running)) {
val save = Local.save()
val dispatched =
try {
Contexts.local.let(
RemoteInfo.Upstream.AddressCtx, // key 1
trans.context.remoteAddress, // value 1
Transport.sslSessionInfoCtx, // key 2
trans.context.sslSessionInfo // value 2
)(dispatch(req))
} finally Local.restore(save)
val handled = dispatched.flatMap(handleFn)
// This version of `Future.join` doesn't collect the values from the Futures, but
// since they are both Future[Unit], we know what the result is and can avoid the
// overhead of collecting two Units just to throw them away via another flatMap.
Future.join(handled :: eos :: Nil)
} else {
// must have transitioned from Idle to Closing, by someone else who is
// responsible for closing the transport
val st = state.get
if (st == Closing) Eof
else {
// Something really bad happened. Shutdown and log as loudly as possible.
trans.close()
val msg = s"Dispatch loop found in illegal state: $st"
val ex = new IllegalStateException(msg)
logger.error(ex, msg)
Future.exception(ex)
}
}
}
// Checks the state after a dispatch and continues or shuts down the transport if necessary
private[this] val continueLoopFn: Try[Unit] => Future[Unit] = { res =>
if (res.isReturn && state.compareAndSet(Running, Idle)) loop()
else {
// The loop has been canceled and we have been given the opportunity to drain so
// we need to close the transport.
// Note: We don't sequence the transport.close() Future because we don't care to wait
// for it and also don't want to clobber the result of the loop.
if (logger.isLoggable(Level.TRACE)) {
if (res.isThrow) {
logger.trace(res.throwable, s"closing $trans due to read error")
} else {
logger.trace(
s"closing $trans due to status.cas failure, state is ${state.get()}, expect Running"
)
}
}
trans.close()
Future.const(res)
}
}
// Clear all locals to start the loop; we want a clean slate.
private[this] val looping = Local.letClear { loop() }
trans.onClose.ensure {
state.set(Closing)
looping.raise(cancelled)
}
/** Exposed for testing */
protected[http] def isClosing: Boolean = state.get() == Closing
/** Exposed for testing */
private[http] def timer: Timer = DefaultTimer
// Note: this is racy, but that's inherent in draining (without
// protocol support). Presumably, half-closing a TCP connection is
// also possible.
def close(deadline: Time): Future[Unit] = {
// What to do next depends on the state of the dispatcher:
// - Idle: we can close the transport immediately.
// - Running: we need to allow time to drain. Set a timer to ensure it closes by the deadline
// - Closing: close has already been called or the transport closed: return the trans.onClose future.
state.getAndSet(Closing) match {
case Idle => trans.close(deadline)
case Running =>
trans.onClose.by(timer, deadline).onFailure { _ =>
trans.close(deadline) // The dispatcher took too long, ask the transport to close
}
case Closing => () // No action required.
}
trans.onClose.unit
}
}
|
twitter/finagle
|
finagle-base-http/src/main/scala/com/twitter/finagle/http/GenStreamingSerialServerDispatcher.scala
|
Scala
|
apache-2.0
| 6,456 |
package org.openurp.edu.eams.teach.program.major.web.action
import java.util.Date
import java.util.Locale
import org.beangle.commons.collection.Collections
import org.beangle.data.jpa.dao.OqlBuilder
import org.beangle.commons.text.seq.SeqPattern
import org.openurp.edu.eams.teach.program.Program
import org.openurp.edu.eams.teach.program.doc.ProgramDocMeta
import org.openurp.edu.eams.teach.program.doc.model.ProgramDocBean
import org.openurp.edu.eams.teach.program.doc.model.ProgramDocSectionBean
import org.openurp.edu.eams.teach.program.doc.model.ProgramDocTemplateBean
import org.openurp.edu.eams.teach.program.major.MajorPlan
import com.ekingstar.eams.web.action.common.ProjectSupportAction
//remove if not needed
class ProgramDocAction extends ProjectSupportAction {
def indexSetting() {
val majorPlanId = getLongId("majorPlan")
val plan = entityDao.get(classOf[MajorPlan], majorPlanId)
val docs = entityDao.get(classOf[ProgramDocBean], "program", plan.getProgram)
put("docs", docs)
}
def info(): String = {
val majorPlanId = getLongId("majorPlan")
val plan = entityDao.get(classOf[MajorPlan], majorPlanId)
put("plan", plan)
val program = plan.getProgram
val request_locale = getLocale
val builder = OqlBuilder.from(classOf[ProgramDocBean], "pd")
builder.where("pd.program =:program", program)
if (request_locale == null) {
builder.where("pd.locale=:locale", new Locale("zh", "CN"))
} else {
builder.where("pd.locale=:locale", request_locale)
}
var seqPattern: SeqPattern = null
seqPattern = if (request_locale == new Locale("zh", "CN")) new SeqPattern(new HanZi2SeqStyle(), "{1}") else new SeqPattern(new LuomaSeqStyle(),
"{1}")
put("seqPattern", seqPattern)
val docs = entityDao.search(builder)
var doc: ProgramDocBean = null
if (docs.size > 0) doc = docs.get(0)
put("doc", doc)
forward()
}
override def edit(): String = {
val majorPlanId = getLongId("majorPlan")
val templateId = getLongId("template")
val plan = entityDao.get(classOf[MajorPlan], majorPlanId)
put("plan", plan)
val program = plan.getProgram
val builder2 = OqlBuilder.from(classOf[ProgramDocTemplateBean], "pdt")
builder2.where("pdt.project=:project and pdt.education=:education", program.getMajor.getProject,
plan.getProgram.getEducation)
if (null != program.getStdType) {
builder2.where(":givenType = some elements(pdt.types) or size(pdt.types)=0", program.getStdType)
}
builder2.where("pdt.effectiveAt <=:invalidAt and( pdt.invalidAt is null or :effetiveOn<=pdt.invalidAt)",
if (program.getInvalidOn == null) program.getEffectiveOn else program.getInvalidOn, program.getEffectiveOn)
var template: ProgramDocTemplateBean = null
val templates = entityDao.search(builder2)
put("templates", templates)
if (null != templateId) {
template = entityDao.get(classOf[ProgramDocTemplateBean], templateId)
} else if (!templates.isEmpty) {
template = templates.get(0)
}
if (null != template) put("template", template)
val builder = OqlBuilder.from(classOf[ProgramDocBean], "pd")
builder.where("pd.program =:program", program)
if (null != template) builder.where("pd.locale=:locale", template.getLocale)
val docs = entityDao.search(builder)
var doc: ProgramDocBean = null
if (!docs.isEmpty) {
doc = docs.get(0)
} else {
doc = new ProgramDocBean()
if (template != null) {
val sections = Collections.newBuffer[Any]
for (meta <- template.getMetas) {
val section = new ProgramDocSectionBean()
section.setName(meta.getName)
section.setCode(String.valueOf(meta.getIndexno))
section.setDoc(doc)
sections.add(section)
}
doc.setSections(sections)
}
}
put("doc", doc)
forward()
}
def save(): String = {
val majorPlanId = getLongId("majorPlan")
val plan = entityDao.get(classOf[MajorPlan], majorPlanId)
val doc = populateEntity(classOf[ProgramDocBean], "programDoc")
val template = getEntity(classOf[ProgramDocTemplateBean], "template")
if (doc.isTransient) {
doc.setProgram(plan.getProgram)
doc.setLocale(template.getLocale)
doc.setCreatedAt(new Date())
doc.setUpdatedAt(new Date())
}
val sections = Collections.newMap[Any]
for (section <- doc.getSections) {
sections.put(section.getName, section)
}
var i = 1
for (meta <- template.getMetas) {
var section = sections.get(meta.getName)
if (null == section) {
section = new ProgramDocSectionBean()
section.setDoc(doc)
section.setName(meta.getName)
doc.getSections.add(section)
}
section.setCode(String.valueOf(i))
i += 1
val content = get("content" + meta.getIndexno)
section.setContent(content)
}
entityDao.save(doc)
redirect("edit", "info.save.success", "&majorPlan.id=" + majorPlanId)
}
}
|
openurp/edu-eams-webapp
|
plan/src/main/scala/org/openurp/edu/eams/teach/program/major/web/action/ProgramDocAction.scala
|
Scala
|
gpl-3.0
| 5,032 |
package controllers.cadmin
import play.api._
import play.api.mvc._
import play.api.data._
import play.api.data.Forms._
import format.Formats._
import com.alibaba.fastjson.serializer._
import com.alibaba.fastjson.{JSON => FJSON}
import views._
import loom.models._
import loom.models.admin.{Role, APermission}
import controllers._
/**
*
* @author chaosky
*/
object Roles extends Controller with Secured with BaseController {
private val permissionFilter = new PropertyFilter() {
override def apply(source: Any, name: String, value: Any): Boolean = {
Logger.debug("class %s name %s".format(source.getClass, name))
if (name.indexOf("$") >= 0) false else true
}
}
def list() = AdminAction(APermission.Admin, APermission.Role_Read) { implicit request =>
val (pageNo, pageSize) = page(request)
val list = Role.list(PageRequest(pageNo, pageSize))
val format = request.getQueryString("format").getOrElse("html")
render {
case Accepts.Html() if (format == "json") => Ok(FJSON.toJSONString(list, permissionFilter, SerializerFeature.PrettyFormat, SerializerFeature.DisableCircularReferenceDetect)).as(JSON)
case Accepts.Json() => Ok(FJSON.toJSONString(list, permissionFilter)).as(JSON)
case Accepts.Html() => Ok(html.admin.roles.list(list))
}
}
val roleForm = Form(tuple(
"name" -> nonEmptyText(minLength = Role.nameMinLen, maxLength = Role.nameMaxLen),
"permissions" -> Forms.list(text)
))
def createForm() = AdminAction(APermission.Admin, APermission.Role_Create) { implicit request =>
Ok(html.admin.roles.createForm(roleForm))
}
def create() = AdminAction(APermission.Admin, APermission.Role_Create) { implicit request =>
roleForm.bindFromRequest().fold(
formWithError => {
BadRequest(html.admin.roles.createForm(formWithError))
},
rolePost => {
val psList = permissions(rolePost._2)
val (ret, role, i18nMsg) = Role.create(rolePost._1, psList)
if (!ret) {
val nForm = roleForm.withGlobalError(i18nMsg)
BadRequest(html.admin.roles.createForm(nForm))
} else {
Redirect(routes.Roles.list()).flashing(
"success" -> "common.create.success"
)
}
}
)
}
private def permissions(permissionsStr: List[String])(implicit request: AdminRequest) = {
val pslist =
if (request.aSession.hasPermissions(APermission.Admin, APermission.Permission_Read)) {
val permissions = permissionsStr.map { p =>
try {
APermission.withName(p).asInstanceOf[APermission.PVal]
} catch {
case e: Exception =>
Logger.error(e.getMessage())
null
}
}
permissions.filter(p => p != null || p != APermission.Admin)
} else Nil
pslist
}
def editForm(id: Long) = AdminAction(APermission.Admin, APermission.Role_Update) { implicit request =>
val role = Role.findOne(id)
role match {
case Some(r) => Ok(html.admin.roles.editForm(roleForm, r))
case None => _404
}
}
def edit(id: Long) = AdminAction(APermission.Admin, APermission.Role_Update) { implicit request =>
val role = Role.findOne(id)
role match {
case None => NotFound("not found")
case Some(r) =>
val nForm = roleForm.bindFromRequest()
nForm.fold(
formWithError => {
BadRequest(html.admin.roles.editForm(formWithError, r))
},
rolePost => {
val psList = permissions(rolePost._2)
Role.updateRole(r, rolePost._1, psList)
Redirect(routes.Roles.list().url, Map("p" -> Seq(nForm.data.get("p").getOrElse("1")))).flashing(
"success" -> "common.create.success"
)
}
)
}
}
val toggleForm = Form(single("id" -> of[Long]))
def togglestatus() = AdminAction(APermission.Admin, APermission.Role_Disable) { implicit reqeust =>
toggleForm.bindFromRequest().fold(
formWithError => {
Json(Model.Error, "m.role.error.id")
},
form => {
val id = form
val (ret, event, i18nMsg) = Role.toggleStatus(id)
if (ret) Json(Model.Success, i18nMsg, "event" -> event)
else Json(Model.Error, i18nMsg)
}
)
}
}
|
chaosky/loom
|
app/controllers/cadmin/Roles.scala
|
Scala
|
mit
| 4,323 |
package net.hangyas.moss
import java.io.FileOutputStream
import scala.collection.mutable
/**
* Created by hangyas on 15-5-23
*/
object LexicalParser {
var char: Char = ' ';
var index = -1;
var str: String = null;
def parse(code: String): List[Token] = {
val r = new mutable.MutableList[Token]();
str = code;
next();
while (index < str.length){
r += nextToken();
}
return r.toList;
}
private def next(): Unit = {
index += 1;
if (index >= str.length){
char = 0;
return;
}
char = str.charAt(index);
}
private def nextToken(): Token = {
while (char.isWhitespace && char != 0)
next();
//identifier
if (char.isLetter || char == '_'){
var identifier: String = "";
while (char.isLetterOrDigit || char == '_'){
identifier += char;
next();
}
return identifier match {
case "func" => TokenFunc();
case "if" => TokenIf();
case "while" => TokenWhile();
case "let" => TokenLet();
case "end" => TokenEnd();
case "return" => TokenReturn();
case _ => TokenIdentifier(identifier);
}
}
//digit
if (char.isDigit){
var number = 0;
while (char.isDigit){
number = number * 10 + (char - '0');
next();
}
return TokenNumber(number);
}
val r = char;
next();
return TokenOther(r);
}
}
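// Illustrative sketch (not part of the original source): tokenizing a small snippet.
//   LexicalParser.parse("let x = 42")
//   // => List(TokenLet(), TokenIdentifier("x"), TokenOther('='), TokenNumber(42))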
|
hangyas/moss
|
mossc/src/net/hangyas/moss/LexicalParser.scala
|
Scala
|
mit
| 1,441 |
package io.citrine.lolo.validation
import io.citrine.lolo.TestUtils
import io.citrine.lolo.learners.RandomForest
import io.citrine.lolo.stats.functions.Friedman
import org.junit.Test
import scala.util.Random
class CrossValidationTest {
val rng = new Random(92486L)
/**
* Test that CV results are consistent with out-of-bag estimates from the bagged learner
*
* Who cross-validates the cross-validator?
*/
@Test
def testCompareToOutOfBag(): Unit = {
val learner = RandomForest()
val data = TestUtils.generateTrainingData(128, 8, Friedman.friedmanSilverman, seed = rng.nextLong())
val metric = RootMeanSquareError
val (rmseFromCV, uncertainty) =
CrossValidation.kFoldCrossvalidation(data, learner, Map("rmse" -> metric), k = 3)("rmse")
val trainingResult = learner.train(data)
val rmseFromPVA = Math.sqrt(
trainingResult
.getPredictedVsActual()
.get
.map {
case (_, p: Double, a: Double) => Math.pow(p - a, 2.0)
}
.sum / trainingResult.getPredictedVsActual().get.size
)
// These have a false negative rate less than 1/100 at the time of original authorship
assert(rmseFromPVA < rmseFromCV + uncertainty)
assert((rmseFromPVA - rmseFromCV) / rmseFromPVA < 0.2)
}
}
|
CitrineInformatics/lolo
|
src/test/scala/io/citrine/lolo/validation/CrossValidationTest.scala
|
Scala
|
apache-2.0
| 1,296 |
package io.prediction.controller
import org.scalatest.FunSuite
import org.scalatest.Inside
import org.scalatest.Matchers._
import org.scalatest.Inspectors._
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import io.prediction.core._
import io.prediction.workflow.SharedSparkContext
import io.prediction.workflow.PersistentModelManifest
import io.prediction.workflow.StopAfterReadInterruption
import io.prediction.workflow.StopAfterPrepareInterruption
import grizzled.slf4j.{ Logger, Logging }
import _root_.java.lang.Thread
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Suite
import scala.util.Random
class FastEngineDevSuite
extends FunSuite with Inside with SharedSparkContext {
import io.prediction.controller.Engine0._
test("Single Evaluation") {
val engine = new FastEvalEngine(
Map("" -> classOf[PDataSource2]),
Map("" -> classOf[PPreparator1]),
Map(
"PAlgo2" -> classOf[PAlgo2],
"PAlgo3" -> classOf[PAlgo3]
),
Map("" -> classOf[LServing1]))
val qn = 10
val en = 3
val engineParams = EngineParams(
dataSourceParams = PDataSource2.Params(id = 0, en = en, qn = qn),
preparatorParams = PPreparator1.Params(1),
algorithmParamsList = Seq(
("PAlgo2", PAlgo2.Params(20)),
("PAlgo2", PAlgo2.Params(21)),
("PAlgo3", PAlgo3.Params(22))
),
servingParams = LServing1.Params(3))
val algoCount = engineParams.algorithmParamsList.size
val pd = ProcessedData(1, TrainingData(0))
val model0 = PAlgo2.Model(20, pd)
val model1 = PAlgo2.Model(21, pd)
val model2 = PAlgo3.Model(22, pd)
val evalDataSet = engine.eval(sc, engineParams, WorkflowParams())
evalDataSet should have size en
forAll(evalDataSet.zipWithIndex) { case (evalData, ex) => {
val (evalInfo, qpaRDD) = evalData
evalInfo shouldBe EvalInfo(0)
val qpaSeq: Seq[(Query, Prediction, Actual)] = qpaRDD.collect
qpaSeq should have size qn
forAll (qpaSeq) { case (q, p, a) =>
val Query(qId, qEx, qQx) = q
val Actual(aId, aEx, aQx) = a
qId shouldBe aId
qEx shouldBe ex
aEx shouldBe ex
qQx shouldBe aQx
inside (p) { case Prediction(pId, pQ, pModels, pPs) => {
pId shouldBe 3
pQ shouldBe q
pModels shouldBe None
pPs should have size algoCount
pPs shouldBe Seq(
Prediction(id = 20, q = q, models = Some(model0)),
Prediction(id = 21, q = q, models = Some(model1)),
Prediction(id = 22, q = q, models = Some(model2))
)
}}
}
}}
}
test("Batch Evaluation") {
val engine = new FastEvalEngine(
Map("" -> classOf[PDataSource2]),
Map("" -> classOf[PPreparator1]),
Map("" -> classOf[PAlgo2]),
Map("" -> classOf[LServing1]))
val qn = 10
val en = 3
val baseEngineParams = EngineParams(
dataSourceParams = PDataSource2.Params(id = 0, en = en, qn = qn),
preparatorParams = PPreparator1.Params(1),
algorithmParamsList = Seq(("", PAlgo2.Params(2))),
servingParams = LServing1.Params(3))
val ep0 = baseEngineParams
val ep1 = baseEngineParams.copy(
algorithmParamsList = Seq(("", PAlgo2.Params(2))))
val ep2 = baseEngineParams.copy(
algorithmParamsList = Seq(("", PAlgo2.Params(20))))
val engineEvalDataSet = engine.batchEval(
sc,
Seq(ep0, ep1, ep2),
WorkflowParams())
val evalDataSet0 = engineEvalDataSet(0)._2
val evalDataSet1 = engineEvalDataSet(1)._2
val evalDataSet2 = engineEvalDataSet(2)._2
evalDataSet0 shouldBe evalDataSet1
evalDataSet0 should not be evalDataSet2
evalDataSet1 should not be evalDataSet2
// evalDataSet0._1 should be theSameInstanceAs evalDataSet1._1
// When things are cached correctly, evalDataSet0 and 1 should share the
// same EI
evalDataSet0.zip(evalDataSet1).foreach { case (e0, e1) => {
e0._1 should be theSameInstanceAs e1._1
e0._2 should be theSameInstanceAs e1._2
}}
// So as set1 and set2, however, the QPA-RDD should be different.
evalDataSet1.zip(evalDataSet2).foreach { case (e1, e2) => {
e1._1 should be theSameInstanceAs e2._1
val e1Qpa = e1._2
val e2Qpa = e2._2
e1Qpa should not be theSameInstanceAs (e2Qpa)
}}
}
test("Not cached when isEqual not implemented") {
// PDataSource3.Params is a class not case class. Need to implement the
// isEqual function for hashing.
val engine = new FastEvalEngine(
Map("" -> classOf[PDataSource4]),
Map("" -> classOf[PPreparator1]),
Map("" -> classOf[PAlgo2]),
Map("" -> classOf[LServing1]))
val qn = 10
val en = 3
val baseEngineParams = EngineParams(
dataSourceParams = new PDataSource4.Params(id = 0, en = en, qn = qn),
preparatorParams = PPreparator1.Params(1),
algorithmParamsList = Seq(("", PAlgo2.Params(2))),
servingParams = LServing1.Params(3))
val ep0 = baseEngineParams
val ep1 = baseEngineParams.copy(
algorithmParamsList = Seq(("", PAlgo2.Params(3))))
// ep2.dataSource is different from ep0.
val ep2 = baseEngineParams.copy(
dataSourceParams = ("", new PDataSource4.Params(id = 0, en = en, qn = qn)),
algorithmParamsList = Seq(("", PAlgo2.Params(3))))
val engineEvalDataSet = engine.batchEval(
sc,
Seq(ep0, ep1, ep2),
WorkflowParams())
val evalDataSet0 = engineEvalDataSet(0)._2
val evalDataSet1 = engineEvalDataSet(1)._2
val evalDataSet2 = engineEvalDataSet(2)._2
evalDataSet0 should not be evalDataSet1
evalDataSet0 should not be evalDataSet2
evalDataSet1 should not be evalDataSet2
// Set0 should have same EI as Set1, since their dsp are the same instance.
evalDataSet0.zip(evalDataSet1).foreach { case (e0, e1) => {
e0._1 should be theSameInstanceAs (e1._1)
}}
// Set1 should have different EI as Set2, since Set2's dsp is another
// instance
evalDataSet1.zip(evalDataSet2).foreach { case (e1, e2) => {
e1._1 should not be theSameInstanceAs (e2._1)
}}
}
}
|
nvoron23/PredictionIO
|
core/src/test/scala/controller/FastEvalEngineTest.scala
|
Scala
|
apache-2.0
| 6,281 |
/*
* ____ ____ _____ ____ ___ ____
* | _ \\ | _ \\ | ____| / ___| / _/ / ___| Precog (R)
* | |_) | | |_) | | _| | | | | /| | | _ Advanced Analytics Engine for NoSQL Data
* | __/ | _ < | |___ | |___ |/ _| | | |_| | Copyright (C) 2010 - 2013 SlamData, Inc.
* |_| |_| \\_\\ |_____| \\____| /__/ \\____| All Rights Reserved.
*
* This program is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either version
* 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License along with this
* program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.precog.ingest
package kafka
import com.precog.common._
import com.precog.common.jobs._
import com.precog.common.accounts._
import com.precog.common.ingest._
import com.precog.common.security._
import com.precog.common.security.service._
import com.precog.common.client._
import com.precog.common.services.ServiceLocation
import com.precog.ingest.service._
import WebJobManager._
import blueeyes.BlueEyesServer
import blueeyes.bkka._
import blueeyes.core.data._
import blueeyes.core.service.engines.HttpClientXLightWeb
import blueeyes.util.Clock
import akka.util.Timeout
import akka.dispatch.{ ExecutionContext, Future }
import org.joda.time.Instant
import org.streum.configrity.Configuration
import scalaz._
import scalaz.NonEmptyList._
import scalaz.syntax.applicative._
import scalaz.syntax.std.option._
object KafkaEventServer extends BlueEyesServer with EventService with AkkaDefaults {
val clock = Clock.System
implicit val executionContext = defaultFutureDispatch
implicit val M: Monad[Future] = new FutureMonad(defaultFutureDispatch)
def configureEventService(config: Configuration): EventService.State = {
val accountFinder = new CachingAccountFinder(WebAccountFinder(config.detach("accounts")).map(_.withM[Future]) valueOr { errs =>
sys.error("Unable to build new WebAccountFinder: " + errs.list.mkString("\\n", "\\n", ""))
})
val apiKeyFinder = new CachingAPIKeyFinder(WebAPIKeyFinder(config.detach("security")).map(_.withM[Future]) valueOr { errs =>
sys.error("Unable to build new WebAPIKeyFinder: " + errs.list.mkString("\\n", "\\n", ""))
})
val permissionsFinder = new PermissionsFinder(apiKeyFinder, accountFinder, new Instant(config[Long]("ingest.timestamp_required_after", 1363327426906L)))
val (eventStore, stoppable) = KafkaEventStore(config.detach("eventStore"), permissionsFinder) valueOr { errs =>
sys.error("Unable to build new KafkaEventStore: " + errs.list.mkString("\\n", "\\n", ""))
}
val jobManager = WebJobManager(config.detach("jobs")) valueOr { errs =>
sys.error("Unable to build new WebJobManager: " + errs.list.mkString("\\n", "\\n", ""))
}
val serviceConfig = EventService.ServiceConfig.fromConfiguration(config) valueOr { errors =>
sys.error("Unable to obtain self-referential service locator for event service: %s".format(errors.list.mkString("; ")))
}
buildServiceState(serviceConfig, apiKeyFinder, permissionsFinder, eventStore, jobManager, stoppable)
}
}
|
precog/platform
|
ingest/src/main/scala/com/precog/ingest/kafka/KafkaEventServer.scala
|
Scala
|
agpl-3.0
| 3,572 |
/**
* Created by Variant on 16/4/7.
*/
import scala.reflect.runtime.universe._
object Scala
class Java1
class JVM{def method1:this.type = this}
class JVM_Language extends JVM {def method2 : this.type = this}
object Singleton_Type {
def main(args: Array[String]) {
println(Scala.getClass)
println(typeOf[Scala.type ])
val java = new Java1
val java2 =new Java1
println(typeOf[java.type])
println(typeOf[java2.type ])
val content :java.type = java
println(content)
val jvm = new JVM_Language
jvm.method1.method2
    // At runtime, this refers to the jvm instance, and this.type means method1 returns the JVM_Language type, which is why method2 can be chained on it.
println(jvm.method1)
}
}
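// Hedged sketch (added for illustration, not part of the original note): the contrast the
// comment above describes. Without a this.type return annotation, method1 would be typed as
// the base class and chaining a subclass-only method would not compile.
object SingletonTypeContrast {
  class Base { def m1: Base = this }                     // plain return type: loses the subtype
  class Derived extends Base { def m2: Derived = this }
  def main(args: Array[String]) {
    val d = new Derived
    // d.m1.m2                                           // would not compile: m1 is typed as Base
    println((new JVM_Language).method1.method2)          // compiles because method1 returns this.type
  }
}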
|
sparkLiwei/ProgrammingNote
|
scalaLearning/scalaInSpark/Singleton_Type.scala
|
Scala
|
cc0-1.0
| 704 |
package com.eharmony.aloha.models
/**
* Created by ryan on 1/18/17.
*/
case class Subvalue[+B, +N](audited: B, natural: Option[N]) {
def fold[A](fail: => A, success: N => A): A = natural.fold(fail)(success)
}
|
eHarmony/aloha
|
aloha-core/src/main/scala/com/eharmony/aloha/models/Subvalue.scala
|
Scala
|
mit
| 216 |
package im.actor.server.db
import com.github.tminglei.slickpg._
trait ActorPostgresDriver extends ExPostgresDriver
with PgDate2Support
with PgArraySupport
with PgLTreeSupport {
override val api = new API with ArrayImplicits with LTreeImplicits with DateTimeImplicits
}
object ActorPostgresDriver extends ActorPostgresDriver
|
yangchaogit/actor-platform
|
actor-server/actor-persist/src/main/scala/im/actor/server/db/ActorPostgresDriver.scala
|
Scala
|
mit
| 335 |
package com.twitter.hello
import com.twitter.finagle.http.Status._
import com.twitter.finatra.http.test.EmbeddedHttpServer
import com.twitter.inject.server.FeatureTest
class HelloWorldFeatureTest extends FeatureTest {
override val server = new EmbeddedHttpServer(new HelloWorldServer)
"Server" should {
"Say hi" in {
server.httpGet(
path = "/hi?name=Bob",
andExpect = Ok,
withBody = "Hello Bob")
}
}
}
|
kaushik94/finatra
|
examples/finatra-hello-world/src/test/scala/com/twitter/hello/HelloWorldFeatureTest.scala
|
Scala
|
apache-2.0
| 450 |
package com.krux.hyperion.activity
import com.typesafe.config.ConfigFactory
import org.scalatest.WordSpec
import com.krux.hyperion.common.{PipelineObjectId, S3Uri}
import com.krux.hyperion.common.S3Uri._
import com.krux.hyperion.database.RedshiftDatabase
import com.krux.hyperion.HyperionContext
import com.krux.hyperion.Implicits._
import com.krux.hyperion.resource.Ec2Resource
import com.krux.hyperion.expression.{Parameter, ParameterValues}
class RedshiftUnloadActivitySpec extends WordSpec {
"RedshiftUnloadActivity" should {
implicit val hc: HyperionContext = new HyperionContext(ConfigFactory.load("example"))
implicit val pv: ParameterValues = new ParameterValues()
val ec2 = Ec2Resource()
val awsAccessKeyId = Parameter("AwsAccessKeyId", "someId").encrypted
val awsAccessKeySecret = Parameter.encrypted("AwsAccessKeySecret", "someSecret")
val mockRedshift = RedshiftDatabase("mockuser", "mockpass", "mock-redshift")
.named("_MockRedshift")
.withDatabaseName("mock_db")
"Produce the correct unload script" in {
val testingQuery = """
|select * from t where
|id = 'myid'
|and {tim'e} = #{format(@actualRunTime, 'yyyy-MM-dd')}
|and some{OtherWeird'Forma}t = #{"{ } a'dfa {" + ' { ex"aef { }'}
|and name = 'abcdefg'
|limit 10""".stripMargin
val escapedUnloadScript = """
|UNLOAD ('
|select * from t where
        |id = \\'myid\\'
        |and {tim\\'e} = #{format(@actualRunTime, 'yyyy-MM-dd')}
        |and some{OtherWeird\\'Forma}t = #{"{ } a'dfa {" + ' { ex"aef { }'}
        |and name = \\'abcdefg\\'
|limit 10')
|TO 's3://not-important/'
|WITH CREDENTIALS AS
|'aws_access_key_id=#{*my_AwsAccessKeyId};aws_secret_access_key=#{*my_AwsAccessKeySecret}'
""".stripMargin
val act = RedshiftUnloadActivity(
mockRedshift, testingQuery, s3 / "not-important/", awsAccessKeyId, awsAccessKeySecret
)(ec2)
assert(act.unloadScript.trim === escapedUnloadScript.trim)
}
}
}
|
hoangelos/hyperion
|
contrib/activity/definition/src/test/scala/com/krux/hyperion/activity/RedshiftUnloadActivitySpec.scala
|
Scala
|
apache-2.0
| 2,072 |
object SCL8242 {
def foo(x: Float) = {
val t: Double = 56
if (true) x + t
else /*start*/x/*end*/
}
}
//Double
|
whorbowicz/intellij-scala
|
testdata/typeInference/bugs5/SCL8242.scala
|
Scala
|
apache-2.0
| 126 |
/**
* Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
*/
package akka.util
import java.util.concurrent.TimeUnit
import TimeUnit._
import java.lang.{ Long => JLong, Double => JDouble }
object Duration {
def apply(length: Long, unit: TimeUnit): Duration = new FiniteDuration(length, unit)
def apply(length: Double, unit: TimeUnit): Duration = fromNanos(unit.toNanos(1) * length)
def apply(length: Long, unit: String): Duration = new FiniteDuration(length, timeUnit(unit))
def fromNanos(nanos: Long): Duration = {
if (nanos % 86400000000000L == 0) {
Duration(nanos / 86400000000000L, DAYS)
} else if (nanos % 3600000000000L == 0) {
Duration(nanos / 3600000000000L, HOURS)
} else if (nanos % 60000000000L == 0) {
Duration(nanos / 60000000000L, MINUTES)
} else if (nanos % 1000000000L == 0) {
Duration(nanos / 1000000000L, SECONDS)
} else if (nanos % 1000000L == 0) {
Duration(nanos / 1000000L, MILLISECONDS)
} else if (nanos % 1000L == 0) {
Duration(nanos / 1000L, MICROSECONDS)
} else {
Duration(nanos, NANOSECONDS)
}
}
def fromNanos(nanos: Double): Duration = fromNanos((nanos + 0.5).asInstanceOf[Long])
/**
* Construct a Duration by parsing a String. In case of a format error, a
* RuntimeException is thrown. See `unapply(String)` for more information.
*/
def apply(s: String): Duration = unapply(s) getOrElse sys.error("format error")
/**
* Deconstruct a Duration into length and unit if it is finite.
*/
def unapply(d: Duration): Option[(Long, TimeUnit)] = {
if (d.finite_?) {
Some((d.length, d.unit))
} else {
None
}
}
private val RE = ("""^\\s*(\\d+(?:\\.\\d+)?)\\s*""" + // length part
"(?:" + // units are distinguished in separate match groups
"(d|day|days)|" +
"(h|hour|hours)|" +
"(min|minute|minutes)|" +
"(s|sec|second|seconds)|" +
"(ms|milli|millis|millisecond|milliseconds)|" +
"(µs|micro|micros|microsecond|microseconds)|" +
"(ns|nano|nanos|nanosecond|nanoseconds)" +
""")\\s*$""").r // close the non-capturing group
private val REinf = """^\\s*Inf\\s*$""".r
private val REminf = """^\\s*(?:-\\s*|Minus)Inf\\s*""".r
/**
* Parse String, return None if no match. Format is `"<length><unit>"`, where
* whitespace is allowed before, between and after the parts. Infinities are
* designated by `"Inf"` and `"-Inf"` or `"MinusInf"`.
*/
def unapply(s: String): Option[Duration] = s match {
case RE(length, d, h, m, s, ms, mus, ns) =>
      if (d ne null) Some(Duration(JDouble.parseDouble(length), DAYS))
      else if (h ne null) Some(Duration(JDouble.parseDouble(length), HOURS))
      else if (m ne null) Some(Duration(JDouble.parseDouble(length), MINUTES))
      else if (s ne null) Some(Duration(JDouble.parseDouble(length), SECONDS))
      else if (ms ne null) Some(Duration(JDouble.parseDouble(length), MILLISECONDS))
      else if (mus ne null) Some(Duration(JDouble.parseDouble(length), MICROSECONDS))
      else if (ns ne null) Some(Duration(JDouble.parseDouble(length), NANOSECONDS))
      else sys.error("made some error in regex (should not be possible)")
case REinf() => Some(Inf)
case REminf() => Some(MinusInf)
case _ => None
}
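  // Hedged usage sketch (added for illustration; not part of the original akka source).
  // Shows the string formats accepted by unapply(String) above; nested here purely as an example.
  private[util] object StringParsingExamples {
    def demo(): Unit = {
      println(unapply("10 seconds")) // Some(10 seconds)
      println(unapply("1.5 h"))      // Some(90 minutes), normalized by fromNanos
      println(unapply("Inf"))        // Some(Duration.Inf)
      println(unapply("ten days"))   // None: the length part must be numeric
    }
  }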
/**
* Parse TimeUnit from string representation.
*/
def timeUnit(unit: String) = unit.toLowerCase match {
case "d" | "day" | "days" => DAYS
case "h" | "hour" | "hours" => HOURS
case "min" | "minute" | "minutes" => MINUTES
case "s" | "sec" | "second" | "seconds" => SECONDS
case "ms" | "milli" | "millis" | "millisecond" | "milliseconds" => MILLISECONDS
case "µs" | "micro" | "micros" | "microsecond" | "microseconds" => MICROSECONDS
case "ns" | "nano" | "nanos" | "nanosecond" | "nanoseconds" => NANOSECONDS
}
val Zero: Duration = new FiniteDuration(0, NANOSECONDS)
trait Infinite {
this: Duration =>
override def equals(other: Any) = false
def +(other: Duration): Duration =
other match {
case _: this.type => this
case _: Infinite => throw new IllegalArgumentException("illegal addition of infinities")
case _ => this
}
def -(other: Duration): Duration =
other match {
case _: this.type => throw new IllegalArgumentException("illegal subtraction of infinities")
case _ => this
}
def *(factor: Double): Duration = this
def /(factor: Double): Duration = this
def /(other: Duration): Double =
other match {
case _: Infinite => throw new IllegalArgumentException("illegal division of infinities")
// maybe questionable but pragmatic: Inf / 0 => Inf
case x => Double.PositiveInfinity * (if ((this > Zero) ^ (other >= Zero)) -1 else 1)
}
def finite_? = false
def length: Long = throw new IllegalArgumentException("length not allowed on infinite Durations")
def unit: TimeUnit = throw new IllegalArgumentException("unit not allowed on infinite Durations")
def toNanos: Long = throw new IllegalArgumentException("toNanos not allowed on infinite Durations")
def toMicros: Long = throw new IllegalArgumentException("toMicros not allowed on infinite Durations")
def toMillis: Long = throw new IllegalArgumentException("toMillis not allowed on infinite Durations")
def toSeconds: Long = throw new IllegalArgumentException("toSeconds not allowed on infinite Durations")
def toMinutes: Long = throw new IllegalArgumentException("toMinutes not allowed on infinite Durations")
def toHours: Long = throw new IllegalArgumentException("toHours not allowed on infinite Durations")
def toDays: Long = throw new IllegalArgumentException("toDays not allowed on infinite Durations")
def toUnit(unit: TimeUnit): Double = throw new IllegalArgumentException("toUnit not allowed on infinite Durations")
def printHMS = toString
}
/**
* Infinite duration: greater than any other and not equal to any other,
* including itself.
*/
val Inf: Duration = new Duration with Infinite {
override def toString = "Duration.Inf"
def >(other: Duration) = true
def >=(other: Duration) = true
def <(other: Duration) = false
def <=(other: Duration) = false
def unary_- : Duration = MinusInf
}
/**
* Infinite negative duration: lesser than any other and not equal to any other,
* including itself.
*/
val MinusInf: Duration = new Duration with Infinite {
override def toString = "Duration.MinusInf"
def >(other: Duration) = false
def >=(other: Duration) = false
def <(other: Duration) = true
def <=(other: Duration) = true
def unary_- : Duration = Inf
}
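  // Hedged sketch (added for illustration; not part of the original source): the Infinite
  // semantics documented above. Inf compares greater than any finite duration but is equal
  // to nothing, itself included, because Infinite overrides equals to always return false.
  private[util] object InfiniteExamples {
    def demo(): Unit = {
      val finite = Duration(5, SECONDS)
      println(Inf > finite)      // true
      println(MinusInf < finite) // true
      println(Inf == Inf)        // false by design
      println(Inf + finite)      // Duration.Inf: a finite addend is absorbed
    }
  }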
// Java Factories
def create(length: Long, unit: TimeUnit): Duration = apply(length, unit)
def create(length: Double, unit: TimeUnit): Duration = apply(length, unit)
def create(length: Long, unit: String): Duration = apply(length, unit)
def parse(s: String): Duration = unapply(s).get
}
/**
* Utility for working with java.util.concurrent.TimeUnit durations.
*
* <p/>
* Examples of usage from Java:
* <pre>
* import akka.util.FiniteDuration;
* import java.util.concurrent.TimeUnit;
*
* Duration duration = new FiniteDuration(100, MILLISECONDS);
* Duration duration = new FiniteDuration(5, "seconds");
*
* duration.toNanos();
* </pre>
*
* <p/>
* Examples of usage from Scala:
* <pre>
* import akka.util.Duration
* import java.util.concurrent.TimeUnit
*
* val duration = Duration(100, MILLISECONDS)
* val duration = Duration(100, "millis")
*
* duration.toNanos
* duration < 1.second
* duration <= Duration.Inf
* </pre>
*
* <p/>
* Implicits are also provided for Int, Long and Double. Example usage:
* <pre>
* import akka.util.duration._
*
* val duration = 100 millis
* </pre>
*
* Extractors, parsing and arithmetic are also included:
* <pre>
* val d = Duration("1.2 µs")
* val Duration(length, unit) = 5 millis
* val d2 = d * 2.5
* val d3 = d2 + 1.millisecond
* </pre>
*/
abstract class Duration {
def length: Long
def unit: TimeUnit
def toNanos: Long
def toMicros: Long
def toMillis: Long
def toSeconds: Long
def toMinutes: Long
def toHours: Long
def toDays: Long
def toUnit(unit: TimeUnit): Double
def printHMS: String
def <(other: Duration): Boolean
def <=(other: Duration): Boolean
def >(other: Duration): Boolean
def >=(other: Duration): Boolean
def +(other: Duration): Duration
def -(other: Duration): Duration
def *(factor: Double): Duration
def /(factor: Double): Duration
def /(other: Duration): Double
def unary_- : Duration
def finite_? : Boolean
// Java API
def lt(other: Duration) = this < other
def lteq(other: Duration) = this <= other
def gt(other: Duration) = this > other
def gteq(other: Duration) = this >= other
def plus(other: Duration) = this + other
def minus(other: Duration) = this - other
def mul(factor: Double) = this * factor
def div(factor: Double) = this / factor
def div(other: Duration) = this / other
def neg() = -this
def isFinite() = finite_?
}
class FiniteDuration(val length: Long, val unit: TimeUnit) extends Duration {
import Duration._
def this(length: Long, unit: String) = this(length, Duration.timeUnit(unit))
def toNanos = unit.toNanos(length)
def toMicros = unit.toMicros(length)
def toMillis = unit.toMillis(length)
def toSeconds = unit.toSeconds(length)
def toMinutes = unit.toMinutes(length)
def toHours = unit.toHours(length)
def toDays = unit.toDays(length)
def toUnit(u: TimeUnit) = long2double(toNanos) / NANOSECONDS.convert(1, u)
override def toString = this match {
case Duration(1, DAYS) => "1 day"
case Duration(x, DAYS) => x + " days"
case Duration(1, HOURS) => "1 hour"
case Duration(x, HOURS) => x + " hours"
case Duration(1, MINUTES) => "1 minute"
case Duration(x, MINUTES) => x + " minutes"
case Duration(1, SECONDS) => "1 second"
case Duration(x, SECONDS) => x + " seconds"
case Duration(1, MILLISECONDS) => "1 millisecond"
case Duration(x, MILLISECONDS) => x + " milliseconds"
case Duration(1, MICROSECONDS) => "1 microsecond"
case Duration(x, MICROSECONDS) => x + " microseconds"
case Duration(1, NANOSECONDS) => "1 nanosecond"
case Duration(x, NANOSECONDS) => x + " nanoseconds"
}
def printHMS = "%02d:%02d:%06.3f".format(toHours, toMinutes % 60, toMillis / 1000. % 60)
def <(other: Duration) = {
if (other.finite_?) {
toNanos < other.asInstanceOf[FiniteDuration].toNanos
} else {
other > this
}
}
def <=(other: Duration) = {
if (other.finite_?) {
toNanos <= other.asInstanceOf[FiniteDuration].toNanos
} else {
other >= this
}
}
def >(other: Duration) = {
if (other.finite_?) {
toNanos > other.asInstanceOf[FiniteDuration].toNanos
} else {
other < this
}
}
def >=(other: Duration) = {
if (other.finite_?) {
toNanos >= other.asInstanceOf[FiniteDuration].toNanos
} else {
other <= this
}
}
def +(other: Duration) = {
if (!other.finite_?) {
other
} else {
val nanos = toNanos + other.asInstanceOf[FiniteDuration].toNanos
fromNanos(nanos)
}
}
def -(other: Duration) = {
if (!other.finite_?) {
other
} else {
val nanos = toNanos - other.asInstanceOf[FiniteDuration].toNanos
fromNanos(nanos)
}
}
def *(factor: Double) = fromNanos(long2double(toNanos) * factor)
def /(factor: Double) = fromNanos(long2double(toNanos) / factor)
def /(other: Duration) = if (other.finite_?) long2double(toNanos) / other.toNanos else 0
def unary_- = Duration(-length, unit)
def finite_? = true
override def equals(other: Any) =
other.isInstanceOf[FiniteDuration] &&
toNanos == other.asInstanceOf[FiniteDuration].toNanos
override def hashCode = toNanos.asInstanceOf[Int]
}
class DurationInt(n: Int) {
def nanoseconds = Duration(n, NANOSECONDS)
def nanos = Duration(n, NANOSECONDS)
def nanosecond = Duration(n, NANOSECONDS)
def nano = Duration(n, NANOSECONDS)
def microseconds = Duration(n, MICROSECONDS)
def micros = Duration(n, MICROSECONDS)
def microsecond = Duration(n, MICROSECONDS)
def micro = Duration(n, MICROSECONDS)
def milliseconds = Duration(n, MILLISECONDS)
def millis = Duration(n, MILLISECONDS)
def millisecond = Duration(n, MILLISECONDS)
def milli = Duration(n, MILLISECONDS)
def seconds = Duration(n, SECONDS)
def second = Duration(n, SECONDS)
def minutes = Duration(n, MINUTES)
def minute = Duration(n, MINUTES)
def hours = Duration(n, HOURS)
def hour = Duration(n, HOURS)
def days = Duration(n, DAYS)
def day = Duration(n, DAYS)
}
class DurationLong(n: Long) {
def nanoseconds = Duration(n, NANOSECONDS)
def nanos = Duration(n, NANOSECONDS)
def nanosecond = Duration(n, NANOSECONDS)
def nano = Duration(n, NANOSECONDS)
def microseconds = Duration(n, MICROSECONDS)
def micros = Duration(n, MICROSECONDS)
def microsecond = Duration(n, MICROSECONDS)
def micro = Duration(n, MICROSECONDS)
def milliseconds = Duration(n, MILLISECONDS)
def millis = Duration(n, MILLISECONDS)
def millisecond = Duration(n, MILLISECONDS)
def milli = Duration(n, MILLISECONDS)
def seconds = Duration(n, SECONDS)
def second = Duration(n, SECONDS)
def minutes = Duration(n, MINUTES)
def minute = Duration(n, MINUTES)
def hours = Duration(n, HOURS)
def hour = Duration(n, HOURS)
def days = Duration(n, DAYS)
def day = Duration(n, DAYS)
}
class DurationDouble(d: Double) {
def nanoseconds = Duration(d, NANOSECONDS)
def nanos = Duration(d, NANOSECONDS)
def nanosecond = Duration(d, NANOSECONDS)
def nano = Duration(d, NANOSECONDS)
def microseconds = Duration(d, MICROSECONDS)
def micros = Duration(d, MICROSECONDS)
def microsecond = Duration(d, MICROSECONDS)
def micro = Duration(d, MICROSECONDS)
def milliseconds = Duration(d, MILLISECONDS)
def millis = Duration(d, MILLISECONDS)
def millisecond = Duration(d, MILLISECONDS)
def milli = Duration(d, MILLISECONDS)
def seconds = Duration(d, SECONDS)
def second = Duration(d, SECONDS)
def minutes = Duration(d, MINUTES)
def minute = Duration(d, MINUTES)
def hours = Duration(d, HOURS)
def hour = Duration(d, HOURS)
def days = Duration(d, DAYS)
def day = Duration(d, DAYS)
}
|
felixmulder/scala
|
test/disabled/presentation/akka/src/akka/util/Duration.scala
|
Scala
|
bsd-3-clause
| 14,619 |
package planstack.anml.model.concrete
import java.util
import planstack.anml.model.abs.AbstractDecomposition
import planstack.anml.model.concrete.statements.{BindingConstraint, Statement}
import planstack.anml.model.{AnmlProblem, Context}
import scala.collection.JavaConversions._
class Decomposition(
val context:Context,
val container:Action)
extends StateModifier with TemporalInterval {
val statements = new util.LinkedList[Statement]()
val bindingConstraints = new util.LinkedList[BindingConstraint]()
val temporalConstraints = new util.LinkedList[TemporalConstraint]()
val actions = new util.LinkedList[Action]()
val actionConditions = new util.LinkedList[ActionCondition]()
assert(context.interval == null)
context.setInterval(this)
def vars = seqAsJavaList(context.varsToCreate)
}
object Decomposition {
def apply(pb:AnmlProblem, parent:Action, dec:AbstractDecomposition) : Decomposition = {
val context = dec.context.buildContext(pb, Some(parent.context))
val decomposition = new Decomposition(context, parent)
decomposition.addAll(dec.statements, context, pb)
decomposition
}
}
|
planstack/anml
|
src/main/scala/planstack/anml/model/concrete/Decomposition.scala
|
Scala
|
apache-2.0
| 1,149 |
/*                     __                                               *\
**     ________ ___   / /  ___     Scala API                            **
**    / __/ __// _ | / /  / _ |    (c) 2003-2010, LAMP/EPFL             **
**  __\ \/ /__/ __ |/ /__/ __ |    http://scala-lang.org/               **
** /____/\___/_/ |_/____/_/ | |                                         **
**                          |/                                          **
\*                                                                      */
package scala.collection
package mutable
import generic._
/** A class for polymorphic arrays of elements that's represented
* internally by an array of objects. This means that elements of
* primitive types are boxed.
*
* @author Martin Odersky
* @version 2.8
* @since 2.8
*
* @tparam A type of the elements contained in this array sequence.
* @param length the length of the underlying array.
*
* @define Coll ArraySeq
* @define coll array sequence
* @define thatinfo the class of the returned collection. In the standard library configuration,
* `That` is always `ArraySeq[B]` because an implicit of type `CanBuildFrom[ArraySeq, B, ArraySeq[B]]`
* is defined in object `ArraySeq`.
* @define $bfinfo an implicit value of class `CanBuildFrom` which determines the
* result class `That` from the current representation type `Repr`
* and the new element type `B`. This is usually the `canBuildFrom` value
* defined in object `ArraySeq`.
* @define orderDependent
* @define orderDependentFold
* @define mayNotTerminateInf
* @define willNotTerminateInf
*/
class ArraySeq[A](override val length: Int)
extends IndexedSeq[A]
with GenericTraversableTemplate[A, ArraySeq]
with IndexedSeqOptimized[A, ArraySeq[A]] {
override def companion: GenericCompanion[ArraySeq] = ArraySeq
val array: Array[AnyRef] = new Array[AnyRef](length)
def apply(idx: Int): A = {
if (idx >= length) throw new IndexOutOfBoundsException(idx.toString)
array(idx).asInstanceOf[A]
}
def update(idx: Int, elem: A) {
if (idx >= length) throw new IndexOutOfBoundsException(idx.toString)
array(idx) = elem.asInstanceOf[AnyRef]
}
override def foreach[U](f: A => U) {
var i = 0
while (i < length) {
f(array(i).asInstanceOf[A])
i += 1
}
}
/** Fills the given array `xs` with at most `len` elements of
* this traversable starting at position `start`.
* Copying will stop once either the end of the current traversable is reached or
* `len` elements have been copied or the end of the array is reached.
*
* @param xs the array to fill.
* @param start starting index.
* @param len number of elements to copy
*/
override def copyToArray[B >: A](xs: Array[B], start: Int, len: Int) {
val len1 = len min (xs.length - start) min length
Array.copy(array, 0, xs, start, len1)
}
}
/** $factoryInfo
* @define coll array sequence
* @define Coll ArraySeq
*/
object ArraySeq extends SeqFactory[ArraySeq] {
/** $genericCanBuildFromInfo */
implicit def canBuildFrom[A]: CanBuildFrom[Coll, A, ArraySeq[A]] = new GenericCanBuildFrom[A]
def newBuilder[A]: Builder[A, ArraySeq[A]] =
new ArrayBuffer[A] mapResult { buf =>
val result = new ArraySeq[A](buf.length)
buf.copyToArray(result.array.asInstanceOf[Array[Any]], 0)
result
}
}
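// Hedged usage sketch (added for illustration; not part of the original library source).
// Elements live in an Array[AnyRef], so primitives such as Int are boxed by update and
// unboxed by apply, as the class comment above describes.
object ArraySeqBoxingExample {
  def main(args: Array[String]) {
    val xs = ArraySeq(1, 2, 3)    // built through newBuilder; backed by an Array[AnyRef]
    xs(0) = 42                    // update boxes the Int before storing it
    println(xs(0) + xs(1))        // apply unboxes on the way out: prints 44
    println(xs.mkString(", "))    // 42, 2, 3
  }
}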
|
cran/rkafkajars
|
java/scala/collection/mutable/ArraySeq.scala
|
Scala
|
apache-2.0
| 3,428 |
package wallet
//import wallet.UserController
import org.springframework.boot.SpringApplication
object Application{
def main(args: Array[String]){
SpringApplication.run(classOf[UserController])
}
}
|
shreedhar22/Restful-Digital-Wallet
|
src/main/scala/wallet/Application.scala
|
Scala
|
mit
| 207 |
package codacy.plugins.test.multiple
import java.nio.file.Paths
import scala.xml.Elem
import com.codacy.analysis.core.model.{FileError, Issue, LineLocation, Parameter, Pattern, ToolResult}
import com.codacy.plugins.api
import com.codacy.plugins.api.results.Result.Level
import com.fasterxml.jackson.core.JsonParseException
import play.api.libs.json.Json
private[multiple] object CheckstyleFormatParser {
def parseResultsXml(root: Elem): Seq[ToolResult] = {
for {
fileTag <- root \\\\ "checkstyle" \\ "file"
filePath = Paths.get(fileTag \\@ "name")
errorsTag <- fileTag \\ "error"
message = errorsTag \\@ "message"
lineAttr = errorsTag \\ "@line"
patternIdAttr = errorsTag \\ "@source"
severityAttr = errorsTag \\ "@severity"
} yield {
if (patternIdAttr.isEmpty && severityAttr.isEmpty && lineAttr.isEmpty) {
FileError(filePath, message)
} else if (patternIdAttr.nonEmpty && severityAttr.nonEmpty && lineAttr.nonEmpty) {
val line = lineAttr.text.toInt
val severity = severityAttr.text
val patternId = patternIdAttr.text
val level = severity match {
case "info" => Level.Info
case "warning" => Level.Warn
case "error" => Level.Err
case _ => throw new Exception(s"""$severity is not a valid level. Use one of ["info", "warning", "error"]""")
}
Issue(api.results.Pattern.Id(patternId), filePath, Issue.Message(message), level, None, LineLocation(line))
} else {
throw new Exception("""Errors should be either results or file errors:
|Example result:
| <error source="pattern_id" line="1" message="Message from the tool." severity="info" />
|Example file error:
| <error message="Error message" />""".stripMargin)
}
}
}
def parsePatternsXml(root: Elem): (Seq[Pattern], Option[Map[String, play.api.libs.json.JsValue]], Option[String]) = {
val extraValues = (root \\ "property").map { node =>
val v = node \\@ "value"
val value = try {
Json.parse(v)
} catch {
case _: JsonParseException => // support non quoted strings
Json.parse(s""""$v"""")
}
(node \\@ "name", value)
}.toMap
val patternsSeq = for {
rootChildren <- root \\ "module"
if rootChildren \\@ "name" != "BeforeExecutionExclusionFileFilter"
patternId: String = rootChildren \\@ "name"
parameters = (rootChildren \\ "property").map { node =>
Parameter(node \\@ "name", node \\@ "value")
}.toSet
} yield Pattern(patternId, parameters)
val excludedFilesRegex: Option[String] = {
val rootChildren = root \\ "module"
val fileFilterOption = rootChildren.find(_ \\@ "name" == "BeforeExecutionExclusionFileFilter")
fileFilterOption.map { fileFilter =>
val property = fileFilter \\ "property"
if (property \\@ "name" == "fileNamePattern")
property \\@ "value"
else
throw new Exception(
""""BeforeExecutionExclusionFileFilter" module should have a "property" tag with name="fileNamePattern"
|Example:
|<module name="BeforeExecutionExclusionFileFilter">
          | <property name="fileNamePattern" value="module\-info\.java$"/>
|</module>""".stripMargin
)
}
}
(patternsSeq, if (extraValues.isEmpty) None else Some(extraValues), excludedFilesRegex)
}
}
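// Hedged usage sketch (added for illustration; not part of the original file). Runs a
// minimal checkstyle report through parseResultsXml: the first <error> carries
// source/line/severity and becomes an Issue, the second carries only a message and
// becomes a FileError, matching the formats shown in the error messages above.
private[multiple] object CheckstyleFormatParserExample {
  def main(args: Array[String]): Unit = {
    val report =
      <checkstyle>
        <file name="src/Foo.scala">
          <error source="pattern_id" line="1" message="Message from the tool." severity="info"/>
          <error message="Could not parse file"/>
        </file>
      </checkstyle>
    CheckstyleFormatParser.parseResultsXml(report).foreach(println)
  }
}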
|
codacy/codacy-plugins-test
|
src/main/scala/codacy/plugins/test/multiple/CheckstyleFormatParser.scala
|
Scala
|
mit
| 3,553 |