code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses 1) | license (stringclasses 15) | size (int64 5-1M)
---|---|---|---|---|---
package com.emstlk.nacl4s.crypto
import com.emstlk.nacl4s.crypto.Utils._
import com.emstlk.nacl4s.crypto.box.Curve25519XSalsa20Poly1305._
object Box {
def randomNonce() = {
val nonce = new Array[Byte](nonceBytes)
random.nextBytes(nonce)
nonce
}
}
case class Box(publicKey: Array[Byte], privateKey: Array[Byte]) {
checkLength(publicKey, publicKeyBytes)
checkLength(privateKey, secretKeyBytes)
def encrypt(nonce: Array[Byte], message: Array[Byte]): Array[Byte] = {
checkLength(nonce, nonceBytes)
val msg = new Array[Byte](zeroBytes) ++ message
cryptoBox(msg, msg, msg.length, nonce, publicKey, privateKey)
msg.drop(boxZeroBytes)
}
def decrypt(nonce: Array[Byte], message: Array[Byte]): Array[Byte] = {
checkLength(nonce, nonceBytes)
val msg = new Array[Byte](boxZeroBytes) ++ message
cryptoBoxOpen(msg, msg, msg.length, nonce, publicKey, privateKey)
msg.drop(zeroBytes)
}
}
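// Hedged usage sketch (not part of the original file), assuming the usual NaCl box key
// semantics: encrypt with the recipient's public key and the sender's private key, and
// decrypt with the sender's public key and the recipient's private key.
//   val nonce = Box.randomNonce()
//   val cipher = Box(theirPublicKey, myPrivateKey).encrypt(nonce, message)
//   val plain = Box(myPublicKey, theirPrivateKey).decrypt(nonce, cipher)
// The key and message values above are hypothetical byte arrays.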
| emstlk/nacl4s | src/main/scala/com/emstlk/nacl4s/crypto/Box.scala | Scala | mit | 941 |
/*
* Copyright (c) 2013 Aviat Networks.
* This file is part of DocReg+Web. Please refer to the NOTICE.txt file for license details.
*/
package vvv.docreg.snippet
import _root_.net.liftweb._
import http._
import S._
import SHtml._
import common._
import util._
import Helpers._
import js._
import JsCmds._
import _root_.net.liftweb.http.js.jquery.JqJsCmds._
import _root_.net.liftweb.http.js.JE.JsRaw
import vvv.docreg.helper.ProjectSelection
import vvv.docreg.comet._
import vvv.docreg.util.StringUtil._
import vvv.docreg.model._
import xml.{Text, NodeSeq}
import vvv.docreg.util.Bits
class Search extends Loggable with ProjectSelection {
object searchMode extends RequestVar[StreamMode.Value](StreamMode.all)
override def viewCurrentMode = searchMode.is
override def viewChangeMode(to: StreamMode.Value) {
searchMode(to)
}
val searchInput = S.param("q") openOr ""
def input = {
if (User.loggedIn_?) {
".search-input" #> SHtml.onSubmit(s => S.redirectTo("/search?q=" + escapeInput(s)))
} else {
".all" #> NodeSeq.Empty
}
}
def escapeInput(text: String): String = {
text.trim.replaceAll("#","%23")
}
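// For example, escapeInput(" tag #42 ") yields "tag %2342": the input is trimmed and '#' is percent-encoded.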
def bindResults(in: NodeSeq): NodeSeq = {
val f = UserSession.inStreamFilter(viewCurrentMode)
val list: List[(Document, Project, Revision, User)] = FilteredDocument.search(searchInput.trim)
results(in, list.filter(x => f(x._1, x._3, x._2)), list.size >= FilteredDocument.searchLimit)
}
var html: NodeSeq = NodeSeq.Empty
def results(in: NodeSeq): NodeSeq = {
html = in
bindResults(html)
}
def results(in: NodeSeq, ds: List[(Document, Project, Revision, User)], tooBig: Boolean): NodeSeq = {
val pageUserId = User.loggedInUser.is.map(_.id) getOrElse -1L
val inputText = Option(searchInput.trim).getOrElse("")
val open = ds
(
".search-for *" #> <span>for "{ inputText }"</span> &
".match-count" #> <span>Results <span class="badge">{open.size}</span></span> &
"#search-too-big" #> (if (tooBig) PassThru else ClearNodes) &
".search-item" #> open.map { x =>
val (d,p,r,u) = x
".doc-project" #> p.infoLink() &
".doc-author" #> u.knownOption.map(_.profileLabel(pageUserId)).getOrElse(Text(r.rawAuthor)) &
".doc-key" #> <a href={d.infoLink}>{d.number}</a> &
".doc-date" #> r.dateOnlyWithHint() &
".doc-download [href]" #> d.downloadHref() &
".doc-title" #> <a href={d.infoLink}>{d.title}</a>
}
).apply(in)
}
override def modeSelectionUpdate(): JsCmd = {
reload()
}
override def projectSelectionUpdate(): JsCmd = {
reload()
}
def reload(): JsCmd = {
Replace("search_results", ("#search_results ^^" #> "noused").apply(bindResults(html)) )
}
}
| scott-abernethy/docreg-web | src/main/scala/vvv/docreg/snippet/Search.scala | Scala | gpl-3.0 | 2,797 |
package semantics
/**
* Created by inzamamrahaman on 14/11/2015.
*/
import interpreter.Interpreter.State
import scala.annotation.tailrec
sealed abstract class JayExpression {
private def binOperationMatch(op: JayBinOp, val1: JayValue, val2: JayValue): Either[JayValue, String] =
(op, val1.getCorrectType, val2.getCorrectType) match {
case (JayBinArithOp(op1), JInt, JInt) => Left(op1.eval(val1, val2))
case (JayBinRelOp(op1), JInt, JInt) => Left(op1.eval(val1, val2))
case (JayBinBoolOp(op1), JBool, JBool) => Left(op1.eval(val1, val2))
case _ => Right("Error, incompatible types and operation")
}
private def unaryOperationMatch(op : JayUnaryOp, val1 : JayValue) : Either[JayValue, String] = val1 match {
case JayBool(b) => op match {
case JayUnaryOp(op1) => Left(op1.eval(val1))
}
case _ => Right("Error, incompatible types and operation")
}
def internalEval(env : Environment) : Either[(JayValue, Environment), String] = this match {
case Value(exp) => Left((exp, env))
case Variable(name) => env.retrieveValue(name) match {
case None => Right("No variable of name " + name + " exists! Must be declared before use")
case Some(v) => Left((v, env))
}
case BinExpression(op, exp1, exp2) => (exp1.internalEval(env), exp2.internalEval(env)) match {
case (Left(p1), Left(p2)) => binOperationMatch(op, p1._1, p2._1) match {
case Left(res) => Left(res, env)
case Right(err) => Right(err)
}
case (Right(err1), Right(err2)) => Right(err1 + ". " + err2)
case (_, Right(err2)) => Right(err2)
case (Right(err1), _) => Right(err1)
}
case UnaryExpression(op, exp1) => (exp1.internalEval(env)) match {
case Left(p1) => unaryOperationMatch(op, p1._1) match {
case Left(res) => Left(res, env)
case Right(err) => Right(err)
}
case Right(err) => Right(err)
}
}
}
case class BinExpression(op : JayBinOp, exp1 : JayExpression, exp2 : JayExpression) extends JayExpression
case class UnaryExpression(op : JayUnaryOp, exp1 : JayExpression) extends JayExpression
case class Value(exp1 : JayValue) extends JayExpression
//case class Assignment(destination : String, source : JayExpression)
case class Variable(exp1 : String) extends JayExpression
//case Assignment(dest, source) => source.internalEval(env) match {
//case Right(err) => Right(err)
//case Left((valid, env2)) => env2.set(dest, valid) match {
//case Left(env3) => Left((valid, env3))
//case Right(err2) => Right(err2)
//}
| InzamamRahaman/JayInterpreter | src/main/scala/semantics/JayExpression.scala | Scala | mit | 2,563 |
package common
import upickle._
import org.scalajs.dom.XMLHttpRequest
import org.scalajs.dom.ext.Ajax
import shared._
class ExtAjax(ajax: Ajax.type) {
def postAsForm(url: String,
data: String = "",
timeout: Int = 0,
headers: Map[String, String] = Map.empty,
withCredentials: Boolean = false) = {
val contentType = ("Content-Type" -> "application/x-www-form-urlencoded")
apply("POST", url, data, timeout, headers + contentType, withCredentials)
}
def postAsJson(url: String,
data: String = "",
timeout: Int = 0,
headers: Map[String, String] = Map.empty,
withCredentials: Boolean = false) = {
val contentType = ("Content-Type" -> "application/json")
apply("POST", url, data, timeout, headers + contentType, withCredentials)
}
def apply(method: String,
url: String,
data: String = "",
timeout: Int = 0,
headers: Map[String, String] = Map.empty,
withCredentials: Boolean = false) = {
val ajaxReq = ("X-Requested-With" -> "XMLHttpRequest")
ajax.apply(method, url, data, timeout, headers + ajaxReq, withCredentials, "")
}
}
class ExtXMLHttpRequest(req: XMLHttpRequest) {
// def responseAs[T](implicit readWrite: ReadWriter[T]): T = read[T](req.responseText)
def ok = req.status == 200
}
object ExtAjax {
implicit def wrapperForAjax(ajax: Ajax.type) = new ExtAjax(ajax)
implicit def wrapperForXMLHttpRequest(req: XMLHttpRequest) = new ExtXMLHttpRequest(req)
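// Hedged usage sketch (not in the original file): with these implicits imported, the
// helpers can be called directly on Ajax, e.g.
//   import common.ExtAjax._
//   for (req <- Ajax.postAsForm("/login", "user=alice&pass=secret"))  // needs an implicit ExecutionContext
//     if (req.ok) println("logged in")                                // via ExtXMLHttpRequest.ok
// The URL and form payload here are made up for illustration.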
} | gurghet/rota-scalajs | example-client/src/main/scala/common/ExtAjax.scala | Scala | mit | 1,636 |
/*
* Accio is a platform to launch computer science experiments.
* Copyright (C) 2016-2018 Vincent Primault <[email protected]>
*
* Accio is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Accio is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Accio. If not, see <http://www.gnu.org/licenses/>.
*/
package fr.cnrs.liris.lumos.storage.memory
import java.util.concurrent.ConcurrentHashMap
import com.github.nscala_time.time.Imports._
import com.twitter.finagle.stats.{NullStatsReceiver, StatsReceiver}
import com.twitter.util.{Future, Time}
import fr.cnrs.liris.lumos.domain.{Job, JobList, Status}
import fr.cnrs.liris.lumos.storage.{JobQuery, JobStore}
import scala.collection.JavaConverters._
/**
 * Job store keeping its data in memory.
*
* @param statsReceiver Stats receiver.
*/
private[storage] final class MemoryJobStore(statsReceiver: StatsReceiver) extends JobStore {
private[this] val index = new ConcurrentHashMap[String, Job].asScala
statsReceiver.provideGauge("storage", "job", "index_size")(index.size)
override def list(query: JobQuery, limit: Option[Int], offset: Option[Int]): Future[JobList] = {
val results = index.values
.filter(query.matches)
.toSeq
.sortWith((a, b) => a.createTime > b.createTime)
Future.value(JobList.slice(results, offset = offset, limit = limit))
}
override def get(name: String): Future[Option[Job]] = Future.value(index.get(name))
override def create(job: Job): Future[Status] = {
if (index.putIfAbsent(job.name, job).isEmpty) {
Future.value(Status.Ok)
} else {
Future.value(Status.AlreadyExists(job.name))
}
}
override def replace(job: Job): Future[Status] = {
if (index.replace(job.name, job).isDefined) {
Future.value(Status.Ok)
} else {
Future.value(Status.NotFound(job.name))
}
}
override def delete(name: String): Future[Status] = {
if (index.remove(name).isDefined) {
Future.value(Status.Ok)
} else {
Future.value(Status.NotFound(name))
}
}
override def startUp(): Future[Unit] = Future.Done
override def close(deadline: Time): Future[Unit] = Future.Done
}
object MemoryJobStore {
/**
* Creates a new empty in-memory job store for use in testing.
*/
def empty: JobStore = new MemoryJobStore(NullStatsReceiver)
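// Hedged usage sketch (not in the original file), e.g. in a test, where `job` is some
// hypothetical Job instance:
//   val store = MemoryJobStore.empty
//   store.create(job)    // Future(Status.Ok) on the first insert
//   store.create(job)    // Future(Status.AlreadyExists(job.name)) afterwards
//   store.get(job.name)  // Future(Some(job))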
} | privamov/accio | accio/java/fr/cnrs/liris/lumos/storage/memory/MemoryJobStore.scala | Scala | gpl-3.0 | 2,778 |
/**
* Copyright (C) 2012 Kaj Magnus Lindberg (born 1979)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package debiki
import com.debiki.core._
import com.debiki.core.Prelude._
import java.{util => ju}
import play.api.libs.json._
import requests.PageRequest
object ReactJson {
def userNoPageToJson(anyUser: Option[User]): JsObject = {
// Warning: some dupl code, see `userDataJson()` below.
val userData = anyUser match {
case None => JsObject(Nil)
case Some(user) =>
Json.obj(
"isLoggedIn" -> JsBoolean(true),
"isAdmin" -> JsBoolean(user.isAdmin),
"userId" -> JsString(user.id),
"username" -> JsStringOrNull(user.username),
"fullName" -> JsString(user.displayName),
"isEmailKnown" -> JsBoolean(user.email.nonEmpty),
"isAuthenticated" -> JsBoolean(user.isAuthenticated))
}
Json.obj("user" -> userData)
}
def pageToJson(pageReq: PageRequest[_], socialLinksHtml: String): JsObject = {
val numPosts = pageReq.thePageParts.postCount
val numPostsExclTitle =
numPosts - (if (pageReq.thePageParts.titlePost.isDefined) 1 else 0)
var allPostsJson = pageReq.thePageParts.getAllPosts.map { post =>
post.id.toString -> postToJson(post)
}
if (pageReq.thePageRole == PageRole.EmbeddedComments) {
allPostsJson +:=
PageParts.BodyId.toString ->
embeddedCommentsDummyRootPost(pageReq.thePageParts.topLevelComments)
}
val topLevelComments = pageReq.thePageParts.topLevelComments
val topLevelCommentIdsSorted =
Post.sortPosts(topLevelComments).map(reply => JsNumber(reply.id))
val anyLatestTopics: Seq[JsObject] =
if (pageReq.thePageRole == PageRole.Forum) {
val orderOffset = PageOrderOffset.ByBumpTime(None)
var topics =
pageReq.dao.listTopicsInTree(rootPageId = pageReq.thePageId,
orderOffset, limit = controllers.ForumController.NumTopicsToList)
topics.map(controllers.ForumController.topicToJson(_))
}
else {
Nil
}
Json.obj(
"now" -> JsNumber((new ju.Date).getTime),
"pageId" -> pageReq.thePageId,
"pageRole" -> JsString(pageReq.thePageRole.toString),
"pagePath" -> JsString(pageReq.pagePath.value),
"numPosts" -> numPosts,
"numPostsExclTitle" -> numPostsExclTitle,
"isInEmbeddedCommentsIframe" -> JsBoolean(pageReq.pageRole == Some(PageRole.EmbeddedComments)),
"categories" -> categoriesJson(pageReq),
"topics" -> JsArray(anyLatestTopics),
"user" -> NoUserSpecificData,
"rootPostId" -> JsNumber(BigDecimal(pageReq.pageRoot getOrElse PageParts.BodyId)),
"allPosts" -> JsObject(allPostsJson),
"topLevelCommentIdsSorted" -> JsArray(topLevelCommentIdsSorted),
"horizontalLayout" -> JsBoolean(pageReq.thePageSettings.horizontalComments.valueIsTrue),
"socialLinksHtml" -> JsString(socialLinksHtml))
}
def postToJson(post: Post, includeUnapproved: Boolean = false): JsObject = {
val lastEditAppliedAt = post.lastEditAppliedAt map { date =>
JsNumber(date.getTime)
} getOrElse JsNull
val (sanitizedHtml, isApproved) =
if (includeUnapproved)
(Some(post.currentHtmlSanitized), post.currentVersionApproved)
else
(post.approvedHtmlSanitized, post.approvedHtmlSanitized.nonEmpty)
JsObject(Vector(
"postId" -> JsNumber(post.id),
"parentId" -> post.parentId.map(JsNumber(_)).getOrElse(JsNull),
"multireplyPostIds" -> JsArray(post.multireplyPostIds.toSeq.map(JsNumber(_))),
"authorId" -> JsString(post.userId),
"authorFullName" -> JsStringOrNull(Some(post.theUser.displayName)),
"authorUsername" -> JsStringOrNull(post.theUser.username),
"createdAt" -> JsNumber(post.creationDati.getTime),
"lastEditAppliedAt" -> lastEditAppliedAt,
"numEditors" -> JsNumber(post.numDistinctEditors),
"numLikeVotes" -> JsNumber(post.numLikeVotes),
"numWrongVotes" -> JsNumber(post.numWrongVotes),
"numOffTopicVotes" -> JsNumber(post.numOffTopicVotes),
"numPendingEditSuggestions" -> JsNumber(post.numPendingEditSuggestions),
"isTreeDeleted" -> JsBoolean(post.isTreeDeleted),
"isPostDeleted" -> JsBoolean(post.isPostDeleted),
"isTreeCollapsed" -> JsBoolean(post.isTreeCollapsed),
"isPostCollapsed" -> JsBoolean(post.isPostCollapsed),
"isTreeClosed" -> JsBoolean(post.isTreeClosed),
"isApproved" -> JsBoolean(isApproved),
"pinnedPosition" -> post.pinnedPosition.map(JsNumber(_)).getOrElse(JsNull),
"likeScore" -> JsNumber(post.likeScore),
"childIdsSorted" -> JsArray(Post.sortPosts(post.replies).map(reply => JsNumber(reply.id))),
"sanitizedHtml" -> JsStringOrNull(sanitizedHtml)))
}
/** Creates a dummy root post, needed when rendering React elements. */
def embeddedCommentsDummyRootPost(topLevelComments: Seq[Post]) = Json.obj(
"postId" -> JsNumber(PageParts.BodyId),
"childIdsSorted" -> JsArray(Post.sortPosts(topLevelComments).map(reply => JsNumber(reply.id))))
val NoUserSpecificData = Json.obj(
"permsOnPage" -> JsObject(Nil),
"rolePageSettings" -> JsObject(Nil),
"votes" -> JsObject(Nil),
"unapprovedPosts" -> JsObject(Nil),
"postIdsAutoReadLongAgo" -> JsArray(Nil),
"postIdsAutoReadNow" -> JsArray(Nil),
"marksByPostId" -> JsObject(Nil))
def userDataJson(pageRequest: PageRequest[_]): Option[JsObject] = {
val user = pageRequest.user getOrElse {
return None
}
val rolePageSettings = user.anyRoleId map { roleId =>
val settings = pageRequest.dao.loadRolePageSettings(
roleId = roleId, pageId = pageRequest.thePageId)
rolePageSettingsToJson(settings)
} getOrElse JsNull
// Warning: some dupl code, see `userNoPageToJson()` above.
Some(Json.obj(
"isLoggedIn" -> JsBoolean(true),
"isAdmin" -> JsBoolean(user.isAdmin),
"userId" -> JsString(user.id),
"username" -> JsStringOrNull(user.username),
"fullName" -> JsString(user.displayName),
"isEmailKnown" -> JsBoolean(user.email.nonEmpty),
"isAuthenticated" -> JsBoolean(user.isAuthenticated),
"permsOnPage" -> permsOnPageJson(pageRequest.permsOnPage),
"rolePageSettings" -> rolePageSettings,
"votes" -> votesJson(pageRequest),
"unapprovedPosts" -> unapprovedPostsJson(pageRequest),
"postIdsAutoReadLongAgo" -> JsArray(Nil),
"postIdsAutoReadNow" -> JsArray(Nil),
"marksByPostId" -> JsObject(Nil)))
}
private def permsOnPageJson(perms: PermsOnPage): JsObject = {
Json.obj(
"accessPage" -> JsBoolean(perms.accessPage),
"createPage" -> JsBoolean(perms.createPage),
"moveRenamePage" -> JsBoolean(perms.moveRenamePage),
"hidePageIdInUrl" -> JsBoolean(perms.hidePageIdInUrl),
"editPageTemplate" -> JsBoolean(perms.editPageTemplate),
"editPage" -> JsBoolean(perms.editPage),
"editAnyReply" -> JsBoolean(perms.editAnyReply),
"editGuestReply" -> JsBoolean(perms.editUnauReply),
"collapseThings" -> JsBoolean(perms.collapseThings),
"deleteAnyReply" -> JsBoolean(perms.deleteAnyReply),
"pinReplies" -> JsBoolean(perms.pinReplies))
}
private def rolePageSettingsToJson(settings: RolePageSettings): JsObject = {
Json.obj(
"notfLevel" -> JsString(settings.notfLevel.toString))
}
private def votesJson(pageRequest: PageRequest[_]): JsObject = {
val userVotesMap = pageRequest.thePageParts.userVotesMap(pageRequest.userIdData)
val votesByPostId = userVotesMap map { case (postId, votes) =>
var voteStrs = Vector[String]()
if (votes.votedLike) voteStrs = voteStrs :+ "VoteLike"
if (votes.votedWrong) voteStrs = voteStrs :+ "VoteWrong"
if (votes.votedOffTopic) voteStrs = voteStrs :+ "VoteOffTopic"
postId.toString -> Json.toJson(voteStrs)
}
JsObject(votesByPostId.toSeq)
}
private def unapprovedPostsJson(request: PageRequest[_]): JsObject = {
val relevantPosts =
if (request.theUser.isAdmin) request.thePageParts.getAllPosts
else request.thePageParts.postsByUser(request.theUser.id)
val unapprovedPosts = relevantPosts filter { post =>
!post.currentVersionApproved
}
val json = JsObject(unapprovedPosts.map { post =>
post.id.toString -> postToJson(post, includeUnapproved = true)
})
json
}
private def categoriesJson(request: PageRequest[_]): JsArray = {
if (request.pageRole != Some(PageRole.Forum))
return JsArray(Nil)
val categories: Seq[Category] = request.dao.loadCategoryTree(request.thePageId)
val categoriesJson = JsArray(categories map { category =>
JsObject(Seq(
"name" -> JsString(category.categoryName),
"pageId" -> JsString(category.pageId),
"slug" -> JsString(controllers.ForumController.categoryNameToSlug(category.categoryName)),
"subCategories" -> JsArray()))
})
categoriesJson
}
private def JsStringOrNull(value: Option[String]) =
value.map(JsString(_)).getOrElse(JsNull)
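// For example, JsStringOrNull(Some("x")) == JsString("x") and JsStringOrNull(None) == JsNull.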
}
| debiki/debiki-server-old | app/debiki/ReactJson.scala | Scala | agpl-3.0 | 9,747 |
package lila.db
import com.typesafe.config.Config
import play.modules.reactivemongo.ReactiveMongoPlugin
import reactivemongo.api.DB
import Types._
final class Env(config: Config) {
lazy val db = {
import play.api.Play.current
ReactiveMongoPlugin.db
}
def apply(name: String): Coll = db(name)
}
object Env {
lazy val current = "[boot] db" describes new Env(
lila.common.PlayApp loadConfig "mongodb")
}
| Happy0/lila | modules/db/src/main/Env.scala | Scala | mit | 427 |
/** Sangria GraphQL server library.
*
* Sangria is a library that provides parsing, validation, execution and other services for GraphQL
* queries.
*
* It typically requires other libraries to build a complete GraphQL service—including, perhaps,
 * ones that provide an HTTP service interface and a database interface—as well as custom code to
* bridge the gap between data representations that are natural to the database vs. to the GraphQL
* schema.
*
* @see
* [[https://sangria-graphql.github.io/ the Sangria home page]]
*/
package object sangria {
/** "Since" field for 3.0.0 deprecations. */
private[sangria] final val since3_0_0 = "Sangria 3.0.0"
}
| sangria-graphql/sangria | modules/core/src/main/scala/sangria/package.scala | Scala | apache-2.0 | 688 |
package ch3
import List._
object Exercise2 {
def tail[T](l: List[T]) = l match {
case Nil => Nil
case Cons(x, Nil) => Nil
case Cons(x, xs) => xs
}
}
import Exercise2._
/*
from the REPL you can test by typing:
:load src/main/scala/fpinscala/ch3/List.scala
:load src/main/scala/fpinscala/ch3/Exercise2.scala
tail(Nil)
tail(List())
tail(List(1))
tail(List(1,2,3,4,5))
*/
| rucka/fpinscala | src/main/scala/fpinscala/ch3/Exercise2.scala | Scala | gpl-2.0 | 393 |
package uk.gov.bis.levyApiMock.mongo
import play.api.libs.json.Json.JsValueWrapper
import play.api.libs.json._
import play.modules.reactivemongo.ReactiveMongoApi
import reactivemongo.api.Cursor
import reactivemongo.play.json.compat._
import json2bson._
import scala.concurrent.{ExecutionContext, Future}
import reactivemongo.api.bson.collection.BSONCollection
trait MongoCollection[T] {
def mongodb: ReactiveMongoApi
def collectionName: String
def collectionF(implicit ec: ExecutionContext): Future[BSONCollection] = mongodb.database.map(_.collection[BSONCollection](collectionName))
def findMany(params: (String, JsValueWrapper)*)(implicit ec: ExecutionContext, reads: Reads[T]): Future[Seq[T]] = {
val selector = Json.obj(params: _*)
for {
collection <- collectionF
o <- collection.find(selector,projection=Option.empty[JsObject]).cursor[JsObject]().collect[List](100, Cursor.FailOnError[List[JsObject]]())
} yield o.flatMap {
_.validate[T] match {
case JsSuccess(resp, _) => Some(resp)
case JsError(errs) => None
}
}
}
def findOne(params: (String, JsValueWrapper)*)(implicit ec: ExecutionContext, reads: Reads[T]): Future[Option[T]] = {
val selector = Json.obj(params: _*)
val of = for {
collection <- collectionF
o <- collection.find(selector).cursor[JsObject]().collect[List](1, Cursor.FailOnError[List[JsObject]]()).map(_.headOption)
} yield o
of.map {
case Some(o) => o.validate[T] match {
case JsSuccess(resp, _) => Some(resp)
case JsError(errs) => None
}
case _ => None
}
}
def remove(params: (String, JsValueWrapper)*)(implicit ec: ExecutionContext): Future[Unit] = {
// flatMap (rather than map) so the returned Future completes only once the delete has actually been executed
collectionF.flatMap(coll => coll.delete().one(Json.obj(params: _*))).map(_ => ())
}
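// Hedged usage sketch (not in the original file): a concrete store only needs to supply the
// abstract members, plus an implicit Reads for its document type and an ExecutionContext:
//   case class Scheme(ref: String)                    // hypothetical document type
//   object Schemes extends MongoCollection[Scheme] {
//     val mongodb = reactiveMongoApi                  // injected elsewhere
//     val collectionName = "schemes"
//   }
//   Schemes.findOne("ref" -> "123/AB456")             // Future[Option[Scheme]]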
}
| SkillsFundingAgency/das-alpha-hmrc-api-mock | src/main/scala/uk/gov/bis/levyApiMock/mongo/MongoCollection.scala | Scala | mit | 1,803 |
package xitrum.handler.outbound
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
class RangeParserTest extends AnyFlatSpec with Matchers {
behavior of "RangeParser"
def test(spec: String, expected: RangeParserResult): Unit = {
"parse" should s"handle $spec" in {
RangeParser.parse(spec, 1048576) shouldBe expected
}
}
// Invalid
test(null, UnsupportedRange)
test("bytes=", UnsupportedRange)
test("bytes=-", UnsupportedRange)
test("bytes=--", UnsupportedRange)
test("bytes=0--1", UnsupportedRange)
test("bytes=10-5", UnsatisfiableRange)
test("bytes=1048576", UnsatisfiableRange)
// last-byte-pos value is absent
test("bytes=0", SatisfiableRange(0, 1048575))
test("bytes=0-", SatisfiableRange(0, 1048575))
test("bytes=1048574", SatisfiableRange(1048574, 1048575))
// last-byte-pos value is present
test("bytes=0-0", SatisfiableRange(0, 0))
test("bytes=0-1", SatisfiableRange(0, 1))
test("bytes=0-1048574", SatisfiableRange(0, 1048574))
test("bytes=0-1048575", SatisfiableRange(0, 1048575))
test("bytes=0-1048576", SatisfiableRange(0, 1048575))
// first-byte-pos value greater than the length
test("bytes=0-1048577", SatisfiableRange(0, 1048575))
}
| xitrum-framework/xitrum | src/test/scala/xitrum/handler/outbound/RangeParserTest.scala | Scala | mit | 1,252 |
package com.aristocrat.mandrill.requests.Senders
import com.aristocrat.mandrill.requests.MandrillRequest
case class TimeSeries(key: String, address: String) extends MandrillRequest
| aristocratic/mandrill | src/main/scala/com/aristocrat/mandrill/requests/Senders/TimeSeries.scala | Scala | mit | 183 |
/* Title: Pure/ML/ml_lex.scala
Author: Makarius
Lexical syntax for Isabelle/ML and Standard ML.
*/
package isabelle
import scala.collection.mutable
import scala.util.parsing.input.{Reader, CharSequenceReader}
object ML_Lex
{
/** keywords **/
val keywords: Set[String] =
Set("#", "(", ")", ",", "->", "...", ":", ":>", ";", "=", "=>",
"[", "]", "_", "{", "|", "}", "abstype", "and", "andalso", "as",
"case", "datatype", "do", "else", "end", "eqtype", "exception",
"fn", "fun", "functor", "handle", "if", "in", "include",
"infix", "infixr", "let", "local", "nonfix", "of", "op", "open",
"orelse", "raise", "rec", "sharing", "sig", "signature",
"struct", "structure", "then", "type", "val", "where", "while",
"with", "withtype")
val keywords2: Set[String] =
Set("and", "case", "do", "else", "end", "if", "in", "let", "local",
"of", "sig", "struct", "then", "while", "with")
val keywords3: Set[String] =
Set("handle", "open", "raise")
private val lexicon: Scan.Lexicon = Scan.Lexicon(keywords.toList: _*)
/** tokens **/
object Kind extends Enumeration
{
val KEYWORD = Value("keyword")
val IDENT = Value("identifier")
val LONG_IDENT = Value("long identifier")
val TYPE_VAR = Value("type variable")
val WORD = Value("word")
val INT = Value("integer")
val REAL = Value("real")
val CHAR = Value("character")
val STRING = Value("quoted string")
val SPACE = Value("white space")
val CARTOUCHE = Value("text cartouche")
val COMMENT = Value("comment text")
val ANTIQ = Value("antiquotation")
val ANTIQ_START = Value("antiquotation: start")
val ANTIQ_STOP = Value("antiquotation: stop")
val ANTIQ_OTHER = Value("antiquotation: other")
val ANTIQ_STRING = Value("antiquotation: quoted string")
val ANTIQ_ALT_STRING = Value("antiquotation: back-quoted string")
val ANTIQ_CARTOUCHE = Value("antiquotation: text cartouche")
val ERROR = Value("bad input")
}
sealed case class Token(kind: Kind.Value, source: String)
{
def is_keyword: Boolean = kind == Kind.KEYWORD
def is_delimiter: Boolean = is_keyword && !Symbol.is_ascii_identifier(source)
}
/** parsers **/
case object ML_String extends Scan.Line_Context
case class Antiq(ctxt: Scan.Line_Context) extends Scan.Line_Context
private object Parsers extends Scan.Parsers with Antiquote.Parsers
{
/* string material */
private val blanks = many(character(Symbol.is_ascii_blank))
private val blanks1 = many1(character(Symbol.is_ascii_blank))
private val gap = "\\" ~ blanks1 ~ "\\" ^^ { case x ~ y ~ z => x + y + z }
private val gap_start = "\\" ~ blanks ~ """\z""".r ^^ { case x ~ y ~ _ => x + y }
private val escape =
one(character("\"\\abtnvfr".contains(_))) |
"^" ~ one(character(c => '@' <= c && c <= '_')) ^^ { case x ~ y => x + y } |
repeated(character(Symbol.is_ascii_digit), 3, 3)
private val str =
one(character(c => c != '"' && c != '\\' && ' ' <= c && c <= '~')) |
one(s => Symbol.is_symbolic(s) | Symbol.is_control(s)) |
"\\" ~ escape ^^ { case x ~ y => x + y }
/* ML char -- without gaps */
private val ml_char: Parser[Token] =
"#\\"" ~ str ~ "\\"" ^^ { case x ~ y ~ z => Token(Kind.CHAR, x + y + z) }
private val recover_ml_char: Parser[String] =
"#\\"" ~ opt(str) ^^ { case x ~ Some(y) => x + y case x ~ None => x }
/* ML string */
private val ml_string_body: Parser[String] =
rep(gap | str) ^^ (_.mkString)
private val recover_ml_string: Parser[String] =
"\\"" ~ ml_string_body ^^ { case x ~ y => x + y }
private val ml_string: Parser[Token] =
"\\"" ~ ml_string_body ~ "\\"" ^^ { case x ~ y ~ z => Token(Kind.STRING, x + y + z) }
private def ml_string_line(ctxt: Scan.Line_Context): Parser[(Token, Scan.Line_Context)] =
{
def result(x: String, c: Scan.Line_Context) = (Token(Kind.STRING, x), c)
ctxt match {
case Scan.Finished =>
"\\"" ~ ml_string_body ~ ("\\"" | gap_start) ^^
{ case x ~ y ~ z => result(x + y + z, if (z == "\\"") Scan.Finished else ML_String) }
case ML_String =>
blanks ~ opt_term("\\\\" ~ ml_string_body ~ ("\\"" | gap_start)) ^^
{ case x ~ Some(y ~ z ~ w) =>
result(x + y + z + w, if (w == "\\"") Scan.Finished else ML_String)
case x ~ None => result(x, ML_String) }
case _ => failure("")
}
}
/* ML cartouche */
private val ml_cartouche: Parser[Token] =
cartouche ^^ (x => Token(Kind.CARTOUCHE, x))
private def ml_cartouche_line(ctxt: Scan.Line_Context): Parser[(Token, Scan.Line_Context)] =
cartouche_line(ctxt) ^^ { case (x, c) => (Token(Kind.CARTOUCHE, x), c) }
/* ML comment */
private val ml_comment: Parser[Token] =
comment ^^ (x => Token(Kind.COMMENT, x))
private def ml_comment_line(ctxt: Scan.Line_Context): Parser[(Token, Scan.Line_Context)] =
comment_line(ctxt) ^^ { case (x, c) => (Token(Kind.COMMENT, x), c) }
/* delimited token */
private def delimited_token: Parser[Token] =
ml_char | (ml_string | (ml_cartouche | ml_comment))
private val recover_delimited: Parser[Token] =
(recover_ml_char | (recover_ml_string | (recover_cartouche | recover_comment))) ^^
(x => Token(Kind.ERROR, x))
private def other_token: Parser[Token] =
{
/* identifiers */
val letdigs = many(character(Symbol.is_ascii_letdig))
val alphanumeric =
one(character(Symbol.is_ascii_letter)) ~ letdigs ^^ { case x ~ y => x + y }
val symbolic = many1(character("!#$%&*+-/:<=>?@\\^`|~".contains(_)))
val ident = (alphanumeric | symbolic) ^^ (x => Token(Kind.IDENT, x))
val long_ident =
rep1(alphanumeric ~ "." ^^ { case x ~ y => x + y }) ~
(alphanumeric | (symbolic | "=")) ^^
{ case x ~ y => Token(Kind.LONG_IDENT, x.mkString + y) }
val type_var = "'" ~ letdigs ^^ { case x ~ y => Token(Kind.TYPE_VAR, x + y) }
/* numerals */
val dec = many1(character(Symbol.is_ascii_digit))
val hex = many1(character(Symbol.is_ascii_hex))
val sign = opt("~") ^^ { case Some(x) => x case None => "" }
val decint = sign ~ dec ^^ { case x ~ y => x + y }
val exp = ("E" | "e") ~ decint ^^ { case x ~ y => x + y }
val word =
("0wx" ~ hex ^^ { case x ~ y => x + y } | "0w" ~ dec ^^ { case x ~ y => x + y }) ^^
(x => Token(Kind.WORD, x))
val int =
sign ~ ("0x" ~ hex ^^ { case x ~ y => x + y } | dec) ^^
{ case x ~ y => Token(Kind.INT, x + y) }
val real =
(decint ~ "." ~ dec ~ (opt(exp) ^^ { case Some(x) => x case None => "" }) ^^
{ case x ~ y ~ z ~ w => x + y + z + w } |
decint ~ exp ^^ { case x ~ y => x + y }) ^^ (x => Token(Kind.REAL, x))
/* main */
val space = blanks1 ^^ (x => Token(Kind.SPACE, x))
val keyword = literal(lexicon) ^^ (x => Token(Kind.KEYWORD, x))
val ml_antiq = antiq ^^ (x => Token(Kind.ANTIQ, x))
val bad = one(_ => true) ^^ (x => Token(Kind.ERROR, x))
space | (recover_delimited | (ml_antiq |
(((word | (real | (int | (long_ident | (ident | type_var))))) ||| keyword) | bad)))
}
/* antiquotations (line-oriented) */
def ml_antiq_start(ctxt: Scan.Line_Context): Parser[(Token, Scan.Line_Context)] =
ctxt match {
case Scan.Finished => "@{" ^^ (x => (Token(Kind.ANTIQ_START, x), Antiq(Scan.Finished)))
case _ => failure("")
}
def ml_antiq_stop(ctxt: Scan.Line_Context): Parser[(Token, Scan.Line_Context)] =
ctxt match {
case Antiq(Scan.Finished) => "}" ^^ (x => (Token(Kind.ANTIQ_STOP, x), Scan.Finished))
case _ => failure("")
}
def ml_antiq_body(context: Scan.Line_Context): Parser[(Token, Scan.Line_Context)] =
context match {
case Antiq(ctxt) =>
(if (ctxt == Scan.Finished) antiq_other ^^ (x => (Token(Kind.ANTIQ_OTHER, x), context))
else failure("")) |
quoted_line("\\"", ctxt) ^^ { case (x, c) => (Token(Kind.ANTIQ_STRING, x), Antiq(c)) } |
quoted_line("`", ctxt) ^^ { case (x, c) => (Token(Kind.ANTIQ_ALT_STRING, x), Antiq(c)) } |
cartouche_line(ctxt) ^^ { case (x, c) => (Token(Kind.ANTIQ_CARTOUCHE, x), Antiq(c)) }
case _ => failure("")
}
/* token */
def token: Parser[Token] = delimited_token | other_token
def token_line(SML: Boolean, ctxt: Scan.Line_Context): Parser[(Token, Scan.Line_Context)] =
{
val other = (ml_char | other_token) ^^ (x => (x, Scan.Finished))
if (SML) ml_string_line(ctxt) | (ml_comment_line(ctxt) | other)
else
ml_string_line(ctxt) |
(ml_cartouche_line(ctxt) |
(ml_comment_line(ctxt) |
(ml_antiq_start(ctxt) | (ml_antiq_stop(ctxt) | (ml_antiq_body(ctxt) | other)))))
}
}
/* tokenize */
def tokenize(input: CharSequence): List[Token] =
{
Parsers.parseAll(Parsers.rep(Parsers.token), new CharSequenceReader(input)) match {
case Parsers.Success(tokens, _) => tokens
case _ => error("Unexpected failure of tokenizing input:\\n" + input.toString)
}
}
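// Hedged illustration (not in the original file): with the parsers above,
// tokenize("fun f x = x") should yield a KEYWORD token for "fun", SPACE tokens
// for the blanks, and IDENT tokens for "f" and "x".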
def tokenize_line(SML: Boolean, input: CharSequence, context: Scan.Line_Context)
: (List[Token], Scan.Line_Context) =
{
var in: Reader[Char] = new CharSequenceReader(input)
val toks = new mutable.ListBuffer[Token]
var ctxt = context
while (!in.atEnd) {
Parsers.parse(Parsers.token_line(SML, ctxt), in) match {
case Parsers.Success((x, c), rest) => toks += x; ctxt = c; in = rest
case Parsers.NoSuccess(_, rest) =>
error("Unexpected failure of tokenizing input:\\n" + rest.source.toString)
}
}
(toks.toList, ctxt)
}
}
| wneuper/libisabelle | pide/2015/src/main/scala/ML/ml_lex.scala | Scala | mit | 9,926 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.metadata
import org.apache.flink.table.api.TableException
import org.apache.flink.table.planner.JBoolean
import org.apache.flink.table.planner.calcite.FlinkRelBuilder.PlannerNamedWindowProperty
import org.apache.flink.table.planner.plan.nodes.FlinkRelNode
import org.apache.flink.table.planner.plan.nodes.calcite.{Expand, Rank, WindowAggregate}
import org.apache.flink.table.planner.plan.nodes.common.CommonLookupJoin
import org.apache.flink.table.planner.plan.nodes.logical._
import org.apache.flink.table.planner.plan.nodes.physical.batch._
import org.apache.flink.table.planner.plan.nodes.physical.stream._
import org.apache.flink.table.planner.plan.schema.FlinkPreparingTableBase
import org.apache.flink.table.planner.plan.utils.{FlinkRelMdUtil, RankUtil}
import org.apache.flink.table.runtime.operators.rank.RankType
import org.apache.flink.table.sources.TableSource
import org.apache.calcite.plan.RelOptTable
import org.apache.calcite.plan.volcano.RelSubset
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.convert.Converter
import org.apache.calcite.rel.core._
import org.apache.calcite.rel.metadata._
import org.apache.calcite.rel.{RelNode, SingleRel}
import org.apache.calcite.rex.{RexCall, RexInputRef, RexNode}
import org.apache.calcite.sql.SqlKind
import org.apache.calcite.sql.fun.SqlStdOperatorTable
import org.apache.calcite.util.{Bug, BuiltInMethod, ImmutableBitSet, Util}
import java.util
import scala.collection.JavaConversions._
/**
 * FlinkRelMdColumnUniqueness supplies an implementation of
* [[RelMetadataQuery#areColumnsUnique]] for the standard logical algebra.
*/
class FlinkRelMdColumnUniqueness private extends MetadataHandler[BuiltInMetadata.ColumnUniqueness] {
def getDef: MetadataDef[BuiltInMetadata.ColumnUniqueness] = BuiltInMetadata.ColumnUniqueness.DEF
def areColumnsUnique(
rel: TableScan,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
areTableColumnsUnique(rel, null, rel.getTable, columns)
}
def areColumnsUnique(
rel: FlinkLogicalLegacyTableSourceScan,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
areTableColumnsUnique(rel, rel.tableSource, rel.getTable, columns)
}
private def areTableColumnsUnique(
rel: TableScan,
tableSource: TableSource[_],
relOptTable: RelOptTable,
columns: ImmutableBitSet): JBoolean = {
if (columns.cardinality == 0) {
return false
}
// TODO get uniqueKeys from TableSchema of TableSource
relOptTable match {
case table: FlinkPreparingTableBase => {
val ukOptional = table.uniqueKeysSet
if (ukOptional.isPresent) {
if (ukOptional.get().isEmpty) {
false
} else {
ukOptional.get().exists(columns.contains)
}
} else {
null
}
}
case _ => rel.getTable.isKey(columns)
}
}
def areColumnsUnique(
rel: Values,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
if (rel.tuples.size < 2) {
return true
}
columns.foreach { idx =>
val columnValues = rel.tuples map { tuple =>
val literal = tuple.get(idx)
if (literal.isNull) {
NullSentinel.INSTANCE
} else {
literal.getValueAs(classOf[Comparable[_]])
}
}
if (columnValues.toSet.size == columnValues.size) {
return true
}
}
false
}
def areColumnsUnique(
rel: Project,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
// LogicalProject maps a set of rows to a different set;
// Without knowledge of the mapping function(whether it
// preserves uniqueness), it is only safe to derive uniqueness
// info from the child of a project when the mapping is f(a) => a.
//
// Also need to map the input column set to the corresponding child
// references
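// For example, with projects (a, b + 1): a query on the first output column maps to input
// column a, while b + 1 is skipped because it is not a plain input reference (so it
// contributes nothing to the derived child column set).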
areColumnsUniqueOfProject(rel.getProjects, mq, columns, ignoreNulls, rel)
}
def areColumnsUnique(
rel: Filter,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = mq.areColumnsUnique(rel.getInput, columns, ignoreNulls)
/**
* Determines whether a specified set of columns from a Calc relational expression are unique.
*
* @param rel the Calc relational expression
* @param mq metadata query instance
* @param columns column mask representing the subset of columns for which
* uniqueness will be determined
* @param ignoreNulls if true, ignore null values when determining column
* uniqueness
* @return whether the columns are unique, or
* null if not enough information is available to make that determination
*/
def areColumnsUnique(
rel: Calc,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
// Calc is composed of projects and a condition. The condition does not change the
// uniqueness property, while the projects map a set of rows to a different set.
// Without knowledge of the mapping function(whether it
// preserves uniqueness), it is only safe to derive uniqueness
// info from the child of a project when the mapping is f(a) => a.
//
// Also need to map the input column set to the corresponding child
// references
val program = rel.getProgram
val projects = program.getProjectList.map(program.expandLocalRef)
areColumnsUniqueOfProject(projects, mq, columns, ignoreNulls, rel)
}
private def areColumnsUniqueOfProject(
projects: util.List[RexNode],
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean,
originalNode: SingleRel): JBoolean = {
val childColumns = ImmutableBitSet.builder
columns.foreach { idx =>
val project = projects.get(idx)
project match {
case inputRef: RexInputRef => childColumns.set(inputRef.getIndex)
case asCall: RexCall if asCall.getKind.equals(SqlKind.AS) &&
asCall.getOperands.get(0).isInstanceOf[RexInputRef] =>
childColumns.set(asCall.getOperands.get(0).asInstanceOf[RexInputRef].getIndex)
case call: RexCall if ignoreNulls =>
// If the expression is a cast such that the types are the same
// except for the nullability, then if we're ignoring nulls,
// it doesn't matter whether the underlying column reference
// is nullable. Check that the types are the same by making a
// nullable copy of both types and then comparing them.
if (call.getOperator eq SqlStdOperatorTable.CAST) {
val castOperand = call.getOperands.get(0)
castOperand match {
case castRef: RexInputRef =>
val typeFactory = originalNode.getCluster.getTypeFactory
val castType = typeFactory.createTypeWithNullability(project.getType, true)
val origType = typeFactory.createTypeWithNullability(castOperand.getType, true)
if (castType == origType) {
childColumns.set(castRef.getIndex)
}
case _ => // ignore
}
}
case _ =>
// If the expression will not influence uniqueness of the
// projection, then skip it.
}
}
// If no columns can affect uniqueness, then return unknown
if (childColumns.cardinality == 0) {
null
} else {
mq.areColumnsUnique(originalNode.getInput(), childColumns.build, ignoreNulls)
}
}
def areColumnsUnique(
rel: Expand,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
// values of expand_id are unique in rows expanded from a single row,
// and an input unique key combined with expand_id is also unique
val expandIdIndex = rel.expandIdIndex
if (!columns.get(expandIdIndex)) {
return false
}
val columnsSkipExpandId = ImmutableBitSet.builder().addAll(columns).clear(expandIdIndex).build()
if (columnsSkipExpandId.cardinality == 0) {
return false
}
val inputRefColumns = columnsSkipExpandId.flatMap {
column =>
val inputRefs = FlinkRelMdUtil.getInputRefIndices(column, rel)
if (inputRefs.size == 1 && inputRefs.head >= 0) {
Array(inputRefs.head)
} else {
Array.empty[Int]
}
}.toSeq
if (inputRefColumns.isEmpty) {
return false
}
mq.areColumnsUnique(rel.getInput, ImmutableBitSet.of(inputRefColumns: _*), ignoreNulls)
}
def areColumnsUnique(
rel: Converter,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = mq.areColumnsUnique(rel.getInput, columns, ignoreNulls)
def areColumnsUnique(
rel: Exchange,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = mq.areColumnsUnique(rel.getInput, columns, ignoreNulls)
def areColumnsUnique(
rank: Rank,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
val input = rank.getInput
val rankFunColumnIndex = RankUtil.getRankNumberColumnIndex(rank).getOrElse(-1)
if (rankFunColumnIndex < 0) {
mq.areColumnsUnique(input, columns, ignoreNulls)
} else {
val childColumns = columns.clear(rankFunColumnIndex)
val isChildColumnsUnique = mq.areColumnsUnique(input, childColumns, ignoreNulls)
if (isChildColumnsUnique != null && isChildColumnsUnique) {
true
} else {
rank.rankType match {
case RankType.ROW_NUMBER =>
val fields = columns.toArray
(rank.partitionKey.toArray :+ rankFunColumnIndex).forall(fields.contains(_))
case _ => false
}
}
}
}
def areColumnsUnique(
rel: Sort,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = mq.areColumnsUnique(rel.getInput, columns, ignoreNulls)
def areColumnsUnique(
rel: StreamExecDeduplicate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
columns != null && util.Arrays.equals(columns.toArray, rel.getUniqueKeys)
}
def areColumnsUnique(
rel: StreamPhysicalChangelogNormalize,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
columns != null && ImmutableBitSet.of(rel.uniqueKeys: _*).equals(columns)
}
def areColumnsUnique(
rel: StreamPhysicalDropUpdateBefore,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
mq.areColumnsUnique(rel.getInput, columns, ignoreNulls)
}
def areColumnsUnique(
rel: Aggregate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
areColumnsUniqueOnAggregate(rel.getGroupSet.toArray, mq, columns, ignoreNulls)
}
def areColumnsUnique(
rel: BatchPhysicalGroupAggregateBase,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
if (rel.isFinal) {
areColumnsUniqueOnAggregate(rel.grouping, mq, columns, ignoreNulls)
} else {
null
}
}
def areColumnsUnique(
rel: StreamPhysicalGroupAggregate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
areColumnsUniqueOnAggregate(rel.grouping, mq, columns, ignoreNulls)
}
def areColumnsUnique(
rel: StreamPhysicalGlobalGroupAggregate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
areColumnsUniqueOnAggregate(rel.grouping, mq, columns, ignoreNulls)
}
def areColumnsUnique(
rel: StreamPhysicalLocalGroupAggregate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = null
private def areColumnsUniqueOnAggregate(
grouping: Array[Int],
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
// group key of agg output always starts from 0
val outputGroupKey = ImmutableBitSet.of(grouping.indices: _*)
columns.contains(outputGroupKey)
}
def areColumnsUnique(
rel: WindowAggregate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
areColumnsUniqueOnWindowAggregate(
rel.getGroupSet.toArray,
rel.getNamedProperties,
rel.getRowType.getFieldCount,
mq,
columns,
ignoreNulls)
}
def areColumnsUnique(
rel: BatchPhysicalWindowAggregateBase,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
if (rel.isFinal) {
areColumnsUniqueOnWindowAggregate(
rel.grouping,
rel.namedWindowProperties,
rel.getRowType.getFieldCount,
mq,
columns,
ignoreNulls)
} else {
null
}
}
def areColumnsUnique(
rel: StreamPhysicalGroupWindowAggregate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
areColumnsUniqueOnWindowAggregate(
rel.grouping,
rel.namedWindowProperties,
rel.getRowType.getFieldCount,
mq,
columns,
ignoreNulls)
}
private def areColumnsUniqueOnWindowAggregate(
grouping: Array[Int],
namedProperties: Seq[PlannerNamedWindowProperty],
outputFieldCount: Int,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
if (namedProperties.nonEmpty) {
val begin = outputFieldCount - namedProperties.size
val end = outputFieldCount - 1
val keys = ImmutableBitSet.of(grouping.indices: _*)
(begin to end).map {
i => keys.union(ImmutableBitSet.of(i))
}.exists(columns.contains)
} else {
false
}
}
def areColumnsUnique(
rel: Window,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = areColumnsUniqueOfOverAgg(rel, mq, columns, ignoreNulls)
def areColumnsUnique(
rel: BatchExecOverAggregate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = areColumnsUniqueOfOverAgg(rel, mq, columns, ignoreNulls)
def areColumnsUnique(
rel: StreamExecOverAggregate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = areColumnsUniqueOfOverAgg(rel, mq, columns, ignoreNulls)
private def areColumnsUniqueOfOverAgg(
overAgg: SingleRel,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
val input = overAgg.getInput
val inputFieldLength = input.getRowType.getFieldCount
val columnsBelongsToInput = ImmutableBitSet.of(columns.filter(_ < inputFieldLength).toList)
val isSubColumnsUnique = mq.areColumnsUnique(
input,
columnsBelongsToInput,
ignoreNulls)
if (isSubColumnsUnique != null && isSubColumnsUnique) {
true
} else if (columnsBelongsToInput.cardinality() < columns.cardinality()) {
// We cannot tell whether the columns that do not belong to the input are unique or not
null
} else {
isSubColumnsUnique
}
}
def areColumnsUnique(
rel: Join,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
rel.getJoinType match {
case JoinRelType.SEMI | JoinRelType.ANTI =>
// only return the unique keys from the LHS since a SEMI/ANTI join only
// returns the LHS
mq.areColumnsUnique(rel.getLeft, columns, ignoreNulls)
case _ =>
areColumnsUniqueOfJoin(
rel.analyzeCondition(),
rel.getJoinType,
rel.getLeft.getRowType,
(leftSet: ImmutableBitSet) => mq.areColumnsUnique(rel.getLeft, leftSet, ignoreNulls),
(rightSet: ImmutableBitSet) => mq.areColumnsUnique(rel.getRight, rightSet, ignoreNulls),
mq,
columns
)
}
}
def areColumnsUnique(
rel: StreamExecIntervalJoin,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
val joinInfo = JoinInfo.of(rel.getLeft, rel.getRight, rel.joinCondition)
areColumnsUniqueOfJoin(
joinInfo,
rel.joinType,
rel.getLeft.getRowType,
(leftSet: ImmutableBitSet) => mq.areColumnsUnique(rel.getLeft, leftSet, ignoreNulls),
(rightSet: ImmutableBitSet) => mq.areColumnsUnique(rel.getRight, rightSet, ignoreNulls),
mq,
columns
)
}
def areColumnsUnique(
join: CommonLookupJoin,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
val left = join.getInput
areColumnsUniqueOfJoin(
join.joinInfo, join.joinType, left.getRowType,
(leftSet: ImmutableBitSet) => mq.areColumnsUnique(left, leftSet, ignoreNulls),
// TODO get uniqueKeys from TableSchema of TableSource
(_: ImmutableBitSet) => null,
mq, columns
)
}
def areColumnsUniqueOfJoin(
joinInfo: JoinInfo,
joinRelType: JoinRelType,
leftRowType: RelDataType,
isLeftUnique: ImmutableBitSet => JBoolean,
isRightUnique: ImmutableBitSet => JBoolean,
mq: RelMetadataQuery,
columns: ImmutableBitSet): JBoolean = {
if (columns.cardinality == 0) {
return false
}
// Divide up the input column mask into column masks for the left and
// right sides of the join
val (leftColumns, rightColumns) =
FlinkRelMdUtil.splitColumnsIntoLeftAndRight(leftRowType.getFieldCount, columns)
// If the original column mask contains columns from both the left and
// right hand side, then the columns are unique if and only if they're
// unique for their respective join inputs
val leftUnique = isLeftUnique(leftColumns)
val rightUnique = isRightUnique(rightColumns)
if ((leftColumns.cardinality > 0) && (rightColumns.cardinality > 0)) {
if ((leftUnique == null) || (rightUnique == null)) {
return null
}
else {
return leftUnique && rightUnique
}
}
// If we're only trying to determine uniqueness for columns that
// originate from one join input, then determine if the equijoin
// columns from the other join input are unique. If they are, then
// the columns are unique for the entire join if they're unique for
// the corresponding join input, provided that input is not null
// generating.
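// For example, in `A JOIN B ON A.a = B.pk` where B.pk is unique, each A row matches at
// most one B row, so a column set that is unique on A remains unique on the join output.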
if (leftColumns.cardinality > 0) {
if (joinRelType.generatesNullsOnLeft) {
false
} else {
val rightJoinColsUnique = isRightUnique(joinInfo.rightSet)
if ((rightJoinColsUnique == null) || (leftUnique == null)) {
null
} else {
rightJoinColsUnique && leftUnique
}
}
} else if (rightColumns.cardinality > 0) {
if (joinRelType.generatesNullsOnRight) {
false
} else {
val leftJoinColsUnique = isLeftUnique(joinInfo.leftSet)
if ((leftJoinColsUnique == null) || (rightUnique == null)) {
null
} else {
leftJoinColsUnique && rightUnique
}
}
} else {
throw new AssertionError
}
}
def areColumnsUnique(
rel: Correlate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
rel.getJoinType match {
case JoinRelType.ANTI | JoinRelType.SEMI =>
mq.areColumnsUnique(rel.getLeft, columns, ignoreNulls)
case JoinRelType.LEFT | JoinRelType.INNER =>
val left = rel.getLeft
val right = rel.getRight
val leftFieldCount = left.getRowType.getFieldCount
val (leftColumns, rightColumns) =
FlinkRelMdUtil.splitColumnsIntoLeftAndRight(leftFieldCount, columns)
if (leftColumns.cardinality > 0 && rightColumns.cardinality > 0) {
val leftUnique = mq.areColumnsUnique(left, leftColumns, ignoreNulls)
val rightUnique = mq.areColumnsUnique(right, rightColumns, ignoreNulls)
if (leftUnique == null || rightUnique == null) null else leftUnique && rightUnique
} else {
null
}
case _ => throw new TableException(
s"Unknown join type ${rel.getJoinType} for correlate relation $rel")
}
}
def areColumnsUnique(
rel: BatchPhysicalCorrelate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = null
def areColumnsUnique(
rel: StreamPhysicalCorrelate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = null
def areColumnsUnique(
rel: SetOp,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
// If not ALL then the rows are distinct.
// Therefore the set of all columns is a key.
!rel.all && columns.nextClearBit(0) >= rel.getRowType.getFieldCount
}
def areColumnsUnique(
rel: Intersect,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
if (areColumnsUnique(rel.asInstanceOf[SetOp], mq, columns, ignoreNulls)) {
return true
}
rel.getInputs foreach { input =>
val unique = mq.areColumnsUnique(input, columns, ignoreNulls)
if (unique != null && unique) {
return true
}
}
false
}
def areColumnsUnique(
rel: Minus,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
if (areColumnsUnique(rel.asInstanceOf[SetOp], mq, columns, ignoreNulls)) {
true
} else {
mq.areColumnsUnique(rel.getInput(0), columns, ignoreNulls)
}
}
/**
* Determines whether a specified set of columns from a RelSubSet relational expression are
* unique.
*
* FIX BUG in <a href="https://issues.apache.org/jira/browse/CALCITE-2134">[CALCITE-2134] </a>
*
* @param subset the RelSubSet relational expression
* @param mq metadata query instance
* @param columns column mask representing the subset of columns for which
* uniqueness will be determined
* @param ignoreNulls if true, ignore null values when determining column uniqueness
* @return whether the columns are unique, or
* null if not enough information is available to make that determination
*/
def areColumnsUnique(
subset: RelSubset,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
if (!Bug.CALCITE_1048_FIXED) {
val rel = Util.first(subset.getBest, subset.getOriginal)
return mq.areColumnsUnique(rel, columns, ignoreNulls)
}
var nullCount = 0
for (rel <- subset.getRels) {
rel match {
// NOTE: when adding uniqueness estimation for a new RelNode type, e.g. Rank / Expand,
// also add that RelNode to the pattern matching below.
case _: Aggregate | _: Filter | _: Values | _: TableScan | _: Project | _: Correlate |
_: Join | _: Exchange | _: Sort | _: SetOp | _: Calc | _: Converter | _: Window |
_: Expand | _: Rank | _: FlinkRelNode =>
try {
val unique = mq.areColumnsUnique(rel, columns, ignoreNulls)
if (unique != null) {
if (unique) {
return true
}
} else {
nullCount += 1
}
}
catch {
case _: CyclicMetadataException =>
// Ignore this relational expression; there will be non-cyclic ones in this set.
}
case _ => // skip
}
}
if (nullCount == 0) false else null
}
/**
* Catch-all implementation for
* [[BuiltInMetadata.ColumnUniqueness#areColumnsUnique(ImmutableBitSet, boolean)]],
* invoked using reflection, for any relational expression not
* handled by a more specific method.
*
* @param rel Relational expression
* @param mq Metadata query
* @param columns column mask representing the subset of columns for which
* uniqueness will be determined
* @param ignoreNulls if true, ignore null values when determining column uniqueness
* @return whether the columns are unique, or
* null if not enough information is available to make that determination
* @see org.apache.calcite.rel.metadata.RelMetadataQuery#areColumnsUnique(
* RelNode, ImmutableBitSet, boolean)
*/
def areColumnsUnique(
rel: RelNode,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = null
}
object FlinkRelMdColumnUniqueness {
private val INSTANCE = new FlinkRelMdColumnUniqueness
val SOURCE: RelMetadataProvider = ReflectiveRelMetadataProvider.reflectiveSource(
BuiltInMethod.COLUMN_UNIQUENESS.method, INSTANCE)
}
| aljoscha/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/metadata/FlinkRelMdColumnUniqueness.scala | Scala | apache-2.0 | 26,190 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import sbt._
import sbt.Keys._
object CassandraSparkBuild extends Build {
import Settings._
import Versions.scalaBinary
val namespace = "spark-cassandra-connector"
val demosPath = file(s"$namespace-demos")
lazy val root = RootProject(
name = "root",
dir = file("."),
settings = rootSettings,
contains = Seq(embedded, connector, demos, jconnector)
)
lazy val embedded = CrossScalaVersionsProject(
name = s"$namespace-embedded",
conf = defaultSettings ++ Seq(libraryDependencies ++= Dependencies.embedded)
) configs IntegrationTest
lazy val connector = CrossScalaVersionsProject(
name = namespace,
conf = assembledSettings ++ Seq(libraryDependencies ++= Dependencies.connector ++ Seq(
"org.scala-lang" % "scala-reflect" % scalaVersion.value,
"org.scala-lang" % "scala-compiler" % scalaVersion.value % "test,it"))
).copy(dependencies = Seq(embedded % "test->test;it->it,test;")
) configs IntegrationTest
lazy val jconnector = Project(
id = s"$namespace-java",
base = file(s"$namespace-java"),
settings = japiSettings ++ connector.settings,
dependencies = Seq(connector % "compile;runtime->runtime;test->test;it->it,test;provided->provided")
) configs IntegrationTest
lazy val demos = RootProject(
name = "demos",
dir = demosPath,
contains = Seq(simpleDemos, kafkaStreaming, twitterStreaming)
)
lazy val simpleDemos = Project(
id = "simple-demos",
base = demosPath / "simple-demos",
settings = japiSettings ++ demoSettings,
dependencies = Seq(connector, jconnector, embedded)
)
lazy val kafkaStreaming = CrossScalaVersionsProject(
name = "kafka-streaming",
conf = demoSettings ++ kafkaDemoSettings ++ Seq(
libraryDependencies ++= (CrossVersion.partialVersion(scalaVersion.value) match {
case Some((2, minor)) if minor < 11 => Dependencies.kafka
case _ => Seq.empty
}))).copy(base = demosPath / "kafka-streaming", dependencies = Seq(connector, embedded))
lazy val twitterStreaming = Project(
id = "twitter-streaming",
base = demosPath / "twitter-streaming",
settings = demoSettings ++ Seq(libraryDependencies ++= Dependencies.twitter),
dependencies = Seq(connector)
)
def crossBuildPath(base: sbt.File, v: String): sbt.File = base / s"scala-$v" / "src"
/* templates */
def CrossScalaVersionsProject(name: String,
conf: Seq[Def.Setting[_]],
reliesOn: Seq[ClasspathDep[ProjectReference]] = Seq.empty) =
Project(id = name, base = file(name), dependencies = reliesOn, settings = conf ++ Seq(
unmanagedSourceDirectories in (Compile, packageBin) +=
crossBuildPath(baseDirectory.value, scalaBinaryVersion.value),
unmanagedSourceDirectories in (Compile, doc) +=
crossBuildPath(baseDirectory.value, scalaBinaryVersion.value),
unmanagedSourceDirectories in Compile +=
crossBuildPath(baseDirectory.value, scalaBinaryVersion.value)
))
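  // For example, when a module is built against Scala 2.10, the settings above add
  // <module>/scala-2.10/src to the compile (and doc/packageBin) sources in addition to the
  // default src/main/scala, which is how version-specific sources are picked up.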
def RootProject(name: String, dir: sbt.File, settings: => scala.Seq[sbt.Def.Setting[_]] = Seq.empty, contains: Seq[ProjectReference]): Project =
Project(id = name, base = dir, settings = parentSettings ++ settings, aggregate = contains)
}
object Dependencies {
import Versions._
implicit class Exclude(module: ModuleID) {
def guavaExclude: ModuleID =
module exclude("com.google.guava", "guava")
def sparkExclusions: ModuleID = module.guavaExclude
.exclude("org.apache.spark", s"spark-core_$scalaBinary")
def logbackExclude: ModuleID = module
.exclude("ch.qos.logback", "logback-classic")
.exclude("ch.qos.logback", "logback-core")
def replExclusions: ModuleID = module.guavaExclude
.exclude("org.apache.spark", s"spark-bagel_$scalaBinary")
.exclude("org.apache.spark", s"spark-mllib_$scalaBinary")
.exclude("org.scala-lang", "scala-compiler")
def kafkaExclusions: ModuleID = module
.exclude("org.slf4j", "slf4j-simple")
.exclude("com.sun.jmx", "jmxri")
.exclude("com.sun.jdmk", "jmxtools")
.exclude("net.sf.jopt-simple", "jopt-simple")
}
object Compile {
val akkaActor = "com.typesafe.akka" %% "akka-actor" % Akka % "provided" // ApacheV2
val akkaRemote = "com.typesafe.akka" %% "akka-remote" % Akka % "provided" // ApacheV2
val akkaSlf4j = "com.typesafe.akka" %% "akka-slf4j" % Akka % "provided" // ApacheV2
val cassandraClient = "org.apache.cassandra" % "cassandra-clientutil" % Cassandra guavaExclude // ApacheV2
val cassandraDriver = "com.datastax.cassandra" % "cassandra-driver-core" % CassandraDriver guavaExclude // ApacheV2
val commonsLang3 = "org.apache.commons" % "commons-lang3" % CommonsLang3 // ApacheV2
val config = "com.typesafe" % "config" % Config % "provided" // ApacheV2
val guava = "com.google.guava" % "guava" % Guava
val jodaC = "org.joda" % "joda-convert" % JodaC
val jodaT = "joda-time" % "joda-time" % JodaT
val lzf = "com.ning" % "compress-lzf" % Lzf % "provided"
val slf4jApi = "org.slf4j" % "slf4j-api" % Slf4j % "provided" // MIT
val jsr166e = "com.twitter" % "jsr166e" % JSR166e // Creative Commons
/* To allow spark artifact inclusion in the demos at runtime, we set 'provided' below. */
val sparkCore = "org.apache.spark" %% "spark-core" % Spark guavaExclude // ApacheV2
val sparkUnsafe = "org.apache.spark" %% "spark-unsafe" % Spark guavaExclude // ApacheV2
val sparkStreaming = "org.apache.spark" %% "spark-streaming" % Spark guavaExclude // ApacheV2
val sparkSql = "org.apache.spark" %% "spark-sql" % Spark sparkExclusions // ApacheV2
val sparkCatalyst = "org.apache.spark" %% "spark-catalyst" % Spark sparkExclusions // ApacheV2
val sparkHive = "org.apache.spark" %% "spark-hive" % Spark sparkExclusions // ApacheV2
object Metrics {
val metricsCore = "com.codahale.metrics" % "metrics-core" % CodaHaleMetrics % "provided"
val metricsJson = "com.codahale.metrics" % "metrics-json" % CodaHaleMetrics % "provided"
}
object Jetty {
val jettyServer = "org.eclipse.jetty" % "jetty-server" % SparkJetty % "provided"
val jettyServlet = "org.eclipse.jetty" % "jetty-servlet" % SparkJetty % "provided"
}
object Embedded {
val akkaCluster = "com.typesafe.akka" %% "akka-cluster" % Akka // ApacheV2
val cassandraServer = "org.apache.cassandra" % "cassandra-all" % Cassandra logbackExclude // ApacheV2
val jopt = "net.sf.jopt-simple" % "jopt-simple" % JOpt
val kafka = "org.apache.kafka" %% "kafka" % Kafka kafkaExclusions // ApacheV2
val sparkRepl = "org.apache.spark" %% "spark-repl" % Spark % "provided" replExclusions // ApacheV2
val snappy = "org.xerial.snappy" % "snappy-java" % "1.1.1.7"
}
object Demos {
val kafka = "org.apache.kafka" % "kafka_2.10" % Kafka kafkaExclusions // ApacheV2
val kafkaStreaming = "org.apache.spark" % "spark-streaming-kafka_2.10" % Spark % "provided" sparkExclusions // ApacheV2
val twitterStreaming = "org.apache.spark" %% "spark-streaming-twitter" % Spark % "provided" sparkExclusions // ApacheV2
}
object Test {
val akkaTestKit = "com.typesafe.akka" %% "akka-testkit" % Akka % "test,it" // ApacheV2
val commonsIO = "commons-io" % "commons-io" % CommonsIO % "test,it" // ApacheV2
val scalaMock = "org.scalamock" %% "scalamock-scalatest-support" % ScalaMock % "test,it" // BSD
val scalaTest = "org.scalatest" %% "scalatest" % ScalaTest % "test,it" // ApacheV2
val scalactic = "org.scalactic" %% "scalactic" % Scalactic % "test,it" // ApacheV2
val mockito = "org.mockito" % "mockito-all" % "1.10.19" % "test,it" // MIT
val junit = "junit" % "junit" % "4.11" % "test,it"
val junitInterface = "com.novocode" % "junit-interface" % "0.10" % "test,it"
val powerMock = "org.powermock" % "powermock-module-junit4" % "1.6.2" % "test,it" // ApacheV2
val powerMockMockito = "org.powermock" % "powermock-api-mockito" % "1.6.2" % "test,it" // ApacheV2
}
}
import Compile._
import BuildUtil._
val logging = Seq(slf4jApi)
val metrics = Seq(Metrics.metricsCore, Metrics.metricsJson)
val jetty = Seq(Jetty.jettyServer, Jetty.jettyServlet)
val testKit = Seq(
Test.akkaTestKit,
Test.commonsIO,
Test.junit,
Test.junitInterface,
Test.scalaMock,
Test.scalaTest,
Test.scalactic,
Test.mockito,
Test.powerMock,
Test.powerMockMockito
)
val akka = Seq(akkaActor, akkaRemote, akkaSlf4j)
val cassandra = Seq(cassandraClient, cassandraDriver)
val spark = Seq(sparkCore, sparkStreaming, sparkSql, sparkCatalyst, sparkHive, sparkUnsafe)
val connector = testKit ++ metrics ++ jetty ++ logging ++ akka ++ cassandra ++ spark.map(_ % "provided") ++ Seq(
commonsLang3, config, guava, jodaC, jodaT, lzf, jsr166e)
val embedded = logging ++ spark ++ cassandra ++ Seq(
Embedded.cassandraServer, Embedded.jopt, Embedded.sparkRepl, Embedded.kafka, Embedded.snappy)
val kafka = Seq(Demos.kafka, Demos.kafkaStreaming)
val twitter = Seq(sparkStreaming, Demos.twitterStreaming)
val documentationMappings = Seq(
DocumentationMapping(url(s"http://spark.apache.org/docs/${Versions.Spark}/api/scala/"),
sparkCore, sparkStreaming, sparkSql, sparkCatalyst, sparkHive
),
DocumentationMapping(url(s"http://doc.akka.io/api/akka/${Versions.Akka}/"),
akkaActor, akkaRemote, akkaSlf4j
)
)
}
| rafaelbarreto87/spark-cassandra-connector | project/CassandraSparkBuild.scala | Scala | apache-2.0 | 11,813 |
/*
* Copyright 2017 Zhang Di
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.dizhang.seqspark.annot
import org.apache.spark.SparkContext
import org.dizhang.seqspark.annot.NucleicAcid._
import org.dizhang.seqspark.ds.Region
import org.slf4j.LoggerFactory
/**
* refgene
*/
object RefGene {
val logger = LoggerFactory.getLogger(this.getClass)
def makeLocation(line: String, header: Array[String]): Location = {
val s = line.split("\\t")
val m = header.zip(s).toMap
val geneName = m("geneName")
val mRNAName = m("name")
val strand = if (m("strand") == "+") Location.Strand.Positive else Location.Strand.Negative
val t = s"${m(s"chrom")}:${m("cdsStart")}-${m("cdsEnd")}"
//println(t)
val cds = Region(t)
val exons = m("exonStarts").split(",").zip(m("exonEnds").split(",")).map(e => Region(s"${cds.chr}:${e._1}-${e._2}"))
Location(geneName, mRNAName, strand, exons, cds)
}
def makeExons(line: String, header: Array[String]): Array[Region] = {
//println(line)
val s = line.split("\\t")
val m = header.zip(s).toMap
val exons = m("exonStarts").split(",").zip(m("exonEnds").split(","))
.map{ e => val t = s"${m("chrom")}:${e._1.toInt - 2}-${e._2.toInt + 2}"; Region(t)}
exons
}
def apply(build: String, coordFile: String, seqFile: String)(implicit sc: SparkContext): RefGene = {
logger.info(s"load RefSeq: coord: $coordFile seq: $seqFile")
val locRaw = sc.textFile(coordFile, 10)
//locRaw.cache()
val header = locRaw.first().split("\\t")
val locRdd = locRaw.zipWithUniqueId().filter(_._2 > 0).map(_._1)
val loci = IntervalTree(locRdd.map(l => makeLocation(l, header)).toLocalIterator)
val seqName = """>(\\w+_\\d+)\\.\\d+""".r
val seqLines = sc.textFile(seqFile, 20)
val seq2 = seqLines.map{
case seqName(n) => Array((n, ""))
case l => Array(("", l))
}
def mergeFa(a: Array[(String, String)], b: Array[(String, String)]): Array[(String, String)] = {
if (a.isEmpty) {
b
} else if (b.isEmpty) {
a
} else if (b.head._1 != "") {
a ++ b
} else {
(a.take(a.length - 1) :+ (a.last._1, a.last._2 + b.head._2)) ++ b.drop(1)
}
}
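    // Worked examples of mergeFa (values chosen for illustration only):
    //   mergeFa(Array(("NM_1", "ACGT")), Array(("", "TTAA")))
    //     == Array(("NM_1", "ACGTTTAA"))            // continuation lines are appended
    //   mergeFa(Array(("NM_1", "ACGT")), Array(("NM_2", "")))
    //     == Array(("NM_1", "ACGT"), ("NM_2", ""))  // a new header starts a new entry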
/**
      * The RDD fold method doesn't produce the right result for non-commutative
      * functions, so we fold within each partition first and then fold the
      * partition results.
* */
val seq = seq2.mapPartitions{p =>
p.fold(Array()){(a, b) =>
mergeFa(a, b)
}.toIterator
}.collect().map(x => Array(x)).fold(Array()){(a, b) =>
mergeFa(a,b)
}.map(s => s._1 -> makeRNA(s._1, s._2)).toMap
/**
val seq = seq2.fold(Array()){(a, b) =>
mergeFa(a, b)
}.map(s => (s._1, makeRNA(s._1, s._2))).toMap
*/
//logger.debug(s"${seq.take(100).keys.mkString(":")}")
logger.info(s"${seq.size} transcript sequences")
/**
val names = seq.keys
val pw = new PrintWriter(new File("output/test.seq"))
//pw.write(s"${seq("").toString}\\n")
for (k <- names) {
val s = seq(k)
pw.write(s"$k: ${s.length}\\n")
}
pw.close()
*/
val res = new RefGene(build, loci, seq)
logger.info(s"${IntervalTree.count(res.loci)} locations generated")
/**
val test = IntervalTree.lookup(res.loci, Single(1, 1296690))
test match {
case Nil => logger.info("cannot find anything for chr1:1296691")
case _ => logger.info(s"here we go: ${test.last.toString}")
}
*/
res
}
}
@SerialVersionUID(0L)
class RefGene(val build: String,
val loci: IntervalTree[Location],
val seq: Map[String, mRNA]) extends Serializable | statgenetics/seqspark | src/main/scala/org/dizhang/seqspark/annot/RefGene.scala | Scala | apache-2.0 | 4,208 |
/*
* Copyright 2007-2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb {
package record {
package field {
import scala.xml._
import net.liftweb.util._
import net.liftweb.common._
import net.liftweb.http.{S, SHtml}
import net.liftweb.http.js._
import S._
import Helpers._
import JE._
class EnumField[OwnerType <: Record[OwnerType], ENUM <: Enumeration](rec: OwnerType, enum: ENUM) extends Field[ENUM#Value, OwnerType] {
def owner = rec
def toInt = value.id
def fromInt(in: Int): ENUM#Value = enum(in)
override protected def set_!(value: ENUM#Value): ENUM#Value = {
if (value != data) {
data = value
dirty_?(true)
}
data
}
def setFromAny(in: Any): Box[ENUM#Value] = {
in match {
case n: Int => Full(set(fromInt(n)))
case n: Long => Full(set(fromInt(n.toInt)))
case n: Number => Full(set(fromInt(n.intValue)))
case (n: Number) :: _ => Full(set(fromInt(n.intValue)))
case Some(n: Number) => Full(set(fromInt(n.intValue)))
case Full(n: Number) => Full(set(fromInt(n.intValue)))
case None | Empty | Failure(_, _, _) => Full(set(defaultValue))
case (s: String) :: _ => Full(set(fromInt(Helpers.toInt(s))))
case vs: ENUM#Value => Full(set(vs))
case null => Full(set(defaultValue))
case s: String => Full(set(fromInt(Helpers.toInt(s))))
case o => Full(set(fromInt(Helpers.toInt(o))))
}
}
def setFromString(s: String): Box[ENUM#Value] = setFromAny(s)
/**
   * Build a list for the select. Return a tuple of (Int, String) where the first
   * element is the id of the Value and the second is the text name of the Value.
*/
def buildDisplayList: List[(Int, String)] = enum.elements.toList.map(a => (a.id, a.toString))
private def elem = SHtml.selectObj[Int](buildDisplayList, Full(toInt), this.setFromAny(_)) % ("tabindex" -> tabIndex.toString)
def toForm = {
var el = elem
uniqueFieldId match {
case Full(id) =>
<div id={id+"_holder"}><div><label for={id+"_field"}>{displayName}</label></div>{el % ("id" -> (id+"_field"))}<lift:msg id={id}/></div>
case _ => <div>{el}</div>
}
}
def asXHtml: NodeSeq = {
var el = elem
uniqueFieldId match {
case Full(id) => el % ("id" -> (id+"_field"))
case _ => el
}
}
def defaultValue: ENUM#Value = enum.elements.next
def asJs = Str(toString)
}
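// A minimal usage sketch (hypothetical enumeration and record, not part of the original file):
//
//   object Color extends Enumeration { val Red, Green, Blue = Value }
//   // inside some Record[MyRecord] subclass:
//   object color extends EnumField(this, Color)
//
// color.toForm then renders a select built from buildDisplayList, and setFromAny accepts
// either the numeric id or an ENUM#Value.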
import _root_.java.sql.{ResultSet, Types}
import _root_.net.liftweb.mapper.{DriverType}
/**
* An enum field holding DB related logic
*/
abstract class DBEnumField[OwnerType <: DBRecord[OwnerType], ENUM <: Enumeration](rec: OwnerType, enum: ENUM) extends
EnumField(rec, enum) with JDBCFieldFlavor[Integer] {
def targetSQLType = Types.VARCHAR
/**
* Given the driver type, return the string required to create the column in the database
*/
def fieldCreatorString(dbType: DriverType, colName: String): String = colName + " " + dbType.enumColumnType
def jdbcFriendly(field: String) = new _root_.java.lang.Integer(toInt)
}
}
}
}
| jeppenejsum/liftweb | framework/lift-persistence/lift-record/src/main/scala/net/liftweb/record/field/EnumField.scala | Scala | apache-2.0 | 3,637 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.aggregate
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.streaming.{StateStoreRestoreExec, StateStoreSaveExec}
/**
* Utility functions used by the query planner to convert our plan to new aggregation code path.
*/
object AggUtils {
def planAggregateWithoutPartial(
groupingExpressions: Seq[NamedExpression],
aggregateExpressions: Seq[AggregateExpression],
resultExpressions: Seq[NamedExpression],
child: SparkPlan): Seq[SparkPlan] = {
val completeAggregateExpressions = aggregateExpressions.map(_.copy(mode = Complete))
val completeAggregateAttributes = completeAggregateExpressions.map(_.resultAttribute)
SortAggregateExec(
requiredChildDistributionExpressions = Some(groupingExpressions),
groupingExpressions = groupingExpressions,
aggregateExpressions = completeAggregateExpressions,
aggregateAttributes = completeAggregateAttributes,
initialInputBufferOffset = 0,
resultExpressions = resultExpressions,
child = child
) :: Nil
}
private def createAggregate(
requiredChildDistributionExpressions: Option[Seq[Expression]] = None,
groupingExpressions: Seq[NamedExpression] = Nil,
aggregateExpressions: Seq[AggregateExpression] = Nil,
aggregateAttributes: Seq[Attribute] = Nil,
initialInputBufferOffset: Int = 0,
resultExpressions: Seq[NamedExpression] = Nil,
child: SparkPlan): SparkPlan = {
val useHash = HashAggregateExec.supportsAggregate(
aggregateExpressions.flatMap(_.aggregateFunction.aggBufferAttributes))
if (useHash) {
HashAggregateExec(
requiredChildDistributionExpressions = requiredChildDistributionExpressions,
groupingExpressions = groupingExpressions,
aggregateExpressions = aggregateExpressions,
aggregateAttributes = aggregateAttributes,
initialInputBufferOffset = initialInputBufferOffset,
resultExpressions = resultExpressions,
child = child)
} else {
SortAggregateExec(
requiredChildDistributionExpressions = requiredChildDistributionExpressions,
groupingExpressions = groupingExpressions,
aggregateExpressions = aggregateExpressions,
aggregateAttributes = aggregateAttributes,
initialInputBufferOffset = initialInputBufferOffset,
resultExpressions = resultExpressions,
child = child)
}
}
def planAggregateWithoutDistinct(
groupingExpressions: Seq[NamedExpression],
aggregateExpressions: Seq[AggregateExpression],
resultExpressions: Seq[NamedExpression],
child: SparkPlan): Seq[SparkPlan] = {
// Check if we can use HashAggregate.
// 1. Create an Aggregate Operator for partial aggregations.
val groupingAttributes = groupingExpressions.map(_.toAttribute)
val partialAggregateExpressions = aggregateExpressions.map(_.copy(mode = Partial))
val partialAggregateAttributes =
partialAggregateExpressions.flatMap(_.aggregateFunction.aggBufferAttributes)
val partialResultExpressions =
groupingAttributes ++
partialAggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes)
val partialAggregate = createAggregate(
requiredChildDistributionExpressions = None,
groupingExpressions = groupingExpressions,
aggregateExpressions = partialAggregateExpressions,
aggregateAttributes = partialAggregateAttributes,
initialInputBufferOffset = 0,
resultExpressions = partialResultExpressions,
child = child)
// 2. Create an Aggregate Operator for final aggregations.
val finalAggregateExpressions = aggregateExpressions.map(_.copy(mode = Final))
// The attributes of the final aggregation buffer, which is presented as input to the result
// projection:
val finalAggregateAttributes = finalAggregateExpressions.map(_.resultAttribute)
val finalAggregate = createAggregate(
requiredChildDistributionExpressions = Some(groupingAttributes),
groupingExpressions = groupingAttributes,
aggregateExpressions = finalAggregateExpressions,
aggregateAttributes = finalAggregateAttributes,
initialInputBufferOffset = groupingExpressions.length,
resultExpressions = resultExpressions,
child = partialAggregate)
finalAggregate :: Nil
}
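  // Rough shape of the physical plan produced above for a query such as
  //   SELECT key, SUM(value) FROM t GROUP BY key
  // (bottom-up): a partial aggregate on each input partition, an exchange on `key` (implied by
  // requiredChildDistributionExpressions), then the final aggregate that evaluates the result
  // expressions. Sketch only; whether hash or sort aggregation is used is decided in
  // createAggregate.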
def planAggregateWithOneDistinct(
groupingExpressions: Seq[NamedExpression],
functionsWithDistinct: Seq[AggregateExpression],
functionsWithoutDistinct: Seq[AggregateExpression],
resultExpressions: Seq[NamedExpression],
child: SparkPlan): Seq[SparkPlan] = {
// functionsWithDistinct is guaranteed to be non-empty. Even though it may contain more than one
// DISTINCT aggregate function, all of those functions will have the same column expressions.
// For example, it would be valid for functionsWithDistinct to be
// [COUNT(DISTINCT foo), MAX(DISTINCT foo)], but [COUNT(DISTINCT bar), COUNT(DISTINCT foo)] is
// disallowed because those two distinct aggregates have different column expressions.
val distinctExpressions = functionsWithDistinct.head.aggregateFunction.children
val namedDistinctExpressions = distinctExpressions.map {
case ne: NamedExpression => ne
case other => Alias(other, other.toString)()
}
val distinctAttributes = namedDistinctExpressions.map(_.toAttribute)
val groupingAttributes = groupingExpressions.map(_.toAttribute)
// 1. Create an Aggregate Operator for partial aggregations.
val partialAggregate: SparkPlan = {
val aggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = Partial))
val aggregateAttributes = aggregateExpressions.map(_.resultAttribute)
// We will group by the original grouping expression, plus an additional expression for the
// DISTINCT column. For example, for AVG(DISTINCT value) GROUP BY key, the grouping
// expressions will be [key, value].
createAggregate(
groupingExpressions = groupingExpressions ++ namedDistinctExpressions,
aggregateExpressions = aggregateExpressions,
aggregateAttributes = aggregateAttributes,
resultExpressions = groupingAttributes ++ distinctAttributes ++
aggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes),
child = child)
}
// 2. Create an Aggregate Operator for partial merge aggregations.
val partialMergeAggregate: SparkPlan = {
val aggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = PartialMerge))
val aggregateAttributes = aggregateExpressions.map(_.resultAttribute)
createAggregate(
requiredChildDistributionExpressions =
Some(groupingAttributes ++ distinctAttributes),
groupingExpressions = groupingAttributes ++ distinctAttributes,
aggregateExpressions = aggregateExpressions,
aggregateAttributes = aggregateAttributes,
initialInputBufferOffset = (groupingAttributes ++ distinctAttributes).length,
resultExpressions = groupingAttributes ++ distinctAttributes ++
aggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes),
child = partialAggregate)
}
// 3. Create an Aggregate operator for partial aggregation (for distinct)
val distinctColumnAttributeLookup = distinctExpressions.zip(distinctAttributes).toMap
val rewrittenDistinctFunctions = functionsWithDistinct.map {
// Children of an AggregateFunction with DISTINCT keyword has already
// been evaluated. At here, we need to replace original children
// to AttributeReferences.
case agg @ AggregateExpression(aggregateFunction, mode, true, _) =>
aggregateFunction.transformDown(distinctColumnAttributeLookup)
.asInstanceOf[AggregateFunction]
}
val partialDistinctAggregate: SparkPlan = {
val mergeAggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = PartialMerge))
// The attributes of the final aggregation buffer, which is presented as input to the result
// projection:
val mergeAggregateAttributes = mergeAggregateExpressions.map(_.resultAttribute)
val (distinctAggregateExpressions, distinctAggregateAttributes) =
rewrittenDistinctFunctions.zipWithIndex.map { case (func, i) =>
// We rewrite the aggregate function to a non-distinct aggregation because
// its input will have distinct arguments.
// We just keep the isDistinct setting to true, so when users look at the query plan,
// they still can see distinct aggregations.
val expr = AggregateExpression(func, Partial, isDistinct = true)
// Use original AggregationFunction to lookup attributes, which is used to build
// aggregateFunctionToAttribute
val attr = functionsWithDistinct(i).resultAttribute
(expr, attr)
}.unzip
val partialAggregateResult = groupingAttributes ++
mergeAggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes) ++
distinctAggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes)
createAggregate(
groupingExpressions = groupingAttributes,
aggregateExpressions = mergeAggregateExpressions ++ distinctAggregateExpressions,
aggregateAttributes = mergeAggregateAttributes ++ distinctAggregateAttributes,
initialInputBufferOffset = (groupingAttributes ++ distinctAttributes).length,
resultExpressions = partialAggregateResult,
child = partialMergeAggregate)
}
// 4. Create an Aggregate Operator for the final aggregation.
val finalAndCompleteAggregate: SparkPlan = {
val finalAggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = Final))
// The attributes of the final aggregation buffer, which is presented as input to the result
// projection:
val finalAggregateAttributes = finalAggregateExpressions.map(_.resultAttribute)
val (distinctAggregateExpressions, distinctAggregateAttributes) =
rewrittenDistinctFunctions.zipWithIndex.map { case (func, i) =>
// We rewrite the aggregate function to a non-distinct aggregation because
// its input will have distinct arguments.
// We just keep the isDistinct setting to true, so when users look at the query plan,
// they still can see distinct aggregations.
val expr = AggregateExpression(func, Final, isDistinct = true)
// Use original AggregationFunction to lookup attributes, which is used to build
// aggregateFunctionToAttribute
val attr = functionsWithDistinct(i).resultAttribute
(expr, attr)
}.unzip
createAggregate(
requiredChildDistributionExpressions = Some(groupingAttributes),
groupingExpressions = groupingAttributes,
aggregateExpressions = finalAggregateExpressions ++ distinctAggregateExpressions,
aggregateAttributes = finalAggregateAttributes ++ distinctAggregateAttributes,
initialInputBufferOffset = groupingAttributes.length,
resultExpressions = resultExpressions,
child = partialDistinctAggregate)
}
finalAndCompleteAggregate :: Nil
}
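  // Rough shape of the plan produced by planAggregateWithOneDistinct for e.g.
  //   SELECT key, COUNT(DISTINCT value) FROM t GROUP BY key
  // following the numbered steps above: (1) partial aggregate grouped by [key, value],
  // (2) exchange on [key, value] plus partial merge, (3) partial aggregate of the rewritten
  // (no longer distinct) functions grouped by [key], and (4) exchange on [key] plus the final
  // aggregate that evaluates the result expressions. Sketch only; operator choice is made in
  // createAggregate.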
/**
* Plans a streaming aggregation using the following progression:
* - Partial Aggregation
* - Shuffle
* - Partial Merge (now there is at most 1 tuple per group)
* - StateStoreRestore (now there is 1 tuple from this batch + optionally one from the previous)
* - PartialMerge (now there is at most 1 tuple per group)
* - StateStoreSave (saves the tuple for the next batch)
* - Complete (output the current result of the aggregation)
*/
def planStreamingAggregation(
groupingExpressions: Seq[NamedExpression],
functionsWithoutDistinct: Seq[AggregateExpression],
resultExpressions: Seq[NamedExpression],
child: SparkPlan): Seq[SparkPlan] = {
val groupingAttributes = groupingExpressions.map(_.toAttribute)
val partialAggregate: SparkPlan = {
val aggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = Partial))
val aggregateAttributes = aggregateExpressions.map(_.resultAttribute)
      // We group by the original grouping expressions only. Unlike the distinct case above,
      // no additional grouping expression is needed because only non-distinct aggregate
      // functions are planned here.
createAggregate(
groupingExpressions = groupingExpressions,
aggregateExpressions = aggregateExpressions,
aggregateAttributes = aggregateAttributes,
resultExpressions = groupingAttributes ++
aggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes),
child = child)
}
val partialMerged1: SparkPlan = {
val aggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = PartialMerge))
val aggregateAttributes = aggregateExpressions.map(_.resultAttribute)
createAggregate(
requiredChildDistributionExpressions =
Some(groupingAttributes),
groupingExpressions = groupingAttributes,
aggregateExpressions = aggregateExpressions,
aggregateAttributes = aggregateAttributes,
initialInputBufferOffset = groupingAttributes.length,
resultExpressions = groupingAttributes ++
aggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes),
child = partialAggregate)
}
val restored = StateStoreRestoreExec(groupingAttributes, None, partialMerged1)
val partialMerged2: SparkPlan = {
val aggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = PartialMerge))
val aggregateAttributes = aggregateExpressions.map(_.resultAttribute)
createAggregate(
requiredChildDistributionExpressions =
Some(groupingAttributes),
groupingExpressions = groupingAttributes,
aggregateExpressions = aggregateExpressions,
aggregateAttributes = aggregateAttributes,
initialInputBufferOffset = groupingAttributes.length,
resultExpressions = groupingAttributes ++
aggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes),
child = restored)
}
// Note: stateId and returnAllStates are filled in later with preparation rules
// in IncrementalExecution.
val saved = StateStoreSaveExec(
groupingAttributes, stateId = None, returnAllStates = None, partialMerged2)
val finalAndCompleteAggregate: SparkPlan = {
val finalAggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = Final))
// The attributes of the final aggregation buffer, which is presented as input to the result
// projection:
val finalAggregateAttributes = finalAggregateExpressions.map(_.resultAttribute)
createAggregate(
requiredChildDistributionExpressions = Some(groupingAttributes),
groupingExpressions = groupingAttributes,
aggregateExpressions = finalAggregateExpressions,
aggregateAttributes = finalAggregateAttributes,
initialInputBufferOffset = groupingAttributes.length,
resultExpressions = resultExpressions,
child = saved)
}
finalAndCompleteAggregate :: Nil
}
}
| gioenn/xSpark | sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggUtils.scala | Scala | apache-2.0 | 16,348 |
package pl.edu.agh.mplt.parser.AMPL
import org.scalatest.{Matchers, FlatSpec}
import pl.edu.agh.mplt.parser.{AMPLParser, IntercodeImplicits}
import pl.edu.agh.mplt.parser.phrase.set.{Indexing, IndexedSet}
import pl.edu.agh.mplt.parser.reference.SimpleReference
import pl.edu.agh.mplt.parser.declaration.data.SetDeclaration
class CommentsTest extends FlatSpec with Matchers with IntercodeImplicits {
val parser = AMPLParser()
def expr = parser.datatypeDeclaration
def parse(input: String) = parser.parseAll(expr, input)
"Comment parser" should "parse simple comments" in {
parse(
""" # Here goes comment
| set # some meaningless comment
| x {i # never expected a comment in here?
| in A #surprise
| }
| ; #Another one goes here
| #This is the las one
| ## except for this one""".stripMargin).get should be(SetDeclaration("x",
indexing = Some(Indexing(List(IndexedSet(List("i"), SimpleReference("A")))))))
}
}
| marek1840/MPLT | src/test/scala/pl/edu/agh/mplt/parser/AMPL/CommentsTest.scala | Scala | mit | 1,029 |
package com.socrata.balboa.admin
import com.socrata.balboa.admin.tools.Lister
import com.socrata.balboa.metrics.data.{DataStore, DataStoreFactory}
import org.scalatest.{FlatSpec, Matchers}
import org.scalatest.mock.MockitoSugar
import org.mockito.Mockito._
import org.mockito.ArgumentMatchers.{eq => eqTo}
import scala.collection.mutable.ListBuffer
import scala.collection.JavaConverters._
class ListerTest extends FlatSpec with Matchers with MockitoSugar {
val dsf = mock[DataStoreFactory]
val datastore = mock[DataStore]
val entities = List(1, 2, 4, 3, 2, 5).map(_.toString)
val filter = "filter"
when(dsf.get).thenReturn(datastore)
when(datastore.entities()).thenReturn(entities.iterator)
when(datastore.entities(eqTo[String](filter))).thenReturn(List("2", "2").iterator)
val lister = new Lister(dsf)
"list() with no filters" should "print all the entities directly, removing duplicates" in {
val printed = new ListBuffer[String]
lister.list(Nil.iterator, entity => printed += entity)
printed.toList should be (entities.distinct)
}
"list() with filters" should "print all entities passing any of the filters, removing duplicates" in {
val printed = new ListBuffer[String]
lister.list(List(filter).iterator, entity => printed += entity)
printed.toList should be (List("2"))
}
"list() from java with no filters" should "not throw errors" in {
lister.listJava(List.empty[String].iterator.asJava)
}
}
| socrata-platform/balboa | balboa-admin/src/test/scala/com/socrata/balboa/admin/ListerTest.scala | Scala | apache-2.0 | 1,463 |
package org.bone.ircballoon.model
case class IRCInfo(
hostname: String, port: Int, nickname: String,
channel: String, password: Option[String] = None,
showJoin: Boolean, showLeave: Boolean
)
case class IRCUser(nickname: String, isOP: Boolean, isBroadcaster: Boolean)
| brianhsu/IRCBalloon | src/main/scala/model/IRCModel.scala | Scala | gpl-3.0 | 278 |
package utils.remote.amazon.operation
import play.api.test._
import utils.remote.amazon.stackable.CartRG
import scala.concurrent.duration.FiniteDuration
import utils.remote.amazon.AmazonSpecification
/**
* @author alari ([email protected])
* @since 24.10.13 13:59
*/
class CartCreateSpec extends AmazonSpecification {
import main.Implicits.amazonOp
"cart create operation" should {
"create a cart" in new WithApplication {
maybeUnavailable {
CartCreate.byAsins("1476745374" -> 2) must beLike[Seq[CartRG]] {
case cart =>
cart.head.id.length must be_>=(1)
cart.head.purchaseUrl.length must be_>=(10)
}.await(2, FiniteDuration(2, "seconds"))
}
}
}
}
| alari/amazon-scala-ecommerce | src/test/scala/amazon/operation/CartCreateSpec.scala | Scala | apache-2.0 | 734 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.kafka
import scala.reflect.ClassTag
import scala.collection.JavaConversions._
import java.lang.{Integer => JInt}
import java.util.{Map => JMap}
import kafka.serializer.{Decoder, StringDecoder}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.api.java.{JavaPairReceiverInputDStream, JavaStreamingContext, JavaPairDStream}
import org.apache.spark.streaming.dstream.{ReceiverInputDStream, DStream}
object KafkaUtils {
/**
* Create an input stream that pulls messages from a Kafka Broker.
* @param ssc StreamingContext object
* @param zkQuorum Zookeeper quorum (hostname:port,hostname:port,..)
* @param groupId The group id for this consumer
* @param topics Map of (topic_name -> numPartitions) to consume. Each partition is consumed
* in its own thread
* @param storageLevel Storage level to use for storing the received objects
* (default: StorageLevel.MEMORY_AND_DISK_SER_2)
*/
def createStream(
ssc: StreamingContext,
zkQuorum: String,
groupId: String,
topics: Map[String, Int],
storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2
): ReceiverInputDStream[(String, String)] = {
val kafkaParams = Map[String, String](
"zookeeper.connect" -> zkQuorum, "group.id" -> groupId,
"zookeeper.connection.timeout.ms" -> "10000")
createStream[String, String, StringDecoder, StringDecoder](
ssc, kafkaParams, topics, storageLevel)
}
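  // A minimal usage sketch (assumed SparkConf, quorum, group and topic names, not part of the
  // original file):
  //
  //   val ssc = new StreamingContext(sparkConf, Seconds(2))
  //   val stream = KafkaUtils.createStream(ssc, "zk1:2181", "my-group", Map("my-topic" -> 1))
  //   stream.map(_._2).print()   // each record is a (key, message) pair
  //   ssc.start()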
/**
* Create an input stream that pulls messages from a Kafka Broker.
* @param ssc StreamingContext object
* @param kafkaParams Map of kafka configuration parameters,
* see http://kafka.apache.org/08/configuration.html
* @param topics Map of (topic_name -> numPartitions) to consume. Each partition is consumed
* in its own thread.
* @param storageLevel Storage level to use for storing the received objects
*/
def createStream[K: ClassTag, V: ClassTag, U <: Decoder[_]: Manifest, T <: Decoder[_]: Manifest](
ssc: StreamingContext,
kafkaParams: Map[String, String],
topics: Map[String, Int],
storageLevel: StorageLevel
): ReceiverInputDStream[(K, V)] = {
new KafkaInputDStream[K, V, U, T](ssc, kafkaParams, topics, storageLevel)
}
/**
   * Create an input stream that pulls messages from a Kafka Broker.
* Storage level of the data will be the default StorageLevel.MEMORY_AND_DISK_SER_2.
* @param jssc JavaStreamingContext object
* @param zkQuorum Zookeeper quorum (hostname:port,hostname:port,..)
* @param groupId The group id for this consumer
* @param topics Map of (topic_name -> numPartitions) to consume. Each partition is consumed
* in its own thread
*/
def createStream(
jssc: JavaStreamingContext,
zkQuorum: String,
groupId: String,
topics: JMap[String, JInt]
): JavaPairReceiverInputDStream[String, String] = {
implicit val cmt: ClassTag[String] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[String]]
createStream(jssc.ssc, zkQuorum, groupId, Map(topics.mapValues(_.intValue()).toSeq: _*))
}
/**
   * Create an input stream that pulls messages from a Kafka Broker.
* @param jssc JavaStreamingContext object
* @param zkQuorum Zookeeper quorum (hostname:port,hostname:port,..).
* @param groupId The group id for this consumer.
* @param topics Map of (topic_name -> numPartitions) to consume. Each partition is consumed
* in its own thread.
* @param storageLevel RDD storage level.
*
*/
def createStream(
jssc: JavaStreamingContext,
zkQuorum: String,
groupId: String,
topics: JMap[String, JInt],
storageLevel: StorageLevel
): JavaPairReceiverInputDStream[String, String] = {
implicit val cmt: ClassTag[String] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[String]]
createStream(jssc.ssc, zkQuorum, groupId, Map(topics.mapValues(_.intValue()).toSeq: _*),
storageLevel)
}
/**
   * Create an input stream that pulls messages from a Kafka Broker.
* @param jssc JavaStreamingContext object
* @param keyTypeClass Key type of RDD
* @param valueTypeClass value type of RDD
* @param keyDecoderClass Type of kafka key decoder
* @param valueDecoderClass Type of kafka value decoder
* @param kafkaParams Map of kafka configuration parameters,
* see http://kafka.apache.org/08/configuration.html
* @param topics Map of (topic_name -> numPartitions) to consume. Each partition is consumed
* in its own thread
* @param storageLevel RDD storage level.
*/
def createStream[K, V, U <: Decoder[_], T <: Decoder[_]](
jssc: JavaStreamingContext,
keyTypeClass: Class[K],
valueTypeClass: Class[V],
keyDecoderClass: Class[U],
valueDecoderClass: Class[T],
kafkaParams: JMap[String, String],
topics: JMap[String, JInt],
storageLevel: StorageLevel
): JavaPairReceiverInputDStream[K, V] = {
implicit val keyCmt: ClassTag[K] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[K]]
implicit val valueCmt: ClassTag[V] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[V]]
implicit val keyCmd: Manifest[U] = implicitly[Manifest[AnyRef]].asInstanceOf[Manifest[U]]
implicit val valueCmd: Manifest[T] = implicitly[Manifest[AnyRef]].asInstanceOf[Manifest[T]]
createStream[K, V, U, T](
jssc.ssc, kafkaParams.toMap, Map(topics.mapValues(_.intValue()).toSeq: _*), storageLevel)
}
}
| zhangjunfang/eclipse-dir | spark/external/kafka/src/main/scala/org/apache/spark/streaming/kafka/KafkaUtils.scala | Scala | bsd-2-clause | 6,565 |
package net.lshift.diffa.kernel.diag
import collection.mutable.{ListBuffer, HashMap}
import net.lshift.diffa.kernel.differencing.{PairScanState, PairScanListener}
import net.lshift.diffa.kernel.lifecycle.{NotificationCentre, AgentLifecycleAware}
import org.slf4j.LoggerFactory
import java.io._
import java.util.zip.{ZipEntry, ZipOutputStream}
import org.apache.commons.io.IOUtils
import net.lshift.diffa.kernel.config.{ConfigOption, DiffaPairRef, DomainConfigStore}
import net.lshift.diffa.kernel.config.system.SystemConfigStore
import org.joda.time.format.{DateTimeFormat, ISODateTimeFormat}
import org.joda.time.{DateTimeZone, DateTime}
/**
* Local in-memory implementation of the DiagnosticsManager.
*
* TODO: Release resources when pair is removed
*/
class LocalDiagnosticsManager(systemConfigStore: SystemConfigStore, domainConfigStore:DomainConfigStore, explainRootDir:String)
extends DiagnosticsManager
with PairScanListener
with AgentLifecycleAware {
private val pairs = HashMap[DiffaPairRef, PairDiagnostics]()
private val defaultMaxEventsPerPair = 100
private val defaultMaxExplainFilesPerPair = 20
private val timeFormatter = ISODateTimeFormat.time()
private val fileNameFormatter = DateTimeFormat.forPattern(DiagnosticsManager.fileSystemFriendlyDateFormat)
def getPairFromRef(ref: DiffaPairRef) = domainConfigStore.getPairDef(ref.domain, ref.key)
def checkpointExplanations(pair: DiffaPairRef) {
maybeGetPair(pair).map(p => p.checkpointExplanations())
}
def logPairEvent(level: DiagnosticLevel, pair: DiffaPairRef, msg: String) {
val pairDiag = getOrCreatePair(pair)
pairDiag.logPairEvent(PairEvent(new DateTime(), level, msg))
}
def logPairExplanation(pair: DiffaPairRef, source:String, msg: String) {
getOrCreatePair(pair).logPairExplanation(source, msg)
}
def writePairExplanationObject(pair:DiffaPairRef, source:String, objName: String, f:OutputStream => Unit) {
getOrCreatePair(pair).writePairExplanationObject(source, objName, f)
}
def queryEvents(pair:DiffaPairRef, maxEvents: Int) = {
pairs.synchronized { pairs.get(pair) } match {
case None => Seq()
case Some(pairDiag) => pairDiag.queryEvents(maxEvents)
}
}
def retrievePairScanStatesForDomain(domain:String) = {
val domainPairs = domainConfigStore.listPairs(domain)
pairs.synchronized {
domainPairs.map(p => pairs.get(DiffaPairRef(p.key, domain)) match {
case None => p.key -> PairScanState.UNKNOWN
case Some(pairDiag) => p.key -> pairDiag.scanState
}).toMap
}
}
def pairScanStateChanged(pair: DiffaPairRef, scanState: PairScanState) = pairs.synchronized {
val pairDiag = getOrCreatePair(pair)
pairDiag.scanState = scanState
}
/**
* When pairs are deleted, we stop tracking their status in the pair scan map.
*/
def onDeletePair(pair:DiffaPairRef) {
pairs.synchronized {
pairs.remove(pair) match {
case None =>
case Some(pairDiag) => pairDiag.checkpointExplanations
}
}
}
//
// Lifecycle Management
//
override def onAgentInstantiationCompleted(nc: NotificationCentre) {
nc.registerForPairScanEvents(this)
}
//
// Internals
//
private def getOrCreatePair(pair:DiffaPairRef) =
pairs.synchronized { pairs.getOrElseUpdate(pair, new PairDiagnostics(pair)) }
private def maybeGetPair(pair:DiffaPairRef) =
pairs.synchronized { pairs.get(pair) }
private class PairDiagnostics(pair:DiffaPairRef) {
private val pairExplainRoot = new File(explainRootDir, pair.identifier)
private val log = ListBuffer[PairEvent]()
var scanState:PairScanState = PairScanState.UNKNOWN
private val pairDef = getPairFromRef(pair)
private val domainEventsPerPair = getConfigOrElse(pair.domain,
ConfigOption.eventExplanationLimitKey, defaultMaxEventsPerPair)
private val domainExplainFilesPerPair = getConfigOrElse(pair.domain,
ConfigOption.explainFilesLimitKey, defaultMaxExplainFilesPerPair)
private def getConfigOrElse(domain: String, configKey: String, defaultVal: Int) = try {
systemConfigStore.maybeSystemConfigOption(configKey).get.toInt
} catch {
case _ => defaultVal
}
private val maxEvents = math.min(domainEventsPerPair, pairDef.eventsToLog)
private val maxExplainFiles = math.min(domainExplainFilesPerPair, pairDef.maxExplainFiles)
private val isLoggingEnabled = maxExplainFiles > 0 && maxEvents > 0
private val explainLock = new Object
private var explainDir:File = null
private var explanationWriter:PrintWriter = null
def logPairEvent(evt:PairEvent) {
log.synchronized {
log += evt
val drop = log.length - maxEvents
if (drop > 0)
log.remove(0, drop)
}
}
def queryEvents(maxEvents:Int):Seq[PairEvent] = {
log.synchronized {
val startIdx = log.length - maxEvents
if (startIdx < 0) {
log.toSeq
} else {
log.slice(startIdx, log.length).toSeq
}
}
}
def checkpointExplanations() {
explainLock.synchronized {
if (explanationWriter != null) {
explanationWriter.close()
explanationWriter = null
}
// Compress the contents of the explanation directory
if (explainDir != null) {
compressExplanationDir(explainDir)
explainDir = null
// Ensure we don't keep too many explanation files
trimExplanations()
}
}
}
def logPairExplanation(source:String, msg:String) {
if (isLoggingEnabled) {
explainLock.synchronized {
if (explanationWriter == null) {
explanationWriter = new PrintWriter(new FileWriter(new File(currentExplainDirectory, "explain.log")))
}
explanationWriter.println("%s: [%s] %s".format(timeFormatter.print(new DateTime()), source, msg))
}
}
}
def writePairExplanationObject(source:String, objName: String, f:OutputStream => Unit) {
if (isLoggingEnabled) {
explainLock.synchronized {
val outputFile = new File(currentExplainDirectory, objName)
val outputStream = new FileOutputStream(outputFile)
try {
f(outputStream)
} finally {
outputStream.close()
}
logPairExplanation(source, "Attached object " + objName)
}
}
}
private def currentExplainDirectory = {
if (explainDir == null) {
explainDir = new File(pairExplainRoot, fileNameFormatter.print(new DateTime))
explainDir.mkdirs()
}
explainDir
}
private def compressExplanationDir(dir:File) {
val explainFiles = dir.listFiles()
if (explainFiles != null) {
val zos = new ZipOutputStream(new FileOutputStream(new File(pairExplainRoot, dir.getName + ".zip")))
explainFiles.foreach(f => {
zos.putNextEntry(new ZipEntry(f.getName))
val inputFile = new FileInputStream(f)
try {
IOUtils.copy(inputFile, zos)
} finally {
inputFile.close()
}
zos.closeEntry()
f.delete()
})
zos.close()
}
dir.delete()
}
/**
* Ensures that for each pair, only <maxExplainFilesPerPair> zips are kept. When this value is exceeded,
* files with older modification dates are removed first.
*/
private def trimExplanations() {
val explainFiles = pairExplainRoot.listFiles(new FilenameFilter() {
def accept(dir: File, name: String) = name.endsWith(".zip")
})
if (explainFiles != null && explainFiles.length > maxExplainFiles) {
val orderedFiles = explainFiles.toSeq.sortBy(f => (f.lastModified, f.getName))
orderedFiles.take(explainFiles.length - maxExplainFiles).foreach(f => f.delete())
}
}
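    // For example, with maxExplainFiles = 3 and five zips in pairExplainRoot, the two files
    // that come first in the (lastModified, name) ordering are deleted, keeping the three newest.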
}
}
| aprescott/diffa | kernel/src/main/scala/net/lshift/diffa/kernel/diag/LocalDiagnosticsManager.scala | Scala | apache-2.0 | 7,962 |
package cn.hjmao.learning.akka.http.demo.model.db
import cn.hjmao.learning.akka.http.demo.model.TokenEntity
/**
* Created by hjmao on 17-5-10.
*/
trait TokenEntityTable extends UserEntityTable {
protected val datasource: DataSource
import datasource.driver.api._
class Token(tag: Tag) extends Table[TokenEntity](tag, "token") {
def id = column[Option[Long]]("id", O.PrimaryKey, O.AutoInc)
def username = column[String]("username")
def token = column[String]("token")
def userFk = foreignKey("USER_FK", username, users)(_.username, onUpdate = ForeignKeyAction.Restrict, onDelete = ForeignKeyAction.Cascade)
def * = (id, username, token) <> ((TokenEntity.apply _).tupled, TokenEntity.unapply)
}
protected val tokens = TableQuery[Token]
}
| huajianmao/learning | framework/akka-http/demo/src/main/scala/cn/hjmao/learning/akka/http/demo/model/db/TokenEntityTable.scala | Scala | mit | 774 |
package tastytest
import Logarithms._
object TestLogarithms extends Suite("TestLogarithms") {
val Some(l1) = Logarithm.of(2)
val Some(l2) = Logarithm.of(3)
test(assert((l1 + l2).toDouble == 4.999999999999999))
test(assert((l1 * l2).toDouble == 6.0))
}
| scala/scala | test/tasty/run/src-2/tastytest/TestLogarithms.scala | Scala | apache-2.0 | 265 |
/*
* FlatBP.scala
* A structured factored inference algorithm using belief propagation.
*
* Created By: Avi Pfeffer ([email protected])
* Creation Date: March 1, 2015
*
* Copyright 2015 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email [email protected] for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.algorithm.structured.algorithm.flat
import com.cra.figaro.language._
import com.cra.figaro.algorithm.factored.factors.SumProductSemiring
import com.cra.figaro.algorithm.structured._
import com.cra.figaro.algorithm.structured.strategy._
import com.cra.figaro.algorithm.structured.solver._
import com.cra.figaro.algorithm.structured.strategy.solve._
import com.cra.figaro.algorithm.structured.algorithm._
import com.cra.figaro.algorithm.structured.strategy.decompose._
import com.cra.figaro.algorithm.factored.factors.factory._
class FlatBP(universe: Universe, iterations: Int, targets: Element[_]*) extends StructuredProbQueryAlgorithm(universe, targets:_*) {
val semiring = SumProductSemiring()
def run() {
val strategy = DecompositionStrategy.recursiveFlattenStrategy(problem, new ConstantStrategy(marginalBeliefPropagation(iterations)), defaultRangeSizer, Lower, false)
strategy.execute(initialComponents)
val joint = problem.solution.foldLeft(Factory.unit(semiring))(_.product(_))
targets.foreach(t => marginalizeToTarget(t, joint))
}
}
object FlatBP {
/**
* Create a structured belief propagation algorithm.
* @param iterations the number of iterations to use for each subproblem
* @param targets the query targets, which will all be part of the top level problem
*/
def apply(iterations: Int, targets: Element[_]*) = {
    if (targets.isEmpty) throw new IllegalArgumentException("Cannot run BP with no targets")
val universes = targets.map(_.universe).toSet
if (universes.size > 1) throw new IllegalArgumentException("Cannot have targets in different universes")
new FlatBP(targets(0).universe, iterations, targets:_*)
}
/**
* Use BP to compute the probability that the given element satisfies the given predicate.
*/
def probability[T](target: Element[T], predicate: T => Boolean, iterations: Int): Double = {
val alg = FlatBP(iterations, target)
alg.start()
val result = alg.probability(target, predicate)
alg.kill()
result
}
/**
* Use BP to compute the probability that the given element has the given value.
*/
def probability[T](target: Element[T], value: T, iterations: Int = 100): Double =
probability(target, (t: T) => t == value, iterations)
}
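// A minimal usage sketch (hypothetical one-element model, not part of the original file):
//
//   val coin = Flip(0.6)                    // from com.cra.figaro.language
//   val p = FlatBP.probability(coin, true)  // uses the convenience method above
//
// The convenience method starts the algorithm, queries the target and then kills the algorithm.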
| scottcb/figaro | Figaro/src/main/scala/com/cra/figaro/algorithm/structured/algorithm/flat/FlatBP.scala | Scala | bsd-3-clause | 2,699 |
package model
import org.scalatest.{BeforeAndAfterEach, FlatSpec, Matchers}
import scala.collection.immutable.HashSet
/**
* Created by salim on 9/9/2016.
*/
class GraphSpec extends FlatSpec with Matchers with BeforeAndAfterEach {
var w: World = null
override def beforeEach(): Unit = {
super.beforeEach()
w = World.factory(snGenratorFactory())
}
"Nodes" can "be joined together to make edges" in {
val r0 = w.newRoom
val r1 = w.newRoom
val e = w.addEdge(r0, r1)
}
they can "initially have no incident edges" in {
val r0 = w.newRoom
assert(w.incidentEdges(r0) == HashSet())
}
they can "have multiple incident edges" in {
val r0 = w.newRoom
val r1 = w.newRoom
val r2 = w.newRoom
val e0 = w.addEdge(r0, r1)
val e1 = w.addEdge(r0, r2)
assert(w.incidentEdges(r0) == HashSet(Edge(r0.sn, r1.sn), Edge(r0.sn, r2.sn)))
}
they can "be requested from their destination as well as the source" in {
val r0 = w.newRoom
val r1 = w.newRoom
val r2 = w.newRoom
val e0 = w.addEdge(r1, r0)
val e1 = w.addEdge(r1, r0)
assert(w.incidentEdges(r0) == HashSet(Edge(r1.sn, r0.sn), Edge(r1.sn, r0.sn)))
}
"Edges" can "be automatically deleted when nodes are deleted" in {
val r0 = w.newRoom
val r1 = w.newRoom
val r2 = w.newRoom
val e0 = w.addEdge(r0, r1)
val e1 = w.addEdge(r0, r2)
w.remove(r0.sn)
assert(w.incidentEdges(r1) == Set())
assert(w.incidentEdges(r2) == Set())
}
}
| salimfadhley/scalamoo | src/test/scala/model/GraphSpec.scala | Scala | mit | 1,507 |
package ionroller.aws
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient
import com.amazonaws.services.dynamodbv2.document._
import com.amazonaws.services.dynamodbv2.document.spec.{QuerySpec, ScanSpec}
import com.amazonaws.services.dynamodbv2.model._
import ionroller.tracking._
import ionroller.{JsonUtil, TimelineConfiguration, TimelineName}
import org.joda.time.DateTime
import play.api.libs.json._
import scala.collection.JavaConverters._
import scala.collection.immutable.HashMap
import scalaz.\\/
import scalaz.concurrent.Task
import scalaz.std.list._
import scalaz.syntax.foldable._
import scalaz.syntax.std.option._
object Dynamo {
import JsonUtil.Implicits._
val client = new AmazonDynamoDBClient()
implicit val db = new DynamoDB(client)
val defaultConfigTable: String = "IonrollerConfig"
val defaultStateTable: String = "IonrollerState"
val defaultEventTable: String = "IonrollerEvents"
def listTables: Task[Set[String]] = {
def go(tablesSoFar: Set[String], exclusiveStartTableName: Option[String]): Task[Set[String]] = {
val req = new ListTablesRequest()
exclusiveStartTableName foreach req.setExclusiveStartTableName
Task(client.listTables(req))(awsExecutorService) flatMap { listTablesResult =>
val tables = listTablesResult.getTableNames.asScala.toSet ++ tablesSoFar
Option(listTablesResult.getLastEvaluatedTableName) match {
case None => Task.now(tables)
case l @ Some(lastTable) => go(tables, l)
}
}
}
go(Set.empty, None)
}
def configTable(tableName: Option[String]): Task[Table] = {
Task(db.getTable(tableName | defaultConfigTable))(awsExecutorService)
}
def stateTable(tableName: Option[String]): Task[Table] = {
Task(db.getTable(tableName | defaultStateTable))(awsExecutorService)
}
def eventTable(tableName: Option[String]): Task[Table] = {
Task(db.getTable(tableName | defaultEventTable))(awsExecutorService)
}
def createConfigTable(tableName: Option[String]): Task[Table] = {
val req = new CreateTableRequest()
.withTableName(tableName | defaultConfigTable)
.withKeySchema(new KeySchemaElement().withKeyType(KeyType.HASH).withAttributeName("name"))
.withAttributeDefinitions(new AttributeDefinition().withAttributeName("name").withAttributeType(ScalarAttributeType.S))
.withKeySchema(new KeySchemaElement().withKeyType(KeyType.RANGE).withAttributeName("timestamp"))
.withAttributeDefinitions(new AttributeDefinition().withAttributeName("timestamp").withAttributeType(ScalarAttributeType.N))
.withProvisionedThroughput(new ProvisionedThroughput().withReadCapacityUnits(5L).withWriteCapacityUnits(1L))
Task(db.createTable(req))(awsExecutorService)
}
def createStateTable(tableName: Option[String]): Task[Table] = {
val req = new CreateTableRequest()
.withTableName(tableName | defaultStateTable)
.withKeySchema(new KeySchemaElement().withKeyType(KeyType.HASH).withAttributeName("name"))
.withAttributeDefinitions(new AttributeDefinition().withAttributeName("name").withAttributeType(ScalarAttributeType.S))
.withProvisionedThroughput(new ProvisionedThroughput().withReadCapacityUnits(1L).withWriteCapacityUnits(1L))
Task(db.createTable(req))(awsExecutorService)
}
def createEventsTable(tableName: Option[String]): Task[Table] = {
val req = new CreateTableRequest()
.withTableName(tableName | defaultEventTable)
.withKeySchema(new KeySchemaElement().withKeyType(KeyType.HASH).withAttributeName("name"))
.withAttributeDefinitions(new AttributeDefinition().withAttributeName("name").withAttributeType(ScalarAttributeType.S))
.withKeySchema(new KeySchemaElement().withKeyType(KeyType.RANGE).withAttributeName("timestamp"))
.withAttributeDefinitions(new AttributeDefinition().withAttributeName("timestamp").withAttributeType(ScalarAttributeType.N))
.withAttributeDefinitions(new AttributeDefinition().withAttributeName("version").withAttributeType(ScalarAttributeType.S))
.withLocalSecondaryIndexes(new LocalSecondaryIndex()
.withIndexName("Version-index")
.withKeySchema(new KeySchemaElement().withKeyType(KeyType.HASH).withAttributeName("name"))
.withKeySchema(new KeySchemaElement().withKeyType(KeyType.RANGE).withAttributeName("version"))
.withProjection(new Projection().withProjectionType(ProjectionType.ALL)))
.withProvisionedThroughput(new ProvisionedThroughput().withReadCapacityUnits(50L).withWriteCapacityUnits(1L))
Task(db.createTable(req))(awsExecutorService)
}
def saveConfig(table: Table, timelineName: String, config: ionroller.TimelineConfiguration)(implicit writer: Writes[ionroller.TimelineConfiguration]): Task[PutItemOutcome] = {
val item = new Item()
.withString("name", timelineName)
.withJSON("config", writer.writes(config).toString)
.withLong("timestamp", config.timestamp.getMillis)
Task(table.putItem(item))(awsExecutorService)
}
def saveState(table: Table, timelineName: TimelineName, state: ionroller.DesiredTimelineState)(implicit writer: Writes[ionroller.DesiredTimelineState]): Task[PutItemOutcome] = {
val item = new Item()
.withString("name", timelineName.name)
.withJSON("state", writer.writes(state).toString)
Task(table.putItem(item))(awsExecutorService)
}
def readConfigs(table: Table)(implicit reader: Reads[ionroller.TimelineConfiguration]): Task[ionroller.SystemConfiguration] = {
def readOutcomes(outcomes: Iterable[Item]): ionroller.SystemConfiguration = {
var configs = Map[TimelineName, ionroller.TimelineConfiguration]()
for {
item <- outcomes
} {
val name = item.getString("name")
val timestamp = item.getLong("timestamp")
val configJs = Json.parse(item.getJSON("config")).as[JsObject].deepMerge(Json.obj("timestamp" -> JsNumber(timestamp)))
configs = configs.updated(TimelineName(name), configJs.as[ionroller.TimelineConfiguration])
}
ionroller.SystemConfiguration(configs)
}
readLatestConfigs(table).map(readOutcomes)
}
  def readLatestConfigs(table: Table)(implicit reader: Reads[ionroller.TimelineConfiguration]): Task[Iterable[Item]] = {
    // Scan once for the distinct service names, then query each name newest-first with a result
    // size of 1 so that only the latest configuration item per service is returned.
Task {
for {
service <- table.scan(new ScanSpec().withAttributesToGet("name")).asScala.toSeq.map(_.getString("name")).distinct
config <- table.query(new QuerySpec().withHashKey("name", service).withScanIndexForward(false).withMaxResultSize(1)).asScala.toSeq
} yield config
}(awsExecutorService)
}
def getConfig(table: Table, serviceName: String, timestamp: DateTime)(implicit reader: Reads[ionroller.TimelineConfiguration]): Task[Option[TimelineConfiguration]] = {
getConfigs(table, serviceName, timestamp.some, timestamp.some).map(_.headOption)
}
def getConfig(serviceName: String, timestamp: DateTime)(implicit reader: Reads[ionroller.TimelineConfiguration]): Task[Option[TimelineConfiguration]] = {
for {
table <- configTable(None)
config <- getConfigs(table, serviceName, timestamp.some, timestamp.some).map(_.headOption)
} yield config
}
def getConfigs(table: Table, serviceName: String, from: Option[DateTime], to: Option[DateTime]): Task[Seq[TimelineConfiguration]] = {
Task {
val rkCond = new RangeKeyCondition("timestamp")
val rangeKeyCondition = (from, to) match {
case (Some(f), Some(t)) =>
if (f != t) rkCond.between(f.getMillis, t.getMillis).some
else rkCond.eq(f.getMillis).some
case (Some(f), None) => rkCond.ge(f.getMillis).some
case (None, Some(t)) => rkCond.le(t.getMillis).some
case (None, None) => None
}
val querySpec = new QuerySpec()
.withHashKey("name", serviceName)
.withScanIndexForward(false)
for {
item <- (rangeKeyCondition match {
case Some(cond) => table.query(querySpec.withRangeKeyCondition(cond))
case None => table.query(querySpec)
}).asScala.toSeq
} yield (Json.parse(item.getJSON("config")).as[JsObject] ++ Json.obj("timestamp" -> JsNumber(item.getLong("timestamp")))).as[ionroller.TimelineConfiguration]
}(awsExecutorService)
}
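  // Illustrative usage sketch (the helper name and the one-day window are assumptions, not part
  // of the original code): fetch every configuration recorded for a service over the last 24
  // hours from the default configuration table.
  def configsForLastDay(serviceName: String): Task[Seq[TimelineConfiguration]] = {
    val now = DateTime.now
    for {
      table <- configTable(None)
      configs <- getConfigs(table, serviceName, Some(now.minusDays(1)), Some(now))
    } yield configs
  }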
def deleteConfig(table: Table, serviceName: String): Task[Unit] = {
Task {
table.query(new QuerySpec()
.withHashKey("name", serviceName)
.withScanIndexForward(false)).asScala.toSeq foreach { item =>
{
println(item)
table.deleteItem("name", serviceName, "timestamp", item.getLong("timestamp"))
}
}
}(awsExecutorService)
}
def readStates(table: Table)(implicit reader: Reads[ionroller.TimelineConfiguration]): Task[Map[TimelineName, ionroller.DesiredTimelineState]] = {
import scala.collection.JavaConverters._
def readOutcomes(outcomes: Iterable[Item]): Map[TimelineName, ionroller.DesiredTimelineState] = {
var configs = Map[TimelineName, ionroller.DesiredTimelineState]()
for {
item <- outcomes
} {
configs = configs.updated(TimelineName(item.getString("name")), Json.parse(item.getJSON("state")).as[ionroller.DesiredTimelineState])
}
configs
}
Task(table.scan())(awsExecutorService).map(o => readOutcomes(o.asScala))
}
def getSystemConfig(table: Table)(implicit reader: Reads[ionroller.TimelineConfiguration]): Task[ionroller.SystemConfiguration] = {
readConfigs(table)
}
def getSystemState(table: Table)(implicit reader: Reads[ionroller.DesiredTimelineState]): Task[ionroller.DesiredSystemState] = {
readStates(table).map(c => ionroller.DesiredSystemState(c))
}
private[aws] def getLastItemForService(table: Table, item: Item): Task[Option[Item]] = {
val qs = new QuerySpec()
.withHashKey("name", item.getString("name"))
.withScanIndexForward(false)
.withMaxResultSize(1)
Task.delay(table.query(qs).asScala.toList.headOption)
}
def putItem(table: Table, item: Item) = {
Task.delay(table.putItem(item))
}
def sameRecurringEventItem(currItem: Item, lastRecurringEventItem: Option[Item]): Boolean = {
(currItem, lastRecurringEventItem) match {
case (c, Some(l)) => {
val type1 = c.getString("type")
val type2 = l.getString("type")
val s1 = c.getString("message")
val s2 = l.getString("message")
val t1 = c.getLong("timestamp")
val t2 = l.getLong("timestamp")
val tDiffMinutes = (t1 - t2) / (1000000 * 60)
        // The AWS service can return either of these messages for the same underlying condition
val msg1 = "[ERROR] Configuration validation exception: MinSize is greater than MaxSize"
val msg2 = "[ERROR] Configuration validation exception: MaxSize is less than MinSize"
if (type1 == EnvironmentNotHealthy.toString && type1 != type2)
tDiffMinutes < 5
else (type1 == type2) && (s1 == s2 || (s1 == msg1 && s2 == msg2) || (s1 == msg2 && s2 == msg1)) && tDiffMinutes < 60
}
      case (_, None) => false
}
}
def putRecurringEventItem(table: Table, item: Item) = {
for {
lastRecurringEventItem <- getLastItemForService(table, item)
_ <- {
if (!sameRecurringEventItem(item, lastRecurringEventItem))
putItem(table, item)
else
Task.now(())
}
} yield ()
}
object EventLogger extends ionroller.tracking.EventLogger {
    override def log(event: Event): Task[Throwable \/ Unit] = {
val outcomeTask = for {
table <- eventTable(None)
item <- item(event)
_ <- {
if (event.eventType == ExceptionEvent || event.eventType == EnvironmentNotHealthy || event.eventType == WaitingForTrafficIncrement || event.eventType == WaitingForTrafficDecrement || event.eventType == WaitingForNextRolloutStep) putRecurringEventItem(table, item)
else putItem(table, item)
}
} yield ()
outcomeTask.attempt
}
}
def item(event: Event): Task[Item] = {
for {
data <- toJson(event.data)
item <- Task({
val item = new Item()
.withString("name", event.service.name)
.withLong("timestamp", event.timestamp)
.withString("host", event.host)
.withString("type", event.eventType.toString)
.withJSON("data", data)
if (!event.message.isEmpty) item.withString("message", event.message)
event.version.foreach(v => item.withString("version", v.tag))
event.user.foreach(u => item.withString("user", u))
item
})(awsExecutorService)
} yield item
}
def itemToEventJsonString(item: Item): String = {
try {
Json.obj(
"type" -> item.getString("type"),
"service" -> item.getString("name"),
"version" -> item.getString("version"),
"timestamp" -> item.getLong("timestamp"),
"message" -> item.getString("message"),
"data" -> {
if (item.getJSON("data") != null) Json.parse(item.getJSON("data")) else ""
},
"host" -> item.getString("host"),
"user" -> item.getString("user")
).toString()
} catch {
      case _: Exception =>
        // If the item is missing an expected attribute, fall back to an empty string rather than failing the caller
        ""
}
}
def toJson(obj: Option[Object]): Task[String] = {
Task({
obj match {
case (Some(js: JsValue)) => js
case _ => Json.parse(obj.toJson)
}
})(awsExecutorService).map(_.toString)
}
def readItems(table: Table, scanSpec: Option[ScanSpec]) = {
scanSpec match {
case None => Task.delay(table.scan().asScala.toList)
case Some(spec) => Task.delay(table.scan(spec).asScala.toList)
}
}
def queryTable[A](table: Table, querySpecs: Seq[QuerySpec], sortBy: Option[Item => A], maxResultSize: Option[Int])(implicit ord: Ordering[A]) = {
val tasks = querySpecs map {
querySpec =>
Task(table.query(querySpec).asScala.toList)(awsExecutorService)
}
Task.gatherUnordered(tasks).map(_.foldMap(identity)).map(seq => {
sortBy match {
case Some(function) => {
val result = seq.sortBy(function)
maxResultSize match {
case Some(max) => result.takeRight(max)
case None => result
}
}
case None => seq
}
})
}
def queryEvents(services: Seq[String], from: Option[Long], to: Option[Long], version: Option[String], maxResultSize: Option[Int]) = {
val (rangeKeyCondition, filterValueMap, filterExpression) = {
val versionFilterExpression = "attribute_not_exists(version) or (version = :version)"
(from, to, version) match {
case (Some(f), Some(t), None) => (Some(new RangeKeyCondition("timestamp").between(f, t)), None, None)
case (Some(f), None, None) => (Some(new RangeKeyCondition("timestamp").gt(f)), None, None)
case (None, Some(t), None) => (Some(new RangeKeyCondition("timestamp").lt(t)), None, None)
case (None, None, Some(v)) => (
None,
Some(HashMap[String, Object](":version" -> v).asJava),
Some(versionFilterExpression)
)
case (Some(f), Some(t), Some(v)) => (
Some(new RangeKeyCondition("timestamp").between(f, t)),
Some(HashMap[String, Object](":version" -> v).asJava),
Some(versionFilterExpression)
)
case (Some(f), None, Some(v)) => (
Some(new RangeKeyCondition("timestamp").gt(f)),
Some(HashMap[String, Object](":version" -> v).asJava),
Some(versionFilterExpression)
)
case (None, Some(t), Some(v)) => (
Some(new RangeKeyCondition("timestamp").lt(t)),
Some(HashMap[String, Object](":version" -> v).asJava),
Some(versionFilterExpression)
)
case (_, _, _) => (None, None, None)
}
}
val querySpecs = for {
service <- services
} yield {
val qs = new QuerySpec().withHashKey("name", service).withScanIndexForward(false)
(rangeKeyCondition, filterValueMap, filterExpression) match {
case (Some(rk: RangeKeyCondition), Some(nm: java.util.Map[String, Object]), Some(fe: String)) => qs.withRangeKeyCondition(rk).withValueMap(nm).withFilterExpression(fe)
case (Some(rk: RangeKeyCondition), None, None) => qs.withRangeKeyCondition(rk)
case (None, Some(nm: java.util.Map[String, Object]), Some(fe: String)) => qs.withValueMap(nm).withFilterExpression(fe)
case (_, _, _) => qs.withMaxResultSize(maxResultSize.getOrElse(20))
}
}
val maxSize = (rangeKeyCondition, filterValueMap, filterExpression) match {
case (None, None, None) => Some(maxResultSize.getOrElse(20))
case (_, _, _) => None
}
for {
eventTable <- eventTable(None)
events <- queryTable(eventTable, querySpecs, { item: Item => item.getLong("timestamp") }.some, maxSize)
} yield events
}
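  // Illustrative usage sketch (the helper below is an assumption, not part of the original API):
  // with no time range or version filter, queryEvents returns up to maxResultSize (20 by default)
  // of the newest events per service, merged and ordered by timestamp.
  def latestEventsFor(service: String): Task[List[Item]] =
    queryEvents(Seq(service), from = None, to = None, version = None, maxResultSize = Some(20))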
def getServiceNames: Task[List[String]] = {
for {
table <- configTable(None)
service <- Task(table.scan(new ScanSpec().withAttributesToGet("name")).asScala.toSeq.map(_.getString("name")).distinct)(awsExecutorService)
} yield service.toList
}
}
| yonglehou/ionroller | core/src/main/scala/ionroller/aws/Dynamo.scala | Scala | mit | 17,157 |
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rx.lang.scala
/**
* Subjects are Observers and Observables at the same time.
*/
package object subjects {}
| zjrstar/RxScala | src/main/scala/rx/lang/scala/subjects/package.scala | Scala | apache-2.0 | 716 |
import scala.tools.partest.ReplTest
object MyApp extends dotty.runtime.LegacyApp {
Console println "Hello, delayed world."
}
object Test extends ReplTest {
def code = ":javap -app MyApp$"
override def welcoming = true
// The constant pool indices are not the same for GenASM / GenBCode, so
// replacing the exact numbers by XX.
  lazy val hasConstantPoolRef = """(.*)(#\d\d)(.*)""".r
override def normalize(s: String) = s match {
case hasConstantPoolRef(start, ref, end) => start + "#XX" + end
case _ => super.normalize(s)
}
}
| yusuke2255/dotty | tests/pending/run/repl-javap-app.scala | Scala | bsd-3-clause | 555 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.io.{File, IOException}
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}
import com.yammer.metrics.core.Gauge
import kafka.api._
import kafka.cluster.{BrokerEndPoint, Partition, Replica}
import kafka.common._
import kafka.controller.KafkaController
import kafka.log.{LogAppendInfo, LogManager}
import kafka.message.{ByteBufferMessageSet, MessageSet}
import kafka.metrics.KafkaMetricsGroup
import kafka.utils._
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.utils.{Time => JTime}
import scala.collection._
/*
* Result metadata of a log append operation on the log
*/
case class LogAppendResult(info: LogAppendInfo, error: Option[Throwable] = None) {
def errorCode = error match {
case None => ErrorMapping.NoError
case Some(e) => ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]])
}
}
/*
* Result metadata of a log read operation on the log
* @param info @FetchDataInfo returned by the @Log read
* @param hw high watermark of the local replica
* @param readSize amount of data that was read from the log i.e. size of the fetch
* @param isReadFromLogEnd true if the request read up to the log end offset snapshot
* when the read was initiated, false otherwise
* @param error Exception if error encountered while reading from the log
*/
case class LogReadResult(info: FetchDataInfo,
hw: Long,
readSize: Int,
isReadFromLogEnd : Boolean,
error: Option[Throwable] = None) {
def errorCode = error match {
case None => ErrorMapping.NoError
case Some(e) => ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]])
}
override def toString = {
"Fetch Data: [%s], HW: [%d], readSize: [%d], isReadFromLogEnd: [%b], error: [%s]"
.format(info, hw, readSize, isReadFromLogEnd, error)
}
}
object LogReadResult {
val UnknownLogReadResult = LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata,
MessageSet.Empty),
-1L,
-1,
false)
}
case class BecomeLeaderOrFollowerResult(responseMap: collection.Map[(String, Int), Short],
updatedLeaders: Set[Partition],
updatedFollowers: Set[Partition],
errorCode: Short) {
override def toString = {
"updated leaders: [%s], updated followers: [%s], update results: [%s], global error: [%d]"
.format(updatedLeaders, updatedFollowers, responseMap, errorCode)
}
}
object ReplicaManager {
val HighWatermarkFilename = "replication-offset-checkpoint"
val IsrChangePropagationBlackOut = 5000L
val IsrChangePropagationInterval = 60000L
}
class ReplicaManager(val config: KafkaConfig,
metrics: Metrics,
time: Time,
jTime: JTime,
val zkUtils: ZkUtils,
scheduler: Scheduler,
val logManager: LogManager,
val isShuttingDown: AtomicBoolean,
threadNamePrefix: Option[String] = None) extends Logging with KafkaMetricsGroup {
/* epoch of the controller that last changed the leader */
@volatile var controllerEpoch: Int = KafkaController.InitialControllerEpoch - 1
private val localBrokerId = config.brokerId
private val allPartitions = new Pool[(String, Int), Partition]
private val replicaStateChangeLock = new Object
val replicaFetcherManager = new ReplicaFetcherManager(config, this, metrics, jTime, threadNamePrefix)
private val highWatermarkCheckPointThreadStarted = new AtomicBoolean(false)
val highWatermarkCheckpoints = config.logDirs.map(dir => (new File(dir).getAbsolutePath, new OffsetCheckpoint(new File(dir, ReplicaManager.HighWatermarkFilename)))).toMap
private var hwThreadInitialized = false
this.logIdent = "[Replica Manager on Broker " + localBrokerId + "]: "
val stateChangeLogger = KafkaController.stateChangeLogger
private val isrChangeSet: mutable.Set[TopicAndPartition] = new mutable.HashSet[TopicAndPartition]()
private val lastIsrChangeMs = new AtomicLong(System.currentTimeMillis())
private val lastIsrPropagationMs = new AtomicLong(System.currentTimeMillis())
val delayedProducePurgatory = new DelayedOperationPurgatory[DelayedProduce](
purgatoryName = "Produce", config.brokerId, config.producerPurgatoryPurgeIntervalRequests)
val delayedFetchPurgatory = new DelayedOperationPurgatory[DelayedFetch](
purgatoryName = "Fetch", config.brokerId, config.fetchPurgatoryPurgeIntervalRequests)
val leaderCount = newGauge(
"LeaderCount",
new Gauge[Int] {
def value = {
getLeaderPartitions().size
}
}
)
val partitionCount = newGauge(
"PartitionCount",
new Gauge[Int] {
def value = allPartitions.size
}
)
val underReplicatedPartitions = newGauge(
"UnderReplicatedPartitions",
new Gauge[Int] {
def value = underReplicatedPartitionCount()
}
)
val isrExpandRate = newMeter("IsrExpandsPerSec", "expands", TimeUnit.SECONDS)
val isrShrinkRate = newMeter("IsrShrinksPerSec", "shrinks", TimeUnit.SECONDS)
def underReplicatedPartitionCount(): Int = {
getLeaderPartitions().count(_.isUnderReplicated)
}
def startHighWaterMarksCheckPointThread() = {
if(highWatermarkCheckPointThreadStarted.compareAndSet(false, true))
scheduler.schedule("highwatermark-checkpoint", checkpointHighWatermarks, period = config.replicaHighWatermarkCheckpointIntervalMs, unit = TimeUnit.MILLISECONDS)
}
def recordIsrChange(topicAndPartition: TopicAndPartition) {
isrChangeSet synchronized {
isrChangeSet += topicAndPartition
lastIsrChangeMs.set(System.currentTimeMillis())
}
}
/**
* This function periodically runs to see if ISR needs to be propagated. It propagates ISR when:
* 1. There is ISR change not propagated yet.
* 2. There is no ISR Change in the last five seconds, or it has been more than 60 seconds since the last ISR propagation.
* This allows an occasional ISR change to be propagated within a few seconds, and avoids overwhelming controller and
* other brokers when large amount of ISR change occurs.
*/
def maybePropagateIsrChanges() {
val now = System.currentTimeMillis()
isrChangeSet synchronized {
if (isrChangeSet.nonEmpty &&
(lastIsrChangeMs.get() + ReplicaManager.IsrChangePropagationBlackOut < now ||
lastIsrPropagationMs.get() + ReplicaManager.IsrChangePropagationInterval < now)) {
ReplicationUtils.propagateIsrChanges(zkUtils, isrChangeSet)
isrChangeSet.clear()
lastIsrPropagationMs.set(now)
}
}
}
/**
* Try to complete some delayed produce requests with the request key;
* this can be triggered when:
*
* 1. The partition HW has changed (for acks = -1)
   * 2. A follower replica's fetch operation is received (for acks = -1)
*/
def tryCompleteDelayedProduce(key: DelayedOperationKey) {
val completed = delayedProducePurgatory.checkAndComplete(key)
debug("Request key %s unblocked %d producer requests.".format(key.keyLabel, completed))
}
/**
* Try to complete some delayed fetch requests with the request key;
* this can be triggered when:
*
* 1. The partition HW has changed (for regular fetch)
* 2. A new message set is appended to the local log (for follower fetch)
*/
def tryCompleteDelayedFetch(key: DelayedOperationKey) {
val completed = delayedFetchPurgatory.checkAndComplete(key)
debug("Request key %s unblocked %d fetch requests.".format(key.keyLabel, completed))
}
def startup() {
// start ISR expiration thread
scheduler.schedule("isr-expiration", maybeShrinkIsr, period = config.replicaLagTimeMaxMs, unit = TimeUnit.MILLISECONDS)
scheduler.schedule("isr-change-propagation", maybePropagateIsrChanges, period = 2500L, unit = TimeUnit.MILLISECONDS)
}
def stopReplica(topic: String, partitionId: Int, deletePartition: Boolean): Short = {
stateChangeLogger.trace("Broker %d handling stop replica (delete=%s) for partition [%s,%d]".format(localBrokerId,
deletePartition.toString, topic, partitionId))
val errorCode = ErrorMapping.NoError
getPartition(topic, partitionId) match {
case Some(partition) =>
if(deletePartition) {
val removedPartition = allPartitions.remove((topic, partitionId))
if (removedPartition != null)
removedPartition.delete() // this will delete the local log
}
case None =>
// Delete log and corresponding folders in case replica manager doesn't hold them anymore.
// This could happen when topic is being deleted while broker is down and recovers.
if(deletePartition) {
val topicAndPartition = TopicAndPartition(topic, partitionId)
if(logManager.getLog(topicAndPartition).isDefined) {
logManager.deleteLog(topicAndPartition)
}
}
stateChangeLogger.trace("Broker %d ignoring stop replica (delete=%s) for partition [%s,%d] as replica doesn't exist on broker"
.format(localBrokerId, deletePartition, topic, partitionId))
}
stateChangeLogger.trace("Broker %d finished handling stop replica (delete=%s) for partition [%s,%d]"
.format(localBrokerId, deletePartition, topic, partitionId))
errorCode
}
def stopReplicas(stopReplicaRequest: StopReplicaRequest): (mutable.Map[TopicAndPartition, Short], Short) = {
replicaStateChangeLock synchronized {
val responseMap = new collection.mutable.HashMap[TopicAndPartition, Short]
if(stopReplicaRequest.controllerEpoch < controllerEpoch) {
stateChangeLogger.warn("Broker %d received stop replica request from an old controller epoch %d."
.format(localBrokerId, stopReplicaRequest.controllerEpoch) +
" Latest known controller epoch is %d " + controllerEpoch)
(responseMap, ErrorMapping.StaleControllerEpochCode)
} else {
controllerEpoch = stopReplicaRequest.controllerEpoch
// First stop fetchers for all partitions, then stop the corresponding replicas
replicaFetcherManager.removeFetcherForPartitions(stopReplicaRequest.partitions.map(r => TopicAndPartition(r.topic, r.partition)))
for(topicAndPartition <- stopReplicaRequest.partitions){
val errorCode = stopReplica(topicAndPartition.topic, topicAndPartition.partition, stopReplicaRequest.deletePartitions)
responseMap.put(topicAndPartition, errorCode)
}
(responseMap, ErrorMapping.NoError)
}
}
}
def getOrCreatePartition(topic: String, partitionId: Int): Partition = {
var partition = allPartitions.get((topic, partitionId))
if (partition == null) {
allPartitions.putIfNotExists((topic, partitionId), new Partition(topic, partitionId, time, this))
partition = allPartitions.get((topic, partitionId))
}
partition
}
def getPartition(topic: String, partitionId: Int): Option[Partition] = {
val partition = allPartitions.get((topic, partitionId))
if (partition == null)
None
else
Some(partition)
}
def getReplicaOrException(topic: String, partition: Int): Replica = {
val replicaOpt = getReplica(topic, partition)
if(replicaOpt.isDefined)
replicaOpt.get
else
throw new ReplicaNotAvailableException("Replica %d is not available for partition [%s,%d]".format(config.brokerId, topic, partition))
}
def getLeaderReplicaIfLocal(topic: String, partitionId: Int): Replica = {
val partitionOpt = getPartition(topic, partitionId)
partitionOpt match {
case None =>
throw new UnknownTopicOrPartitionException("Partition [%s,%d] doesn't exist on %d".format(topic, partitionId, config.brokerId))
case Some(partition) =>
partition.leaderReplicaIfLocal match {
case Some(leaderReplica) => leaderReplica
case None =>
throw new NotLeaderForPartitionException("Leader not local for partition [%s,%d] on broker %d"
.format(topic, partitionId, config.brokerId))
}
}
}
def getReplica(topic: String, partitionId: Int, replicaId: Int = config.brokerId): Option[Replica] = {
val partitionOpt = getPartition(topic, partitionId)
partitionOpt match {
case None => None
case Some(partition) => partition.getReplica(replicaId)
}
}
/**
* Append messages to leader replicas of the partition, and wait for them to be replicated to other replicas;
* the callback function will be triggered either when timeout or the required acks are satisfied
*/
def appendMessages(timeout: Long,
requiredAcks: Short,
internalTopicsAllowed: Boolean,
messagesPerPartition: Map[TopicAndPartition, MessageSet],
responseCallback: Map[TopicAndPartition, ProducerResponseStatus] => Unit) {
if (isValidRequiredAcks(requiredAcks)) {
val sTime = SystemTime.milliseconds
val localProduceResults = appendToLocalLog(internalTopicsAllowed, messagesPerPartition, requiredAcks)
debug("Produce to local log in %d ms".format(SystemTime.milliseconds - sTime))
val produceStatus = localProduceResults.map { case (topicAndPartition, result) =>
topicAndPartition ->
ProducePartitionStatus(
result.info.lastOffset + 1, // required offset
ProducerResponseStatus(result.errorCode, result.info.firstOffset)) // response status
}
if (delayedRequestRequired(requiredAcks, messagesPerPartition, localProduceResults)) {
// create delayed produce operation
val produceMetadata = ProduceMetadata(requiredAcks, produceStatus)
val delayedProduce = new DelayedProduce(timeout, produceMetadata, this, responseCallback)
// create a list of (topic, partition) pairs to use as keys for this delayed produce operation
val producerRequestKeys = messagesPerPartition.keys.map(new TopicPartitionOperationKey(_)).toSeq
// try to complete the request immediately, otherwise put it into the purgatory
// this is because while the delayed produce operation is being created, new
// requests may arrive and hence make this operation completable.
delayedProducePurgatory.tryCompleteElseWatch(delayedProduce, producerRequestKeys)
} else {
// we can respond immediately
val produceResponseStatus = produceStatus.mapValues(status => status.responseStatus)
responseCallback(produceResponseStatus)
}
} else {
// If required.acks is outside accepted range, something is wrong with the client
// Just return an error and don't handle the request at all
val responseStatus = messagesPerPartition.map {
case (topicAndPartition, messageSet) =>
(topicAndPartition ->
ProducerResponseStatus(Errors.INVALID_REQUIRED_ACKS.code,
LogAppendInfo.UnknownLogAppendInfo.firstOffset))
}
responseCallback(responseStatus)
}
}
// If all the following conditions are true, we need to put a delayed produce request and wait for replication to complete
//
// 1. required acks = -1
// 2. there is data to append
// 3. at least one partition append was successful (fewer errors than partitions)
private def delayedRequestRequired(requiredAcks: Short, messagesPerPartition: Map[TopicAndPartition, MessageSet],
localProduceResults: Map[TopicAndPartition, LogAppendResult]): Boolean = {
requiredAcks == -1 &&
messagesPerPartition.size > 0 &&
localProduceResults.values.count(_.error.isDefined) < messagesPerPartition.size
}
private def isValidRequiredAcks(requiredAcks: Short): Boolean = {
requiredAcks == -1 || requiredAcks == 1 || requiredAcks == 0
}
/**
* Append the messages to the local replica logs
*/
private def appendToLocalLog(internalTopicsAllowed: Boolean,
messagesPerPartition: Map[TopicAndPartition, MessageSet],
requiredAcks: Short): Map[TopicAndPartition, LogAppendResult] = {
trace("Append [%s] to local log ".format(messagesPerPartition))
messagesPerPartition.map { case (topicAndPartition, messages) =>
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).totalProduceRequestRate.mark()
BrokerTopicStats.getBrokerAllTopicsStats().totalProduceRequestRate.mark()
// reject appending to internal topics if it is not allowed
if (Topic.InternalTopics.contains(topicAndPartition.topic) && !internalTopicsAllowed) {
(topicAndPartition, LogAppendResult(
LogAppendInfo.UnknownLogAppendInfo,
Some(new InvalidTopicException("Cannot append to internal topic %s".format(topicAndPartition.topic)))))
} else {
try {
val partitionOpt = getPartition(topicAndPartition.topic, topicAndPartition.partition)
val info = partitionOpt match {
case Some(partition) =>
partition.appendMessagesToLeader(messages.asInstanceOf[ByteBufferMessageSet], requiredAcks)
case None => throw new UnknownTopicOrPartitionException("Partition %s doesn't exist on %d"
.format(topicAndPartition, localBrokerId))
}
val numAppendedMessages =
if (info.firstOffset == -1L || info.lastOffset == -1L)
0
else
info.lastOffset - info.firstOffset + 1
// update stats for successfully appended bytes and messages as bytesInRate and messageInRate
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).bytesInRate.mark(messages.sizeInBytes)
BrokerTopicStats.getBrokerAllTopicsStats.bytesInRate.mark(messages.sizeInBytes)
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).messagesInRate.mark(numAppendedMessages)
BrokerTopicStats.getBrokerAllTopicsStats.messagesInRate.mark(numAppendedMessages)
trace("%d bytes written to log %s-%d beginning at offset %d and ending at offset %d"
.format(messages.sizeInBytes, topicAndPartition.topic, topicAndPartition.partition, info.firstOffset, info.lastOffset))
(topicAndPartition, LogAppendResult(info))
} catch {
// NOTE: Failed produce requests metric is not incremented for known exceptions
// it is supposed to indicate un-expected failures of a broker in handling a produce request
case e: KafkaStorageException =>
fatal("Halting due to unrecoverable I/O error while handling produce request: ", e)
Runtime.getRuntime.halt(1)
(topicAndPartition, null)
case utpe: UnknownTopicOrPartitionException =>
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(utpe)))
case nle: NotLeaderForPartitionException =>
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(nle)))
case mtle: MessageSizeTooLargeException =>
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(mtle)))
case mstle: MessageSetSizeTooLargeException =>
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(mstle)))
case imse : InvalidMessageSizeException =>
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(imse)))
case t: Throwable =>
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).failedProduceRequestRate.mark()
BrokerTopicStats.getBrokerAllTopicsStats.failedProduceRequestRate.mark()
error("Error processing append operation on partition %s".format(topicAndPartition), t)
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(t)))
}
}
}
}
/**
* Fetch messages from the leader replica, and wait until enough data can be fetched and return;
* the callback function will be triggered either when timeout or required fetch info is satisfied
*/
def fetchMessages(timeout: Long,
replicaId: Int,
fetchMinBytes: Int,
fetchInfo: immutable.Map[TopicAndPartition, PartitionFetchInfo],
responseCallback: Map[TopicAndPartition, FetchResponsePartitionData] => Unit) {
val isFromFollower = replicaId >= 0
val fetchOnlyFromLeader: Boolean = replicaId != Request.DebuggingConsumerId
val fetchOnlyCommitted: Boolean = ! Request.isValidBrokerId(replicaId)
// read from local logs
val logReadResults = readFromLocalLog(fetchOnlyFromLeader, fetchOnlyCommitted, fetchInfo)
// if the fetch comes from the follower,
// update its corresponding log end offset
if(Request.isValidBrokerId(replicaId))
updateFollowerLogReadResults(replicaId, logReadResults)
// check if this fetch request can be satisfied right away
val bytesReadable = logReadResults.values.map(_.info.messageSet.sizeInBytes).sum
val errorReadingData = logReadResults.values.foldLeft(false) ((errorIncurred, readResult) =>
errorIncurred || (readResult.errorCode != ErrorMapping.NoError))
// respond immediately if 1) fetch request does not want to wait
// 2) fetch request does not require any data
// 3) has enough data to respond
// 4) some error happens while reading data
if(timeout <= 0 || fetchInfo.size <= 0 || bytesReadable >= fetchMinBytes || errorReadingData) {
val fetchPartitionData = logReadResults.mapValues(result =>
FetchResponsePartitionData(result.errorCode, result.hw, result.info.messageSet))
responseCallback(fetchPartitionData)
} else {
// construct the fetch results from the read results
val fetchPartitionStatus = logReadResults.map { case (topicAndPartition, result) =>
(topicAndPartition, FetchPartitionStatus(result.info.fetchOffsetMetadata, fetchInfo.get(topicAndPartition).get))
}
val fetchMetadata = FetchMetadata(fetchMinBytes, fetchOnlyFromLeader, fetchOnlyCommitted, isFromFollower, fetchPartitionStatus)
val delayedFetch = new DelayedFetch(timeout, fetchMetadata, this, responseCallback)
// create a list of (topic, partition) pairs to use as keys for this delayed fetch operation
val delayedFetchKeys = fetchPartitionStatus.keys.map(new TopicPartitionOperationKey(_)).toSeq
// try to complete the request immediately, otherwise put it into the purgatory;
// this is because while the delayed fetch operation is being created, new requests
// may arrive and hence make this operation completable.
delayedFetchPurgatory.tryCompleteElseWatch(delayedFetch, delayedFetchKeys)
}
}
/**
* Read from a single topic/partition at the given offset upto maxSize bytes
*/
def readFromLocalLog(fetchOnlyFromLeader: Boolean,
readOnlyCommitted: Boolean,
readPartitionInfo: Map[TopicAndPartition, PartitionFetchInfo]): Map[TopicAndPartition, LogReadResult] = {
readPartitionInfo.map { case (TopicAndPartition(topic, partition), PartitionFetchInfo(offset, fetchSize)) =>
BrokerTopicStats.getBrokerTopicStats(topic).totalFetchRequestRate.mark()
BrokerTopicStats.getBrokerAllTopicsStats().totalFetchRequestRate.mark()
val partitionDataAndOffsetInfo =
try {
trace("Fetching log segment for topic %s, partition %d, offset %d, size %d".format(topic, partition, offset, fetchSize))
// decide whether to only fetch from leader
val localReplica = if (fetchOnlyFromLeader)
getLeaderReplicaIfLocal(topic, partition)
else
getReplicaOrException(topic, partition)
// decide whether to only fetch committed data (i.e. messages below high watermark)
val maxOffsetOpt = if (readOnlyCommitted)
Some(localReplica.highWatermark.messageOffset)
else
None
/* Read the LogOffsetMetadata prior to performing the read from the log.
* We use the LogOffsetMetadata to determine if a particular replica is in-sync or not.
* Using the log end offset after performing the read can lead to a race condition
* where data gets appended to the log immediately after the replica has consumed from it
* This can cause a replica to always be out of sync.
*/
val initialLogEndOffset = localReplica.logEndOffset
val logReadInfo = localReplica.log match {
case Some(log) =>
log.read(offset, fetchSize, maxOffsetOpt)
case None =>
error("Leader for partition [%s,%d] does not have a local log".format(topic, partition))
FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty)
}
val readToEndOfLog = initialLogEndOffset.messageOffset - logReadInfo.fetchOffsetMetadata.messageOffset <= 0
LogReadResult(logReadInfo, localReplica.highWatermark.messageOffset, fetchSize, readToEndOfLog, None)
} catch {
// NOTE: Failed fetch requests metric is not incremented for known exceptions since it
// is supposed to indicate un-expected failure of a broker in handling a fetch request
case utpe: UnknownTopicOrPartitionException =>
LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, fetchSize, false, Some(utpe))
case nle: NotLeaderForPartitionException =>
LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, fetchSize, false, Some(nle))
case rnae: ReplicaNotAvailableException =>
LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, fetchSize, false, Some(rnae))
case oor : OffsetOutOfRangeException =>
LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, fetchSize, false, Some(oor))
case e: Throwable =>
BrokerTopicStats.getBrokerTopicStats(topic).failedFetchRequestRate.mark()
BrokerTopicStats.getBrokerAllTopicsStats().failedFetchRequestRate.mark()
error("Error processing fetch operation on partition [%s,%d] offset %d".format(topic, partition, offset))
LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, fetchSize, false, Some(e))
}
(TopicAndPartition(topic, partition), partitionDataAndOffsetInfo)
}
}
def maybeUpdateMetadataCache(updateMetadataRequest: UpdateMetadataRequest, metadataCache: MetadataCache) {
replicaStateChangeLock synchronized {
if(updateMetadataRequest.controllerEpoch < controllerEpoch) {
val stateControllerEpochErrorMessage = ("Broker %d received update metadata request with correlation id %d from an " +
"old controller %d with epoch %d. Latest known controller epoch is %d").format(localBrokerId,
updateMetadataRequest.correlationId, updateMetadataRequest.controllerId, updateMetadataRequest.controllerEpoch,
controllerEpoch)
stateChangeLogger.warn(stateControllerEpochErrorMessage)
throw new ControllerMovedException(stateControllerEpochErrorMessage)
} else {
metadataCache.updateCache(updateMetadataRequest, localBrokerId, stateChangeLogger)
controllerEpoch = updateMetadataRequest.controllerEpoch
}
}
}
def becomeLeaderOrFollower(leaderAndISRRequest: LeaderAndIsrRequest, metadataCache: MetadataCache): BecomeLeaderOrFollowerResult = {
leaderAndISRRequest.partitionStateInfos.foreach { case ((topic, partition), stateInfo) =>
stateChangeLogger.trace("Broker %d received LeaderAndIsr request %s correlation id %d from controller %d epoch %d for partition [%s,%d]"
.format(localBrokerId, stateInfo, leaderAndISRRequest.correlationId,
leaderAndISRRequest.controllerId, leaderAndISRRequest.controllerEpoch, topic, partition))
}
replicaStateChangeLock synchronized {
val responseMap = new mutable.HashMap[(String, Int), Short]
if (leaderAndISRRequest.controllerEpoch < controllerEpoch) {
leaderAndISRRequest.partitionStateInfos.foreach { case ((topic, partition), stateInfo) =>
stateChangeLogger.warn(("Broker %d ignoring LeaderAndIsr request from controller %d with correlation id %d since " +
"its controller epoch %d is old. Latest known controller epoch is %d").format(localBrokerId, leaderAndISRRequest.controllerId,
leaderAndISRRequest.correlationId, leaderAndISRRequest.controllerEpoch, controllerEpoch))
}
BecomeLeaderOrFollowerResult(responseMap, Set.empty[Partition], Set.empty[Partition], ErrorMapping.StaleControllerEpochCode)
} else {
val controllerId = leaderAndISRRequest.controllerId
val correlationId = leaderAndISRRequest.correlationId
controllerEpoch = leaderAndISRRequest.controllerEpoch
// First check partition's leader epoch
val partitionState = new mutable.HashMap[Partition, PartitionStateInfo]()
leaderAndISRRequest.partitionStateInfos.foreach { case ((topic, partitionId), partitionStateInfo) =>
val partition = getOrCreatePartition(topic, partitionId)
val partitionLeaderEpoch = partition.getLeaderEpoch()
// If the leader epoch is valid record the epoch of the controller that made the leadership decision.
// This is useful while updating the isr to maintain the decision maker controller's epoch in the zookeeper path
if (partitionLeaderEpoch < partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.leaderEpoch) {
if(partitionStateInfo.allReplicas.contains(config.brokerId))
partitionState.put(partition, partitionStateInfo)
else {
stateChangeLogger.warn(("Broker %d ignoring LeaderAndIsr request from controller %d with correlation id %d " +
"epoch %d for partition [%s,%d] as itself is not in assigned replica list %s")
.format(localBrokerId, controllerId, correlationId, leaderAndISRRequest.controllerEpoch,
topic, partition.partitionId, partitionStateInfo.allReplicas.mkString(",")))
responseMap.put((topic, partitionId), ErrorMapping.UnknownTopicOrPartitionCode)
}
} else {
// Otherwise record the error code in response
stateChangeLogger.warn(("Broker %d ignoring LeaderAndIsr request from controller %d with correlation id %d " +
"epoch %d for partition [%s,%d] since its associated leader epoch %d is old. Current leader epoch is %d")
.format(localBrokerId, controllerId, correlationId, leaderAndISRRequest.controllerEpoch,
topic, partition.partitionId, partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.leaderEpoch, partitionLeaderEpoch))
responseMap.put((topic, partitionId), ErrorMapping.StaleLeaderEpochCode)
}
}
val partitionsTobeLeader = partitionState.filter { case (partition, partitionStateInfo) =>
partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.leader == config.brokerId
}
val partitionsToBeFollower = (partitionState -- partitionsTobeLeader.keys)
val partitionsBecomeLeader = if (!partitionsTobeLeader.isEmpty)
makeLeaders(controllerId, controllerEpoch, partitionsTobeLeader, leaderAndISRRequest.correlationId, responseMap)
else
Set.empty[Partition]
val partitionsBecomeFollower = if (!partitionsToBeFollower.isEmpty)
makeFollowers(controllerId, controllerEpoch, partitionsToBeFollower, leaderAndISRRequest.correlationId, responseMap, metadataCache)
else
Set.empty[Partition]
        // We initialize the high watermark checkpoint thread after the first LeaderAndIsr request. This ensures that all the
        // partitions have been completely populated before starting the checkpointing, thereby avoiding weird race conditions.
if (!hwThreadInitialized) {
startHighWaterMarksCheckPointThread()
hwThreadInitialized = true
}
replicaFetcherManager.shutdownIdleFetcherThreads()
BecomeLeaderOrFollowerResult(responseMap, partitionsBecomeLeader, partitionsBecomeFollower, ErrorMapping.NoError)
}
}
}
/*
* Make the current broker to become leader for a given set of partitions by:
*
* 1. Stop fetchers for these partitions
* 2. Update the partition metadata in cache
* 3. Add these partitions to the leader partitions set
*
* If an unexpected error is thrown in this function, it will be propagated to KafkaApis where
* the error message will be set on each partition since we do not know which partition caused it. Otherwise,
* return the set of partitions that are made leader due to this method
*
* TODO: the above may need to be fixed later
*/
private def makeLeaders(controllerId: Int,
epoch: Int,
partitionState: Map[Partition, PartitionStateInfo],
correlationId: Int,
responseMap: mutable.Map[(String, Int), Short]): Set[Partition] = {
partitionState.foreach(state =>
stateChangeLogger.trace(("Broker %d handling LeaderAndIsr request correlationId %d from controller %d epoch %d " +
"starting the become-leader transition for partition %s")
.format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId))))
for (partition <- partitionState.keys)
responseMap.put((partition.topic, partition.partitionId), ErrorMapping.NoError)
val partitionsToMakeLeaders: mutable.Set[Partition] = mutable.Set()
try {
// First stop fetchers for all the partitions
replicaFetcherManager.removeFetcherForPartitions(partitionState.keySet.map(new TopicAndPartition(_)))
// Update the partition information to be the leader
partitionState.foreach{ case (partition, partitionStateInfo) =>
if (partition.makeLeader(controllerId, partitionStateInfo, correlationId))
partitionsToMakeLeaders += partition
else
stateChangeLogger.info(("Broker %d skipped the become-leader state change after marking its partition as leader with correlation id %d from " +
"controller %d epoch %d for partition %s since it is already the leader for the partition.")
            .format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(partition.topic, partition.partitionId)))
}
partitionsToMakeLeaders.foreach { partition =>
stateChangeLogger.trace(("Broker %d stopped fetchers as part of become-leader request from controller " +
"%d epoch %d with correlation id %d for partition %s")
.format(localBrokerId, controllerId, epoch, correlationId, TopicAndPartition(partition.topic, partition.partitionId)))
}
} catch {
case e: Throwable =>
partitionState.foreach { state =>
val errorMsg = ("Error on broker %d while processing LeaderAndIsr request correlationId %d received from controller %d" +
" epoch %d for partition %s").format(localBrokerId, correlationId, controllerId, epoch,
TopicAndPartition(state._1.topic, state._1.partitionId))
stateChangeLogger.error(errorMsg, e)
}
// Re-throw the exception for it to be caught in KafkaApis
throw e
}
partitionState.foreach { state =>
stateChangeLogger.trace(("Broker %d completed LeaderAndIsr request correlationId %d from controller %d epoch %d " +
"for the become-leader transition for partition %s")
.format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId)))
}
partitionsToMakeLeaders
}
/*
* Make the current broker to become follower for a given set of partitions by:
*
* 1. Remove these partitions from the leader partitions set.
* 2. Mark the replicas as followers so that no more data can be added from the producer clients.
* 3. Stop fetchers for these partitions so that no more data can be added by the replica fetcher threads.
* 4. Truncate the log and checkpoint offsets for these partitions.
* 5. If the broker is not shutting down, add the fetcher to the new leaders.
*
   * The ordering of these steps makes sure that the replicas in transition will not
   * take any more messages before checkpointing offsets, so that all messages before the checkpoint
   * are guaranteed to be flushed to disk
*
* If an unexpected error is thrown in this function, it will be propagated to KafkaApis where
* the error message will be set on each partition since we do not know which partition caused it. Otherwise,
* return the set of partitions that are made follower due to this method
*/
private def makeFollowers(controllerId: Int,
epoch: Int,
partitionState: Map[Partition, PartitionStateInfo],
correlationId: Int,
responseMap: mutable.Map[(String, Int), Short],
metadataCache: MetadataCache) : Set[Partition] = {
partitionState.foreach { state =>
stateChangeLogger.trace(("Broker %d handling LeaderAndIsr request correlationId %d from controller %d epoch %d " +
"starting the become-follower transition for partition %s")
.format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId)))
}
for (partition <- partitionState.keys)
responseMap.put((partition.topic, partition.partitionId), ErrorMapping.NoError)
val partitionsToMakeFollower: mutable.Set[Partition] = mutable.Set()
try {
// TODO: Delete leaders from LeaderAndIsrRequest
partitionState.foreach{ case (partition, partitionStateInfo) =>
val leaderIsrAndControllerEpoch = partitionStateInfo.leaderIsrAndControllerEpoch
val newLeaderBrokerId = leaderIsrAndControllerEpoch.leaderAndIsr.leader
metadataCache.getAliveBrokers.find(_.id == newLeaderBrokerId) match {
// Only change partition state when the leader is available
case Some(leaderBroker) =>
if (partition.makeFollower(controllerId, partitionStateInfo, correlationId))
partitionsToMakeFollower += partition
else
stateChangeLogger.info(("Broker %d skipped the become-follower state change after marking its partition as follower with correlation id %d from " +
"controller %d epoch %d for partition [%s,%d] since the new leader %d is the same as the old leader")
.format(localBrokerId, correlationId, controllerId, leaderIsrAndControllerEpoch.controllerEpoch,
partition.topic, partition.partitionId, newLeaderBrokerId))
case None =>
// The leader broker should always be present in the metadata cache.
// If not, we should record the error message and abort the transition process for this partition
stateChangeLogger.error(("Broker %d received LeaderAndIsrRequest with correlation id %d from controller" +
" %d epoch %d for partition [%s,%d] but cannot become follower since the new leader %d is unavailable.")
.format(localBrokerId, correlationId, controllerId, leaderIsrAndControllerEpoch.controllerEpoch,
partition.topic, partition.partitionId, newLeaderBrokerId))
// Create the local replica even if the leader is unavailable. This is required to ensure that we include
// the partition's high watermark in the checkpoint file (see KAFKA-1647)
partition.getOrCreateReplica()
}
}
replicaFetcherManager.removeFetcherForPartitions(partitionsToMakeFollower.map(new TopicAndPartition(_)))
partitionsToMakeFollower.foreach { partition =>
stateChangeLogger.trace(("Broker %d stopped fetchers as part of become-follower request from controller " +
"%d epoch %d with correlation id %d for partition %s")
.format(localBrokerId, controllerId, epoch, correlationId, TopicAndPartition(partition.topic, partition.partitionId)))
}
logManager.truncateTo(partitionsToMakeFollower.map(partition => (new TopicAndPartition(partition), partition.getOrCreateReplica().highWatermark.messageOffset)).toMap)
partitionsToMakeFollower.foreach { partition =>
stateChangeLogger.trace(("Broker %d truncated logs and checkpointed recovery boundaries for partition [%s,%d] as part of " +
"become-follower request with correlation id %d from controller %d epoch %d").format(localBrokerId,
partition.topic, partition.partitionId, correlationId, controllerId, epoch))
}
if (isShuttingDown.get()) {
partitionsToMakeFollower.foreach { partition =>
stateChangeLogger.trace(("Broker %d skipped the adding-fetcher step of the become-follower state change with correlation id %d from " +
"controller %d epoch %d for partition [%s,%d] since it is shutting down").format(localBrokerId, correlationId,
controllerId, epoch, partition.topic, partition.partitionId))
}
}
else {
// we do not need to check if the leader exists again since this has been done at the beginning of this process
val partitionsToMakeFollowerWithLeaderAndOffset = partitionsToMakeFollower.map(partition =>
new TopicAndPartition(partition) -> BrokerAndInitialOffset(
metadataCache.getAliveBrokers.find(_.id == partition.leaderReplicaIdOpt.get).get.getBrokerEndPoint(config.interBrokerSecurityProtocol),
partition.getReplica().get.logEndOffset.messageOffset)).toMap
replicaFetcherManager.addFetcherForPartitions(partitionsToMakeFollowerWithLeaderAndOffset)
partitionsToMakeFollower.foreach { partition =>
stateChangeLogger.trace(("Broker %d started fetcher to new leader as part of become-follower request from controller " +
"%d epoch %d with correlation id %d for partition [%s,%d]")
.format(localBrokerId, controllerId, epoch, correlationId, partition.topic, partition.partitionId))
}
}
} catch {
case e: Throwable =>
val errorMsg = ("Error on broker %d while processing LeaderAndIsr request with correlationId %d received from controller %d " +
"epoch %d").format(localBrokerId, correlationId, controllerId, epoch)
stateChangeLogger.error(errorMsg, e)
// Re-throw the exception for it to be caught in KafkaApis
throw e
}
partitionState.foreach { state =>
stateChangeLogger.trace(("Broker %d completed LeaderAndIsr request correlationId %d from controller %d epoch %d " +
"for the become-follower transition for partition %s")
.format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId)))
}
partitionsToMakeFollower
}
private def maybeShrinkIsr(): Unit = {
trace("Evaluating ISR list of partitions to see which replicas can be removed from the ISR")
allPartitions.values.foreach(partition => partition.maybeShrinkIsr(config.replicaLagTimeMaxMs))
}
private def updateFollowerLogReadResults(replicaId: Int, readResults: Map[TopicAndPartition, LogReadResult]) {
debug("Recording follower broker %d log read results: %s ".format(replicaId, readResults))
readResults.foreach { case (topicAndPartition, readResult) =>
getPartition(topicAndPartition.topic, topicAndPartition.partition) match {
case Some(partition) =>
partition.updateReplicaLogReadResult(replicaId, readResult)
          // for producer requests with acks = -1, we need to check
// if they can be unblocked after some follower's log end offsets have moved
tryCompleteDelayedProduce(new TopicPartitionOperationKey(topicAndPartition))
case None =>
warn("While recording the replica LEO, the partition %s hasn't been created.".format(topicAndPartition))
}
}
}
private def getLeaderPartitions() : List[Partition] = {
allPartitions.values.filter(_.leaderReplicaIfLocal().isDefined).toList
}
// Flushes the highwatermark value for all partitions to the highwatermark file
def checkpointHighWatermarks() {
val replicas = allPartitions.values.map(_.getReplica(config.brokerId)).collect{case Some(replica) => replica}
val replicasByDir = replicas.filter(_.log.isDefined).groupBy(_.log.get.dir.getParentFile.getAbsolutePath)
for((dir, reps) <- replicasByDir) {
val hwms = reps.map(r => (new TopicAndPartition(r) -> r.highWatermark.messageOffset)).toMap
try {
highWatermarkCheckpoints(dir).write(hwms)
} catch {
case e: IOException =>
fatal("Error writing to highwatermark file: ", e)
Runtime.getRuntime().halt(1)
}
}
}
  // High watermarks are checkpointed on shutdown, except when running under unit tests
def shutdown(checkpointHW: Boolean = true) {
info("Shutting down")
replicaFetcherManager.shutdown()
delayedFetchPurgatory.shutdown()
delayedProducePurgatory.shutdown()
if (checkpointHW)
checkpointHighWatermarks()
info("Shut down completely")
}
}
| Zhiqiang-He/kafka-0914-edit | core/src/main/scala/kafka/server/ReplicaManager.scala | Scala | apache-2.0 | 47,229 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
* file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
* to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package kafka.admin
import java.util.Collections
import java.util.Properties
import kafka.admin.ReassignPartitionsCommand._
import kafka.common.AdminCommandFailedException
import kafka.server.{KafkaConfig, KafkaServer}
import kafka.utils.TestUtils._
import kafka.utils.{Logging, TestUtils}
import kafka.zk.{ReassignPartitionsZNode, ZkVersion, ZooKeeperTestHarness}
import org.junit.Assert.{assertEquals, assertTrue}
import org.junit.{After, Before, Test}
import kafka.admin.ReplicationQuotaUtils._
import org.apache.kafka.clients.admin.AdminClientConfig
import org.apache.kafka.clients.admin.{AdminClient => JAdminClient}
import org.apache.kafka.common.{TopicPartition, TopicPartitionReplica}
import scala.collection.JavaConverters._
import scala.collection.Map
import scala.collection.Seq
import scala.util.Random
import java.io.File
import org.apache.kafka.clients.producer.ProducerRecord
class ReassignPartitionsClusterTest extends ZooKeeperTestHarness with Logging {
val partitionId = 0
var servers: Seq[KafkaServer] = null
val topicName = "my-topic"
val delayMs = 1000
var adminClient: JAdminClient = null
def zkUpdateDelay(): Unit = Thread.sleep(delayMs)
@Before
override def setUp() {
super.setUp()
}
def startBrokers(brokerIds: Seq[Int]) {
servers = brokerIds.map(i => createBrokerConfig(i, zkConnect, enableControlledShutdown = false, logDirCount = 3))
.map(c => createServer(KafkaConfig.fromProps(c)))
}
def createAdminClient(servers: Seq[KafkaServer]): JAdminClient = {
val props = new Properties()
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, TestUtils.getBrokerListStrFromServers(servers))
props.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "10000")
JAdminClient.create(props)
}
def getRandomLogDirAssignment(brokerId: Int): String = {
val server = servers.find(_.config.brokerId == brokerId).get
val logDirs = server.config.logDirs
new File(logDirs(Random.nextInt(logDirs.size))).getAbsolutePath
}
@After
override def tearDown() {
if (adminClient != null) {
adminClient.close()
adminClient = null
}
TestUtils.shutdownServers(servers)
super.tearDown()
}
@Test
def testHwAfterPartitionReassignment(): Unit = {
//Given a single replica on server 100
startBrokers(Seq(100, 101, 102))
adminClient = createAdminClient(servers)
createTopic(zkClient, topicName, Map(0 -> Seq(100)), servers = servers)
val topicPartition = new TopicPartition(topicName, 0)
val leaderServer = servers.find(_.config.brokerId == 100).get
leaderServer.replicaManager.logManager.truncateFullyAndStartAt(topicPartition, 100L, false)
val topicJson: String = s"""{"version":1,"partitions":[{"topic":"$topicName","partition":0,"replicas":[101, 102]}]}"""
ReassignPartitionsCommand.executeAssignment(zkClient, Some(adminClient), topicJson, NoThrottle)
val newLeaderServer = servers.find(_.config.brokerId == 101).get
TestUtils.waitUntilTrue (
() => newLeaderServer.replicaManager.getPartition(topicPartition).flatMap(_.leaderReplicaIfLocal).isDefined,
"broker 101 should be the new leader", pause = 1L
)
assertEquals(100, newLeaderServer.replicaManager.getReplicaOrException(topicPartition).highWatermark.messageOffset)
val newFollowerServer = servers.find(_.config.brokerId == 102).get
TestUtils.waitUntilTrue(() => newFollowerServer.replicaManager.getReplicaOrException(topicPartition).highWatermark.messageOffset == 100,
"partition follower's highWatermark should be 100")
}
@Test
def shouldMoveSinglePartition(): Unit = {
//Given a single replica on server 100
startBrokers(Seq(100, 101))
adminClient = createAdminClient(servers)
val partition = 0
// Get a random log directory on broker 101
val expectedLogDir = getRandomLogDirAssignment(101)
createTopic(zkClient, topicName, Map(partition -> Seq(100)), servers = servers)
//When we move the replica on 100 to broker 101
val topicJson: String = s"""{"version":1,"partitions":[{"topic":"$topicName","partition":0,"replicas":[101],"log_dirs":["$expectedLogDir"]}]}"""
ReassignPartitionsCommand.executeAssignment(zkClient, Some(adminClient), topicJson, NoThrottle)
waitForReassignmentToComplete()
//Then the replica should be on 101
assertEquals(Seq(101), zkClient.getPartitionAssignmentForTopics(Set(topicName)).get(topicName).get(partition))
// The replica should be in the expected log directory on broker 101
val replica = new TopicPartitionReplica(topicName, 0, 101)
assertEquals(expectedLogDir, adminClient.describeReplicaLogDirs(Collections.singleton(replica)).all().get.get(replica).getCurrentReplicaLogDir)
}
@Test
def shouldMoveSinglePartitionWithinBroker() {
// Given a single replica on server 100
startBrokers(Seq(100, 101))
adminClient = createAdminClient(servers)
val expectedLogDir = getRandomLogDirAssignment(100)
createTopic(zkClient, topicName, Map(0 -> Seq(100)), servers = servers)
// When we execute an assignment that moves an existing replica to another log directory on the same broker
val topicJson: String = s"""{"version":1,"partitions":[{"topic":"$topicName","partition":0,"replicas":[100],"log_dirs":["$expectedLogDir"]}]}"""
ReassignPartitionsCommand.executeAssignment(zkClient, Some(adminClient), topicJson, NoThrottle)
val replica = new TopicPartitionReplica(topicName, 0, 100)
TestUtils.waitUntilTrue(() => {
expectedLogDir == adminClient.describeReplicaLogDirs(Collections.singleton(replica)).all().get.get(replica).getCurrentReplicaLogDir
}, "Partition should have been moved to the expected log directory", 1000)
}
@Test
def shouldExpandCluster() {
val brokers = Array(100, 101, 102)
startBrokers(brokers)
adminClient = createAdminClient(servers)
createTopic(zkClient, topicName, Map(
0 -> Seq(100, 101),
1 -> Seq(100, 101),
2 -> Seq(100, 101)
), servers = servers)
//When rebalancing
val newAssignment = generateAssignment(zkClient, brokers, json(topicName), true)._1
// Find a partition in the new assignment on broker 102 and a random log directory on broker 102,
// which currently does not have any partition for this topic
val partition1 = newAssignment.find { case (_, brokerIds) => brokerIds.contains(102) }.get._1.partition
val replica1 = new TopicPartitionReplica(topicName, partition1, 102)
val expectedLogDir1 = getRandomLogDirAssignment(102)
// Find a partition in the new assignment on broker 100 and a random log directory on broker 100,
    // which currently has a partition for this topic
val partition2 = newAssignment.find { case (_, brokerIds) => brokerIds.contains(100) }.get._1.partition
val replica2 = new TopicPartitionReplica(topicName, partition2, 100)
val expectedLogDir2 = getRandomLogDirAssignment(100)
// Generate a replica assignment to reassign replicas on broker 100 and 102 respectively to a random log directory on the same broker.
// Before this reassignment, the replica already exists on broker 100 but does not exist on broker 102
val newReplicaAssignment = Map(replica1 -> expectedLogDir1, replica2 -> expectedLogDir2)
ReassignPartitionsCommand.executeAssignment(zkClient, Some(adminClient),
ReassignPartitionsCommand.formatAsReassignmentJson(newAssignment, newReplicaAssignment), NoThrottle)
waitForReassignmentToComplete()
// Then the replicas should span all three brokers
val actual = zkClient.getPartitionAssignmentForTopics(Set(topicName))(topicName)
assertEquals(Seq(100, 101, 102), actual.values.flatten.toSeq.distinct.sorted)
// The replica should be in the expected log directory on broker 102 and 100
waitUntilTrue(() => {
expectedLogDir1 == adminClient.describeReplicaLogDirs(Collections.singleton(replica1)).all().get.get(replica1).getCurrentReplicaLogDir
}, "Partition should have been moved to the expected log directory on broker 102", 1000)
waitUntilTrue(() => {
expectedLogDir2 == adminClient.describeReplicaLogDirs(Collections.singleton(replica2)).all().get.get(replica2).getCurrentReplicaLogDir
}, "Partition should have been moved to the expected log directory on broker 100", 1000)
}
@Test
def shouldShrinkCluster() {
//Given partitions on 3 of 3 brokers
val brokers = Array(100, 101, 102)
startBrokers(brokers)
createTopic(zkClient, topicName, Map(
0 -> Seq(100, 101),
1 -> Seq(101, 102),
2 -> Seq(102, 100)
), servers = servers)
//When rebalancing
val newAssignment = generateAssignment(zkClient, Array(100, 101), json(topicName), true)._1
ReassignPartitionsCommand.executeAssignment(zkClient, None,
ReassignPartitionsCommand.formatAsReassignmentJson(newAssignment, Map.empty), NoThrottle)
waitForReassignmentToComplete()
//Then replicas should only span the first two brokers
val actual = zkClient.getPartitionAssignmentForTopics(Set(topicName))(topicName)
assertEquals(Seq(100, 101), actual.values.flatten.toSeq.distinct.sorted)
}
@Test
def shouldMoveSubsetOfPartitions() {
//Given partitions on 3 of 3 brokers
val brokers = Array(100, 101, 102)
startBrokers(brokers)
adminClient = createAdminClient(servers)
createTopic(zkClient, "topic1", Map(
0 -> Seq(100, 101),
1 -> Seq(101, 102),
2 -> Seq(102, 100)
), servers = servers)
createTopic(zkClient, "topic2", Map(
0 -> Seq(100, 101),
1 -> Seq(101, 102),
2 -> Seq(102, 100)
), servers = servers)
val proposed: Map[TopicPartition, Seq[Int]] = Map(
new TopicPartition("topic1", 0) -> Seq(100, 102),
new TopicPartition("topic1", 2) -> Seq(100, 102),
new TopicPartition("topic2", 1) -> Seq(101, 100),
new TopicPartition("topic2", 2) -> Seq(100, 102)
)
val replica1 = new TopicPartitionReplica("topic1", 0, 102)
val replica2 = new TopicPartitionReplica("topic2", 1, 100)
val proposedReplicaAssignment: Map[TopicPartitionReplica, String] = Map(
replica1 -> getRandomLogDirAssignment(102),
replica2 -> getRandomLogDirAssignment(100)
)
//When rebalancing
ReassignPartitionsCommand.executeAssignment(zkClient, Some(adminClient),
ReassignPartitionsCommand.formatAsReassignmentJson(proposed, proposedReplicaAssignment), NoThrottle)
waitForReassignmentToComplete()
//Then the proposed changes should have been made
val actual = zkClient.getPartitionAssignmentForTopics(Set("topic1", "topic2"))
assertEquals(Seq(100, 102), actual("topic1")(0))//changed
assertEquals(Seq(101, 102), actual("topic1")(1))
assertEquals(Seq(100, 102), actual("topic1")(2))//changed
assertEquals(Seq(100, 101), actual("topic2")(0))
assertEquals(Seq(101, 100), actual("topic2")(1))//changed
assertEquals(Seq(100, 102), actual("topic2")(2))//changed
// The replicas should be in the expected log directories
val replicaDirs = adminClient.describeReplicaLogDirs(List(replica1, replica2).asJava).all().get()
assertEquals(proposedReplicaAssignment(replica1), replicaDirs.get(replica1).getCurrentReplicaLogDir)
assertEquals(proposedReplicaAssignment(replica2), replicaDirs.get(replica2).getCurrentReplicaLogDir)
}
@Test
def shouldExecuteThrottledReassignment() {
//Given partitions on 3 of 3 brokers
val brokers = Array(100, 101, 102)
startBrokers(brokers)
createTopic(zkClient, topicName, Map(
0 -> Seq(100, 101)
), servers = servers)
//Given throttle set so replication will take a certain number of secs
val initialThrottle = Throttle(10 * 1000 * 1000, -1, () => zkUpdateDelay)
val expectedDurationSecs = 5
val numMessages = 500
val msgSize = 100 * 1000
produceMessages(topicName, numMessages, acks = 0, msgSize)
assertEquals(expectedDurationSecs, numMessages * msgSize / initialThrottle.interBrokerLimit)
//Start rebalance which will move replica on 100 -> replica on 102
val newAssignment = generateAssignment(zkClient, Array(101, 102), json(topicName), true)._1
val start = System.currentTimeMillis()
ReassignPartitionsCommand.executeAssignment(zkClient, None,
ReassignPartitionsCommand.formatAsReassignmentJson(newAssignment, Map.empty), initialThrottle)
//Check throttle config. Should be throttling replica 0 on 100 and 102 only.
checkThrottleConfigAddedToZK(adminZkClient, initialThrottle.interBrokerLimit, servers, topicName, Set("0:100","0:101"), Set("0:102"))
//Await completion
waitForReassignmentToComplete()
val took = System.currentTimeMillis() - start - delayMs
//Check move occurred
val actual = zkClient.getPartitionAssignmentForTopics(Set(topicName))(topicName)
assertEquals(Seq(101, 102), actual.values.flatten.toSeq.distinct.sorted)
//Then command should have taken longer than the throttle rate
assertTrue(s"Expected replication to be > ${expectedDurationSecs * 0.9 * 1000} but was $took",
took > expectedDurationSecs * 0.9 * 1000)
assertTrue(s"Expected replication to be < ${expectedDurationSecs * 2 * 1000} but was $took",
took < expectedDurationSecs * 2 * 1000)
}
@Test
def shouldOnlyThrottleMovingReplicas() {
//Given 6 brokers, two topics
val brokers = Array(100, 101, 102, 103, 104, 105)
startBrokers(brokers)
createTopic(zkClient, "topic1", Map(
0 -> Seq(100, 101),
1 -> Seq(100, 101),
2 -> Seq(103, 104) //will leave in place
), servers = servers)
createTopic(zkClient, "topic2", Map(
0 -> Seq(104, 105),
1 -> Seq(104, 105),
2 -> Seq(103, 104)//will leave in place
), servers = servers)
//Given throttle set so replication will take a while
val throttle: Long = 1000 * 1000
produceMessages("topic1", 100, acks = 0, 100 * 1000)
produceMessages("topic2", 100, acks = 0, 100 * 1000)
//Start rebalance
val newAssignment = Map(
new TopicPartition("topic1", 0) -> Seq(100, 102),//moved 101=>102
new TopicPartition("topic1", 1) -> Seq(100, 102),//moved 101=>102
new TopicPartition("topic2", 0) -> Seq(103, 105),//moved 104=>103
new TopicPartition("topic2", 1) -> Seq(103, 105),//moved 104=>103
new TopicPartition("topic1", 2) -> Seq(103, 104), //didn't move
new TopicPartition("topic2", 2) -> Seq(103, 104) //didn't move
)
ReassignPartitionsCommand.executeAssignment(zkClient, None,
ReassignPartitionsCommand.formatAsReassignmentJson(newAssignment, Map.empty), Throttle(throttle))
//Check throttle config. Should be throttling specific replicas for each topic.
checkThrottleConfigAddedToZK(adminZkClient, throttle, servers, "topic1",
Set("1:100","1:101","0:100","0:101"), //All replicas for moving partitions should be leader-throttled
Set("1:102","0:102") //Move destinations should be follower throttled.
)
checkThrottleConfigAddedToZK(adminZkClient, throttle, servers, "topic2",
Set("1:104","1:105","0:104","0:105"), //All replicas for moving partitions should be leader-throttled
Set("1:103","0:103") //Move destinations should be follower throttled.
)
}
@Test
def shouldChangeThrottleOnRerunAndRemoveOnVerify() {
//Given partitions on 3 of 3 brokers
val brokers = Array(100, 101, 102)
startBrokers(brokers)
createTopic(zkClient, topicName, Map(
0 -> Seq(100, 101)
), servers = servers)
//Given throttle set so replication will take at least 20 sec (we won't wait this long)
val initialThrottle: Long = 1000 * 1000
produceMessages(topicName, numMessages = 200, acks = 0, valueLength = 100 * 1000)
//Start rebalance
val newAssignment = generateAssignment(zkClient, Array(101, 102), json(topicName), true)._1
ReassignPartitionsCommand.executeAssignment(zkClient, None,
ReassignPartitionsCommand.formatAsReassignmentJson(newAssignment, Map.empty), Throttle(initialThrottle))
//Check throttle config
checkThrottleConfigAddedToZK(adminZkClient, initialThrottle, servers, topicName, Set("0:100","0:101"), Set("0:102"))
    //Ensure that running Verify while the command is executing has no effect
verifyAssignment(zkClient, None, ReassignPartitionsCommand.formatAsReassignmentJson(newAssignment, Map.empty))
//Check throttle config again
checkThrottleConfigAddedToZK(adminZkClient, initialThrottle, servers, topicName, Set("0:100","0:101"), Set("0:102"))
//Now re-run the same assignment with a larger throttle, which should only act to increase the throttle and make progress
val newThrottle = initialThrottle * 1000
ReassignPartitionsCommand.executeAssignment(zkClient, None,
ReassignPartitionsCommand.formatAsReassignmentJson(newAssignment, Map.empty), Throttle(newThrottle))
//Check throttle was changed
checkThrottleConfigAddedToZK(adminZkClient, newThrottle, servers, topicName, Set("0:100","0:101"), Set("0:102"))
//Await completion
waitForReassignmentToComplete()
//Verify should remove the throttle
verifyAssignment(zkClient, None, ReassignPartitionsCommand.formatAsReassignmentJson(newAssignment, Map.empty))
//Check removed
checkThrottleConfigRemovedFromZK(adminZkClient, topicName, servers)
//Check move occurred
val actual = zkClient.getPartitionAssignmentForTopics(Set(topicName))(topicName)
assertEquals(Seq(101, 102), actual.values.flatten.toSeq.distinct.sorted)
}
@Test(expected = classOf[AdminCommandFailedException])
def shouldFailIfProposedDoesNotMatchExisting() {
//Given a single replica on server 100
startBrokers(Seq(100, 101))
createTopic(zkClient, topicName, Map(0 -> Seq(100)), servers = servers)
//When we execute an assignment that includes an invalid partition (1:101 in this case)
val topicJson = s"""{"version":1,"partitions":[{"topic":"$topicName","partition":1,"replicas":[101]}]}"""
ReassignPartitionsCommand.executeAssignment(zkClient, None, topicJson, NoThrottle)
}
@Test(expected = classOf[AdminCommandFailedException])
def shouldFailIfProposedHasEmptyReplicaList() {
//Given a single replica on server 100
startBrokers(Seq(100, 101))
createTopic(zkClient, topicName, Map(0 -> Seq(100)), servers = servers)
//When we execute an assignment that specifies an empty replica list (0: empty list in this case)
val topicJson = s"""{"version":1,"partitions":[{"topic":"$topicName","partition":0,"replicas":[]}]}"""
ReassignPartitionsCommand.executeAssignment(zkClient, None, topicJson, NoThrottle)
}
@Test(expected = classOf[AdminCommandFailedException])
def shouldFailIfProposedHasInvalidBrokerID() {
//Given a single replica on server 100
startBrokers(Seq(100, 101))
createTopic(zkClient, topicName, Map(0 -> Seq(100)), servers = servers)
//When we execute an assignment that specifies an invalid brokerID (102: invalid broker ID in this case)
val topicJson = s"""{"version":1,"partitions":[{"topic":"$topicName","partition":0,"replicas":[101, 102]}]}"""
ReassignPartitionsCommand.executeAssignment(zkClient, None, topicJson, NoThrottle)
}
@Test(expected = classOf[AdminCommandFailedException])
def shouldFailIfProposedHasInvalidLogDir() {
// Given a single replica on server 100
startBrokers(Seq(100, 101))
adminClient = createAdminClient(servers)
createTopic(zkClient, topicName, Map(0 -> Seq(100)), servers = servers)
// When we execute an assignment that specifies an invalid log directory
val topicJson: String = s"""{"version":1,"partitions":[{"topic":"$topicName","partition":0,"replicas":[101],"log_dirs":["invalidDir"]}]}"""
ReassignPartitionsCommand.executeAssignment(zkClient, Some(adminClient), topicJson, NoThrottle)
}
@Test(expected = classOf[AdminCommandFailedException])
def shouldFailIfProposedHasInconsistentReplicasAndLogDirs() {
// Given a single replica on server 100
startBrokers(Seq(100, 101))
adminClient = createAdminClient(servers)
val logDir = getRandomLogDirAssignment(100)
createTopic(zkClient, topicName, Map(0 -> Seq(100)), servers = servers)
    // When we execute an assignment whose number of log directories doesn't match the number of replicas
val topicJson: String = s"""{"version":1,"partitions":[{"topic":"$topicName","partition":0,"replicas":[101],"log_dirs":["$logDir", "$logDir"]}]}"""
ReassignPartitionsCommand.executeAssignment(zkClient, Some(adminClient), topicJson, NoThrottle)
}
@Test
def shouldPerformThrottledReassignmentOverVariousTopics() {
val throttle = Throttle(1000L)
startBrokers(Seq(0, 1, 2, 3))
    //Set up several small topics
createTopic(zkClient, "orders", Map(0 -> List(0, 1, 2), 1 -> List(0, 1, 2)), servers)
createTopic(zkClient, "payments", Map(0 -> List(0, 1), 1 -> List(0, 1)), servers)
createTopic(zkClient, "deliveries", Map(0 -> List(0)), servers)
createTopic(zkClient, "customers", Map(0 -> List(0), 1 -> List(1), 2 -> List(2), 3 -> List(3)), servers)
//Define a move for some of them
val move = Map(
new TopicPartition("orders", 0) -> Seq(0, 2, 3),//moves
new TopicPartition("orders", 1) -> Seq(0, 1, 2),//stays
new TopicPartition("payments", 1) -> Seq(1, 2), //only define one partition as moving
new TopicPartition("deliveries", 0) -> Seq(1, 2) //increase replication factor
)
//When we run a throttled reassignment
new ReassignPartitionsCommand(zkClient, None, move, adminZkClient = adminZkClient).reassignPartitions(throttle)
waitForReassignmentToComplete()
//Check moved replicas did move
assertEquals(Seq(0, 2, 3), zkClient.getReplicasForPartition(new TopicPartition("orders", 0)))
assertEquals(Seq(0, 1, 2), zkClient.getReplicasForPartition(new TopicPartition("orders", 1)))
assertEquals(Seq(1, 2), zkClient.getReplicasForPartition(new TopicPartition("payments", 1)))
assertEquals(Seq(1, 2), zkClient.getReplicasForPartition(new TopicPartition("deliveries", 0)))
//Check untouched replicas are still there
assertEquals(Seq(0, 1), zkClient.getReplicasForPartition(new TopicPartition("payments", 0)))
assertEquals(Seq(0), zkClient.getReplicasForPartition(new TopicPartition("customers", 0)))
assertEquals(Seq(1), zkClient.getReplicasForPartition(new TopicPartition("customers", 1)))
assertEquals(Seq(2), zkClient.getReplicasForPartition(new TopicPartition("customers", 2)))
assertEquals(Seq(3), zkClient.getReplicasForPartition(new TopicPartition("customers", 3)))
}
/**
* Verifies that the Controller sets a watcher for the reassignment znode after reassignment completion.
* This includes the case where the znode is set immediately after it's deleted (i.e. before the watch is set).
* This case relies on the scheduling of the operations, so it won't necessarily fail every time, but it fails
* often enough to detect a regression.
*/
@Test
def shouldPerformMultipleReassignmentOperationsOverVariousTopics() {
startBrokers(Seq(0, 1, 2, 3))
createTopic(zkClient, "orders", Map(0 -> List(0, 1, 2), 1 -> List(0, 1, 2)), servers)
createTopic(zkClient, "payments", Map(0 -> List(0, 1), 1 -> List(0, 1)), servers)
createTopic(zkClient, "deliveries", Map(0 -> List(0)), servers)
createTopic(zkClient, "customers", Map(0 -> List(0), 1 -> List(1), 2 -> List(2), 3 -> List(3)), servers)
val firstMove = Map(
new TopicPartition("orders", 0) -> Seq(0, 2, 3), //moves
new TopicPartition("orders", 1) -> Seq(0, 1, 2), //stays
new TopicPartition("payments", 1) -> Seq(1, 2), //only define one partition as moving
new TopicPartition("deliveries", 0) -> Seq(1, 2) //increase replication factor
)
new ReassignPartitionsCommand(zkClient, None, firstMove, adminZkClient = adminZkClient).reassignPartitions()
// Low pause to detect deletion of the reassign_partitions znode before the reassignment is complete
waitForReassignmentToComplete(pause = 1L)
// Check moved replicas did move
assertEquals(Seq(0, 2, 3), zkClient.getReplicasForPartition(new TopicPartition("orders", 0)))
assertEquals(Seq(0, 1, 2), zkClient.getReplicasForPartition(new TopicPartition("orders", 1)))
assertEquals(Seq(1, 2), zkClient.getReplicasForPartition(new TopicPartition("payments", 1)))
assertEquals(Seq(1, 2), zkClient.getReplicasForPartition(new TopicPartition("deliveries", 0)))
// Check untouched replicas are still there
assertEquals(Seq(0, 1), zkClient.getReplicasForPartition(new TopicPartition("payments", 0)))
assertEquals(Seq(0), zkClient.getReplicasForPartition(new TopicPartition("customers", 0)))
assertEquals(Seq(1), zkClient.getReplicasForPartition(new TopicPartition("customers", 1)))
assertEquals(Seq(2), zkClient.getReplicasForPartition(new TopicPartition("customers", 2)))
assertEquals(Seq(3), zkClient.getReplicasForPartition(new TopicPartition("customers", 3)))
// Define a move for some of them
val secondMove = Map(
new TopicPartition("orders", 0) -> Seq(0, 2, 3), // stays
new TopicPartition("orders", 1) -> Seq(3, 1, 2), // moves
new TopicPartition("payments", 1) -> Seq(2, 1), // changed preferred leader
new TopicPartition("deliveries", 0) -> Seq(1, 2, 3) //increase replication factor
)
new ReassignPartitionsCommand(zkClient, None, secondMove, adminZkClient = adminZkClient).reassignPartitions()
// Low pause to detect deletion of the reassign_partitions znode before the reassignment is complete
waitForReassignmentToComplete(pause = 1L)
// Check moved replicas did move
assertEquals(Seq(0, 2, 3), zkClient.getReplicasForPartition(new TopicPartition("orders", 0)))
assertEquals(Seq(3, 1, 2), zkClient.getReplicasForPartition(new TopicPartition("orders", 1)))
assertEquals(Seq(2, 1), zkClient.getReplicasForPartition(new TopicPartition("payments", 1)))
assertEquals(Seq(1, 2, 3), zkClient.getReplicasForPartition(new TopicPartition("deliveries", 0)))
//Check untouched replicas are still there
assertEquals(Seq(0, 1), zkClient.getReplicasForPartition(new TopicPartition("payments", 0)))
assertEquals(Seq(0), zkClient.getReplicasForPartition(new TopicPartition("customers", 0)))
assertEquals(Seq(1), zkClient.getReplicasForPartition(new TopicPartition("customers", 1)))
assertEquals(Seq(2), zkClient.getReplicasForPartition(new TopicPartition("customers", 2)))
assertEquals(Seq(3), zkClient.getReplicasForPartition(new TopicPartition("customers", 3)))
// We set the znode and then continuously attempt to set it again to exercise the case where the znode is set
// immediately after deletion (i.e. before we set the watcher again)
val thirdMove = Map(new TopicPartition("orders", 0) -> Seq(1, 2, 3))
new ReassignPartitionsCommand(zkClient, None, thirdMove, adminZkClient = adminZkClient).reassignPartitions()
val fourthMove = Map(new TopicPartition("payments", 1) -> Seq(2, 3))
// Continuously attempt to set the reassignment znode with `fourthMove` until it succeeds. It will only succeed
// after `thirdMove` completes.
Iterator.continually {
try new ReassignPartitionsCommand(zkClient, None, fourthMove, adminZkClient = adminZkClient).reassignPartitions()
catch {
case _: AdminCommandFailedException => false
}
}.exists(identity)
// Low pause to detect deletion of the reassign_partitions znode before the reassignment is complete
waitForReassignmentToComplete(pause = 1L)
// Check moved replicas for thirdMove and fourthMove
assertEquals(Seq(1, 2, 3), zkClient.getReplicasForPartition(new TopicPartition("orders", 0)))
assertEquals(Seq(2, 3), zkClient.getReplicasForPartition(new TopicPartition("payments", 1)))
//Check untouched replicas are still there
assertEquals(Seq(3, 1, 2), zkClient.getReplicasForPartition(new TopicPartition("orders", 1)))
assertEquals(Seq(1, 2, 3), zkClient.getReplicasForPartition(new TopicPartition("deliveries", 0)))
assertEquals(Seq(0, 1), zkClient.getReplicasForPartition(new TopicPartition("payments", 0)))
assertEquals(Seq(0), zkClient.getReplicasForPartition(new TopicPartition("customers", 0)))
assertEquals(Seq(1), zkClient.getReplicasForPartition(new TopicPartition("customers", 1)))
assertEquals(Seq(2), zkClient.getReplicasForPartition(new TopicPartition("customers", 2)))
assertEquals(Seq(3), zkClient.getReplicasForPartition(new TopicPartition("customers", 3)))
}
/**
* Set the `reassign_partitions` znode while the brokers are down and verify that the reassignment is triggered by
* the Controller during start-up.
*/
@Test
def shouldTriggerReassignmentOnControllerStartup(): Unit = {
startBrokers(Seq(0, 1, 2))
createTopic(zkClient, "orders", Map(0 -> List(0, 1), 1 -> List(1, 2)), servers)
servers.foreach(_.shutdown())
val firstMove = Map(
new TopicPartition("orders", 0) -> Seq(2, 1), // moves
new TopicPartition("orders", 1) -> Seq(1, 2), // stays
new TopicPartition("customers", 0) -> Seq(1, 2) // non-existent topic, triggers topic deleted path
)
// Set znode directly to avoid non-existent topic validation
zkClient.setOrCreatePartitionReassignment(firstMove, ZkVersion.MatchAnyVersion)
servers.foreach(_.startup())
waitForReassignmentToComplete()
assertEquals(Seq(2, 1), zkClient.getReplicasForPartition(new TopicPartition("orders", 0)))
assertEquals(Seq(1, 2), zkClient.getReplicasForPartition(new TopicPartition("orders", 1)))
assertEquals(Seq.empty, zkClient.getReplicasForPartition(new TopicPartition("customers", 0)))
}
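  // A reassignment is complete once the controller has deleted the reassign_partitions znode.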
def waitForReassignmentToComplete(pause: Long = 100L) {
waitUntilTrue(() => !zkClient.reassignPartitionsInProgress,
s"Znode ${ReassignPartitionsZNode.path} wasn't deleted", pause = pause)
}
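  // Builds the topics-to-move JSON accepted by generateAssignment,
  // e.g. {"topics": [{"topic": "my-topic"}],"version":1}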
def json(topic: String*): String = {
    val topicStr = topic.map { t => "{\"topic\": \"" + t + "\"}" }.mkString(",")
s"""{"topics": [$topicStr],"version":1}"""
}
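  // Produces numMessages records with valueLength-byte values so the throttled reassignments have data to move.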
private def produceMessages(topic: String, numMessages: Int, acks: Int, valueLength: Int): Unit = {
val records = (0 until numMessages).map(_ => new ProducerRecord[Array[Byte], Array[Byte]](topic,
new Array[Byte](valueLength)))
TestUtils.produceMessages(servers, records, acks)
}
}
| mihbor/kafka | core/src/test/scala/unit/kafka/admin/ReassignPartitionsClusterTest.scala | Scala | apache-2.0 | 31,257 |
package scalapb
object Scalapb {
def getDescriptor(): com.google.protobuf.Descriptors.FileDescriptor = {
throw new NotImplementedError("Descriptors are not implemented yet for ScalaJS.")
}
}
| dotty-staging/ScalaPB | scalapb-runtime/non-jvm/src/main/scala/scalapb/Scalapb.scala | Scala | apache-2.0 | 200 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package top.template
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.base.Import
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
import org.jetbrains.plugins.scala.lang.parser.parsing.expressions.Expr
import org.jetbrains.plugins.scala.lang.parser.parsing.statements._
/**
* @author Alexander Podkhalyuzin
* Date: 13.02.2008
*/
/*
* TemplateStat ::= Import
* | {AttributeClause} {Modifier} Def
* | {AttributeClause} {Modifier} Dcl
* | Expr
*/
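/*
 * For example, each of the following lines inside a template body is one TemplateStat:
 *   import scala.collection.mutable          // Import
 *   def twice(x: Int): Int = x * 2           // Def
 *   def size: Int                            // Dcl
 *   println("initialising")                  // Expr
 */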
object TemplateStat {
def parse(builder: ScalaPsiBuilder): Boolean = {
builder.getTokenType match {
case ScalaTokenTypes.kIMPORT =>
Import parse builder
return true
case _ =>
if (Def parse builder) {
return true
} else if (Dcl parse builder) {
return true
} else if (EmptyDcl parse builder) {
return true
} else if (Expr.parse(builder)) {
return true
} else {
return false
}
}
}
} | advancedxy/intellij-scala | src/org/jetbrains/plugins/scala/lang/parser/parsing/top/template/TemplateStat.scala | Scala | apache-2.0 | 1,191 |
package kvstore
import akka.actor.{Actor, ActorRef, Props, ReceiveTimeout}
import scala.concurrent.duration._
import scala.language.postfixOps
object Replicator {
case class Replicate(key: String, valueOption: Option[String], id: Long)
case class Replicated(key: String, id: Long)
case class Snapshot(key: String, valueOption: Option[String], seq: Long)
case class SnapshotAck(key: String, seq: Long)
def props(replica: ActorRef): Props = Props(new Replicator(replica))
}
class Replicator(val replica: ActorRef) extends Actor {
import Replicator._
import Replica._
import context.dispatcher
/*
   * The contents of this actor are just a suggestion; you can implement it in any way you like.
*/
// map from sequence number to pair of sender and request
var acks = Map.empty[Long, (ActorRef, Replicate)]
// a sequence of not-yet-sent snapshots (you can disregard this if not implementing batching)
var pending = Vector.empty[Snapshot]
var _seqCounter = 0L
def nextSeq = {
val ret = _seqCounter
_seqCounter += 1
ret
}
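  // One snapshot is in flight at a time: each Replicate is forwarded to the replica as a Snapshot
  // with a fresh sequence number and retransmitted on ReceiveTimeout until the matching SnapshotAck
  // arrives, at which point Replicated is sent back to the original requester.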
def receive: Receive = {
case Replicate(key, valueOption, id) =>
val seq = nextSeq
replica ! Snapshot(key, valueOption, seq)
acks = acks.updated(seq, (sender, Replicate(key, valueOption, id)))
context.setReceiveTimeout(100 milliseconds)
context.become(pendingConfirmation(seq))
}
def pendingConfirmation(expected: Long): Receive = {
case msg: Replicate =>
val seq = nextSeq
acks = acks.updated(seq, (sender, msg))
/*
TODO Initial idea to implement batching
for {
(_,m) <- acks.get(expected)
if (m.key.equals(msg.key))
} yield {
pending = pending :+ Snapshot(msg.key, msg.valueOption, seq)
}
*/
case ReceiveTimeout =>
for {
(_,Replicate(k,v,id)) <- acks.get(expected)
} yield replica ! Snapshot(k,v,expected)
case SnapshotAck(key, s) =>
for {
(sender, Replicate(k,v,id)) <- acks.get(s)
} yield {
sender ! Replicated(k, id)
acks = acks - s
acks.get(expected + 1) match {
case None =>
context.become(receive)
context.setReceiveTimeout(Duration.Undefined)
case Some((_, Replicate(k,v,id))) =>
replica ! Snapshot(k, v, expected + 1)
context.become(pendingConfirmation(expected + 1))
}
}
}
}
| alexlloreda/reactive | kvstore/src/main/scala/kvstore/Replicator.scala | Scala | mit | 2,438 |
package org.jetbrains.plugins.scala.lang.psi.stubs.elements.signatures
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs.{StubElement, StubInputStream, StubOutputStream}
import com.intellij.util.io.StringRef
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScClassParameter, ScParameter}
import org.jetbrains.plugins.scala.lang.psi.stubs.ScParameterStub
import org.jetbrains.plugins.scala.lang.psi.stubs.elements.ScStubElementType
import org.jetbrains.plugins.scala.lang.psi.stubs.impl.ScParameterStubImpl
import org.jetbrains.plugins.scala.lang.psi.stubs.elements._
/**
* User: Alexander Podkhalyuzin
* Date: 19.10.2008
*/
abstract class ScParamElementType[P <: ScParameter](debugName: String) extends ScStubElementType[ScParameterStub, ScParameter](debugName) {
override def serialize(stub: ScParameterStub, dataStream: StubOutputStream): Unit = {
dataStream.writeName(stub.getName)
dataStream.writeOptionName(stub.typeText)
dataStream.writeBoolean(stub.isStable)
dataStream.writeBoolean(stub.isDefaultParameter)
dataStream.writeBoolean(stub.isRepeated)
dataStream.writeBoolean(stub.isVal)
dataStream.writeBoolean(stub.isVar)
dataStream.writeBoolean(stub.isCallByNameParameter)
dataStream.writeOptionName(stub.bodyText)
dataStream.writeOptionName(stub.deprecatedName)
}
override def deserialize(dataStream: StubInputStream, parentStub: StubElement[_ <: PsiElement]): ScParameterStub =
new ScParameterStubImpl(parentStub, this,
nameRef = dataStream.readName,
typeTextRef = dataStream.readOptionName,
isStable = dataStream.readBoolean,
isDefaultParameter = dataStream.readBoolean,
isRepeated = dataStream.readBoolean,
isVal = dataStream.readBoolean,
isVar = dataStream.readBoolean,
isCallByNameParameter = dataStream.readBoolean,
bodyTextRef = dataStream.readOptionName,
deprecatedNameRef = dataStream.readOptionName)
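  // Builds the stub from the PSI element, capturing only what indexing needs:
  // name, type text, modifier flags, default expression text and deprecated name.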
override def createStubImpl(parameter: ScParameter, parentStub: StubElement[_ <: PsiElement]): ScParameterStub = {
val typeText = parameter.typeElement.map {
_.getText
}
val (isVal, isVar) = parameter match {
case parameter: ScClassParameter => (parameter.isVal, parameter.isVar)
case _ => (false, false)
}
val defaultExprText = parameter.getActualDefaultExpression.map {
_.getText
}
new ScParameterStubImpl(parentStub, this,
nameRef = StringRef.fromString(parameter.name),
typeTextRef = typeText.asReference,
isStable = parameter.isStable,
isDefaultParameter = parameter.baseDefaultParam,
isRepeated = parameter.isRepeatedParameter,
isVal = isVal,
isVar = isVar,
isCallByNameParameter = parameter.isCallByNameParameter,
bodyTextRef = defaultExprText.asReference,
deprecatedNameRef = parameter.deprecatedName.asReference)
}
} | gtache/intellij-lsp | intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/psi/stubs/elements/signatures/ScParamElementType.scala | Scala | apache-2.0 | 2,914 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.eval
object CoevalRightSuite extends BaseTestSuite {
test("Coeval.right should return a Now with a Right") { implicit s =>
val t = Coeval.right[Int, String]("t")
t.value() match {
case Right(_: String) =>
case _ => fail("Expected Coeval with a Right")
}
}
}
| monifu/monifu | monix-eval/shared/src/test/scala/monix/eval/CoevalRightSuite.scala | Scala | apache-2.0 | 976 |
package models
import java.util.UUID
case class MajorSubject(majorID: UUID, subject: Subject, required: Boolean)
| yoo-haemin/hufs-planner | project/app/models/MajorSubject.scala | Scala | agpl-3.0 | 115 |
package gsd.linux.stats
import gsd.linux._
import java.io.PrintStream
class ASEStatistics(val ck: ConcreteKConfig)
extends FeatureStatistics(ck) with VisibilityStatistics {
// Feature Kinds
// menus, menuconfigs, configs, choice
// Switch features
lazy val boolType = boolConfigs
lazy val tristateType = triConfigs
// Post-processed ConcreteKConfig (removed inherited and depends on)
lazy val ppk = ASEStatistics.removeInheritedAndDependsOn(ck)
lazy val apk = ppk.toAbstractKConfig
  // Post-processed ConcreteKConfig (removed inherited)
lazy val ipk = ASEStatistics.removeInherited(ck)
}
object ASEStatistics {
import KExprList._
import org.kiama.rewriting.Rewriter._
def fixHexIdentifiers(ck: ConcreteKConfig): ConcreteKConfig = {
def isHex(s: String) =
try {
Integer.parseInt(s, 16)
true
}
catch {
      case _: NumberFormatException => false
}
lazy val fixHex =
rule[IdOrValue] {
case Id(x) if isHex(x) && !ck.configMap.contains(x) =>
KInt(Integer.parseInt(x, 16))
}
rewrite(everywheretd(fixHex))(ck)
}
/**
* Removes a conjunction from the condition of a property
*/
def removeCondition(propCond: KExpr, conj: KExpr): KExpr = {
val cj = conj.splitConjunctions
propCond.splitConjunctions filterNot
{ cj contains } filter
{ _ != Yes } mkConjunction
}
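  /**
   * Applies f to the condition of every property (prompts, defaults, selects, ranges) of each
   * config, rewriting the whole Kconfig tree top-down.
   */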
def rewriteProperties(ck: ConcreteKConfig)(f: (CConfig, Property) => KExpr)
: ConcreteKConfig = {
val strategy =
everywheretd {
rule[CConfig] {
case config@CConfig(_,_,_,_,inh,pros,ds,selects,rngs,_,_) =>
config.copy (
prompt = pros map
{ p => p.copy( c = f(config, p) ) },
defs = ds map
{ d => d.copy( c = f(config, d) ) },
sels = selects map
{ s => s.copy( c = f(config, s) ) },
ranges = rngs map
{ r => r.copy( c = f(config, r) ) }
)
}
}
rewrite(strategy)(ck)
}
/**
* Removes inherited expression from property conditions.
*/
def removeInherited(ck: ConcreteKConfig): ConcreteKConfig =
rewriteProperties(ck){ (config, p) =>
removeCondition(p.cond, config.inherited)
}
/**
* Removes depends on expression from property conditions.
*/
def removeDependsOn(ck: ConcreteKConfig): ConcreteKConfig =
rewriteProperties(ck){ (config, p) =>
(p.cond /: config.depends){ (pcond, dep) => removeCondition(pcond, dep.cond) }
}
def removeInheritedAndDependsOn(ck: ConcreteKConfig): ConcreteKConfig =
removeDependsOn(removeInherited(ck))
}
| AlexanderKnueppel/is-there-a-mismatch | Source/KConfigTranslator/src/main/scala/gsd/linux/stats/ASEStatistics.scala | Scala | lgpl-3.0 | 2,750 |
/*
* Copyright 2017 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.mongodb
package io
import org.bson.{BsonReader, BsonWriter}
import org.bson.codecs.{Codec, DecoderContext, EncoderContext}
object LongCodec extends Codec[BsonLong] {
override def decode(reader: BsonReader, d: DecoderContext) = BsonLong(reader.readInt64())
override def encode(writer: BsonWriter, value: BsonLong, e: EncoderContext) = writer.writeInt64(value.value)
override def getEncoderClass = classOf[BsonLong]
}
| nrinaudo/kantan.mongodb | core/src/main/scala/kantan/mongodb/io/LongCodec.scala | Scala | apache-2.0 | 1,103 |
/*
* This file is part of the Linux Variability Modeling Tools (LVAT).
*
* Copyright (C) 2011 Steven She <[email protected]>
*
* LVAT is free software: you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the
* Free Software Foundation, either version 3 of the License, or (at your
* option) any later version.
*
* LVAT is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
* more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with LVAT. (See files COPYING and COPYING.LESSER.) If not, see
* <http://www.gnu.org/licenses/>.
*/
package gsd.linux.stats
import util.logging.ConsoleLogger
import java.io.PrintStream
import gsd.linux.cnf.DimacsReader.{DimacsHeader, DimacsProblem}
import gsd.linux.cnf.{DimacsReader, SATBuilder}
import java.util.Scanner
import org.clapper.argot._
import gsd.linux.tools.ArgotUtil
object DeadFeaturesMain extends ArgotUtil with ConsoleLogger {
import ArgotConverters._
val name = "DeadFeaturesMain"
val inParam = parser.parameter[String]("in-file",
"input file containing CNF in dimacs format, stdin if not specified", false)
val outParam = parser.parameter[String]("out-file",
"output file for the list of dead features, stdout if not specified", true)
val genFlag = parser.flag[Boolean](List("g"),
"do NOT consider variables that end with '_m' as generated")
def main(args: Array[String]) {
try {
parser.parse(args)
val (header, problem): (DimacsHeader, DimacsProblem) =
(pOpt.value, inParam.value) match {
case (Some(_), Some(_)) =>
parser.usage("Either a project (-p) is specified or input & output parameters are used.")
case (Some(p), None) => (p.header, p.dimacs)
case (None, Some(f)) =>
(DimacsReader.readHeaderFile(f), DimacsReader.readFile(f))
case (None, None) =>
log("Using stdin as input...")
log("Warning: dimacs parsing from stdin is experimental!")
val scanner = new Scanner(System.in)
val header = DimacsReader.readHeader(scanner)
val dimacs = DimacsReader.read(scanner)
(header, dimacs)
}
val output =
(pOpt.value, outParam.value) match {
case (Some(p), None) => new PrintStream(p.implgFile.get)
case (None, Some(f)) => new PrintStream(f)
case _ => System.out
}
execute(header, problem, output)
}
catch {
case e: ArgotUsageException => println(e.message)
}
}
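  // Builds a SAT instance over the dimacs CNF, treating the given variables as generated,
  // and prints every dead (never-satisfiable) feature to the output stream.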
def execute(header: DimacsHeader, dimacs: DimacsProblem,
out: PrintStream) {
val generated =
if (genFlag.value.getOrElse(false)) header.generated
else {
log("[INFO] Considering features that end with _m as generated...")
header.generated ++
(header.varMap filter { case (_,v) => v.endsWith("_m") } map (_._1))
}
log("Initializing SAT solver...")
val sat = new SATBuilder(dimacs.cnf, dimacs.numVars, generated)
with ConsoleLogger
val stats = new SATStatistics(sat, header.idMap) with ConsoleLogger
stats.deadFeatures foreach out.println
}
}
| scas-mdd/linux-variability-analysis-tools.fm-translation | src/main/scala/gsd/linux/stats/DeadFeaturesMain.scala | Scala | gpl-3.0 | 3,440 |
package org.jetbrains.plugins.scala
package lang
package transformation
package types
/**
* @author Pavel Fatin
*/
class ExpandTupleTypeTest extends TransformerTest(new ExpandTupleType()) {
def testTuple2(): Unit = check(
before = "val v: (A, B)",
after = "val v: Tuple2[A, B]"
)()
def testTuple3(): Unit = check(
before = "val v: (A, B, C)",
after = "val v: Tuple3[A, B, C]"
)()
def testParenthesis(): Unit = check(
before = "val v: (A)",
after = "val v: (A)"
)()
def testInsideFunctionType(): Unit = check(
before = "val v: (A, B) => C",
after = "val v: (A, B) => C"
)()
def testExplicit(): Unit = check(
before = "val v: Tuple2[A, B]",
after = "val v: Tuple2[A, B]"
)()
}
| ilinum/intellij-scala | test/org/jetbrains/plugins/scala/lang/transformation/types/ExpandTupleTypeTest.scala | Scala | apache-2.0 | 745 |
package com.nthportal.concurrent
import java.util.concurrent.Callable
package object _cancellable_task {
@inline
private[concurrent] def callable[A](body: => A): Callable[A] = () => body
}
| NthPortal/cancellable-task | src/main/scala-2.12/com/nthportal/concurrent/_cancellable_task/package.scala | Scala | apache-2.0 | 195 |
/*
* Copyright 2010 LinkedIn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.javaapi
import kafka.utils.IteratorTemplate
import java.nio.ByteBuffer
import message.ByteBufferMessageSet
class MultiFetchResponse(buffer: ByteBuffer, numSets: Int) extends java.lang.Iterable[ByteBufferMessageSet] {
val underlyingBuffer = ByteBuffer.wrap(buffer.array)
// this has the side effect of setting the initial position of buffer correctly
val errorCode = underlyingBuffer.getShort
import Implicits._
val underlying = new kafka.api.MultiFetchResponse(underlyingBuffer, numSets)
override def toString() = underlying.toString
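  // Exposes the underlying Scala iterator through java.util.Iterator so Java clients can
  // consume the per-request message sets lazily.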
def iterator : java.util.Iterator[ByteBufferMessageSet] = {
new IteratorTemplate[ByteBufferMessageSet] {
val iter = underlying.iterator
override def makeNext(): ByteBufferMessageSet = {
if(iter.hasNext)
iter.next
else
return allDone
}
}
}
} | quipo/kafka | core/src/main/scala/kafka/javaapi/MultiFetchResponse.scala | Scala | apache-2.0 | 1,464 |
package modelservice.core
import scala.util.{Success, Failure}
import scala.concurrent.Future
import akka.actor._
import akka.cluster.{Cluster, MemberStatus}
import akka.cluster.ClusterEvent._
import akka.routing.{ActorRefRoutee, RemoveRoutee, RoundRobinPool}
import com.typesafe.config.ConfigFactory
import modelservice.core.prediction.PredictionActors
import modelservice.storage.BaseAssetManagerAPI.SyncAssets
import modelservice.storage._
import modelservice.api.CoreApi
/**
* Core
*/
trait Core {
implicit def system: ActorSystem
val config = ConfigFactory.load()
}
trait InitCore extends Core {
// Initialize the ActorSystem
implicit lazy val system = ActorSystem("model-service-actors")
}
trait CoreActors {
val parseActor: ActorRef
val modelParser: ActorRef
}
trait CoreActorSet extends CoreActors {
this: Core with PredictionActors with StorageActors =>
val parseActor = system actorOf(Props(new FeatureParser(this)).withRouter(RoundRobinPool(nrOfInstances = 8)),
name = "parseActor")
val modelParser = system actorOf(Props(new ModelParser(this)).withRouter(RoundRobinPool(nrOfInstances = 4)),
name = "modelParser")
}
/**
* Mix in trait for clustering
*/
trait ClusterCore {
this: Core with ClusteredStorageActorSet with CoreApi =>
implicit val executionContext = system.dispatcher
val cluster = Cluster(system)
var currentNodes = Set.empty[Address]
var currentLeader: Option[Address] = None
var leaderShutdownActorRef: ActorRef = clusterShutdownHandler
val selfRemoteAddressString = s"akka.tcp://${system.name}@${config.getString("clustering.ip")}:${config.getString("clustering.port")}"
val antiEntropyPeriod = config.hasPath("modelservice.anti-entropy-period-ms") match {
case true => config.getInt("modelservice.anti-entropy-period-ms")
case false => 60000
}
// val selfRemoteAddress = Address("akka.tcp", system.name, config.getString("clustering.ip"), config.getString("clustering.port").toInt)
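  // This node considers itself the leader when the cluster-reported leader address matches
  // its own remote address string.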
def currentNodeIsLeader(): Boolean = {
currentLeader match {
case Some(leaderAddress) => if (leaderAddress.toString == selfRemoteAddressString) true else false
case None => false
}
}
val clusterListener = system.actorOf(Props(new Actor with ActorLogging {
val selfRemoteAddress = self.path.address
import scala.concurrent.duration._
def receive = {
case state: CurrentClusterState =>
log.info("Current members: {}", state.members)
currentNodes = state.members.collect {
case m if m.status == MemberStatus.up => m.address
}
log.info("Number of nodes in cluster: {}", currentNodes.size)
log.info("Current leader: {}", state.leader)
currentLeader = state.leader
updateLeaderActor(currentLeader)
case MemberJoined(member) =>
log.info("Member joined: {}", member)
case LeaderChanged(leader) =>
log.info("Leader changed: {}", leader)
currentLeader = leader
updateLeaderActor(currentLeader)
case MemberUp(member) =>
log.info("Member is Up: {}", member)
currentNodes += member.address
// The leader should run a round of anti-entropy on the incoming node to sync its assets with the rest of the
// cluster
if (currentNodeIsLeader()) {
log.info(s"Leader sending anti-entropy instructions to new member ${member.address}")
syncNode(Some(member.address))
}
case UnreachableMember(member) =>
log.info("Member detected as unreachable: {}", member)
currentNodes -= member.address
if (currentNodeIsLeader()) {
log.info(s"Leader removing unreachable member ${member.address}")
cluster.leave(member.address)
cluster.down(member.address)
}
case MemberRemoved(member, _) =>
currentNodes -= member.address
log.info("Member removed: {}", member)
case MemberExited(member) =>
currentNodes -= member.address
log.info("Member exited: {}", member)
case _: ClusterDomainEvent => // ignore
}
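    // Resolves the model storage actor on the given node and asks it to sync its assets with
    // this node's modelActor (anti-entropy push).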
def syncNode(remoteNode: Option[Address]) = {
import akka.util.Timeout
import scala.concurrent.duration._
implicit val timeout = Timeout(5 seconds)
remoteNode match {
case Some(l) => context.actorSelection(RootActorPath(l) / "user" / modelStorageActorName).resolveOne(10 seconds) onComplete {
case Success(result: ActorRef) => {
val remoteActor = result
log.info(s"remoteActor: $remoteActor")
remoteActor.tell(SyncAssets(), modelActor)
}
case Failure(exception) => log.info(s"ERROR COULD NOT RESOLVE REMOTE ACTOR: $exception")
}
case None => log.info("ERROR NO LEADER")
}
}
def updateLeaderActor(curLeader: Option[Address]) = {
curLeader match {
case Some(l) => context.actorSelection(RootActorPath(l) / "user" / "cluster-shutdown-handler").resolveOne(3 seconds) onComplete {
case Success(result: ActorRef) => {
val remoteActor = result
leaderShutdownActorRef = remoteActor
log.info(s"leaderShutdownActorRef: $remoteActor")
}
case Failure(exception) => log.info(s"ERROR COULD NOT RESOLVE REMOTE ACTOR: $exception")
}
case None => log.info("ERROR NO LEADER")
}
}
val syncLoop: Future[Unit] = Future {
// Run anti-entropy check periodically
while (true) {
Thread.sleep(antiEntropyPeriod)
currentLeader match {
case Some(leader: Address) => {
if (currentNodeIsLeader()) {
(currentNodes - leader).foreach {
case (nodeAddress: Address) => {
log.info(s"Leader sending anti-entropy instructions to existing member ${nodeAddress}")
syncNode(Some(nodeAddress))
}
}
}
}
case None => log.info("No leader node assigned yet")
}
}
}
}), name = "cluster-listener")
val clusterShutdownHandler = system.actorOf(Props(new Actor with ActorLogging {
sys addShutdownHook {
// Tell the leader node to alert the cluster that we are leaving
exit()
log.info("Shutting down")
// Shutdown the JVM when the actor system shuts down
system.terminate()
}
import scala.concurrent.duration._
def receive = {
case ClusterCoreMessages.Exit() =>
val exitAddress = sender.path.address
log.info(s"Exiting member: ${exitAddress}")
log.info(s"Removing address: $exitAddress")
currentNodes -= exitAddress
val exitRoutee = sender()
currentLeader match {
case Some(leader: Address) => {
if (currentNodeIsLeader()) {
(currentNodes - leader).foreach {
case (nodeAddress: Address) =>
context.actorSelection(RootActorPath(nodeAddress) / "user" / "cluster-shutdown-handler")
.resolveOne(2 seconds) onComplete {
case Success(nodeActor: ActorRef) => nodeActor ! ClusterCoreMessages.ExitNode(exitAddress, exitRoutee)
case Failure(exception) => log.info(s"ERROR: could not resolve remote actor: $exception")
}
}
log.info(s"Removing routee: $exitRoutee")
modelServiceRouter ! RemoveRoutee(ActorRefRoutee(exitRoutee))
}
}
case None => log.info("No leader node assigned yet")
}
Thread.sleep(5000)
cluster.leave(exitAddress)
cluster.down(exitAddress)
case ClusterCoreMessages.ExitNode(exitAddress, exitRoutee) =>
log.info(s"Removing address: $exitAddress")
currentNodes -= exitAddress
log.info(s"Removing routee: $exitRoutee")
modelServiceRouter ! RemoveRoutee(ActorRefRoutee(exitRoutee))
case other =>
val exitAddress = sender.path.address
log.info(s"Unknown request: ${other} from sender ${sender} with address ${exitAddress}")
}
def exit() = {
log.info("Exiting")
leaderShutdownActorRef.tell(ClusterCoreMessages.Exit(), modelServiceLocal)
log.info("Goodbye")
Thread.sleep(10000)
}
}), name = "cluster-shutdown-handler")
val modelActor = system.actorOf(Props(new BaseAssetManager[MSHashMap[String], String](modelStorageMap))
.withRouter(RoundRobinPool(nrOfInstances = 4)), modelStorageActorName)
println (s"modelActor: $modelActor")
val clusterStatusActor = system.actorOf(Props(new Actor with ActorLogging {
import org.json4s._
import org.json4s.jackson.Serialization
import spray.http.ContentTypes._
import spray.http.{HttpEntity, HttpResponse}
import spray.http.HttpHeaders._
implicit val formats = DefaultFormats
def receive = {
case ClusterCoreMessages.GetNodes() => {
val client = sender()
val nodeMap = Map(
"nodes" -> currentNodes.toList.flatMap(_.host)
)
val nodeMapWithLeader = currentLeader match {
case Some(leader) => leader.host match {
case Some(host) => nodeMap.updated("leader", host)
case None => nodeMap
}
case None => nodeMap
}
client ! HttpResponse(
200,
entity = HttpEntity(
`application/json`,
Serialization.write(nodeMapWithLeader)
),
headers = List(Connection("close"))
)
}
}
}), name = "cluster-status")
cluster.subscribe(clusterListener, classOf[ClusterDomainEvent])
}
object ClusterCoreMessages {
final case class Exit()
final case class OK()
final case class ExitNode(exitAddress: Address, exitRoutee: ActorRef)
final case class GetNodes()
}
| kiip/model-service | src/main/scala/modelservice/core/Core.scala | Scala | bsd-3-clause | 9,903 |
package cromwell.engine.backend.jes
import com.google.api.client.util.ArrayMap
import com.google.api.services.genomics.{Genomics, model}
import com.google.api.services.genomics.model.{CancelOperationRequest, LoggingOptions, Pipeline, RunPipelineArgs, RunPipelineRequest, ServiceAccount, _}
import com.typesafe.config.ConfigFactory
import cromwell.core.WorkflowId
import cromwell.engine.backend.BackendCallJobDescriptor
import cromwell.engine.backend.jes.JesBackend._
import cromwell.engine.backend.jes.Run.{Failed, Running, Success, _}
import cromwell.engine.db.DataAccess._
import cromwell.engine.workflow.BackendCallKey
import cromwell.engine.{AbortFunction, ExecutionEventEntry}
import cromwell.logging.WorkflowLogger
import cromwell.util.google.GoogleScopes
import org.joda.time.DateTime
import org.slf4j.LoggerFactory
import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.language.postfixOps
object Run {
val JesServiceAccount = new ServiceAccount().setEmail("default").setScopes(GoogleScopes.Scopes.asJava)
lazy val MaximumPollingInterval = Duration(ConfigFactory.load.getConfig("backend").getConfig("jes").getInt("maximumPollingInterval"), "seconds")
val InitialPollingInterval = 5 seconds
val PollingBackoffFactor = 1.1
def apply(runIdForResumption: Option[String],
jesJobDescriptor: JesJobDescriptor,
jesParameters: Seq[JesParameter],
projectId: String,
genomicsInterface: Genomics): Run = {
lazy val jobDescriptor = jesJobDescriptor.jobDescriptor
lazy val workflow = jobDescriptor.workflowDescriptor
lazy val command = jesJobDescriptor.jesCommandLine
lazy val runtimeAttributes = jobDescriptor.callRuntimeAttributes
lazy val key = jobDescriptor.key
lazy val gcsPath = jobDescriptor.callRootPath.toString
val logger = WorkflowLogger(
"JES Run",
workflow,
otherLoggers = Seq(LoggerFactory.getLogger(getClass.getName)),
callTag = Option(key.tag)
)
logger.debug(s"Command line is: $command")
val runtimeInfo = if (jesJobDescriptor.preemptible) PreemptibleJesRuntimeInfo(command, runtimeAttributes) else NonPreemptibleJesRuntimeInfo(command, runtimeAttributes)
val pipeline = new model.Pipeline()
.setProjectId(projectId)
.setDocker(runtimeInfo.docker)
.setResources(runtimeInfo.resources)
.setName(workflow.name)
.setInputParameters(jesParameters.collect({ case i: JesInput => i.toGooglePipelineParameter }).toVector.asJava)
.setOutputParameters(jesParameters.collect({ case i: JesFileOutput => i.toGooglePipelineParameter }).toVector.asJava)
def runPipeline(): String = {
val rpargs = new RunPipelineArgs().setProjectId(projectId).setServiceAccount(JesServiceAccount)
rpargs.setInputs(jesParameters.collect({ case i: JesInput => i.name -> i.toGoogleRunParameter }).toMap.asJava)
logger.info(s"Inputs:\\n${stringifyMap(rpargs.getInputs.asScala.toMap)}")
rpargs.setOutputs(jesParameters.collect({ case i: JesFileOutput => i.name -> i.toGoogleRunParameter }).toMap.asJava)
logger.info(s"Outputs:\\n${stringifyMap(rpargs.getOutputs.asScala.toMap)}")
val rpr = new RunPipelineRequest().setEphemeralPipeline(pipeline).setPipelineArgs(rpargs)
val logging = new LoggingOptions()
logging.setGcsPath(s"$gcsPath/${JesBackend.jesLogFilename(key)}")
rpargs.setLogging(logging)
val runId = genomicsInterface.pipelines().run(rpr).execute().getName
logger.info(s"JES Run ID is $runId")
runId
}
// If runIdForResumption is defined use that, otherwise we'll create a new Run with an ephemeral pipeline.
val runId = runIdForResumption getOrElse runPipeline
new Run(runId, workflow.id, key, genomicsInterface, logger)
}
  private def stringifyMap(m: Map[String, String]): String = m map { case (k, v) => s"  $k -> $v" } mkString "\n"
implicit class RunOperationExtension(val operation: Operation) extends AnyVal {
def hasStarted = operation.getMetadata.asScala.get("startTime") isDefined
}
sealed trait RunStatus {
// Could be defined as false for Initializing and true otherwise, but this is more defensive.
def isRunningOrComplete = this match {
case Running | _: TerminalRunStatus => true
case _ => false
}
}
trait TerminalRunStatus extends RunStatus
case object Initializing extends RunStatus
case object Running extends RunStatus
case class Success(events: Seq[ExecutionEventEntry]) extends TerminalRunStatus {
override def toString = "Success"
}
final case class Failed(errorCode: Int, errorMessage: Option[String], events: Seq[ExecutionEventEntry]) extends TerminalRunStatus {
// Don't want to include errorMessage or code in the snappy status toString:
override def toString = "Failed"
}
// An event with a startTime timestamp
private case class EventStartTime(name: String, timestamp: DateTime)
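  // Converts the operation metadata (createTime/startTime, the "events" list and endTime) into
  // ExecutionEventEntry values by pairing consecutive timestamps as start/end bounds.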
def getEventList(op: Operation): Seq[ExecutionEventEntry] = {
val starterEvents = eventIfExists("createTime", op, "waiting for quota") ++ eventIfExists("startTime", op, "initializing VM")
val eventsList: Seq[EventStartTime] = if (op.getMetadata.containsKey("events")) {
op.getMetadata.get("events").asInstanceOf[java.util.ArrayList[AnyRef]].asScala map { x =>
val entry = x.asInstanceOf[ArrayMap[String, String]]
EventStartTime(entry.get("description"), DateTime.parse(entry.get("startTime")))
} toSeq
} else Seq.empty
// The final event is only used as the book-end for the final pairing (see below) so the name is never actually used...
// ... which is rather a pity actually - it's a jolly good name.
val finaleEvents = eventIfExists("endTime", op, "cromwell poll interval") ++ Seq(
EventStartTime("The Queen flying around with a jet-pack, with Winston Churchill cheering and waving a huge Union Jack in the background", DateTime.now))
// Join the Seqs together, pair up consecutive elements then make events with start and end times.
((starterEvents ++ eventsList ++ finaleEvents).sliding(2) toSeq) map { case Seq(a, b) => ExecutionEventEntry(a.name, a.timestamp, b.timestamp) }
}
private def eventIfExists(name: String, op: Operation, eventName: String): Seq[EventStartTime] = {
val metadata = op.getMetadata
if(metadata.containsKey(name))
Seq(EventStartTime(eventName, DateTime.parse(metadata.get(name).asInstanceOf[String])))
else
Seq.empty
}
}
case class Run(runId: String, workflowId: WorkflowId, key: BackendCallKey, genomicsInterface: Genomics, logger: WorkflowLogger) {
lazy val call = key.scope
def status(): RunStatus = {
val op = genomicsInterface.operations().get(runId).execute
if (op.getDone) {
// If there's an error, generate a Failed status. Otherwise, we were successful!
val eventList = getEventList(op)
Option(op.getError) map { x => Failed(x.getCode, Option(x.getMessage), eventList) } getOrElse Success(eventList)
} else if (op.hasStarted) {
Running
} else {
Initializing
}
}
def checkStatus(jobDescriptor: BackendCallJobDescriptor, previousStatus: Option[RunStatus]): RunStatus = {
val currentStatus = status()
if (!(previousStatus contains currentStatus)) {
// If this is the first time checking the status, we log the transition as '-' to 'currentStatus'. Otherwise
// just use the state names.
val prevStateName = previousStatus map { _.toString } getOrElse "-"
logger.info(s"Status change from $prevStateName to $currentStatus")
/*
TODO: Not sure we're supposed to be directly talking to the database.
This doesn't even wait for the future to complete. Pretty sure this should be a message to the workflow actor,
that then contacts the database to change the state. For now, updating this end run to the database to pass in the
default, global execution context.
*/
// Update the database state:
// TODO the database API should probably be returning DBIOs so callers can compose and wrap with a transaction.
globalDataAccess.updateExecutionInfo(workflowId, BackendCallKey(call, key.index, key.attempt), JesBackend.InfoKeys.JesRunId, Option(runId))(ExecutionContext.global)
globalDataAccess.updateExecutionInfo(workflowId, BackendCallKey(call, key.index, key.attempt), JesBackend.InfoKeys.JesStatus, Option(currentStatus.toString))(ExecutionContext.global)
// If this has transitioned to a running or complete state from a state that is not running or complete,
// register the abort function.
if (currentStatus.isRunningOrComplete && (previousStatus.isEmpty || !previousStatus.get.isRunningOrComplete)) {
jobDescriptor.abortRegistrationFunction.foreach(_.register(AbortFunction(() => abort())))
}
}
currentStatus
}
def abort(): Unit = {
val cancellationRequest: CancelOperationRequest = new CancelOperationRequest()
genomicsInterface.operations().cancel(runId, cancellationRequest).execute
}
}
| cowmoo/cromwell | engine/src/main/scala/cromwell/engine/backend/jes/Run.scala | Scala | bsd-3-clause | 9,230 |
package com.github.j5ik2o.reactive.redis.feature
import java.util.UUID
import cats.data.NonEmptyList
import com.github.j5ik2o.reactive.redis._
import com.github.j5ik2o.reactive.redis.command.sets.{ SAddFailed, SAddRequest, SAddSucceeded, SAddSuspended }
trait SetsAPI[M[_]] {
def sAdd(key: String, member: String, members: String*): M[Result[Long]]
def sAdd(key: String, members: NonEmptyList[String]): M[Result[Long]]
}
trait SetsFeature extends SetsAPI[ReaderTTaskRedisConnection] { this: RedisClient =>
override def sAdd(key: String, member: String, members: String*): ReaderTTaskRedisConnection[Result[Long]] =
sAdd(key, NonEmptyList.of(member, members: _*))
override def sAdd(key: String, members: NonEmptyList[String]): ReaderTTaskRedisConnection[Result[Long]] =
send(SAddRequest(UUID.randomUUID(), key, members)).flatMap {
case SAddSuspended(_, _) => ReaderTTask.pure(Suspended)
case SAddSucceeded(_, _, value) => ReaderTTask.pure(Provided(value))
case SAddFailed(_, _, ex) => ReaderTTask.raiseError(ex)
}
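
  // Rough usage sketch (hypothetical `client: RedisClient` in scope; not part of this trait):
  //   client.sAdd("tags", NonEmptyList.of("scala", "redis"))
  // yields a ReaderTTaskRedisConnection[Result[Long]] that is later run against a connection.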
/*
* SCARD
* SDIFF
* SDIFFSTORE
* SINTER
* SINTERSTORE
* SISMEMBER
* SMEMBERS
* SMOVE
* SPOP
* SRANDMEMBER
* SREM
* SSCAN
* SUNION
* SUNIONSTORE
*/
}
| j5ik2o/reactive-redis | core/src/main/scala/com/github/j5ik2o/reactive/redis/feature/SetsFeature.scala | Scala | mit | 1,238 |
package com.twitter.finagle.util
import com.twitter.finagle.Stack
import com.twitter.finagle.param.Label
import com.twitter.util.registry.GlobalRegistry
import java.util.concurrent.atomic.AtomicInteger
import scala.language.existentials
object StackRegistry {
/**
* Represents an entry in the registry.
*/
case class Entry(addr: String, stack: Stack[_], params: Stack.Params) {
// Introspect the entries stack and params. We limit the
// reflection of params to case classes.
// TODO: we might be able to make this avoid reflection with Showable
val modules: Seq[Module] = stack.tails.map { node =>
val raw = node.head.parameters.map { p => params(p) }
val reflected = raw.foldLeft(Seq.empty[(String, String)]) {
case (seq, p: Product) =>
// TODO: many case classes have a $outer field because they close over an outside scope.
// this is not very useful, and it might make sense to filter them out in the future.
val fields = p.getClass.getDeclaredFields.map(_.getName).toSeq
val values = p.productIterator.map(_.toString).toSeq
seq ++ (fields.zipAll(values, "<unknown>", "<unknown>"))
case (seq, _) => seq
}
Module(node.head.role.name, node.head.description, reflected)
}.toSeq
val name: String = params[Label].label
}
/**
* The module describing a given Param for a Stack element.
*/
case class Module(name: String, description: String, fields: Seq[(String, String)])
}
/**
* A registry that allows the registration of a string identifier with a
 * [[com.twitter.finagle.Stack]] and its params. This is especially useful
* in keeping a process global registry of Finagle clients and servers for
* dynamic introspection.
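 *
 * A minimal usage sketch (illustrative only; a concrete subclass supplies
 * `registryName`, and `myStack`/`myParams` stand in for a real stack and params):
 * {{{
 * object MyRegistry extends StackRegistry { def registryName: String = "client" }
 * MyRegistry.register("inet!localhost:8080", myStack, myParams)
 * MyRegistry.registrants.foreach { entry => println(entry.name) }
 * }}}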
*/
trait StackRegistry {
import StackRegistry._
/** The name of the [[StackRegistry]], to be used for identification in the registry. */
def registryName: String
private[this] var registry = Map.empty[String, Entry]
private[this] val numEntries = new AtomicInteger(0)
/** Registers an `addr` and `stk`. */
def register(addr: String, stk: Stack[_], params: Stack.Params): Unit = {
val entry = Entry(addr, stk, params)
addEntries(entry)
synchronized { registry += entry.name -> entry }
}
/** Unregisters an `addr` and `stk`. */
def unregister(addr: String, stk: Stack[_], params: Stack.Params): Unit = {
val entry = Entry(addr, stk, params)
synchronized { registry -= entry.name }
removeEntries(entry)
}
private[this] def addEntries(entry: Entry): Unit = {
val gRegistry = GlobalRegistry.get
entry.modules.foreach { case Module(paramName, _, reflected) =>
reflected.foreach { case (field, value) =>
val key = Seq(registryName, entry.name, entry.addr, paramName, field)
if (gRegistry.put(key, value).isEmpty)
numEntries.incrementAndGet()
}
}
}
private[this] def removeEntries(entry: Entry): Unit = {
val gRegistry = GlobalRegistry.get
val name = entry.name
entry.modules.foreach { case Module(paramName, _, reflected) =>
reflected.foreach { case (field, value) =>
val key = Seq(registryName, name, entry.addr, paramName, field)
if (gRegistry.remove(key).isDefined)
numEntries.decrementAndGet()
}
}
}
/** Returns the number of entries */
def size: Int = numEntries.get
/** Returns a list of all entries. */
def registrants: Iterable[Entry] = synchronized { registry.values }
// added for tests
private[finagle] def clear(): Unit = synchronized { registry = Map.empty[String, Entry] }
}
| cogitate/twitter-finagle-uuid | finagle-core/src/main/scala/com/twitter/finagle/util/StackRegistry.scala | Scala | apache-2.0 | 3,632 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command.schema
import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.execution.command._
import org.apache.spark.sql.hive.{CarbonRelation, CarbonSessionState}
import org.apache.spark.util.AlterTableUtil
import org.apache.carbondata.common.logging.{LogService, LogServiceFactory}
import org.apache.carbondata.format.TableInfo
private[sql] case class AlterTableUnsetCommand(val tableIdentifier: TableIdentifier,
val propKeys: Seq[String],
val ifExists: Boolean,
val isView: Boolean)
extends RunnableCommand with SchemaProcessCommand {
override def run(sparkSession: SparkSession): Seq[Row] = {
processSchema(sparkSession)
}
override def processSchema(sparkSession: SparkSession): Seq[Row] = {
val LOGGER: LogService = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
AlterTableUtil.modifyTableComment(tableIdentifier, Map.empty[String, String],
propKeys, false)(sparkSession, sparkSession.sessionState.asInstanceOf[CarbonSessionState])
Seq.empty
}
}
| HuaweiBigData/carbondata | integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/schema/AlterTableUnsetCommand.scala | Scala | apache-2.0 | 2,068 |
/**
* Copyright 2009 Jorge Ortiz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**/
package com.github.nscala_time.time
import org.joda.time._
import com.github.nscala_time.PimpedType
class RichDateTimeZone(val underlying: DateTimeZone) extends Super with PimpedType[DateTimeZone] {
def id: String = underlying.getID
}
| mamdouhweb/nscala-time | src/main/scala/com/github/nscala_time/time/RichDateTimeZone.scala | Scala | apache-2.0 | 839 |
package controllers
import java.sql.{Array =>_, _}
object Test {
def testInt() = {
val numsList = List( (1 to 100), (1 to 10000), (1 to 10000) )
val rownumsList = List( (1 to 1000 ) , (1 to 5000), (1 to 10000) )
numsList.foreach { nums =>
(1 to 10).foreach { t =>
Application.time("Relate: Select 1 column from "+nums.size.toString+" records") { RelateTests.select1_int(nums) }
}
(1 to 10).foreach { t =>
Application.time("Anorm 2.1.1: Select 1 column from "+nums.size.toString+" records") { AnormTests.select1_int(nums.mkString(",")) }
}
(1 to 10).foreach { t =>
Application.time("JDBC: Select 1 column from "+nums.size.toString+" records") { JDBCTests.select1_int(nums) }
}
(1 to 10).foreach { t =>
Application.time("Relate: Select 10 columns from "+nums.size.toString+" records") { RelateTests.select2_int(nums) }
}
(1 to 10).foreach { t =>
Application.time("Anorm 2.1.1: Select 10 columns from "+nums.size.toString+" records") { AnormTests.select2_int(nums.mkString(",")) }
}
(1 to 10).foreach { t =>
Application.time("JDBC: Select 10 columns from "+nums.size.toString+" records") { JDBCTests.select2_int(nums) }
}
(1 to 10).foreach { t =>
Application.time("Relate: Select 25 columns from "+nums.size.toString+" records") { RelateTests.select3_int(nums) }
}
(1 to 10).foreach { t =>
Application.time("Anorm 2.1.1: Select 25 columns from "+nums.size.toString+" records") { AnormTests.select3_int(nums.mkString(",")) }
}
(1 to 10).foreach { t =>
Application.time("JDBC: Select 25 columns from "+nums.size.toString+" records") { JDBCTests.select3_int(nums) }
}
}
rownumsList.foreach { nums =>
(1 to 10).foreach { t =>
Application.time("Relate: Insert 10 columns , "+nums.size.toString +" records") { RelateTests.insert1_int(nums) }
}
(1 to 10).foreach { t =>
Application.time("Anorm 2.1.1: Insert 10 columns , "+nums.size.toString +" records") { AnormTests.insert1_int(nums) }
}
(1 to 10).foreach { t =>
Application.time("JDBC: Insert 10 columns , "+nums.size.toString +" records") { JDBCTests.insert1_int(nums) }
}
(1 to 10).foreach { t =>
Application.time("Relate: Insert 50 columns , "+nums.size.toString +" records") { RelateTests.insert2_int(nums) }
}
(1 to 10).foreach { t =>
Application.time("Anorm 2.1.1: Insert 50 columns , "+nums.size.toString +" records") { AnormTests.insert2_int(nums) }
}
(1 to 10).foreach { t =>
Application.time("JDBC: Insert 50 columns , "+nums.size.toString +" records") { JDBCTests.insert2_int(nums) }
}
(1 to 10).foreach { t =>
Application.time("Relate: Insert 50 columns , "+nums.size.toString+" records") { RelateTests.insert3_int(nums) }
}
(1 to 10).foreach { t =>
Application.time("Anorm 2.1.1: Insert 50 columns , "+nums.size.toString+" records") { AnormTests.insert3_int(nums) }
}
(1 to 10).foreach { t =>
Application.time("JDBC: Insert 50 columns , "+nums.size.toString+" records") { JDBCTests.insert3_int(nums) }
}
}
numsList.foreach { nums =>
(1 to 10).foreach { t =>
Application.time("Relate: Update 2 columns , "+nums.size.toString +" records") { RelateTests.update1_int(nums) }
}
(1 to 10).foreach { t =>
Application.time("Anorm 2.1.1: Update 2 columns , "+nums.size.toString +" records") { AnormTests.update1_int(nums.mkString(",")) }
}
(1 to 10).foreach { t =>
Application.time("JDBC: Update 2 columns , "+nums.size.toString +" records") { JDBCTests.update1_int(nums) }
}
(1 to 10).foreach { t=>
Application.time("Relate: Update 10 columns , "+nums.size.toString +" records") { RelateTests.update2_int(nums) }
}
(1 to 10).foreach { t=>
Application.time("Anorm 2.1.1: Update 10 columns , "+nums.size.toString +" records") { AnormTests.update2_int(nums.mkString(",")) }
}
(1 to 10).foreach { t=>
Application.time("JDBC: Update 10 columns , "+nums.size.toString +" records") { JDBCTests.update2_int(nums) }
}
(1 to 10).foreach { t =>
Application.time("Relate: Update 20 columns , "+nums.size.toString+" records") { RelateTests.update3_int(nums) }
}
(1 to 10).foreach { t =>
Application.time("Anorm 2.1.1: Update 20 columns , "+nums.size.toString+" records") { AnormTests.update3_int(nums.mkString(",")) }
}
(1 to 10).foreach { t =>
Application.time("JDBC: Update 20 columns , "+nums.size.toString+" records") { JDBCTests.update3_int(nums) }
}
}
//def testDouble = {
// val doubleResults1 = (1 to 10).map ( trial => (trial,
// Application.time("Relate") { RelateTests.select1_double() } ,
// Application.time("Anorm") { AnormTests.select1_double() },
// Application.time("JDBC") { JDBCTests.select1("double") } )).toList
//
//
// }
}
} | lucidsoftware/relate-benchmarks | app/controllers/Test.scala | Scala | mit | 4,967 |
package main.scala.Trivia
import java.util.{ArrayList, LinkedList}
class Game {
var players: ArrayList[String] = new ArrayList[String]
var places: Array[Int] = new Array[Int](6)
var purses: Array[Int] = new Array[Int](6)
var inPenaltyBox: Array[Boolean] = new Array[Boolean](6)
var popQuestions: LinkedList[String] = new LinkedList[String]
var scienceQuestions: LinkedList[String] = new LinkedList[String]
var sportsQuestions: LinkedList[String] = new LinkedList[String]
var rockQuestions: LinkedList[String] = new LinkedList[String]
var currentPlayer: Int = 0
var isGettingOutOfPenaltyBox: Boolean = false
def initialize() {
var i: Int = 0
while (i < 50) {
popQuestions.addLast("Pop Question " + i)
scienceQuestions.addLast(("Science Question " + i))
sportsQuestions.addLast(("Sports Question " + i))
rockQuestions.addLast(createRockQuestion(i))
i += 1
}
}
initialize()
def createRockQuestion(index: Int): String = "Rock Question " + index
def isPlayable: Boolean = (howManyPlayers >= 2)
def add(playerName: String): Boolean = {
players.add(playerName)
places(howManyPlayers) = 0
purses(howManyPlayers) = 0
inPenaltyBox(howManyPlayers) = false
println(playerName + " was added")
println("They are player number " + players.size)
true
}
def howManyPlayers: Int = players.size
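
  // Board/penalty-box rules implemented below: the board has 12 squares (0 to 11,
  // wrapping past 11); an odd roll lets a player in the penalty box play this turn,
  // an even roll keeps them in it for the whole turn.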
def roll(roll: Int): Unit = {
println(players.get(currentPlayer) + " is the current player")
println("They have rolled a " + roll)
if (inPenaltyBox(currentPlayer)) {
if (roll % 2 != 0) {
isGettingOutOfPenaltyBox = true
println(players.get(currentPlayer) + " is getting out of the penalty box")
places(currentPlayer) = places(currentPlayer) + roll
if (places(currentPlayer) > 11) places(currentPlayer) = places(currentPlayer) - 12
println(players.get(currentPlayer) + "'s new location is " + places(currentPlayer))
println("The category is " + currentCategory)
askQuestion
} else {
println(players.get(currentPlayer) + " is not getting out of the penalty box")
isGettingOutOfPenaltyBox = false
}
} else {
places(currentPlayer) = places(currentPlayer) + roll
if (places(currentPlayer) > 11) places(currentPlayer) = places(currentPlayer) - 12
println(players.get(currentPlayer) + "'s new location is " + places(currentPlayer))
println("The category is " + currentCategory)
askQuestion
}
}
private def askQuestion: Unit = {
if (currentCategory == "Pop") println(popQuestions.removeFirst)
if (currentCategory == "Science") println(scienceQuestions.removeFirst)
if (currentCategory == "Sports") println(sportsQuestions.removeFirst)
if (currentCategory == "Rock") println(rockQuestions.removeFirst)
}
private def currentCategory: String = {
if (places(currentPlayer) == 0) return "Pop"
if (places(currentPlayer) == 4) return "Pop"
if (places(currentPlayer) == 8) return "Pop"
if (places(currentPlayer) == 1) return "Science"
if (places(currentPlayer) == 5) return "Science"
if (places(currentPlayer) == 9) return "Science"
if (places(currentPlayer) == 2) return "Sports"
if (places(currentPlayer) == 6) return "Sports"
if (places(currentPlayer) == 10) return "Sports"
"Rock"
}
def wasCorrectlyAnswered: Boolean = {
if (inPenaltyBox(currentPlayer)) {
if (isGettingOutOfPenaltyBox) {
println("Answer was correct!!!!")
purses(currentPlayer) += 1
println(players.get(currentPlayer) + " now has " + purses(currentPlayer) + " Gold Coins.")
var winner: Boolean = didPlayerWin
currentPlayer += 1
if (currentPlayer == players.size) currentPlayer = 0
winner
} else {
currentPlayer += 1
if (currentPlayer == players.size) currentPlayer = 0
true
}
} else {
println("Answer was corrent!!!!")
purses(currentPlayer) += 1
println(players.get(currentPlayer) + " now has " + purses(currentPlayer) + " Gold Coins.")
var winner: Boolean = didPlayerWin
currentPlayer += 1
if (currentPlayer == players.size) currentPlayer = 0
winner
}
}
def wrongAnswer: Boolean = {
println("Question was incorrectly answered")
println(players.get(currentPlayer) + " was sent to the penalty box")
inPenaltyBox(currentPlayer) = true
currentPlayer += 1
if (currentPlayer == players.size) currentPlayer = 0
true
}
private def didPlayerWin: Boolean = !(purses(currentPlayer) == 6)
} | ollielo/ScalaKata | src/main/scala/Trivia/Game.scala | Scala | mit | 4,622 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package docs.home.scaladsl.persistence
import scala.collection.immutable
import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry
import com.lightbend.lagom.scaladsl.playjson.JsonSerializer
object BlogPostSerializerRegistry extends JsonSerializerRegistry {
override def serializers: immutable.Seq[JsonSerializer[_]] =
BlogCommand.serializers ++ BlogEvent.serializers :+ JsonSerializer[BlogState]
}
| ignasi35/lagom | docs/manual/scala/guide/cluster/code/docs/home/scaladsl/persistence/BlogPostSerializerRegistry.scala | Scala | apache-2.0 | 496 |
package org.jetbrains.plugins.scala.performance
import java.io.File
import java.util
import com.intellij.lang.javascript.boilerplate.GithubDownloadUtil
import com.intellij.openapi.externalSystem.model.ProjectSystemId
import com.intellij.openapi.externalSystem.settings.ExternalProjectSettings
import com.intellij.openapi.externalSystem.test.ExternalSystemImportingTestCase
import com.intellij.openapi.projectRoots.ProjectJdkTable
import com.intellij.openapi.projectRoots.impl.JavaAwareProjectJdkTableImpl
import com.intellij.openapi.roots.ProjectRootManager
import com.intellij.openapi.vfs.{LocalFileSystem, VirtualFile}
import com.intellij.platform.templates.github.ZipUtil
import com.intellij.psi.search.{FileTypeIndex, GlobalSearchScopesCore}
import com.intellij.testFramework.{IdeaTestUtil, VfsTestUtil}
import org.jetbrains.SbtStructureSetup
import org.jetbrains.plugins.scala.finder.SourceFilterScope
import org.jetbrains.plugins.scala.util.TestUtils
import org.jetbrains.plugins.scala.util.reporter.ProgressReporter
import org.jetbrains.plugins.scala.{ScalaFileType, extensions}
import org.jetbrains.sbt.project.SbtProjectSystem
import org.jetbrains.sbt.project.settings.SbtProjectSettings
import org.junit.Assert
/**
* Author: Svyatoslav Ilinskiy
* Date: 11/17/2015
*/
abstract class DownloadingAndImportingTestCase extends ExternalSystemImportingTestCase with SbtStructureSetup {
implicit class IntExt(val i: Int) {
def seconds: Int = i * 1000
}
override protected def getCurrentExternalProjectSettings: ExternalProjectSettings = {
val settings = new SbtProjectSettings
val internalSdk = JavaAwareProjectJdkTableImpl.getInstanceEx.getInternalJdk
val sdk = if (internalSdk == null) IdeaTestUtil.getMockJdk18
else internalSdk
settings.setJdk(sdk.getName)
settings.setCreateEmptyContentRootDirectories(true)
settings
}
protected val reporter = ProgressReporter.newInstance()
override protected def getExternalSystemId: ProjectSystemId = SbtProjectSystem.Id
override protected def getTestsTempDir: String = ""
def rootDirPath: String = s"${TestUtils.getTestDataPath}/projects"
def projectDirPath: String = s"$rootDirPath/$githubRepoName"
def downloadURL: String = s"https://github.com/$githubUsername/$githubRepoName/archive/$revision.zip"
def outputZipFileName = s"$rootDirPath/zipFiles/$githubRepoName-$githubUsername-$revision"
override def setUpInWriteAction(): Unit = {
super.setUpInWriteAction()
val outputZipFile = new File(outputZipFileName)
val projectDir = new File(projectDirPath)
if (!outputZipFile.exists() && !projectDir.exists()) {
//don't download if zip file is already there
reporter.notify("Starting download")
GithubDownloadUtil.downloadAtomically(reporter.progressIndicator, downloadURL, outputZipFile, githubUsername, githubRepoName)
} else { reporter.notify("Project files already exist, skipping download") }
if (!projectDir.exists()) {
//don't unpack if the project is already unpacked
reporter.notify("Finished download, extracting")
ZipUtil.unzip(null, projectDir, outputZipFile, null, null, true)
} else { reporter.notify("Project files already extracted") }
Assert.assertTrue("Project dir does not exist. Download or unpack failed!", projectDir.exists())
reporter.notify("Finished extracting, starting SBT setup")
myProjectRoot = LocalFileSystem.getInstance.refreshAndFindFileByIoFile(projectDir)
setUpSbtLauncherAndStructure(myProject)
extensions.inWriteAction {
val internalSdk = JavaAwareProjectJdkTableImpl.getInstanceEx.getInternalJdk
val sdk = if (internalSdk == null) IdeaTestUtil.getMockJdk17
else internalSdk
if (ProjectJdkTable.getInstance().findJdk(sdk.getName) == null) {
ProjectJdkTable.getInstance().addJdk(sdk)
}
ProjectRootManager.getInstance(myProject).setProjectSdk(sdk)
reporter.notify("Finished SBT setup, starting import")
}
}
override def setUp(): Unit = {
super.setUp()
importProject()
}
def findFile(filename: String): VirtualFile = {
import scala.collection.JavaConversions._
val searchScope = SourceFilterScope(myProject, GlobalSearchScopesCore.directoryScope(myProject, myProjectRoot, true))
val files: util.Collection[VirtualFile] = FileTypeIndex.getFiles(ScalaFileType.INSTANCE, searchScope)
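    // Resolve either by bare file name (which must be unique in the project) or by a
    // path relative to the project root.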
val file = files.filter(_.getName == filename).toList match {
case vf :: Nil => vf
case Nil => // is this a file path?
val file = VfsTestUtil.findFileByCaseSensitivePath(s"$projectDirPath/$filename")
Assert.assertTrue(
s"Could not find file: $filename. Consider providing relative path from project root",
file != null && files.contains(file)
)
file
case list =>
Assert.fail(s"There are ${list.size} files with name $filename. Provide full path from project root")
null
}
LocalFileSystem.getInstance().refreshFiles(files)
file
}
def githubUsername: String
def githubRepoName: String
def revision: String
}
trait ScalaCommunityGithubRepo {
def githubUsername: String = "JetBrains"
def githubRepoName: String = "intellij-scala"
def revision: String = "a9ac902e8930c520b390095d9e9346d9ae546212"
}
| loskutov/intellij-scala | test/org/jetbrains/plugins/scala/performance/DownloadingAndImportingTestCase.scala | Scala | apache-2.0 | 5,319 |
package gapt.formats.tip.transformation
import gapt.formats.tip.analysis.SymbolTable
import gapt.formats.tip.parser.TipSmtAnd
import gapt.formats.tip.parser.TipSmtAssertion
import gapt.formats.tip.parser.TipSmtCase
import gapt.formats.tip.parser.TipSmtConstructorPattern
import gapt.formats.tip.parser.TipSmtEq
import gapt.formats.tip.parser.TipSmtExists
import gapt.formats.tip.parser.TipSmtExpression
import gapt.formats.tip.parser.TipSmtForall
import gapt.formats.tip.parser.TipSmtFun
import gapt.formats.tip.parser.TipSmtFunctionDefinition
import gapt.formats.tip.parser.TipSmtGoal
import gapt.formats.tip.parser.TipSmtIdentifier
import gapt.formats.tip.parser.TipSmtImp
import gapt.formats.tip.parser.TipSmtIte
import gapt.formats.tip.parser.TipSmtMatch
import gapt.formats.tip.parser.TipSmtMutualRecursiveFunctionDefinition
import gapt.formats.tip.parser.TipSmtNot
import gapt.formats.tip.parser.TipSmtOr
import gapt.formats.tip.parser.TipSmtProblem
import gapt.formats.tip.parser.TipSmtType
import gapt.formats.tip.parser.TipSmtVariableDecl
import gapt.formats.tip.util.Substitution
import gapt.formats.tip.util.Substitute
object expandVariableMatchExpressions extends TipSmtProblemTransformation {
override def transform( problem: TipSmtProblem ): TipSmtProblem =
new VariableMatchExpansion( problem )()
}
/**
* This class expands Boolean match-expressions whose matched expression is
* a variable.
*
* Let E be the boolean match-expression
* ( match x ( case p_1 e_1) ... (case p_n e_n)),
* and let X_i be the variables in pattern p_i. Then the expression E is
* expanded into the formula
* !X_1 (e_1[x/p_1]) & ... & !X_n (e_n[x/p_n]).
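 *
 * For example (illustrative only, assuming a list datatype with constructors
 * nil and cons), the expression
 *   (match xs (case nil e_1) (case (cons y ys) e_2))
 * under universal polarity becomes
 *   e_1[xs/nil] & !y !ys (e_2[xs/(cons y ys)]),
 * while under existential polarity the dual form with disjunctions and
 * existential quantifiers is produced (see the Polarity values below).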
*
* The variable-match-expressions are expanded from outside to inside.
*
* @param problem A well-formed TIP problem whose variable-match expressions
* are to be expanded.
*/
class VariableMatchExpansion( problem: TipSmtProblem ) {
problem.symbolTable = Some( SymbolTable( problem ) )
private sealed trait Polarity
private case object Forall extends Polarity
private case object Exists extends Polarity
/**
* Expands variable-match expressions in the given problem.
*
* Variable-match expressions are expanded in function definitions, goals and
* assertions.
*
* @return A problem without variable-match expressions.
*/
def apply(): TipSmtProblem = {
problem.copy( definitions = problem.definitions map {
_ match {
case fun @ TipSmtFunctionDefinition( _, _, _, _, body ) =>
apply( fun )
case funDefs @ TipSmtMutualRecursiveFunctionDefinition( _ ) =>
funDefs.copy( functions = funDefs.functions.map { apply } )
case goal @ TipSmtGoal( _, formula ) =>
goal.copy( expr =
expandVariableMatch( formula, Map[String, Polarity]() ) )
case assertion @ TipSmtAssertion( _, formula ) =>
assertion.copy( expr =
expandVariableMatch( formula, Map[String, Polarity]() ) )
case definition => definition
}
} )
}
private def apply(
fun: TipSmtFunctionDefinition ): TipSmtFunctionDefinition = {
fun.copy( body = expandVariableMatch( fun.body, Map[String, Polarity]() ) )
}
/**
* Expands variable-match expressions in the given expression.
*
* @param expression The expression whose variable-match expressions are to
* be expanded.
* @return An expression without variable-match subexpressions.
*/
def expandVariableMatch(
expression: TipSmtExpression,
variables: Map[String, Polarity] ): TipSmtExpression = {
expression match {
case expr @ TipSmtAnd( _ ) =>
expr.copy( expr.exprs.map { expandVariableMatch( _, variables ) } )
case expr @ TipSmtOr( _ ) =>
expr.copy( expr.exprs.map { expandVariableMatch( _, variables ) } )
case expr @ TipSmtImp( _ ) =>
expr.copy( expr.exprs.map { expandVariableMatch( _, variables ) } )
case expr @ TipSmtEq( _ ) =>
expr.copy( expr.exprs.map { expandVariableMatch( _, variables ) } )
case expr @ TipSmtFun( _, _ ) =>
expr.copy( arguments =
expr.arguments.map { expandVariableMatch( _, variables ) } )
case expr @ TipSmtNot( _ ) =>
expr.copy( expandVariableMatch( expr.expr, variables ) )
case expr @ TipSmtForall( _, _ ) =>
expandVariableMatch( expr, variables )
case expr @ TipSmtExists( _, _ ) =>
expandVariableMatch( expr, variables )
case expr @ TipSmtMatch( _, _ ) =>
expandVariableMatch( expr, variables )
case expr @ TipSmtIte( _, _, _ ) =>
TipSmtIte(
expandVariableMatch( expr.cond, variables ),
expandVariableMatch( expr.ifTrue, variables ),
expandVariableMatch( expr.ifFalse, variables ) )
case _ => expression
}
}
private def expandVariableMatch(
forall: TipSmtForall,
variables: Map[String, Polarity] ): TipSmtExpression = {
val newVariables = variables ++ forall.variables.map { _.name -> Forall }
forall.copy( formula = expandVariableMatch( forall.formula, newVariables ) )
}
private def expandVariableMatch(
exists: TipSmtExists,
variables: Map[String, Polarity] ): TipSmtExpression = {
val newVariables = variables ++ exists.variables.map { _.name -> Exists }
exists.copy( formula = expandVariableMatch( exists.formula, newVariables ) )
}
/**
* Expands variable-match expressions in the given expression.
*
* @param tipSmtMatch The expression whose variable-match expressions are to
* be expanded.
* @return An expression without variable-match subexpressions.
*/
def expandVariableMatch(
tipSmtMatch: TipSmtMatch,
variables: Map[String, Polarity] ): TipSmtExpression = {
tipSmtMatch.expr match {
case identifier @ TipSmtIdentifier( _ ) //
if variables.contains( identifier.name ) =>
val polarity = variables( identifier.name )
val connective = polarity match {
case Forall => TipSmtAnd
case Exists => TipSmtOr
}
connective( tipSmtMatch.cases
.map { expandCaseStatement( identifier, _, polarity ) }
.map { expandVariableMatch( _, variables ) } )
case _ => tipSmtMatch.copy( cases = tipSmtMatch.cases map {
expandVariableMatch
} )
}
}
/**
* Converts a case statement of a variable-match expression into a formula.
*
* @param variable The variable that is matched upon.
* @param tipSmtCase The case statement to be expanded.
* @return A formula of the form !X (e[x/p]), where X are the
* variables occurring in the case-statement's pattern, e is the case
* statement's expression and p is the case statement's pattern.
*/
def expandCaseStatement(
variable: TipSmtIdentifier,
tipSmtCase: TipSmtCase,
polarity: Polarity ): TipSmtExpression = {
val pattern @ TipSmtConstructorPattern( _, _ ) = tipSmtCase.pattern
val boundVariables =
problem
.symbolTable.get.typeOf( pattern.constructor.name )
.argumentTypes.zip( pattern.identifiers )
.filter { case ( _, field ) => isVariable( field ) }
.map {
case ( ty, field ) =>
TipSmtVariableDecl( field.name, TipSmtType( ty.name ) )
}
val quantifier = polarity match {
case Forall => TipSmtForall
case Exists => TipSmtExists
}
if ( boundVariables.isEmpty )
new Substitute( problem )(
tipSmtCase.expr,
Substitution( variable ->
patternToExpression( pattern ) ) )
else
quantifier(
boundVariables,
( new Substitute( problem ) )(
tipSmtCase.expr,
Substitution( variable -> patternToExpression( pattern ) ) ) )
}
/**
* Converts a pattern into an expression.
*
* @param pattern The pattern to be converted into an expression.
* @return A formula representing the pattern.
*/
def patternToExpression(
pattern: TipSmtConstructorPattern ): TipSmtExpression = {
TipSmtFun( pattern.constructor.name, pattern.identifiers )
}
/**
* Expands variable-match expressions in the given case-statement.
* @param tipSmtCase The case statement in whose expression variable-match
* expressions are expanded.
* @return A case statement whose expression does not contain variable
* patterns.
*/
def expandVariableMatch( tipSmtCase: TipSmtCase ): TipSmtCase =
tipSmtCase.copy(
expr = expandVariableMatch( tipSmtCase.expr, Map[String, Polarity]() ) )
/**
* Checks whether the given identifier represents a variable.
*
* @param identifier The identifier to be checked.
* @return true if the given identifier is a variable, false otherwise.
*/
def isVariable( identifier: TipSmtIdentifier ): Boolean = {
!problem.symbolTable.get.contains( identifier.name )
}
}
| gapt/gapt | core/src/main/scala/gapt/formats/tip/transformation/variableMatchExpansion.scala | Scala | gpl-3.0 | 8,982 |
package com.twitter.finagle.httpx.codec
import org.jboss.netty.channel.{ChannelHandlerContext, MessageEvent, SimpleChannelHandler}
import org.jboss.netty.handler.codec.http.HttpRequest
import org.jboss.netty.handler.ssl.SslHandler
/**
* Extract the cipher from the SslHandler and set it as a header on the HTTP
 * request before sending it upstream.
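 *
 * Typically installed in the channel pipeline after the SslHandler, e.g.
 * (illustrative sketch): pipeline.addLast("annotateCipher", new AnnotateCipher("X-Cipher")).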
*/
class AnnotateCipher(headerName: String) extends SimpleChannelHandler {
override def messageReceived(ctx: ChannelHandlerContext, e: MessageEvent) {
(e.getMessage, ctx.getPipeline.get(classOf[SslHandler])) match {
case (req: HttpRequest, ssl: SslHandler) =>
req.headers.set(headerName, ssl.getEngine().getSession().getCipherSuite())
case _ =>
()
}
super.messageReceived(ctx, e)
}
}
| jamescway/finagle | finagle-httpx/src/main/scala/com/twitter/finagle/httpx/codec/AnnotateCipher.scala | Scala | apache-2.0 | 783 |
package de.commercetools.graphite
import java.util.concurrent.TimeUnit
import language.postfixOps
import org.influxdb.InfluxDB.ConsistencyLevel
import org.influxdb.InfluxDBFactory
import org.influxdb.dto.{Point, BatchPoints}
import rx.lang.scala.Observable
import scala.concurrent.duration._
import scala.util.{Random, Failure, Success}
object InfluxDBTest extends App {
val influxDB = InfluxDBFactory.connect("http://localhost:8086", "root", "root")
val db = "foo"
val servers = List("app01", "app01", "app03")
def sendStats() = {
println("Sending")
val points = (1 to 30).toList map { i =>
val p = Point
.measurement("request33")
.tag("server", Random.shuffle(servers).head)
.tag("project", "project" + Random.nextInt(10))
.field("aa", Random.nextInt(100).toLong)
.field("value", Random.nextInt(3000).toLong)
.build()
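      // With these tags/fields, p.lineProtocol() renders roughly:
      //   request33,project=project7,server=app01 aa=42i,value=1234i
      // (tag/field ordering and the trailing timestamp depend on the influxdb-java version)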
println(p.lineProtocol())
p
}
val batch = BatchPoints
.database(db)
.retentionPolicy("default")
.consistency(ConsistencyLevel.ALL)
.time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
.build()
points foreach batch.point
influxDB.write(batch)
}
sendStats()
Observable.interval(10 seconds).subscribe(
onNext = _ => sendStats(),
onError = error => {
error.printStackTrace()
}
)
while (true) {
Thread.sleep(1000)
}
}
| geoand/mongo-metrics-reporter | src/main/scala/de/commercetools/graphite/InfluxDBTest.scala | Scala | apache-2.0 | 1,430 |
package com.eevolution.context.dictionary.domain.api.service
import com.eevolution.context.dictionary._
import com.eevolution.context.dictionary.domain.model.Tenant
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/e-Evolution
* Created by [email protected] , www.e-evolution.com
*/
/**
* Tenant Service
*/
trait TenantService extends api.Service[Tenant, Int] {
//Definition
} | adempiere/ADReactiveSystem | dictionary-api/src/main/scala/com/eevolution/context/dictionary/domain/api/service/TenantService.scala | Scala | gpl-3.0 | 1,183 |
//package test
// scalastyle:off println
package org.apache.spark.examples.mllib
import java.text.BreakIterator
import scala.collection.mutable
import scopt.OptionParser
import org.apache.log4j.{ Level, Logger }
import org.apache.spark.{ SparkContext, SparkConf }
import org.apache.spark.mllib.clustering.{ EMLDAOptimizer, OnlineLDAOptimizer, DistributedLDAModel, LDA }
import org.apache.spark.mllib.linalg.{ Vector, Vectors }
import org.apache.spark.rdd.RDD
/**
* An example Latent Dirichlet Allocation (LDA) app. Run with
* {{{
* ./bin/run-example mllib.LDAExample [options] <input>
* }}}
* If you use it as a template to create your own app, please use `spark-submit` to submit your app.
*/
object LDAExample {
private case class Params(
input: Seq[String] = Seq.empty,
k: Int = 20,
maxIterations: Int = 10,
docConcentration: Double = -1,
topicConcentration: Double = -1,
vocabSize: Int = 10000,
stopwordFile: String = "",
algorithm: String = "em",
checkpointDir: Option[String] = None,
checkpointInterval: Int = 10) extends AbstractParams[Params]
def main(args: Array[String]) {
val defaultParams = Params()
val parser = new OptionParser[Params]("LDAExample") {
head("LDAExample: an example LDA app for plain text data.")
opt[Int]("k")
.text(s"number of topics. default: ${defaultParams.k}")
.action((x, c) => c.copy(k = x))
opt[Int]("maxIterations")
.text(s"number of iterations of learning. default: ${defaultParams.maxIterations}")
.action((x, c) => c.copy(maxIterations = x))
opt[Double]("docConcentration")
.text(s"amount of topic smoothing to use (> 1.0) (-1=auto)." +
s" default: ${defaultParams.docConcentration}")
.action((x, c) => c.copy(docConcentration = x))
opt[Double]("topicConcentration")
.text(s"amount of term (word) smoothing to use (> 1.0) (-1=auto)." +
s" default: ${defaultParams.topicConcentration}")
.action((x, c) => c.copy(topicConcentration = x))
opt[Int]("vocabSize")
.text(s"number of distinct word types to use, chosen by frequency. (-1=all)" +
s" default: ${defaultParams.vocabSize}")
.action((x, c) => c.copy(vocabSize = x))
opt[String]("stopwordFile")
.text(s"filepath for a list of stopwords. Note: This must fit on a single machine." +
s" default: ${defaultParams.stopwordFile}")
.action((x, c) => c.copy(stopwordFile = x))
opt[String]("algorithm")
.text(s"inference algorithm to use. em and online are supported." +
s" default: ${defaultParams.algorithm}")
.action((x, c) => c.copy(algorithm = x))
opt[String]("checkpointDir")
.text(s"Directory for checkpointing intermediate results." +
s" Checkpointing helps with recovery and eliminates temporary shuffle files on disk." +
s" default: ${defaultParams.checkpointDir}")
.action((x, c) => c.copy(checkpointDir = Some(x)))
opt[Int]("checkpointInterval")
.text(s"Iterations between each checkpoint. Only used if checkpointDir is set." +
s" default: ${defaultParams.checkpointInterval}")
.action((x, c) => c.copy(checkpointInterval = x))
arg[String]("<input>...")
.text("input paths (directories) to plain text corpora." +
" Each text file line should hold 1 document.")
.unbounded()
.required()
.action((x, c) => c.copy(input = c.input :+ x))
}
parser.parse(args, defaultParams).map { params =>
run(params)
}.getOrElse {
parser.showUsageAsError
sys.exit(1)
}
}
private def run(params: Params) {
val conf = new SparkConf().setAppName(s"LDAExample with $params")
val sc = new SparkContext(conf)
Logger.getRootLogger.setLevel(Level.WARN)
// Load documents, and prepare them for LDA.
val preprocessStart = System.nanoTime()
val (corpus, vocabArray, actualNumTokens) =
preprocess(sc, params.input, params.vocabSize, params.stopwordFile)
corpus.cache()
val actualCorpusSize = corpus.count()
val actualVocabSize = vocabArray.size
val preprocessElapsed = (System.nanoTime() - preprocessStart) / 1e9
println()
println(s"Corpus summary:")
println(s"\\t Training set size: $actualCorpusSize documents")
println(s"\\t Vocabulary size: $actualVocabSize terms")
println(s"\\t Training set size: $actualNumTokens tokens")
println(s"\\t Preprocessing time: $preprocessElapsed sec")
println()
// Run LDA.
val lda = new LDA()
val optimizer = params.algorithm.toLowerCase match {
case "em" => new EMLDAOptimizer
// add (1.0 / actualCorpusSize) to MiniBatchFraction be more robust on tiny datasets.
case "online" => new OnlineLDAOptimizer().setMiniBatchFraction(0.05 + 1.0 / actualCorpusSize)
case _ => throw new IllegalArgumentException(
s"Only em, online are supported but got ${params.algorithm}.")
}
lda.setOptimizer(optimizer)
.setK(params.k)
.setMaxIterations(params.maxIterations)
.setDocConcentration(params.docConcentration)
.setTopicConcentration(params.topicConcentration)
.setCheckpointInterval(params.checkpointInterval)
if (params.checkpointDir.nonEmpty) {
sc.setCheckpointDir(params.checkpointDir.get)
}
val startTime = System.nanoTime()
val ldaModel = lda.run(corpus)
val elapsed = (System.nanoTime() - startTime) / 1e9
println(s"Finished training LDA model. Summary:")
println(s"\\t Training time: $elapsed sec")
if (ldaModel.isInstanceOf[DistributedLDAModel]) {
val distLDAModel = ldaModel.asInstanceOf[DistributedLDAModel]
val avgLogLikelihood = distLDAModel.logLikelihood / actualCorpusSize.toDouble
println(s"\\t Training data average log likelihood: $avgLogLikelihood")
println()
}
// Print the topics, showing the top-weighted terms for each topic.
val topicIndices = ldaModel.describeTopics(maxTermsPerTopic = 10)
val topics = topicIndices.map {
case (terms, termWeights) =>
terms.zip(termWeights).map { case (term, weight) => (vocabArray(term.toInt), weight) }
}
println(s"${params.k} topics:")
topics.zipWithIndex.foreach {
case (topic, i) =>
println(s"TOPIC $i")
topic.foreach {
case (term, weight) =>
println(s"$term\\t$weight")
}
println()
}
sc.stop()
}
/**
* Load documents, tokenize them, create vocabulary, and prepare documents as term count vectors.
* @return (corpus, vocabulary as array, total token count in corpus)
*/
private def preprocess(
sc: SparkContext,
paths: Seq[String],
vocabSize: Int,
stopwordFile: String): (RDD[(Long, Vector)], Array[String], Long) = {
// Get dataset of document texts
// One document per line in each text file. If the input consists of many small files,
// this can result in a large number of small partitions, which can degrade performance.
// In this case, consider using coalesce() to create fewer, larger partitions.
val textRDD: RDD[String] = sc.textFile(paths.mkString(","))
// Split text into words
val tokenizer = new SimpleTokenizer(sc, stopwordFile)
val tokenized: RDD[(Long, IndexedSeq[String])] = textRDD.zipWithIndex().map {
case (text, id) =>
id -> tokenizer.getWords(text)
}
tokenized.cache()
// Counts words: RDD[(word, wordCount)]
val wordCounts: RDD[(String, Long)] = tokenized
.flatMap { case (_, tokens) => tokens.map(_ -> 1L) }
.reduceByKey(_ + _)
wordCounts.cache()
val fullVocabSize = wordCounts.count()
// Select vocab
// (vocab: Map[word -> id], total tokens after selecting vocab)
val (vocab: Map[String, Int], selectedTokenCount: Long) = {
val tmpSortedWC: Array[(String, Long)] = if (vocabSize == -1 || fullVocabSize <= vocabSize) {
// Use all terms
wordCounts.collect().sortBy(-_._2)
} else {
// Sort terms to select vocab
wordCounts.sortBy(_._2, ascending = false).take(vocabSize)
}
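      // e.g. word counts [("the", 100L), ("cat", 40L)] yield (Map("the" -> 0, "cat" -> 1), 140L)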
(tmpSortedWC.map(_._1).zipWithIndex.toMap, tmpSortedWC.map(_._2).sum)
}
val documents = tokenized.map {
case (id, tokens) =>
// Filter tokens by vocabulary, and create word count vector representation of document.
val wc = new mutable.HashMap[Int, Int]()
tokens.foreach { term =>
if (vocab.contains(term)) {
val termIndex = vocab(term)
wc(termIndex) = wc.getOrElse(termIndex, 0) + 1
}
}
val indices = wc.keys.toArray.sorted
val values = indices.map(i => wc(i).toDouble)
val sb = Vectors.sparse(vocab.size, indices, values)
(id, sb)
}
val vocabArray = new Array[String](vocab.size)
vocab.foreach { case (term, i) => vocabArray(i) = term }
(documents, vocabArray, selectedTokenCount)
}
}
/**
* Simple Tokenizer.
*
* TODO: Formalize the interface, and make this a public class in mllib.feature
*/
private class SimpleTokenizer(sc: SparkContext, stopwordFile: String) extends Serializable {
private val stopwords: Set[String] = if (stopwordFile.isEmpty) {
Set.empty[String]
} else {
val stopwordText = sc.textFile(stopwordFile).collect()
    stopwordText.flatMap(_.stripMargin.split("\\s+")).toSet
}
// Matches sequences of Unicode letters
  private val allWordRegex = "^(\\p{L}*)$".r
// Ignore words shorter than this length.
private val minWordLength = 3
def getWords(text: String): IndexedSeq[String] = {
val words = new mutable.ArrayBuffer[String]()
// Use Java BreakIterator to tokenize text into words.
val wb = BreakIterator.getWordInstance
wb.setText(text)
// current,end index start,end of each word
var current = wb.first()
var end = wb.next()
while (end != BreakIterator.DONE) {
// Convert to lowercase
val word: String = text.substring(current, end).toLowerCase
// Remove short words and strings that aren't only letters
word match {
case allWordRegex(w) if w.length >= minWordLength && !stopwords.contains(w) =>
words += w
case _ =>
}
current = end
try {
end = wb.next()
} catch {
case e: Exception =>
// Ignore remaining text in line.
// This is a known bug in BreakIterator (for some Java versions),
// which fails when it sees certain characters.
end = BreakIterator.DONE
}
}
words
}
}
// scalastyle:on println
| HGladiator/MyCodes | Scala/lda.scala | Scala | mit | 10660
/*
*************************************************************************************
* Copyright 2013 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.web.services
import scala.xml.NodeSeq
import com.normation.rudder.domain.policies.DirectiveId
import bootstrap.liftweb.RudderConfig
import net.liftweb.http.S
import com.normation.rudder.web.model.JsInitContextLinkUtil._
import scala.xml.Text
import net.liftweb.http.SHtml
import com.normation.rudder.repository.RoNodeGroupRepository
import com.normation.rudder.domain.policies.RuleTarget
import com.normation.rudder.domain.policies.GroupTarget
import com.normation.rudder.repository.FullNodeGroupCategory
trait DiffItem[T] {
def display(implicit displayer : T => NodeSeq) : NodeSeq
}
case class Added[T](
value:T
) extends DiffItem[T] {
val newValue = Some(value)
def display(implicit displayer : T => NodeSeq) : NodeSeq =
<li style="background:none repeat scroll 0 0 #D6FFD6; list-style-type:none">
+ {displayer(value)}
</li>
}
case class Deleted[T](
value:T
) extends DiffItem[T] {
def display(implicit displayer : T => NodeSeq) : NodeSeq =
<li style="background:none repeat scroll 0 0 #FFD6D6; list-style-type:none">
- {displayer(value)}
</li>
}
case class Unchanged[T](
value:T
) extends DiffItem[T] {
def display(implicit displayer : T => NodeSeq) : NodeSeq =
<li style="list-style-type:none">
{displayer(value)}
</li>
}
// Not used yet, but for later use
case class Modified[T](
oldValue:T
, newValue:T
) extends DiffItem[T] {
private[this] val delete = Deleted(oldValue)
  private[this] val add = Added(newValue)
def display(implicit displayer : T => NodeSeq) : NodeSeq =
delete.display ++ add.display
}
object DiffDisplayer {
//Directive targets Displayer
private[this] val roDirectiveRepo = RudderConfig.roDirectiveRepository
private[this] implicit def displayDirective(directiveId: DirectiveId) = {
<span> Directive {createDirectiveLink(directiveId)}</span>
}
def displayDirectiveChangeList (
oldDirectives:Seq[DirectiveId]
, newDirectives:Seq[DirectiveId]
) : NodeSeq = {
    // First, find unchanged and deleted (no clean way found to make a 3-way partition)
val (unchanged,deleted) = oldDirectives.partition(newDirectives.contains)
// Get the added ones
val added = newDirectives.filterNot(unchanged.contains).map(Added(_))
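    // e.g. old = [d1, d2], new = [d2, d3]  =>  unchanged = [d2], deleted = [d1], added = [Added(d3)]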
val deletedMap = deleted.map(Deleted(_))
val unchangedMap = unchanged.map(Unchanged(_))
// Finally mix all maps together in one and display it
val changeMap:Seq[DiffItem[DirectiveId]] = deletedMap ++ unchangedMap ++ added
<ul style="padding-left:10px">
{ for {
change <- changeMap
} yield {
// Implicit used here (displayDirective)
change.display
} }
</ul>
}
//Node groups targets Displayer
private[this] val roNodeGroupRepository = RudderConfig.roNodeGroupRepository
  // Almost the same as displayDirectiveChangeList; see comments there for more details
def displayRuleTargets (
oldTargets:Seq[RuleTarget]
, newTargets:Seq[RuleTarget]
, groupLib: FullNodeGroupCategory
) : NodeSeq = {
implicit def displayNodeGroup(target: RuleTarget) : NodeSeq= {
target match {
case GroupTarget(nodeGroupId) =>
<span> Group {createGroupLink(nodeGroupId)}</span>
case x => groupLib.allTargets.get(x).map{ targetInfo =>
<span>
{targetInfo.name}
{if (targetInfo.isSystem) <span class="greyscala">(System)</span>}
</span>
}.getOrElse(<span> {x.target}</span>)
}
}
val (unchanged,deleted) = oldTargets.partition(newTargets.contains)
val added = newTargets.filterNot(unchanged.contains).map(Added(_))
val deletedMap = deleted.map(Deleted(_))
val unchangedMap = unchanged.map(Unchanged(_))
val changeMap:Seq[DiffItem[RuleTarget]] = deletedMap ++ unchangedMap ++ added
<ul style="padding-left:10px">
{ for {
change <- changeMap
} yield {
// Implicit used here (displayNodeGroup)
change.display
}
}
</ul>
}
} | jooooooon/rudder | rudder-web/src/main/scala/com/normation/rudder/web/services/DiffDisplayer.scala | Scala | agpl-3.0 | 5,805 |
/*
* NodeView.scala
* (Cord)
*
* Copyright (c) 2015-2020 Hanns Holger Rutz.
*
* This software is published under the GNU Lesser General Public License v2.1+
*
*
* For further information, please contact Hanns Holger Rutz at
* [email protected]
*/
package de.sciss.cord
package view
import de.sciss.cord.view.impl.ObjNodeViewImpl
import org.scalajs.dom
object NodeView {
/** Standard view for object nodes. */
def apply(parentView: PatcherView, obj: ObjNode): NodeView =
new ObjNodeViewImpl(parentView, obj, elemText = obj.contents)
}
trait NodeView extends View {
override def elem: Node
override def peer: dom.svg.G
def portLocation(port: Port): DoublePoint2D
}
| Sciss/Cord | src/main/scala/de/sciss/cord/view/NodeView.scala | Scala | lgpl-2.1 | 696 |
package fly.play.aws.auth
object UrlEncoder {
def encodePath(value: String) =
encode(value).replace("%2F", "/")
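
  // URLEncoder.encode performs HTML form encoding, so a few substitutions are applied
  // below to obtain RFC 3986 style percent-encoding (as expected for AWS request signing):
  // '+' becomes "%20", '*' becomes "%2A", and '~' is left unescaped.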
def encode(value: String): String =
java.net.URLEncoder.encode(value, "UTF-8")
.replace("+", "%20")
.replace("*", "%2A")
.replace("%7E", "~")
} | fooblahblah/play-aws-utils | src/main/scala/fly/play/aws/auth/UrlEncoder.scala | Scala | mit | 287 |
package org.jobimtext.coref.berkeley
object MathHelper {
private val logOf2 = Math.log(2.0)
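  /** Base-2 logarithm via change of base: log2(x) = ln(x) / ln(2); e.g. log2(8.0) ≈ 3.0 (up to rounding). */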
def log2(d: Double) = Math.log(d) / logOf2
}
| timfeu/berkeleycoref-thesaurus | src/main/java/org/jobimtext/coref/berkeley/MathHelper.scala | Scala | gpl-3.0 | 143 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.nisp.controllers
import com.google.inject.Inject
import play.api.http.{Status => HttpStatus}
import play.api.i18n.I18nSupport
import play.api.mvc.{Action, AnyContent, MessagesControllerComponents, Request}
import play.api.Logging
import play.twirl.api.Html
import uk.gov.hmrc.http.{HeaderCarrier, HttpClient, HttpReads, HttpResponse}
import uk.gov.hmrc.nisp.config.ApplicationConfig
import uk.gov.hmrc.nisp.views.html.{feedback, feedback_thankyou}
import uk.gov.hmrc.play.partials.{CachedStaticHtmlPartialRetriever, FormPartialRetriever, HeaderCarrierForPartialsConverter}
import uk.gov.hmrc.renderer.TemplateRenderer
import java.net.URLEncoder
import scala.concurrent.{ExecutionContext, Future}
class FeedbackController @Inject() (
applicationConfig: ApplicationConfig,
httpClient: HttpClient,
executionContext: ExecutionContext,
nispHeaderCarrierForPartialsConverter: HeaderCarrierForPartialsConverter,
mcc: MessagesControllerComponents,
feedbackThankYou: feedback_thankyou,
feedback: feedback
)(implicit
val formPartialRetriever: FormPartialRetriever,
val templateRenderer: TemplateRenderer,
val cachedStaticHtmlPartialRetriever: CachedStaticHtmlPartialRetriever,
executor: ExecutionContext
) extends NispFrontendController(mcc)
with I18nSupport
with Logging {
def contactFormReferer(implicit request: Request[AnyContent]): String = request.headers.get(REFERER).getOrElse("")
def localSubmitUrl(): String = routes.FeedbackController.submit.url
private val TICKET_ID = "ticketId"
private def feedbackFormPartialUrl(implicit request: Request[AnyContent]) =
s"${applicationConfig.contactFrontendPartialBaseUrl}/contact/beta-feedback/form/?submitUrl=${urlEncode(localSubmitUrl())}" +
s"&service=${urlEncode(applicationConfig.contactFormServiceIdentifier)}&referer=${urlEncode(contactFormReferer)}"
private def feedbackHmrcSubmitPartialUrl() =
s"${applicationConfig.contactFrontendPartialBaseUrl}/contact/beta-feedback/form?resubmitUrl=${urlEncode(localSubmitUrl())}"
private def feedbackThankYouPartialUrl(ticketId: String) =
s"${applicationConfig.contactFrontendPartialBaseUrl}/contact/beta-feedback/form/confirmation?ticketId=${urlEncode(ticketId)}"
def show: Action[AnyContent] = Action { implicit request =>
(request.session.get(REFERER), request.headers.get(REFERER)) match {
case (None, Some(ref)) =>
Ok(feedback(feedbackFormPartialUrl, None)).withSession(request.session + (REFERER -> ref))
case _ =>
Ok(feedback(feedbackFormPartialUrl, None))
}
}
def submit: Action[AnyContent] = Action.async { implicit request =>
request.body.asFormUrlEncoded
.map { formData =>
httpClient
.POSTForm[HttpResponse](feedbackHmrcSubmitPartialUrl(), formData)(
rds = PartialsFormReads.readPartialsForm,
hc = partialsReadyHeaderCarrier,
ec = executionContext
)
.map { resp =>
resp.status match {
case HttpStatus.OK =>
Redirect(routes.FeedbackController.showThankYou).withSession(request.session + (TICKET_ID -> resp.body))
case HttpStatus.BAD_REQUEST => BadRequest(feedback(feedbackFormPartialUrl, Some(Html(resp.body))))
case status => logger.warn(s"Unexpected status code from feedback form: $status"); InternalServerError
}
}
}
.getOrElse {
logger.warn("Trying to submit an empty feedback form")
Future.successful(InternalServerError)
}
}
def showThankYou: Action[AnyContent] = Action { implicit request =>
val ticketId = request.session.get(TICKET_ID).getOrElse("N/A")
val referer = request.session.get(REFERER).getOrElse("/")
Ok(feedbackThankYou(feedbackThankYouPartialUrl(ticketId), referer)).withSession(request.session - REFERER)
}
private def urlEncode(value: String) = URLEncoder.encode(value, "UTF-8")
private def partialsReadyHeaderCarrier(implicit request: Request[_]): HeaderCarrier = {
val hc1 = nispHeaderCarrierForPartialsConverter.headerCarrierEncryptingSessionCookieFromRequest(request)
nispHeaderCarrierForPartialsConverter.headerCarrierForPartialsToHeaderCarrier(hc1)
}
}
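/**
 * Pass-through HttpReads: hands the raw HttpResponse back unchanged for every status code,
 * so the controller above can branch on the status itself rather than having the default
 * readers turn non-2xx responses into exceptions (that default behaviour is assumed here).
 */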
object PartialsFormReads {
implicit val readPartialsForm: HttpReads[HttpResponse] = new HttpReads[HttpResponse] {
def read(method: String, url: String, response: HttpResponse): HttpResponse = response
}
}
| hmrc/nisp-frontend | app/uk/gov/hmrc/nisp/controllers/FeedbackController.scala | Scala | apache-2.0 | 5,123 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.api.scala
import java.util.concurrent.TimeUnit
import org.apache.flink.api.java.tuple.Tuple
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.AssignerWithPunctuatedWatermarks
import org.apache.flink.streaming.api.functions.sink.SinkFunction
import org.apache.flink.streaming.api.functions.source.SourceFunction
import org.apache.flink.streaming.api.scala.testutils.{CheckingIdentityRichAllWindowFunction, CheckingIdentityRichProcessAllWindowFunction, CheckingIdentityRichProcessWindowFunction, CheckingIdentityRichWindowFunction}
import org.apache.flink.streaming.api.watermark.Watermark
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import org.junit.Assert._
import org.junit.{Ignore, Test}
import scala.collection.mutable
class WindowFunctionITCase {
@Test
def testRichWindowFunction(): Unit = {
WindowFunctionITCase.testResults = mutable.MutableList()
CheckingIdentityRichWindowFunction.reset()
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
env.setParallelism(1)
val source1 = env.addSource(new SourceFunction[(String, Int)]() {
def run(ctx: SourceFunction.SourceContext[(String, Int)]) {
ctx.collect(("a", 0))
ctx.collect(("a", 1))
ctx.collect(("a", 2))
ctx.collect(("b", 3))
ctx.collect(("b", 4))
ctx.collect(("b", 5))
ctx.collect(("a", 6))
ctx.collect(("a", 7))
ctx.collect(("a", 8))
// source is finite, so it will have an implicit MAX watermark when it finishes
}
def cancel() {}
}).assignTimestampsAndWatermarks(new WindowFunctionITCase.Tuple2TimestampExtractor)
source1
.keyBy(0)
.window(TumblingEventTimeWindows.of(Time.of(3, TimeUnit.MILLISECONDS)))
.apply(new CheckingIdentityRichWindowFunction[(String, Int), Tuple, TimeWindow]())
.addSink(new SinkFunction[(String, Int)]() {
def invoke(value: (String, Int)) {
WindowFunctionITCase.testResults += value.toString
}
})
env.execute("RichWindowFunction Test")
val expectedResult = mutable.MutableList(
"(a,0)", "(a,1)", "(a,2)", "(a,6)", "(a,7)", "(a,8)",
"(b,3)", "(b,4)", "(b,5)")
assertEquals(expectedResult.sorted, WindowFunctionITCase.testResults.sorted)
CheckingIdentityRichWindowFunction.checkRichMethodCalls()
}
@Test
def testRichProcessWindowFunction(): Unit = {
WindowFunctionITCase.testResults = mutable.MutableList()
CheckingIdentityRichProcessWindowFunction.reset()
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
env.setParallelism(1)
val source1 = env.addSource(new SourceFunction[(String, Int)]() {
def run(ctx: SourceFunction.SourceContext[(String, Int)]) {
ctx.collect(("a", 0))
ctx.collect(("a", 1))
ctx.collect(("a", 2))
ctx.collect(("b", 3))
ctx.collect(("b", 4))
ctx.collect(("b", 5))
ctx.collect(("a", 6))
ctx.collect(("a", 7))
ctx.collect(("a", 8))
// source is finite, so it will have an implicit MAX watermark when it finishes
}
def cancel() {}
}).assignTimestampsAndWatermarks(new WindowFunctionITCase.Tuple2TimestampExtractor)
source1
.keyBy(0)
.window(TumblingEventTimeWindows.of(Time.of(3, TimeUnit.MILLISECONDS)))
.process(new CheckingIdentityRichProcessWindowFunction[(String, Int), Tuple, TimeWindow]())
.addSink(new SinkFunction[(String, Int)]() {
def invoke(value: (String, Int)) {
WindowFunctionITCase.testResults += value.toString
}
})
env.execute("RichProcessWindowFunction Test")
val expectedResult = mutable.MutableList(
"(a,0)", "(a,1)", "(a,2)", "(a,6)", "(a,7)", "(a,8)",
"(b,3)", "(b,4)", "(b,5)")
assertEquals(expectedResult.sorted, WindowFunctionITCase.testResults.sorted)
CheckingIdentityRichProcessWindowFunction.checkRichMethodCalls()
}
@Test
def testRichAllWindowFunction(): Unit = {
WindowFunctionITCase.testResults = mutable.MutableList()
CheckingIdentityRichAllWindowFunction.reset()
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
env.setParallelism(1)
val source1 = env.addSource(new SourceFunction[(String, Int)]() {
def run(ctx: SourceFunction.SourceContext[(String, Int)]) {
ctx.collect(("a", 0))
ctx.collect(("a", 1))
ctx.collect(("a", 2))
ctx.collect(("b", 3))
ctx.collect(("b", 4))
ctx.collect(("b", 5))
ctx.collect(("a", 6))
ctx.collect(("a", 7))
ctx.collect(("a", 8))
// source is finite, so it will have an implicit MAX watermark when it finishes
}
def cancel() {}
}).assignTimestampsAndWatermarks(new WindowFunctionITCase.Tuple2TimestampExtractor)
source1
.windowAll(TumblingEventTimeWindows.of(Time.of(3, TimeUnit.MILLISECONDS)))
.apply(new CheckingIdentityRichAllWindowFunction[(String, Int), TimeWindow]())
.addSink(new SinkFunction[(String, Int)]() {
def invoke(value: (String, Int)) {
WindowFunctionITCase.testResults += value.toString
}
})
env.execute("RichAllWindowFunction Test")
val expectedResult = mutable.MutableList(
"(a,0)", "(a,1)", "(a,2)", "(a,6)", "(a,7)", "(a,8)",
"(b,3)", "(b,4)", "(b,5)")
assertEquals(expectedResult.sorted, WindowFunctionITCase.testResults.sorted)
CheckingIdentityRichAllWindowFunction.checkRichMethodCalls()
}
@Test
def testRichProcessAllWindowFunction(): Unit = {
WindowFunctionITCase.testResults = mutable.MutableList()
CheckingIdentityRichProcessAllWindowFunction.reset()
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
env.setParallelism(1)
val source1 = env.addSource(new SourceFunction[(String, Int)]() {
def run(ctx: SourceFunction.SourceContext[(String, Int)]) {
ctx.collect(("a", 0))
ctx.collect(("a", 1))
ctx.collect(("a", 2))
ctx.collect(("b", 3))
ctx.collect(("b", 4))
ctx.collect(("b", 5))
ctx.collect(("a", 6))
ctx.collect(("a", 7))
ctx.collect(("a", 8))
// source is finite, so it will have an implicit MAX watermark when it finishes
}
def cancel() {}
}).assignTimestampsAndWatermarks(new WindowFunctionITCase.Tuple2TimestampExtractor)
source1
.windowAll(TumblingEventTimeWindows.of(Time.of(3, TimeUnit.MILLISECONDS)))
.process(new CheckingIdentityRichProcessAllWindowFunction[(String, Int), TimeWindow]())
.addSink(new SinkFunction[(String, Int)]() {
def invoke(value: (String, Int)) {
WindowFunctionITCase.testResults += value.toString
}
})
    env.execute("RichProcessAllWindowFunction Test")
val expectedResult = mutable.MutableList(
"(a,0)", "(a,1)", "(a,2)", "(a,6)", "(a,7)", "(a,8)",
"(b,3)", "(b,4)", "(b,5)")
assertEquals(expectedResult.sorted, WindowFunctionITCase.testResults.sorted)
CheckingIdentityRichProcessAllWindowFunction.checkRichMethodCalls()
}
}
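/**
 * Shared fixtures for the tests above: the sinks append their output to `testResults`, and
 * the timestamp extractor uses each element's Int value as its event timestamp while
 * emitting a punctuated watermark one millisecond behind every element.
 */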
object WindowFunctionITCase {
private var testResults: mutable.MutableList[String] = null
private class Tuple2TimestampExtractor extends AssignerWithPunctuatedWatermarks[(String, Int)] {
private var currentTimestamp = -1L
override def extractTimestamp(element: (String, Int), previousTimestamp: Long): Long = {
currentTimestamp = element._2
currentTimestamp
}
def checkAndGetNextWatermark(
lastElement: (String, Int),
extractedTimestamp: Long): Watermark = {
new Watermark(lastElement._2 - 1)
}
}
}
| DieBauer/flink | flink-streaming-scala/src/test/scala/org/apache/flink/streaming/api/scala/WindowFunctionITCase.scala | Scala | apache-2.0 | 8,996 |
package com.swissguard.services
import javax.inject.{Inject, Singleton}
import com.twitter.util.Future
import com.swissguard.repositories.UserRepository
import com.swissguard.authentication.thriftscala.Claims
import com.swissguard.tokenizer.Tokenizer
@Singleton
class AuthenticationService @Inject()(
userRepository: UserRepository,
tokenizer: Tokenizer
) {
def validate(token: String): Future[Boolean] = {
Future value tokenizer.validate(token)
}
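  // Note: the match below on Map[String, String] is unchecked at runtime (type erasure); it
  // relies on the tokenizer always producing a string-to-string payload.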
def claimsForToken(token: String): Future[Claims] = {
tokenizer.getPayloadForToken(token).getOrElse(None) match {
case Some(payload: Map[String,String]) => Future value Claims (
userId = payload("userId"),
username = payload("userName"),
claims = List()
)
case _ => Future exception new Exception("No payload")
}
}
}
| divanvisagie/swiss-guard | authentication/src/main/scala/com/swissguard/services/AuthenticationService.scala | Scala | apache-2.0 | 837 |
package com.teamisotope.techexpansion.proxy
import net.minecraftforge.fml.common.event._
class ServerProxy extends CommonProxy {
override def preInit(event: FMLPreInitializationEvent): Unit = {
super.preInit(event)
}
override def init(event: FMLInitializationEvent): Unit = {
super.init(event)
}
override def postInit(event: FMLPostInitializationEvent): Unit = {
super.postInit(event)
}
}
| collaborationmods/TechExpansion | src/main/scala/com/teamisotope/techexpansion/proxy/ServerProxy.scala | Scala | gpl-3.0 | 419 |
import scala.annotation.compileTimeOnly
class C(val s: String) extends AnyVal {
@compileTimeOnly("error")
def error = ???
}
| loskutov/intellij-scala | testdata/scalacTests/pos/t8498.scala | Scala | apache-2.0 | 129 |
/*
* Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package play.routes.compiler
import play.twirl.api.{ Format, BufferedContent }
import scala.collection.immutable
/**
* Twirl scala content type
*/
class ScalaContent(elements: immutable.Seq[ScalaContent], text: String) extends BufferedContent[ScalaContent](elements, text) {
def this(text: String) = this(Nil, text)
def this(elements: immutable.Seq[ScalaContent]) = this(elements, "")
def contentType = "application/scala"
}
/**
* Twirl Scala format
*/
object ScalaFormat extends Format[ScalaContent] {
def raw(text: String) = new ScalaContent(text)
def escape(text: String) = new ScalaContent(text)
val empty = new ScalaContent(Nil)
def fill(elements: immutable.Seq[ScalaContent]) = new ScalaContent(elements)
} | jyotikamboj/container | pf-framework/src/routes-compiler/src/main/scala/play/routes/compiler/ScalaFormat.scala | Scala | mit | 812 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.keras.nn
import com.intel.analytics.bigdl.keras.KerasBaseSpec
import com.intel.analytics.bigdl.dllib.nn.abstractnn.AbstractModule
import com.intel.analytics.bigdl.dllib.nn.internal.{Permute, Sequential => KSequential}
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.Shape
import com.intel.analytics.bigdl.dllib.utils.serializer.ModuleSerializationTest
import scala.util.Random
class PermuteSpec extends KerasBaseSpec {
"Permute" should "be the same as Keras" in {
val kerasCode =
"""
|input_tensor = Input(shape=[3, 4, 5, 6])
|input = np.random.random([2, 3, 4, 5, 6])
|output_tensor = Permute((3, 1, 4, 2))(input_tensor)
|model = Model(input=input_tensor, output=output_tensor)
""".stripMargin
val seq = KSequential[Float]()
val layer = Permute[Float](Array(3, 1, 4, 2), inputShape = Shape(3, 4, 5, 6))
seq.add(layer)
seq.getOutputShape().toSingle().toArray should be (Array(-1, 5, 3, 6, 4))
checkOutputAndGrad(seq.asInstanceOf[AbstractModule[Tensor[Float], Tensor[Float], Float]],
kerasCode)
}
}
class PermuteSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
val layer = Permute[Float](Array(3, 1, 4, 2), inputShape = Shape(3, 4, 5, 6))
layer.build(Shape(2, 3, 4, 5, 6))
val input = Tensor[Float](2, 3, 4, 5, 6).apply1(_ => Random.nextFloat())
runSerializationTest(layer, input)
}
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/nn/keras/nn/PermuteSpec.scala | Scala | apache-2.0 | 2,100 |
import org.apache.solr.client.solrj.SolrClient
import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer
import org.apache.solr.client.solrj.request.FieldAnalysisRequest
import org.apache.solr.common.util.NamedList
import org.apache.solr.core.CoreContainer
import java.util.Collections
import scala.Console._
object SynonymGraph {
var server: SolrClient = _
def main(a: Array[String]): Unit = {
try {
val solrDir = SynonymGraph.getClass.getResource("/solr").getPath
val container = new CoreContainer(solrDir)
container.load()
server = new EmbeddedSolrServer(container, "synonym-graph")
val resp: NamedList[Object] = server.request(new FieldAnalysisRequest().setFieldTypes(Collections.singletonList("text")).setFieldValue("teh huge sofa"))
val it = resp.iterator()
while (it.hasNext) {
println(it.next())
}
} catch {
case e: Exception => println(e)
}
    finally {
      // guard against initialization failing before `server` was assigned
      if (server != null) server.close()
    }
}
}
| MysterionRise/information-retrieval-adventure | lucene6/src/main/scala/SynonymGraph.scala | Scala | gpl-2.0 | 989 |
/*                                                                      *\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2015, Gary Keorkunian **
** **
\*                                                                      */
package squants.motion
import squants._
import squants.energy.Joules
import squants.mass.{ Kilograms, Pounds }
import squants.space.SquareMeters
import squants.time.{ Seconds, TimeDerivative, TimeIntegral }
/**
* @author garyKeorkunian
* @since 0.1
*
* @param value Double
*/
final class Force private (val value: Double, val unit: ForceUnit)
extends Quantity[Force]
with TimeDerivative[Momentum] with TimeIntegral[Yank] {
def dimension = Force
protected[squants] def timeIntegrated = NewtonSeconds(toNewtons)
protected def timeDerived = NewtonsPerSecond(toNewtons)
override def time = Seconds(1)
/* This could also be Torque, as Energy(Work) and Torque are dimensionally equivalent */
def *(that: Length): Energy = Joules(this.toNewtons * that.toMeters)
def /(that: Length) = ??? // return SurfaceTension
def /(that: Mass): Acceleration = MetersPerSecondSquared(this.toNewtons / that.toKilograms)
def /(that: Acceleration): Mass = Kilograms(this.toNewtons / that.toMetersPerSecondSquared)
def /(that: Area): Pressure = Pascals(this.toNewtons / that.toSquareMeters)
def /(that: Pressure): Area = SquareMeters(this.toNewtons / that.toPascals)
def toNewtons = to(Newtons)
def toKilogramForce = to(KilogramForce)
def toPoundForce = to(PoundForce)
def toKiloElectronVoltsPerMicrometer = to(KiloElectronVoltsPerMicrometer)
def toMegaElectronVoltsPerCentimeter = to(MegaElectronVoltsPerCentimeter)
}
object Force extends Dimension[Force] {
private[motion] def apply[A](n: A, unit: ForceUnit)(implicit num: Numeric[A]) = new Force(num.toDouble(n), unit)
def apply(value: Any) = parse(value)
def name = "Force"
def primaryUnit = Newtons
def siUnit = Newtons
def units = Set(
Newtons, KilogramForce, PoundForce,
KiloElectronVoltsPerMicrometer, MegaElectronVoltsPerCentimeter)
}
trait ForceUnit extends UnitOfMeasure[Force] with UnitConverter {
def apply[A](n: A)(implicit num: Numeric[A]) = Force(n, this)
}
object Newtons extends ForceUnit with PrimaryUnit with SiUnit {
val symbol = "N"
}
object KilogramForce extends ForceUnit {
val symbol = "kgf"
val conversionFactor = MetersPerSecondSquared.conversionFactor * EarthGravities.conversionFactor
}
object PoundForce extends ForceUnit {
val symbol = "lbf"
val conversionFactor = Pounds.conversionFactor * KilogramForce.conversionFactor / Kilograms.conversionFactor
}
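// The two electron-volt based units below appear to use the 2010 CODATA elementary charge
// (1 keV = 1.602176565e-16 J, 1 MeV = 1.602176565e-13 J) divided by the length unit to give newtons.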
object KiloElectronVoltsPerMicrometer extends ForceUnit {
val symbol = "keV/μm"
val conversionFactor = 1.602176565e-16 / MetricSystem.Micro
}
object MegaElectronVoltsPerCentimeter extends ForceUnit {
val symbol = "MeV/cm"
val conversionFactor = 1.602176565e-13 / MetricSystem.Centi
}
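/**
 * Usage sketch (illustrative only, assuming the squants DSL is in scope):
 * {{{
 *   import squants.motion.ForceConversions._
 *   val thrust = 12.newtons
 *   thrust.toPoundForce    // same quantity expressed in lbf
 * }}}
 */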
object ForceConversions {
lazy val newton = Newtons(1)
lazy val kilogramForce = KilogramForce(1)
lazy val poundForce = PoundForce(1)
lazy val kiloElectronVoltsPerMicrometer = KiloElectronVoltsPerMicrometer(1)
lazy val megaElectronVoltsPerCentimeter = MegaElectronVoltsPerCentimeter(1)
implicit class ForceConversions[A](n: A)(implicit num: Numeric[A]) {
def newtons = Newtons(n)
def kilogramForce = KilogramForce(n)
def poundForce = PoundForce(n)
def lbf = PoundForce(n)
def kiloElectronVoltsPerMicrometer = KiloElectronVoltsPerMicrometer(n)
def megaElectronVoltsPerCentimeter = MegaElectronVoltsPerCentimeter(n)
}
implicit object ForceNumeric extends AbstractQuantityNumeric[Force](Force.primaryUnit)
}
| typelevel/squants | shared/src/main/scala/squants/motion/Force.scala | Scala | apache-2.0 | 3,970 |
/*
* Copyright 2012-2013 Eligotech BV.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.eligosource.eventsourced.journal.common
import java.nio.ByteBuffer
package object util {
private [journal] implicit val ordering = new Ordering[Key] {
def compare(x: Key, y: Key) =
if (x.processorId != y.processorId)
x.processorId - y.processorId
else if (x.initiatingChannelId != y.initiatingChannelId)
x.initiatingChannelId - y.initiatingChannelId
else if (x.sequenceNr != y.sequenceNr)
math.signum(x.sequenceNr - y.sequenceNr).toInt
else if (x.confirmingChannelId != y.confirmingChannelId)
x.confirmingChannelId - y.confirmingChannelId
else 0
}
implicit def counterToBytes(ctr: Long): Array[Byte] =
ByteBuffer.allocate(8).putLong(ctr).array
implicit def counterFromBytes(bytes: Array[Byte]): Long =
ByteBuffer.wrap(bytes).getLong
}
| eligosource/eventsourced | es-journal/es-journal-common/src/main/scala/org/eligosource/eventsourced/journal/common/util/package.scala | Scala | apache-2.0 | 1,431 |
object test {
abstract class Bar {
type T
def bar: Unit
}
new Bar {
type T = Int
def bar = ()
}.bar
}
| yusuke2255/dotty | tests/untried/pos/t615.scala | Scala | bsd-3-clause | 127 |
package au.com.agiledigital.rest.tests
import java.util.concurrent.TimeUnit
import org.specs2.control.NoLanguageFeatures
import org.specs2.mock.Mockito
import org.specs2.mutable.Specification
import scala.concurrent.duration.FiniteDuration
/**
* A base trait for Specs2 specifications.
*/
trait BaseSpec extends Specification with Mockito with NoLanguageFeatures {
/**
* Default number to retry when polling for an expected result (e.g. with eventually).
*/
val defaultRetries: Int = 5
/**
* Default amount of time to wait between retries when polling for an expected result (e.g. with eventually).
*/
val defaultTimeout: FiniteDuration = new FiniteDuration(2, TimeUnit.SECONDS)
/**
* Default amount of time to wait for a result (e.g. when dealing with Futures).
*/
val defaultAwait: FiniteDuration = defaultTimeout * defaultRetries.toLong
/**
* Default number of seconds to wait for a result (e.g. when dealing with Futures).
*/
val defaultDurationInSeconds: Long = defaultAwait.toSeconds
}
| agiledigital/play-rest-support | testkit/src/main/scala/au/com/agiledigital/rest/tests/BaseSpec.scala | Scala | apache-2.0 | 1,052 |
package sample.blog.processes
import akka.actor.ActorLogging
import akka.persistence.{ AtLeastOnceDelivery, PersistentActor }
object Table2 {
sealed trait Command
final case class Mark(id: Long) extends Command
sealed trait Event
case class WatermarkMark(id: Long) extends Event
sealed trait Reply
case class Marked(id: Long) extends Reply
}
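/**
 * Sample persistent actor showing the persistAsync + deferAsync ordering guarantee: the
 * deferred handler runs only after every preceding persistAsync handler has completed, so
 * the reply goes out once the event is written. Note that receiveRecover is left as ??? in
 * this sample, so replaying previously persisted events would fail.
 */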
class Table2 extends PersistentActor with AtLeastOnceDelivery with ActorLogging {
override val persistenceId: String = "aadff"
override def receiveRecover: Receive = ???
  // Why persistAsync + deferAsync: to guarantee ordering between the two async callbacks
  override def receiveCommand: Receive = {
    case Table2.Mark(id) ⇒
      // persist the event asynchronously; state is updated in the handler once the write completes
      persistAsync(Table2.WatermarkMark(id)) { marker ⇒
        // update state ...
      }
//executes once all persistAsync handlers done
deferAsync(Table2.Marked(id)) { marked ⇒
sender() ! marked
}
//ordering between persistAsync and defer is guaranteed
}
}
| haghard/akka-pq | src/main/scala/sample/blog/processes/Table2.scala | Scala | apache-2.0 | 974 |
package HackerRank.Training.BasicProgramming
import java.io.{ByteArrayInputStream, IOException, InputStream, PrintWriter}
import java.util.InputMismatchException
import scala.collection.generic.CanBuildFrom
import scala.language.higherKinds
/**
* Copyright (c) 2017 A. Roberto Fischer
*
* @author A. Roberto Fischer <[email protected]> on 5/28/2017
*/
private[this] object ServiceLane {
import Reader._
import Writer._
private[this] val TEST_INPUT: Option[String] = None
//------------------------------------------------------------------------------------------//
// Solution
//------------------------------------------------------------------------------------------//
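  // Approach (as implemented below): read the n lane widths, then for each of the t queries
  // (from, to) report the minimum width on that segment; the slice end is to + 1 because
  // Vector.slice excludes its upper bound.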
private[this] def solve(): Unit = {
val n = nextInt()
val t = nextInt()
val serviceLane = nextInt[Vector](n)
val cases = next[(Int, Int), Vector]((nextInt(), nextInt()), t)
cases.foreach { case (from, to) =>
println(serviceLane.slice(from, to + 1).min)
}
}
//------------------------------------------------------------------------------------------//
// Run
//------------------------------------------------------------------------------------------//
@throws[Exception]
def main(args: Array[String]): Unit = {
val s = System.currentTimeMillis
solve()
flush()
if (TEST_INPUT.isDefined) System.out.println(System.currentTimeMillis - s + "ms")
}
//------------------------------------------------------------------------------------------//
// Input
//------------------------------------------------------------------------------------------//
private[this] final object Reader {
private[this] implicit val in: InputStream = TEST_INPUT.fold(System.in)(s => new ByteArrayInputStream(s.getBytes))
def nextSeq[T, Coll[_]](reader: => Seq[T], n: Int)
(implicit cbf: CanBuildFrom[Coll[T], T, Coll[T]]): Coll[T] = {
val builder = cbf()
builder.sizeHint(n)
for (_ <- 0 until n) {
builder ++= reader
}
builder.result()
}
def next[T, Coll[_]](reader: => T, n: Int)
(implicit cbf: CanBuildFrom[Coll[T], T, Coll[T]]): Coll[T] = {
val builder = cbf()
builder.sizeHint(n)
for (_ <- 0 until n) {
builder += reader
}
builder.result()
}
def nextWithIndex[T, Coll[_]](reader: => T, n: Int)
(implicit cbf: CanBuildFrom[Coll[(T, Int)], (T, Int), Coll[(T, Int)]]): Coll[(T, Int)] = {
val builder = cbf()
builder.sizeHint(n)
for (i <- 0 until n) {
builder += ((reader, i))
}
builder.result()
}
def nextDouble[Coll[_]]
(n: Int)(implicit cbf: CanBuildFrom[Coll[Double], Double, Coll[Double]]): Coll[Double] = {
val builder = cbf()
builder.sizeHint(n)
for (_ <- 0 until n) {
builder += nextDouble()
}
builder.result()
}
def nextDoubleWithIndex[Coll[_]]
(n: Int)(implicit cbf: CanBuildFrom[Coll[(Double, Int)], (Double, Int), Coll[(Double, Int)]]): Coll[(Double, Int)] = {
val builder = cbf()
builder.sizeHint(n)
for (i <- 0 until n) {
builder += ((nextDouble(), i))
}
builder.result()
}
def nextChar[Coll[_]]
(n: Int)(implicit cbf: CanBuildFrom[Coll[Char], Char, Coll[Char]]): Coll[Char] = {
val builder = cbf()
builder.sizeHint(n)
var b = skip
var p = 0
while (p < n && !isSpaceChar(b)) {
builder += b.toChar
p += 1
b = readByte().toInt
}
builder.result()
}
def nextCharWithIndex[Coll[_]]
(n: Int)(implicit cbf: CanBuildFrom[Coll[(Char, Int)], (Char, Int), Coll[(Char, Int)]]): Coll[(Char, Int)] = {
val builder = cbf()
builder.sizeHint(n)
var b = skip
var p = 0
while (p < n && !isSpaceChar(b)) {
builder += ((b.toChar, p))
p += 1
b = readByte().toInt
}
builder.result()
}
def nextInt[Coll[_]]
(n: Int)(implicit cbf: CanBuildFrom[Coll[Int], Int, Coll[Int]]): Coll[Int] = {
val builder = cbf()
builder.sizeHint(n)
for (_ <- 0 until n) {
builder += nextInt()
}
builder.result()
}
def nextIntWithIndex[Coll[_]]
(n: Int)(implicit cbf: CanBuildFrom[Coll[(Int, Int)], (Int, Int), Coll[(Int, Int)]]): Coll[(Int, Int)] = {
val builder = cbf()
builder.sizeHint(n)
for (i <- 0 until n) {
builder += ((nextInt(), i))
}
builder.result()
}
def nextLong[Coll[_]]
(n: Int)(implicit cbf: CanBuildFrom[Coll[Long], Long, Coll[Long]]): Coll[Long] = {
val builder = cbf()
builder.sizeHint(n)
for (_ <- 0 until n) {
builder += nextLong()
}
builder.result()
}
def nextLongWithIndex[Coll[_]]
(n: Int)(implicit cbf: CanBuildFrom[Coll[(Long, Int)], (Long, Int), Coll[(Long, Int)]]): Coll[(Long, Int)] = {
val builder = cbf()
builder.sizeHint(n)
for (i <- 0 until n) {
builder += ((nextLong(), i))
}
builder.result()
}
def nextString[Coll[_]]
(n: Int)(implicit cbf: CanBuildFrom[Coll[String], String, Coll[String]]): Coll[String] = {
val builder = cbf()
builder.sizeHint(n)
for (_ <- 0 until n) {
builder += nextString()
}
builder.result()
}
def nextStringWithIndex[Coll[_]]
(n: Int)(implicit cbf: CanBuildFrom[Coll[(String, Int)], (String, Int), Coll[(String, Int)]]): Coll[(String, Int)] = {
val builder = cbf()
builder.sizeHint(n)
for (i <- 0 until n) {
builder += ((nextString(), i))
}
builder.result()
}
def nextMultiLine(n: Int, m: Int): Array[Array[Char]] = {
val map = new Array[Array[Char]](n)
var i = 0
while (i < n) {
map(i) = nextChar[Array](m)
i += 1
}
map
}
def nextDouble(): Double = nextString().toDouble
def nextChar(): Char = skip.toChar
def nextString(): String = {
var b = skip
val sb = new java.lang.StringBuilder
while (!isSpaceChar(b)) {
sb.appendCodePoint(b)
b = readByte().toInt
}
sb.toString
}
def nextInt(): Int = {
var num = 0
var b = 0
var minus = false
while ( {
b = readByte().toInt
b != -1 && !((b >= '0' && b <= '9') || b == '-')
}) {}
if (b == '-') {
minus = true
b = readByte().toInt
}
while (true) {
if (b >= '0' && b <= '9') {
num = num * 10 + (b - '0')
} else {
if (minus) return -num else return num
}
b = readByte().toInt
}
throw new IOException("Read Int")
}
def nextLong(): Long = {
var num = 0L
var b = 0
var minus = false
while ( {
b = readByte().toInt
b != -1 && !((b >= '0' && b <= '9') || b == '-')
}) {}
if (b == '-') {
minus = true
b = readByte().toInt
}
while (true) {
if (b >= '0' && b <= '9') {
num = num * 10 + (b - '0')
} else {
if (minus) return -num else return num
}
b = readByte().toInt
}
throw new IOException("Read Long")
}
private[this] val inputBuffer = new Array[Byte](1024)
private[this] var lenBuffer = 0
private[this] var ptrBuffer = 0
private[this] def readByte()(implicit in: java.io.InputStream): Byte = {
if (lenBuffer == -1) throw new InputMismatchException
if (ptrBuffer >= lenBuffer) {
ptrBuffer = 0
try {
lenBuffer = in.read(inputBuffer)
} catch {
case _: IOException =>
throw new InputMismatchException
}
if (lenBuffer <= 0) return -1
}
inputBuffer({
ptrBuffer += 1
ptrBuffer - 1
})
}
private[this] def isSpaceChar(c: Int) = !(c >= 33 && c <= 126)
private[this] def skip = {
var b = 0
while ( {
b = readByte().toInt
b != -1 && isSpaceChar(b)
}) {}
b
}
}
//------------------------------------------------------------------------------------------//
// Output
//------------------------------------------------------------------------------------------//
private[this] final object Writer {
private[this] val out = new PrintWriter(System.out)
def flush(): Unit = out.flush()
def println(x: Any): Unit = out.println(x)
def print(x: Any): Unit = out.print(x)
}
} | robertoFischer/hackerrank | src/main/scala/HackerRank/Training/BasicProgramming/ServiceLane.scala | Scala | mit | 8,681 |
package skuber
import java.util.Date
/**
* @author David O'Riordan
*/
case class Node(
val kind: String ="Node",
override val apiVersion: String = v1,
val metadata: ObjectMeta,
spec: Option[Node.Spec] = None,
status: Option[Node.Status] = None)
extends ObjectResource {
def withResourceVersion(version: String) = this.copy(metadata = metadata.copy(resourceVersion=version))
}
object Node {
val specification=CoreResourceSpecification(
scope = ResourceSpecification.Scope.Cluster,
names = ResourceSpecification.Names(
plural = "nodes",
singular = "node",
kind = "Node",
shortNames = List("no")
)
)
implicit val nodeDef = new ResourceDefinition[Node] { def spec=specification }
implicit val nodeListDef = new ResourceDefinition[NodeList] { def spec=specification }
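  // Usage sketch (illustrative): `Node.named("worker-1")` builds a bare Node by name, and
  // `Node("worker-1", Node.Spec(unschedulable = true))` attaches a spec via the overload below.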
def named(name: String) = Node(metadata=ObjectMeta(name=name))
def apply(name: String, spec: Node.Spec) : Node = Node(metadata=ObjectMeta(name=name), spec = Some(spec))
case class Spec(
podCIDR: String = "",
providerID: String = "",
unschedulable: Boolean = false,
externalID: String = "",
taints: List[Taint] = Nil)
case class Taint(
effect: String,
key: String,
value: Option[String] = None,
timeAdded: Option[Timestamp] = None
)
case class Status(
capacity: Resource.ResourceList=Map(),
phase: Option[Phase.Phase] = None,
conditions: List[Node.Condition] = List(),
addresses: List[Node.Address] = List(),
nodeInfo: Option[Node.SystemInfo] = None,
allocatable: Resource.ResourceList=Map(),
daemonEndpoints: Option[DaemonEndpoints] = None,
images: List[Container.Image] = Nil,
volumesInUse: List[String] = Nil,
volumesAttached: List[AttachedVolume]
)
case class DaemonEndpoints(
kubeletEndpoint: DaemonEndpoint
)
case class DaemonEndpoint(
Port: Int
)
case class AttachedVolume(name: String, devicePath: String)
object Phase extends Enumeration {
type Phase = Value
val Pending, Running,Terminated = Value
}
case class Condition(
_type : String,
status: String,
lastHeartbeatTime: Option[Timestamp]=None,
lastTransitionTime: Option[Timestamp] = None,
reason: Option[String] = None,
message: Option[String] = None)
case class Address(_type: String, address: String)
case class SystemInfo(
machineID: String,
systemUUID: String,
bootID: String,
kernelVersion: String,
osImage: String,
containerRuntimeVersion: String,
kubeletVersion: String,
kubeProxyVersion: String)
} | doriordan/skuber | client/src/main/scala/skuber/Node.scala | Scala | apache-2.0 | 2,674 |
/*
* Copyright (c) 2015,
* Ilya Sergey, Christopher Earl, Matthew Might and David Van Horn
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the project "Reachability" nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.ucombinator.lambdajs.syntax
import util.parsing.input.Positional
/**
* @author ilya
*/
object LJSyntax {
type Label = String
final case class Op(op: String, stamp: Int)
sealed abstract class Exp extends Positional {
def isValue: Boolean = false
}
sealed abstract class StampedExp extends Exp {
val stamp: Int
}
/*
* Value-like expressions
*/
case class Var(name: String, stamp: Int) extends StampedExp
case class EString(s: String) extends Exp {
override def isValue = true
}
// case class EAddr(a: Addr) extends Exp {
// override def isValue = true
// }
case class EInt(n: Int) extends Exp {
override def isValue = true
}
case class EFloat(n: Double) extends Exp {
override def isValue = true
}
case class EBool(b: Boolean) extends Exp {
override def isValue = true
}
case object EUndef extends Exp {
override def isValue = true
}
case object EEval extends Exp
case object ENan extends Exp
case object EInfP extends Exp
case object EInfM extends Exp
case object ENull extends Exp {
override def isValue = true
}
case class Fun(params: List[Var], body: Exp, stamp: Int) extends StampedExp
/*
* Other LambdaJS expressions
*/
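  /*
   * Note: the stamped forms below override equals/hashCode to compare by stamp only, so two
   * nodes with the same stamp are treated as identical. This is assumed to be intentional
   * (cheap hashing for the analysis) and to rely on stamps being unique per syntactic node.
   */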
case class Record(entries: List[(String, Exp)], stamp: Int) extends Exp {
override def isValue = entries.foldLeft(true) {
case (result, (s, v)) => result && v.isValue
}
override def hashCode() = stamp
override def equals(obj: Any) = obj match {
case se: StampedExp => se.stamp == this.stamp
case x: AnyRef => this eq x
case x => x equals this
}
}
case class Let(x: Var, rhs: Exp, body: Exp, stamp: Int) extends StampedExp {
override def hashCode() = stamp
override def equals(obj: Any) = obj match {
case se: StampedExp => se.stamp == this.stamp
case x: AnyRef => this eq x
case x => x equals this
}
}
case class App(fun: Exp, args: List[Exp], stamp: Int) extends StampedExp {
override def hashCode() = stamp
override def equals(obj: Any) = obj match {
case se: StampedExp => se.stamp == this.stamp
case x: AnyRef => this eq x
case x => x equals this
}
}
case class Lookup(rec: Exp, index: Exp, stamp: Int) extends StampedExp {
override def hashCode() = stamp
override def equals(obj: Any) = obj match {
case se: StampedExp => se.stamp == this.stamp
case x: AnyRef => this eq x
case x => x equals this
}
}
case class Update(rec: Exp, index: Exp, rhs: Exp, stamp: Int) extends StampedExp {
override def hashCode() = stamp
override def equals(obj: Any) = obj match {
case se: StampedExp => se.stamp == this.stamp
case x: AnyRef => this eq x
case x => x equals this
}
}
case class Del(rec: Exp, index: Exp, stamp: Int) extends StampedExp {
override def hashCode() = stamp
override def equals(obj: Any) = obj match {
case se: StampedExp => se.stamp == this.stamp
case x: AnyRef => this eq x
case x => x equals this
}
}
case class Asgn(lhs: Exp, rhs: Exp, stamp: Int) extends StampedExp {
override def hashCode() = stamp
override def equals(obj: Any) = obj match {
case se: StampedExp => se.stamp == this.stamp
case x: AnyRef => this eq x
case x => x equals this
}
}
case class Ref(e: Exp, stamp: Int) extends StampedExp {
override def hashCode() = stamp
override def equals(obj: Any) = obj match {
case se: StampedExp => se.stamp == this.stamp
case x: AnyRef => this eq x
case x => x equals this
}
}
case class Deref(e: Exp, stamp: Int) extends StampedExp {
override def hashCode() = stamp
override def equals(obj: Any) = obj match {
case se: StampedExp => se.stamp == this.stamp
case x: AnyRef => this eq x
case x => x equals this
}
}
case class If(cond: Exp, tb: Exp, eb: Exp, stamp: Int) extends StampedExp {
override def hashCode() = stamp
override def equals(obj: Any) = obj match {
case se: StampedExp => se.stamp == this.stamp
case x: AnyRef => this eq x
case x => x equals this
}
}
case class Seq(fst: Exp, snd: Exp, stamp: Int) extends StampedExp {
override def hashCode() = stamp
override def equals(obj: Any) = obj match {
case se: StampedExp => se.stamp == this.stamp
case x: AnyRef => this eq x
case x => x equals this
}
}
case class While(cond: Exp, body: Exp, stamp: Int) extends StampedExp {
override def hashCode() = stamp
override def equals(obj: Any) = obj match {
case se: StampedExp => se.stamp == this.stamp
case x: AnyRef => this eq x
case x => x equals this
}
}
case class Labelled(lab: Label, e: Exp, stamp: Int) extends StampedExp {
override def hashCode() = stamp
override def equals(obj: Any) = obj match {
case se: StampedExp => se.stamp == this.stamp
case x: AnyRef => this eq x
case x => x equals this
}
}
case class Break(lab: Label, e: Exp, stamp: Int) extends StampedExp {
override def hashCode() = stamp
override def equals(obj: Any) = obj match {
case se: StampedExp => se.stamp == this.stamp
case x: AnyRef => this eq x
case x => x equals this
}
}
case class TryCatch(e: Exp, x: Var, rest: Exp, stamp: Int) extends StampedExp {
override def hashCode() = stamp
override def equals(obj: Any) = obj match {
case se: StampedExp => se.stamp == this.stamp
case y: AnyRef => this eq y
case y => y equals this
}
}
case class TryFinally(e: Exp, rest: Exp, stamp: Int) extends StampedExp {
override def hashCode() = stamp
override def equals(obj: Any) = obj match {
case se: StampedExp => se.stamp == this.stamp
case x: AnyRef => this eq x
case x => x equals this
}
}
case class Throw(e: Exp, stamp: Int) extends StampedExp {
override def hashCode() = stamp
override def equals(obj: Any) = obj match {
case se: StampedExp => se.stamp == this.stamp
case x: AnyRef => this eq x
case x => x equals this
}
}
case class OpApp(op: Op, args: List[Exp], stamp: Int) extends StampedExp {
override def hashCode() = stamp
override def equals(obj: Any) = obj match {
case se: StampedExp => se.stamp == this.stamp
case x: AnyRef => this eq x
case x => x equals this
}
}
}
trait LJSyntax {
import LJSyntax._
type Addr
/**
* Variable environments
*/
type Env = Map[Var, Addr]
/**
* Values
*/
sealed abstract class Value
abstract class AbstractStringValue extends Value
case class StringValue(s: String) extends AbstractStringValue
case object StringTop extends AbstractStringValue
case class BoolValue(b: Boolean) extends Value
abstract class AbstractNumValue extends Value
case class IntValue(n: Int) extends AbstractNumValue
case class FloatValue(f: Double) extends AbstractNumValue
case object NumTopValue extends AbstractNumValue
case object PlusInf extends AbstractNumValue
case object MinusInf extends AbstractNumValue
case object EvalValue extends Value
def mkIntValue(n: Int, truncate: Boolean): AbstractNumValue = if (truncate) {
NumTopValue
} else {
IntValue(n)
}
def mkFloatValue(n: Double, truncate: Boolean): AbstractNumValue = if (truncate) {
NumTopValue
} else {
FloatValue(n)
}
case class AddrValue(a: Addr) extends Value
case object UndefValue extends Value
case object NullValue extends Value
case class RecValue(entries: List[(StringValue, Value)]) extends Value
case class FunValue(fun: Fun, env: Env) extends Value
/*
* Closures
*/
sealed abstract class Closure
case class GroundClo(e: Exp, env: Env) extends Closure
case class RecordClo(entries: List[(StringValue, Closure)]) extends Closure
case class LetClo(x: Var, rhs: Closure, body: Closure) extends Closure
case class AppClo(fun: Closure, args: List[Closure]) extends Closure
case class LookupClo(rec: Closure, index: Closure) extends Closure
case class UpdateClo(rec: Closure, index: Closure, rhs: Closure) extends Closure
case class DelClo(rec: Closure, index: Closure) extends Closure
case class AsgnClo(lhs: Closure, rhs: Closure) extends Closure
case class RefClo(e: Closure) extends Closure
case class DerefClo(e: Closure) extends Closure
case class IfClo(cond: Closure, tb: Closure, eb: Closure) extends Closure
case class SeqClo(fst: Closure, snd: Closure) extends Closure
case class WhileClo(cond: Closure, body: Closure) extends Closure
case class LabelledClo(lab: Label, e: Closure) extends Closure
case class BreakClo(lab: Label, e: Closure) extends Closure
case class TryCatchClo(e: Closure, x: Var, rest: Closure) extends Closure
case class TryFinallyClo(e: Closure, rest: Closure) extends Closure
case class ThrowClo(e: Closure) extends Closure
case class OpClo(op: Op, args: List[Closure]) extends Closure
case class ValueClo(v: Value) extends Closure
/***************************************
   * Potential redexes
***************************************/
sealed abstract class PotentialRedex
case class PR_VAR(v: Var, env: Env) extends PotentialRedex
case class PR_APP(v: Value, args: List[Value]) extends PotentialRedex
case class PR_LET(x: Var, v: Value, clo: Closure) extends PotentialRedex
case class PR_REC_REF(v: Value, s: AbstractStringValue) extends PotentialRedex
case class PR_REC_SET(v: Value, s: AbstractStringValue, v2: Value, c: Closure) extends PotentialRedex
case class PR_REC_DEL(v: Value, s: AbstractStringValue, c: Closure) extends PotentialRedex
case class PR_IF(v: Value, tb: Closure, eb: Closure) extends PotentialRedex
case class PR_OP(op: Op, vs: List[Value]) extends PotentialRedex
case class PR_REF(v: Value, c: Closure) extends PotentialRedex
case class PR_ASGN(a: Value, v: Value, c: Closure) extends PotentialRedex
case class PR_DEREF(v: Value, c: Closure) extends PotentialRedex
case class PR_THROW(v: Value) extends PotentialRedex
case class PR_BREAK(l: Label, v: Value) extends PotentialRedex
/***************************************
* Utility functions
***************************************/
/**
* Convert an expression to closure
*/
def exp2Clo(expr: Exp, r: Env): Closure = {
def toGround(exp: Exp) = GroundClo(exp, r)
expr match {
case v if v.isValue => ValueClo(exp2Value(v))
case Record(entries, _) => RecordClo(entries.map {
case (s, t) => (StringValue(s), toGround(t))
})
case f@Fun(_, _, _) => toGround(f)
case x@Var(_, _) => toGround(x)
case Let(x, rhs, b, _) => LetClo(x, toGround(rhs), toGround(b))
case App(fun: Exp, args: List[Exp], _) => AppClo(toGround(fun), args.map(toGround))
case Lookup(rec: Exp, index: Exp, _) => LookupClo(toGround(rec), toGround(index))
case Update(rec: Exp, index: Exp, rhs: Exp, _) => UpdateClo(toGround(rec), toGround(index), toGround(rhs))
case Del(rec: Exp, index: Exp, _) => DelClo(toGround(rec), toGround(index))
case Asgn(lhs: Exp, rhs: Exp, _) => AsgnClo(toGround(lhs), toGround(rhs))
case Ref(e: Exp, _) => RefClo(toGround(e))
case Deref(e: Exp, _) => DerefClo(toGround(e))
case If(cond: Exp, tb: Exp, eb: Exp, _) => IfClo(toGround(cond), toGround(tb), toGround(eb))
case Seq(fst: Exp, snd: Exp, _) => SeqClo(toGround(fst), toGround(snd))
case While(cond: Exp, body: Exp, _) => WhileClo(toGround(cond), toGround(body))
case Labelled(lab: Label, e: Exp, _) => LabelledClo(lab, toGround(e))
case Break(lab: Label, e: Exp, _) => BreakClo(lab, toGround(e))
case TryCatch(e: Exp, x: Var, rest: Exp, _) => TryCatchClo(toGround(e), x, toGround(rest))
case TryFinally(e: Exp, rest: Exp, _) => TryFinallyClo(toGround(e), toGround(rest))
case Throw(e: Exp, _) => ThrowClo(toGround(e))
case OpApp(op: Op, args: List[Exp], _) => OpClo(op, args.map(toGround))
case x => throw new Exception("Cannot convert the expression " + x.toString + " to a closure.")
}
}
/**
* Convert an expression to value
*/
def exp2Value(e: Exp): Value = e match {
case EString(s) => StringValue(s)
// case EInt(n) => mkIntValue(n, false)
case EFloat(n) => mkFloatValue(n, false)
case EBool(b) => BoolValue(b)
case EUndef => UndefValue
case ENull => NullValue
case EEval => EvalValue
case ENan => NumTopValue
case EInfP => PlusInf
case EInfM => MinusInf
// case EAddr(a) => AddrValue(a)
case r@Record(entries, _) if r.isValue => RecValue(entries.map {
case (s, ee) => (StringValue(s), exp2Value(ee))
})
case x => throw new Exception("Not value term: " + e.toString)
}
}
| ilyasergey/reachability | src/org/ucombinator/lambdajs/syntax/LJSyntax.scala | Scala | bsd-3-clause | 14,564 |
package utils
import java.sql.Date
import entity.{User, Article}
import entity.form._
import security.Encryptor.ImplicitEc
import scala.language.implicitConversions
/**
* Samsara Aquarius Utils
* Implicit Form Converter
*/
object FormConverter {
implicit def infoConvert(data: InfoFormData): Article = {
Article(0, data.title, url = data.url, author = data.author, cid = data.cid, updateDate = data.updateDate)
}
implicit def registerConvert(data: RegisterFormData): User = {
User(uid = 0, username = data.username, password = data.password.encrypt(),
joinDate = new Date(System.currentTimeMillis()), email = data.email)
}
}
| sczyh30/samsara-aquarius | app/utils/FormConverter.scala | Scala | mit | 661 |
package zangelo.spray.json
import org.specs2.mutable.Specification
import spray.json._
import zangelo.spray.json.annotation._
trait TestObject extends Product
object TestProtocol
extends DefaultJsonProtocol
with AutoProductFormats[TestObject]
//TODO needs to be a top-level class so companion symbol is accessible
case class TestDefault(a:String = "a", b:Int) extends TestObject
/**
* Mostly adapted from spray-json's ProductFormatsSpec
*/
class AutoProductFormatsSpec extends Specification {
case class Test0() extends TestObject
case class Test2(a:Int, b:Option[Double]) extends TestObject
case class Test3[A, B](as: List[A], bs: List[B]) extends TestObject
case class TestSeq[A, B](as: Seq[A], bs:Seq[B]) extends TestObject
case class Test36(a1: String,
a2: String,
a3: String,
a4: String,
a5: Int,
a6: String,
a7: String,
a8: String,
a9: String,
a10: String,
a11: String,
a12: Double,
a13: String,
a14: String,
a15: String,
a16: String,
a17: String,
a18: String,
a19: String,
a20: String,
a21: String,
a22: String,
a23: Int,
a24: String,
a25: String,
a26: String,
a27: String,
a28: String,
a29: String,
a30: Double,
a31: String,
a32: String,
a33: String,
a34: String,
a35: String,
a36: String) extends TestObject
@SerialVersionUID(1L) // SerialVersionUID adds a static field to the case class
case class TestStatic(a: Int, b: Option[Double]) extends TestObject
case class TestMangled(`foo-bar!`: Int) extends TestObject
"A JsonFormat created with `autoProductFormat`, for a case class with 2 elements," should {
import TestProtocol._
val json = JsObject("a" -> JsNumber(42), "b" -> JsNumber(4.2))
val obj = Test2(42, Some(4.2))
val genJson = obj.toJson
"convert to a respective JsObject" in {
genJson mustEqual json
}
"convert a JsObject to the respective case class instance" in {
json.convertTo[Test2] mustEqual obj
}
"throw a DeserializationException if the JsObject does not all required members" in (
JsObject("b" -> JsNumber(4.2)).convertTo[Test2] must
throwA(new DeserializationException("Object is missing required member 'a'"))
)
"not require the presence of optional fields for deserialization" in {
JsObject("a" -> JsNumber(42)).convertTo[Test2] mustEqual Test2(42, None)
}
"not render `None` members during serialization" in {
Test2(42, None).toJson mustEqual JsObject("a" -> JsNumber(42))
}
"ignore additional members during deserialization" in {
JsObject("a" -> JsNumber(42), "b" -> JsNumber(4.2), "c" -> JsString('no)).convertTo[Test2] mustEqual obj
}
"not depend on any specific member order for deserialization" in {
JsObject("b" -> JsNumber(4.2), "a" -> JsNumber(42)).convertTo[Test2] mustEqual obj
}
//FIXME throw correct exception
// "throw a DeserializationException if the JsValue is not a JsObject" in {
// JsNull.convertTo[Test2] must throwA(new DeserializationException("Object expected in field 'a'"))
// }
}
"A JsonFormat for a generic case class and created with `autoProductFormat`" should {
import TestProtocol._
val obj = Test3(42 :: 43 :: Nil, "x" :: "y" :: "z" :: Nil)
val json = JsObject(
"as" -> JsArray(JsNumber(42), JsNumber(43)),
"bs" -> JsArray(JsString("x"), JsString("y"), JsString("z"))
)
"convert to a respective JsObject" in {
obj.toJson mustEqual json
}
"convert a JsObject to the respective case class instance" in {
json.convertTo[Test3[Int, String]] mustEqual obj
}
}
"A JsonFormat for a case class with 36 parameters and created with `autoProductFormat`" should {
import TestProtocol._
val obj = Test36(
"a1", "a2", "a3", "a4", 5, "a6", "a7", "a8", "a9",
"a10", "a11", 12d, "a13", "a14", "a15", "a16", "a17", "a18",
"a1", "a2", "a3", "a4", 5, "a6", "a7", "a8", "a9",
"a10", "a11", 12d, "a13", "a14", "a15", "a16", "a17", "a18")
val json = JsonParser("""{"a28":"a10","a17":"a17","a34":"a16","a6":"a6","a30":12.0,"a24":"a6","a13":"a13","a29":"a11","a35":"a17","a18":"a18","a5":5,"a4":"a4","a9":"a9","a25":"a7","a14":"a14","a15":"a15","a26":"a8","a36":"a18","a11":"a11","a22":"a4","a33":"a15","a10":"a10","a3":"a3","a21":"a3","a8":"a8","a32":"a14","a1":"a1","a16":"a16","a27":"a9","a20":"a2","a7":"a7","a12":12.0,"a23":5,"a2":"a2","a19":"a1","a31":"a13"}""")
"convert to a respective JsObject" in {
obj.toJson mustEqual json
}
"convert a JsObject to the respective case class instance" in {
json.convertTo[Test36] mustEqual obj
}
}
"A JsonFormat for a generic case class with an explicitly provided type parameter" should {
"serialize to the correct type parameter" in {
import TestProtocol._
case class Box[A](a: A) extends TestObject
Box(42).toJson === JsObject(Map("a" -> JsNumber(42)))
}
}
//TODO support transient fields
// "A JsonFormat for a case class with transient fields and created with `jsonFormat`" should {
// import TestProtocol1._
// val obj = TestTransient(42, Some(4.2))
// val json = JsObject("a" -> JsNumber(42), "b" -> JsNumber(4.2))
// "convert to a respective JsObject" in {
// obj.toJson mustEqual json
// }
// "convert a JsObject to the respective case class instance" in {
// json.convertTo[TestTransient] mustEqual obj
// }
// }
//
"A JsonFormat for a case class with static fields and created with `autoProductFormat`" should {
import TestProtocol._
val obj = TestStatic(42, Some(4.2))
val json = JsObject("a" -> JsNumber(42), "b" -> JsNumber(4.2))
"convert to a respective JsObject" in {
obj.toJson mustEqual json
}
"convert a JsObject to the respective case class instance" in {
json.convertTo[TestStatic] mustEqual obj
}
}
"A JsonFormat created with `autoProductFormat`, for a case class with 0 elements," should {
import TestProtocol._
val obj = Test0()
val json = JsObject()
"convert to a respective JsObject" in {
obj.toJson mustEqual json
}
"convert a JsObject to the respective case class instance" in {
json.convertTo[Test0] mustEqual obj
}
"ignore additional members during deserialization" in {
JsObject("a" -> JsNumber(42)).convertTo[Test0] mustEqual obj
}
// FIXME not sure why this is failing
// "throw a DeserializationException if the JsValue is not a JsObject" in (
// JsNull.convertTo[Test0] must throwA(new DeserializationException("JSON object expected instead"))
// )
}
"A JsonFormat created with `autoProductFormat`, for a case class with mangled-name members," should {
import TestProtocol._
    val json = "{\"foo-bar!\":42}"
"produce the correct JSON" in {
TestMangled(42).toJson.compactPrint === json
}
"convert a JsObject to the respective case class instance" in {
json.parseJson.convertTo[TestMangled] === TestMangled(42)
}
}
"A JsonFormat created with `autoProductFormat`, for a case class with @JsonProperty annotated members," should {
import TestProtocol._
case class TestAnnotated(@JsonProperty("overridden") a:String, b:Int) extends TestObject
val obj = TestAnnotated("a", 42)
val json = JsObject("overridden" -> JsString("a"), "b" -> JsNumber(42))
"rename the JSON property according to the annotation's value" in {
obj.toJson shouldEqual json
}
"convert a JsObject to the respective case class instance" in {
json.convertTo[TestAnnotated] shouldEqual obj
}
}
"A JsonFormat created with `autoProductFormat`, for a case class with default members," should {
import TestProtocol._
val obj = TestDefault(b = 42)
val json = JsObject("a" -> JsString("a"), "b" -> JsNumber(42))
val jsonWithDefaultMissing = JsObject("b" -> JsNumber(42))
"convert to a respective JsObject with default parameter as a field" in {
obj.toJson shouldEqual json
}
"convert a JsObject with the default parameter missing to the respective case class instance" in {
jsonWithDefaultMissing.convertTo[TestDefault] shouldEqual obj
}
}
"A JsonFormat created with `autoProductFormat`, for a case class with @JsonUnwrapped annotated members," should {
import TestProtocol._
case class Nested(c:String, d:Option[Double]) extends TestObject
case class TestAnnotated(a:String, @JsonUnwrapped b:Nested) extends TestObject
case class TestAnnotatedPrefix(a:String, @JsonUnwrapped("pre_") b:Nested) extends TestObject
case class TestAnnotatedSuffix(a:String, @JsonUnwrapped("","_suf") b:Nested) extends TestObject
val obj = TestAnnotated("a", Nested("c", Some(42.0)))
val objPrefix = TestAnnotatedPrefix("a", Nested("c", Some(42.0)))
val objSuffix = TestAnnotatedSuffix("a", Nested("c", Some(42.0)))
val json = JsObject("a" -> JsString("a"), "c" -> JsString("c"), "d" -> JsNumber(42.0))
val prefixJson = JsObject("a" -> JsString("a"), "pre_c" -> JsString("c"), "pre_d" -> JsNumber(42.0))
val suffixJson = JsObject("a" -> JsString("a"), "c_suf" -> JsString("c"), "d_suf" -> JsNumber(42.0))
"bring the nested object's properties into the top-level JSON" in {
obj.toJson shouldEqual json
}
"convert a JsObject to the respective case class instance" in {
json.convertTo[TestAnnotated] shouldEqual obj
}
"should add a prefix to the nested keys when converted to a JsObject" in {
objPrefix.toJson shouldEqual prefixJson
}
"convert a JsObject with prefixed keys to the respective case class instance" in {
prefixJson.convertTo[TestAnnotatedPrefix] shouldEqual objPrefix
}
"should add a suffix to the nested keys when converted to a JsObject" in {
objSuffix.toJson shouldEqual suffixJson
}
"convert a JsObject with suffixed keys to the respective case class instance" in {
suffixJson.convertTo[TestAnnotatedSuffix] shouldEqual objSuffix
}
}
"A JsonFormat created with `autoProductFormat`, for a case class with @JsonPropertyCase annotations," should {
import TestProtocol._
case class TestSingleArg(@JsonPropertyCase(JsonPropertyCases.Snakize) twoWordsA:String,
twoWordsB:String)
extends TestObject
@JsonPropertyCase(JsonPropertyCases.Snakize)
case class TestAllArgs(twoWordsA:String, twoWordsB:String) extends TestObject
val singleArgObject = TestSingleArg("a", "b")
val allArgsObject = TestAllArgs("a", "b")
val singleArgJson = JsObject("two_words_a" -> JsString("a"), "twoWordsB" -> JsString("b"))
val allArgsJson = JsObject("two_words_a" -> JsString("a"), "two_words_b" -> JsString("b"))
"convert to a respective JsObject with the case of a single argument converted to snake case" in {
singleArgObject.toJson shouldEqual singleArgJson
}
"convert a JsObject with a single snakized key to a respective case class instance" in {
singleArgJson.convertTo[TestSingleArg] shouldEqual singleArgObject
}
"convert to a respective JsObject with the case of a all arguments converted to snake case" in {
allArgsObject.toJson shouldEqual allArgsJson
}
"convert a JsObject with all snakized keys to a respective case class instance" in {
allArgsJson.convertTo[TestAllArgs] shouldEqual allArgsObject
}
}
"A JsonFormat created with `autoProductFormat`, for a case class with @JsonIgnore annotations," should {
import TestProtocol._
case class TestIgnore(@JsonIgnore a:String, b:Int,
@JsonIgnore c:Int, d:String)
extends TestObject
val ignoreObject = TestIgnore("a", 42, 5, "d")
val ignoreJson = JsObject("b" -> JsNumber(42), "d" -> JsString("d"))
"ignore properties with the @JsonIgnore annotation" in {
ignoreObject.toJson shouldEqual ignoreJson
}
}
"A JsonFormat created with `autoProductFormat`, for a case class with Seq fields," should {
import TestProtocol._
val seqObject = TestSeq(Seq("a", "b", "c"), Seq(1,2,3))
val seqJson = JsObject("as" -> JsArray(JsString("a"), JsString("b"), JsString("c")),
"bs" -> JsArray(JsNumber(1), JsNumber(2), JsNumber(3)))
"serialize the Seq fields into a JSON array" in {
seqObject.toJson shouldEqual seqJson
}
}
}
| zackangelo/spray-json-macros | src/test/scala/zangelo/spray/json/AutoProductFormatsSpec.scala | Scala | apache-2.0 | 13,072 |
package com.circusoc.simplesite.users
import com.circusoc.simplesite.GraphTestingConf
import com.circusoc.simplesite.users.permissions.Permission
import com.circusoc.testgraph.{UserTestGraph, TestNodeFactory}
import org.scalatest.FlatSpec
import org.scalatest.Matchers._
/**
* Created by riri on 25/01/15.
*/
class UserGraphSpec extends FlatSpec {
import GraphTestingConf._
it should "do things that I expect with reflection" in {
val factoryA = UserTestGraph.permissionsFactory[permissions.CanAdministerUsersPermission.type]
factoryA.randomItem() should be(permissions.CanAdministerUsersPermission)
// val factoryB = UserTestGraph.permissionsFactory[Integer] // this doesn't compile, as expected.
}
it should "create a user" in {
val userFactory = UserTestGraph.userFactory
val user = userFactory.randomNode()
val permsFactory: TestNodeFactory[Permission] = UserTestGraph.permissionsFactory[permissions.CanAdministerUsersPermission.type]
val perm = permsFactory.randomNode()
val join = UserTestGraph.addPermissionJoiner
join.join(user, perm)
val userGot = User.authenticateByUsername(user.node.username, userFactory.retrieveUserPassword(user.node))
assert(userGot.isDefined)
assert(userGot.get.hasPermission(permissions.CanAdministerUsersPermission))
}
}
| ririw/circusoc-backend | src/test/scala/com/circusoc/simplesite/users/UserGraphSpec.scala | Scala | agpl-3.0 | 1,337 |
object foo {
sealed trait Exp[T]
case class Var[T](name: String) extends Exp[T]
def env[T](x: Var[T]): T = ???
def eval[S](e: Exp[S]) = e match {
case v: Var[foo] =>
env(v)
}
}
| som-snytt/dotty | tests/pos/gadt-foo.scala | Scala | apache-2.0 | 199 |
package com.outr.nextui.examples
import com.outr.nextui.UI
object Examples {
val examples = Vector(
HelloWorld,
ContainerExample,
EasingExample,
ImageExample,
LabelExample,
ScreensExample,
VirtualSizeExample,
AddRemoveExample,
AnimationExample
)
def apply(): Vector[UI] = examples
} | outr/nextui | examples/src/main/scala/com/outr/nextui/examples/Examples.scala | Scala | mit | 327 |
package org.sisioh.aws4s.s3.model
import com.amazonaws.services.s3.model.{ BucketTaggingConfiguration, TagSet }
import org.sisioh.aws4s.PimpedType
import scala.collection.JavaConverters._
object BucketTaggingConfigurationFactory {
def create(): BucketTaggingConfiguration = new BucketTaggingConfiguration()
def create(tagSets: Seq[TagSet]): BucketTaggingConfiguration = new BucketTaggingConfiguration(tagSets.asJava)
}
class RichBucketTaggingConfiguration(val underlying: BucketTaggingConfiguration)
extends AnyVal with PimpedType[BucketTaggingConfiguration] {
def tagSet: TagSet = underlying.getTagSet
def tagSets: Seq[TagSet] = underlying.getAllTagSets.asScala
def tagSets_=(value: Seq[TagSet]): Unit =
underlying.setTagSets(value.asJava)
}
| everpeace/aws4s | aws4s-s3/src/main/scala/org/sisioh/aws4s/s3/model/RichBucketTaggingConfiguration.scala | Scala | mit | 772 |
package com.tuvistavie.xserver.frontend.auth
import play.api.Play
import play.api.mvc.{ RequestHeader, AnyContent }
import scala.sys.process.Process
import com.tuvistavie.xserver.frontend.util.Config
trait PasswordAuthentication {
def authenticate(username: String, password: String): Option[User]
}
trait TokenAuthentication {
def authenticate(token: String): Option[User]
}
trait DummyPasswordAuthentication extends PasswordAuthentication {
override def authenticate(username: String, password: String) = {
Some(User(username))
}
}
trait UnixAuthentication extends PasswordAuthentication {
override def authenticate(username: String, password: String): Option[User] = {
val authPath = Config.getString("paths.nix-password-checker")
val pb = Process(authPath, Seq(username, password))
val exitCode: Int = pb.!
if(exitCode == 0) Some(User(username))
else None
}
}
trait SimpleTokenAuthentication extends TokenAuthentication {
override def authenticate(token: String): Option[User] = {
UserManager.current findUserByToken(token)
}
}
abstract class LoginManager extends PasswordAuthentication with TokenAuthentication {
def login(implicit request: RequestHeader): Option[User] = {
request.session get(Config.getString("auth.token-name")) flatMap { authenticate _ }
}
}
object DummyLoginManager extends LoginManager with DummyPasswordAuthentication with SimpleTokenAuthentication
object NixLoginManager extends LoginManager with UnixAuthentication with SimpleTokenAuthentication
| tuvistavie/scala-x-server | frontend/app/auth/Authentication.scala | Scala | mit | 1,539 |
package com.github.kfang.mongo4s.commands
import reactivemongo.bson.{BSONValue, Producer, BSONDocument}
import reactivemongo.core.commands.GetLastError
case class RemoveQuery(
sel: BSONDocument,
writeConcern: GetLastError = GetLastError(),
firstMatchOnly: Boolean = true
){
def writeConcern(gle: GetLastError): RemoveQuery = this.copy(writeConcern = gle)
def firstMatchOnly(b: Boolean): RemoveQuery = this.copy(firstMatchOnly = b)
}
case class RemoveModelQuery[M](
m: M,
writeConcern: GetLastError = GetLastError(),
firstMatchOnly: Boolean = true
){
def writeConcern(gle: GetLastError): RemoveModelQuery[M] = this.copy(writeConcern = gle)
def firstMatchOnly(b: Boolean): RemoveModelQuery[M] = this.copy(firstMatchOnly = b)
}
trait RemoveDsl {
def remove(sel: BSONDocument): RemoveQuery = RemoveQuery(sel)
def remove(sel: Producer[(String, BSONValue)]*): RemoveQuery = remove(BSONDocument(sel: _*))
def remove[M](m: M): RemoveModelQuery[M] = RemoveModelQuery(m)
class RemoveExpectsSel {
def id(id: String): RemoveQuery = remove(BSONDocument("_id" -> id))
def id(id: BSONValue): RemoveQuery = remove(BSONDocument("_id" -> id))
def sel(s: BSONDocument): RemoveQuery = remove(s)
def sel(s: Producer[(String, BSONValue)]*): RemoveQuery = remove(BSONDocument(s: _*))
def model[M](m: M): RemoveModelQuery[M] = remove(m)
}
def remove: RemoveExpectsSel = new RemoveExpectsSel()
//For the CRUD people
def delete(sel: BSONDocument): RemoveQuery = remove(sel)
def delete(sel: Producer[(String, BSONValue)]*): RemoveQuery = remove(BSONDocument(sel: _*))
def delete[M](m: M): RemoveModelQuery[M] = RemoveModelQuery(m)
def delete: RemoveExpectsSel = new RemoveExpectsSel()
}
object RemoveDsl extends RemoveDsl
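// Usage sketch (not part of the original source): how the DSL above is meant to read.
// The selector values and ObjectId string are illustrative assumptions only.
object RemoveDslUsageSketch extends RemoveDsl {
  import reactivemongo.bson._

  // Remove a single document by its _id.
  val byId: RemoveQuery = remove.id("54c1d9e97e8ab2d1338ab4dc")

  // Remove every document matching a selector, not just the first match.
  val byName: RemoveQuery = remove(BSONDocument("name" -> "bob")).firstMatchOnly(false)

  // The CRUD-flavoured alias builds the same query type.
  val asDelete: RemoveQuery = delete(BSONDocument("age" -> 42))
}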
| kfang/mongo4s | src/main/scala/com/github/kfang/mongo4s/commands/RemoveDsl.scala | Scala | mit | 1,769 |
package incognito.anonymization.dichotomize
import org.apache.spark._
import org.apache.spark.rdd.RDD
import org.apache.spark.rdd.RDD.rddToPairRDDFunctions
import org.apache.spark.rdd.EmptyRDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.serializer.KryoRegistrator
import breeze.linalg.Vector
import com.esotericsoftware.kryo.Kryo
import incognito.archive.ECsAccum
import incognito.rdd.{ BucketSizes, ECKey, ECS }
import incognito.utils.Utils
import scala.collection.mutable.ArrayBuffer
import scala.util.control.Breaks
/**
* @author Antorweep Chakravorty
* @constructor a constructor to dichotomize buckets into equivalence classes
*/
class IDichotomize() extends Dichotomize {
/**
   * An iterative method to divide the bucket frequencies into two approximately equal halves at each iteration/level.
   * @param _x RDD with ECKey, bucket code, frequency and upper bound.
   * Initially all buckets are grouped into one EC and, as we dichotomize it, new ECs with different sizes are created.
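   * For example, bucket sizes (4, 2) with upper bounds (0.7, 0.5) split into lhs = (2, 1) and rhs = (2, 1);
   * each half then carries probabilities (0.67, 0.33), which stay within the bounds, so two child ECs are produced.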
*/
def div(_x: RDD[BucketSizes]): RDD[(Boolean, BucketSizes)] = {
// val numPartitions = _x.sparkContext.getConf.get("spark.default.parallelism").toInt
    /*Groups the buckets by their ECKey (initially the same for all) and
    generates an RDD with arrays of all bucket codes, their sizes and upper bounds
*/
val x = _x.map(b => (b.ecKey, b)).groupByKey()
.mapValues({ b =>
val bA = b
val buks = b.map(_.bucketCode).toArray
val sizes = b.map(_.size).toArray
val uBounds = b.map(_.uBound).toArray
(buks, sizes, uBounds)
}) //.repartition(numPartitions)
//A return variable that stores equivalence classes that are complete and cannot be further divided and others that can be
val out = x.map({
y =>
//Boolean to determine whether determined equivalence classes are allowed
var valid = true
val ecKey = y._1
val buks = y._2._1
val sizes = Vector(y._2._2)
val uBounds = Vector(y._2._3)
/*Determines the left and right hand side frequencies of equivalence classes at each iteration
of the binary dichotomization phase
*/
val lhs = sizes.map(s => math.floor(s / 2).toInt)
val rhs = sizes - lhs
val lhsCount = lhs.sum * 1.0
val rhsCount = rhs.sum * 1.0
//If both of the newly determined equivalence classes have frequencies
if (lhsCount > 0.0 && rhsCount > 0.0) {
//Calculate the probability of the equivalence classes
val lhsProbs = (lhs.map(_.toDouble) / lhsCount)
val rhsProbs = (rhs.map(_.toDouble) / rhsCount)
        //Determines if the created left hand side equivalence class fulfils the proportionality requirements/upper bound constraints
val lFalse = lhsProbs.toArray.filter({
var i = (-1); s => i += 1;
(new Utils()).round(s, 2) > (new Utils()).round(uBounds(i), 2)
}).length
        //Determines if the created right hand side equivalence class fulfils the proportionality requirements/upper bound constraints
val rFalse = rhsProbs.toArray.filter({
var i = (-1); s => i += 1;
(new Utils()).round(s, 2) > (new Utils()).round(uBounds(i), 2)
}).length
        //If any bucket proportion in the left or right equivalence class violates its upper bound
if (lFalse > 0.0 || rFalse > 0.0) {
valid = false
}
} else
valid = false
val utils = new Utils
val out = ({
//If generated equivalence classes are invalid, we return their parent with a valid false condition, which states that that equivalence class cannot be further divided
if (!valid) {
val out = buks.map({ var i = (-1); v => i += 1; new BucketSizes(ecKey, v, sizes(i), uBounds(i)) })
.map(v => (valid, v))
out
        } //If the generated equivalence classes are valid, we return their left and right children with valid = true, which states that we can now try dichotomizing the children
else {
val nECKeyLHS = new ECKey(ecKey.level + 1, utils.hashId(ecKey.sideParent + ecKey.side).toString, '0')
val nECKeyRHS = new ECKey(ecKey.level + 1, utils.hashId(ecKey.sideParent + ecKey.side).toString, '1')
val l = buks.map({ var i = (-1); v => i += 1; new BucketSizes(nECKeyLHS, v, lhs(i), uBounds(i)) })
val r = buks.map({ var i = (-1); v => i += 1; new BucketSizes(nECKeyRHS, v, rhs(i), uBounds(i)) })
val c = (Array(l) ++ Array(r)).flatMap(f => f)
val out = c.map(v => (valid, v))
out
}
})
out
}).flatMap(f => f)
//Returned as Boolean, BucketSizes. The Boolean variable "true" specifying that the ECs could be checked whether they can be further divided and "false" specifying that they are complete.
out
}
/**
* A method to create equivalence classes
   * @param bucketSizes an RDD having all buckets and their sizes with the same initial equivalence key. During the dichotomization phase, ECKeys are updated, and how many records from each bucket can be chosen into how many equivalence classes is also determined
* @return returns the equivalence classes and the buckets that create it
*/
override def getECSizes(bucketsSizes: RDD[BucketSizes]): RDD[ECS] = {
//All buckets have the same eckey and are tested if they can be dichotomized
var y = div(bucketsSizes)
//We store the equivalence classes that are complete and are not allowed to be further divided
var leafs = y.filter(!_._1).map(_._2)
//We iteratively call the div method on the buckets until no equivalence classes can be created
while (!y.isEmpty()) {
//We filter out the equivalence classes that are allowed to be further divided
val nLeafs = y.filter(_._1).map(_._2).cache
y = div(nLeafs)
leafs = leafs.union(y.filter(!_._1).map(_._2))
nLeafs.unpersist(false)
}
    //We return the generated equivalence classes, represented by their keys and a group of buckets with the number of records that can be drawn from each.
val out = leafs.map(v => (v.ecKey, (v.bucketCode, v.size)))
.groupByKey
.mapValues(f => (f.toMap))
.map(v => new ECS(v._1, v._2))
out
}
} | achak1987/SparkAnonymizationToolkit | src/main/scala/incognito/anonymization/dichotomize/IDichotomize.scala | Scala | apache-2.0 | 6,733 |
package examples.circe
import io.circe._
import io.circe.syntax._
import io.circe.Decoder._
import scalaz._, Scalaz._
object Example01 extends App {
// a: Any
case class User(id: Long, handle: String, display: Any) {
    // Minimal implementation so the example runs end to end; the untyped field forces a lossy string fallback.
    def displayAsJson: Json = Json.fromString(display.toString)
}
val a1 = User(1, "test", 1)
val a2 = User(1, "test", "test")
// Define an encoder
// A => JSON
val userEncoder: Encoder[User] = Encoder.instance(user =>
Json.obj(
"id" -> Json.fromLong(user.id),
"handle" -> Json.fromString(user.handle),
"display" -> user.displayAsJson))
// Define a decoder
val userDecoder: Decoder[User] = Decoder.instance { c =>
val result: Result[User] =
for {
id <- c.downField("id").as[Long]
handle <- c.downField("handle").as[String]
displayName <- c.downField("display").as[String]
} yield User(id, handle, displayName)
result
}
// Example
val userAdil = User(1, "adilakhter", "Adil Akhter")
val encoded: Json = userEncoder(userAdil)
val encodedJsonString = encoded.noSpaces
//{"id":1,"handle":"adilakhter","display":"Adil Akhter"}
val decoded: Result[User] = userDecoder.decodeJson(encoded)
val decodedUser = decoded.right.get // JUST DO NOT DO THIS IN PRODUCTION
assert( decodedUser == userAdil)
"example 01.1:" |> println
// ------------------
// Problems:
// --------------------
  // - boilerplate
  // - verbose
  // - a parallel encoder/decoder hierarchy needs to be defined
  //   and wired for every type used during JSON encoding and decoding
// For instance:
case class Department (id: Long, users: List[User])
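  // Hypothetical illustration (not in the original file): every extra type in the hierarchy,
  // such as Department above, needs its own hand-written encoder wired to userEncoder.
  val departmentEncoder: Encoder[Department] = Encoder.instance { department =>
    Json.obj(
      "id"    -> Json.fromLong(department.id),
      "users" -> Json.fromValues(department.users.map(userEncoder(_))))
  }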
}
| adilakhter/scalaznoob | src/main/scala/examples/circe/Example01.scala | Scala | apache-2.0 | 1,609 |
/***
MIT License
Copyright (c) 2018 Zifeng Deng
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
object NFA {
def merge(stack: List[Char], postfix: List[Char]): List[Char] = stack match {
case head::tail if head != '(' => merge(tail, head::postfix)
case _ => postfix
}
def push(op1: Char, op2: Char): Boolean = (op1, op2) match {
case ('|', '.') => true
case _ => false
}
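  // Shunting-yard style infix-to-postfix conversion: operands are appended to `postfix`
  // directly, operators are parked on `stack` ('.' binds tighter than '|', as encoded by
  // `push`), and a closing ')' makes `merge` flush the operators back to the nearest '('.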
def infixToPosfix(re: List[Char],
stack: List[Char],
postfix: List[Char]): List[Char] = {
re match {
case Nil => stack match {
case Nil => postfix.reverse
case head::tail => infixToPosfix(Nil, tail, head::postfix);
}
case head::tail => head match {
case '|' | '.' => stack match {
case Nil => infixToPosfix(tail, head :: stack, postfix)
case op :: rest => if (op == '(') {
infixToPosfix(tail, head :: stack, postfix)
} else if (push(op, head)) {
infixToPosfix(tail, head::stack, postfix)
} else {
infixToPosfix(tail, head :: rest, op :: postfix)
}
}
case '(' => infixToPosfix(tail, head::stack, postfix)
case ')' => infixToPosfix(tail, stack.dropWhile(_ != '(').tail, merge(stack, postfix))
case _ => infixToPosfix(tail, stack, head::postfix)
}
}
}
def main(args: Array[String]): Unit = {
println(infixToPosfix("a.a.a|b.(c|d)".toList, Nil, Nil))
}
}
| binape/code-snippets | 00-QuickExamples/NFA01.scala | Scala | mit | 2,443 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.types
import org.json4s.JsonAST.JValue
import org.json4s.JsonDSL._
import org.apache.spark.annotation.Stable
/**
* The data type for Maps. Keys in a map are not allowed to have `null` values.
*
* Please use `DataTypes.createMapType()` to create a specific instance.
*
* @param keyType The data type of map keys.
* @param valueType The data type of map values.
* @param valueContainsNull Indicates if map values have `null` values.
*/
@Stable
case class MapType(
keyType: DataType,
valueType: DataType,
valueContainsNull: Boolean) extends DataType {
/** No-arg constructor for kryo. */
def this() = this(null, null, false)
private[sql] def buildFormattedString(prefix: String, builder: StringBuilder): Unit = {
builder.append(s"$prefix-- key: ${keyType.typeName}\\n")
DataType.buildFormattedString(keyType, s"$prefix |", builder)
builder.append(s"$prefix-- value: ${valueType.typeName} " +
s"(valueContainsNull = $valueContainsNull)\\n")
DataType.buildFormattedString(valueType, s"$prefix |", builder)
}
override private[sql] def jsonValue: JValue =
("type" -> typeName) ~
("keyType" -> keyType.jsonValue) ~
("valueType" -> valueType.jsonValue) ~
("valueContainsNull" -> valueContainsNull)
/**
* The default size of a value of the MapType is
* (the default size of the key type + the default size of the value type).
* We assume that there is only 1 element on average in a map. See SPARK-18853.
*/
override def defaultSize: Int = 1 * (keyType.defaultSize + valueType.defaultSize)
override def simpleString: String = s"map<${keyType.simpleString},${valueType.simpleString}>"
override def catalogString: String = s"map<${keyType.catalogString},${valueType.catalogString}>"
override def sql: String = s"MAP<${keyType.sql}, ${valueType.sql}>"
override private[spark] def asNullable: MapType =
MapType(keyType.asNullable, valueType.asNullable, valueContainsNull = true)
override private[spark] def existsRecursively(f: (DataType) => Boolean): Boolean = {
f(this) || keyType.existsRecursively(f) || valueType.existsRecursively(f)
}
}
/**
* @since 1.3.0
*/
@Stable
object MapType extends AbstractDataType {
override private[sql] def defaultConcreteType: DataType = apply(NullType, NullType)
override private[sql] def acceptsType(other: DataType): Boolean = {
other.isInstanceOf[MapType]
}
override private[sql] def simpleString: String = "map"
/**
* Construct a [[MapType]] object with the given key type and value type.
* The `valueContainsNull` is true.
*/
def apply(keyType: DataType, valueType: DataType): MapType =
MapType(keyType: DataType, valueType: DataType, valueContainsNull = true)
}
| pgandhi999/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/types/MapType.scala | Scala | apache-2.0 | 3,576 |
package eu.shiftforward.Elements
import eu.shiftforward.{Ground, Bus, Wire, CircuitSimulation}
trait Logic extends CircuitSimulation {
private def unaryLogicGate(input: Wire)(op: Boolean => Boolean) = {
val output = new Wire
input addAction { () =>
val inputSig = input.getSignal
schedule(InverterDelay) { output <~ op(inputSig) }
}
output
}
def inverter(input: Wire): Wire = unaryLogicGate(input) { !_ }
def inverter(input: Bus): Bus = input map inverter
def bridge(input: Wire): Wire = unaryLogicGate(input) { identity }
def bridge(input: Bus): Bus = input map bridge
private def binaryLogicGate(a: Wire, b: Wire)(op: (Boolean, Boolean) => Boolean) = {
val output = new Wire
def action() {
val inputA = a.getSignal
val inputB = b.getSignal
schedule(GenericGateDelay) { output <~ op(inputA, inputB) }
}
a addAction action
b addAction action
output
}
def and(ins: Iterable[Wire]): Wire = ins reduceLeft and
def or(ins: Iterable[Wire]): Wire = ins reduceLeft or
def xor(ins: Iterable[Wire]): Wire = ins reduceLeft xor
def nand(ins: Iterable[Wire]): Wire = ins reduceLeft nand
def nor(ins: Iterable[Wire]): Wire = ins reduceLeft nor
def and(a: Wire, b: Wire) = binaryLogicGate(a, b) { _ && _ }
def or(a: Wire, b: Wire) = binaryLogicGate(a, b) { _ || _ }
def xor(a: Wire, b: Wire) = binaryLogicGate(a, b) { _ ^ _ }
def nand(a: Wire, b: Wire) = binaryLogicGate(a, b) { (x, y) => !(x && y) }
def nor(a: Wire, b: Wire) = binaryLogicGate(a, b) { (x, y) => !(x || y) }
def and(x: Bus, y: Bus): Bus = (x, y).zipped map and
def or(x: Bus, y: Bus): Bus = (x, y).zipped map or
def xor(x: Bus, y: Bus): Bus = (x, y).zipped map xor
def nand(x: Bus, y: Bus): Bus = (x, y).zipped map nand
def nor(x: Bus, y: Bus): Bus = (x, y).zipped map nor
/* def and(x: Bus, y: Wire): Bus = x map { w => and(w, y) }
def or(x: Bus, y: Wire): Bus = x map { w => or(w, y) }
def xor(x: Bus, y: Wire): Bus = x map { w => xor(w, y) }
def nand(x: Bus, y: Wire): Bus = x map { w => nand(w, y) }
def nor(x: Bus, y: Wire): Bus = x map { w => nor(w, y) } */
def rotateRight(a: Bus): Bus = bridge(a.drop(1) ++ a.take(1))
def rotateLeft(a: Bus): Bus = bridge(a.takeRight(1) ++ a.dropRight(1))
def shiftRight(a: Bus): Bus = bridge(a.drop(1) :+ Ground)
def shiftLeft(a: Bus): Bus = bridge(Ground +: a.dropRight(1))
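  // Illustrative composition (not in the original trait): the gate primitives above are
  // enough to wire a half adder, returning the (sum, carry) pair of output wires.
  def halfAdder(a: Wire, b: Wire): (Wire, Wire) = (xor(a, b), and(a, b))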
} | hugoferreira/from-zero-to-computer | src/main/scala/eu/shiftforward/Elements/Logic.scala | Scala | mit | 2,440 |
package com.crobox.clickhouse.dsl
/**
 * QueryFactory exposes all methods of OperationalQuery from an empty starting point (for factoring new queries)
*/
trait QueryFactory extends OperationalQuery {
override val internalQuery: InternalQuery = InternalQuery()
}
| crobox/clickhouse-scala-client | dsl/src/main/scala/com.crobox.clickhouse/dsl/QueryFactory.scala | Scala | lgpl-3.0 | 263 |
package spray.oauth.adapters.inmemory.models
import com.github.nscala_time.time.Imports._
import org.joda.time.PeriodType
import spray.oauth.adapters.inmemory.utils.{ Entity, DAO, Sequence }
import spray.oauth.models.GrantType
import spray.oauth.utils.OAuth2Parameters._
import spray.oauth.utils.TokenGenerator
import spray.oauth.{ AuthUser, AuthInfo }
/**
* Created with IntelliJ IDEA.
* User: hasan.ozgan
* Date: 4/21/14
* Time: 9:44 AM
* To change this template use File | Settings | File Templates.
*/
case class Code(
id: Long,
fk_consumer: Long,
fk_user: Option[Long],
scope: Option[String],
code: String,
token_refreshable: Boolean,
redirect_uri: Option[String],
ip_restriction: Option[String],
created_on: DateTime = DateTime.now,
deleted_on: DateTime = DateTime.now,
expired_on: DateTime) extends Entity(id) {
def expires_in = {
if (expired_on >= DateTime.now) {
val interval: Interval = new Interval(DateTime.now, expired_on)
interval.toPeriod(PeriodType.seconds()).getSeconds
} else 0
}
def toAuthInfo: AuthInfo = {
val clientId = Some(fk_consumer.toString)
AuthInfo(fk_user.map(x => AuthUser(x.toString)), clientId, scope, redirect_uri, token_refreshable, GrantType.AuthorizationCode, ip_restriction)
}
}
object CodeDAO extends DAO[Code] {
def findAuthInfoByCode(code: String): Option[AuthInfo] = {
findOneByCode(code).filter(x => x.expires_in > 0).map { x => x.toAuthInfo }
}
def findOneByCode(code: String): Option[Code] = {
findBy(p => p.code.equals(code))
}
def deleteCode(code: String): Unit = {
val found = findBy(p => p.code.equals(code))
if (found.nonEmpty) {
remove(found.get)
}
}
def createCode(info: AuthInfo): Option[Code] = {
val clientId = info.clientId.map { x => x.toLong }.getOrElse(throw new Exception("ClientId Not Found"))
val created_on = DateTime.now
val expired_on = created_on + CODE_DURATION
    val code = Code(Sequence.nextId, clientId, info.user.map(x => x.id.toLong), info.scope, TokenGenerator.bearer(CODE_LENGTH), info.refreshable, info.redirectUri, info.remoteAddress, created_on, created_on, expired_on)
try {
this.save(code)
Some(code)
} catch {
case e: Exception => None
}
}
}
| hasanozgan/spray-oauth | core/src/main/scala/spray/oauth/adapters/inmemory/models/Code.scala | Scala | apache-2.0 | 2,306 |
/*
Learning from this @author:wspringer
https://github.com/wspringer/scala-lzw/blob/master/src/main/scala/nl/flotsam/lzw/Node.scala
*/
trait Node {
def decode[T](i: Int, fn: (Byte) => T): Node
def encode[T](b: Byte, fn: (Int, Int) => T): Node
def apply[T](fn: (Byte) => T)
def root: Node
def bitsRequired: Int
def terminate[T](fn: (Int, Int) => T)
def first: Byte
}
trait NodeManager {
def create(owner: Node, value: Byte, first: Byte): Option[Node]
def get(value: Int): Option[Node]
}
class ValueNode(index: Int, owner: Node, value: Byte, val first: Byte, nodeManager: NodeManager) extends Node {
private val children = new Array[Node](255)
def decode[T](i: Int, fn: (Byte) => T) = {
val node = nodeManager.get(i).get
node.apply(fn)
val child = children(0xff & node.first)
if (child == null) {
nodeManager.create(this, node.first, this.first) match {
case Some(nxt) => children(0xff & node.first) = nxt
case _ =>
}
}
node
}
def encode[T](b: Byte, fn: (Int, Int) => T) = {
val child = children(0xff & b)
if (child == null) {
fn(index, bitsRequired)
nodeManager.create(this, b, first) match {
case Some(node) => children(0xff & b) = node
case _ =>
}
root.encode(b, fn)
} else child
}
def apply[T](fn: (Byte) => T) {
owner.apply(fn)
fn(value)
}
def root = owner.root
def bitsRequired = owner.bitsRequired
def terminate[T](fn: (Int, Int) => T) {
fn(index, bitsRequired)
}
}
class RootNode(limit: Int = 512) extends Node with NodeManager {
private var index = 255
private val initial = Array.tabulate[ValueNode](256)(b => new ValueNode(b, this, b.toByte, b.toByte, this))
private val createdNodes = new Array[ValueNode](limit - 256)
def decode[T](i: Int, fn: (Byte) => T) = {
val node = initial(i)
node.apply(fn)
node
}
def encode[T](b: Byte, fn: (Int, Int) => T) = initial(0xff & b)
def apply[T](fn: (Byte) => T) { }
def terminate[T](fn: (Int, Int) => T) {}
def first = 0
val root = this
def create(owner: Node, value: Byte, first: Byte) = if (index <= limit) {
index += 1
val node = new ValueNode(index, owner, value, first, this)
createdNodes(index - 256) = node
Some(node)
} else {
// No reset
None
}
def get(value: Int): Option[Node] =
if (value < 256) Some(initial(value))
else if (value > index) None
else Some(createdNodes(value - 256))
def bitsRequired = 32 - Integer.numberOfLeadingZeros(index)
}
object Lzw extends App {
/**
* LZW encodes a sequence of bytes. Returns a non-strict collection of tuples, where the first element of the tuple
* represents a value to be send to the output, and the second element the minimal number of bits expected to be
* used for representing the output.
*
* Depending on the size of the index, the number of bits used to represent pointers to elements of the index can
* vary (and will grow while encoding). There are different policies for dealing with this while writing the output
* values. Some may prefer to always allocate a fixed number of bits for the output values,
* while others might prefer to limit the number of bits written to the minimum needed. By explicitly passing the
* number of bits required *minimally* to store the output value, callers can choose to implement any policy they
* deem appropriate.
*
* @param in A collection of bytes.
* @return A non-strict collection providing the values of the LZW encoded representation of the input collection.
*/
println( " Hello World - LZW ")
def encode(in: Traversable[Byte], limit: Int = 256): Traversable[(Int, Int)] =
new Traversable[(Int, Int)] {
def foreach[U](f: ((Int, Int)) => U) {
val root: Node = new RootNode(limit)
val untupled = Function.untupled(f)
in.foldLeft(root)({ (node, b) => node.encode(b, untupled) }).terminate(untupled)
}
}
System.out.println( encode("ABRACADADABRA".getBytes("utf-8"),500 ) )
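  // Illustrative sketch (not in the original file): one of the output policies mentioned in
  // the scaladoc above — ignore the per-value bit hint and render every value at a fixed
  // width (9 bits covers every index below the default limit of 512).
  def toFixedWidthBits(encoded: Traversable[(Int, Int)], width: Int = 9): String =
    encoded.map { case (value, _) => ("0" * width + value.toBinaryString).takeRight(width) }.mkString(" ")

  System.out.println(toFixedWidthBits(encode("ABRACADADABRA".getBytes("utf-8"), 500)))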
}
| jaimeguzman/learning | LzW-wspringer.scala | Scala | apache-2.0 | 4,093 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.jdbc.connection
import java.sql.{Driver, DriverManager}
import javax.security.auth.login.Configuration
import scala.collection.JavaConverters._
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.execution.datasources.jdbc.{DriverRegistry, JDBCOptions}
abstract class ConnectionProviderSuiteBase extends SparkFunSuite with BeforeAndAfterEach {
protected def registerDriver(driverClass: String): Driver = {
DriverRegistry.register(driverClass)
DriverManager.getDrivers.asScala.collectFirst {
case d if d.getClass.getCanonicalName == driverClass => d
}.get
}
protected def options(url: String) = new JDBCOptions(Map[String, String](
JDBCOptions.JDBC_URL -> url,
JDBCOptions.JDBC_TABLE_NAME -> "table",
JDBCOptions.JDBC_KEYTAB -> "/path/to/keytab",
JDBCOptions.JDBC_PRINCIPAL -> "principal"
))
override def afterEach(): Unit = {
try {
Configuration.setConfiguration(null)
} finally {
super.afterEach()
}
}
protected def testSecureConnectionProvider(provider: SecureConnectionProvider): Unit = {
// Make sure no authentication for the database is set
assert(Configuration.getConfiguration.getAppConfigurationEntry(provider.appEntry) == null)
// Make sure the first call sets authentication properly
val savedConfig = Configuration.getConfiguration
provider.setAuthenticationConfigIfNeeded()
val config = Configuration.getConfiguration
assert(savedConfig != config)
val appEntry = config.getAppConfigurationEntry(provider.appEntry)
assert(appEntry != null)
// Make sure a second call is not modifying the existing authentication
provider.setAuthenticationConfigIfNeeded()
assert(config.getAppConfigurationEntry(provider.appEntry) === appEntry)
}
}
| kevinyu98/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/ConnectionProviderSuiteBase.scala | Scala | apache-2.0 | 2,682 |
package com.softwaremill.spray
import spray.routing.{RequestContext, Route, SimpleRoutingApp}
import akka.actor.{Props, Actor, ActorSystem}
object SprayComplete extends App with SimpleRoutingApp {
implicit val actorSystem = ActorSystem()
def secure(route: Route): Route = {
parameters("username", "password") { (username, password) =>
if (username == "admin" && password == "1234") {
route
} else {
ctx => ctx.complete(401, "bad user")
}
}
}
startServer(interface = "localhost", port = 8080) {
get {
pathPrefix("takeaway") {
path("hello") {
complete {
"Welcome to the potato & steak take-away!"
}
} ~
path("order" / "potatoes") {
parameters("mashed".as[Boolean], "number".as[Int], "special"?) { (mashed, number, specialWishes) =>
complete {
s"You ordered ${if (mashed) "mashed" else "normal"} potatoes. " +
s"One is free, so you'll get ${number+1} potatoes." +
s"Your special wishes: ${specialWishes.getOrElse("none luckily")}"
}
}
} ~
path("pay") {
secure {
ctx => paymentActor ! ctx
}
}
}
}
}
class PaymentActor extends Actor {
def receive = {
case ctx: RequestContext => ctx.complete("paid")
}
}
lazy val paymentActor = actorSystem.actorOf(Props[PaymentActor])
}
| adamw/keep-it-simple-scala | src/main/scala/com/softwaremill/spray/SprayComplete.scala | Scala | apache-2.0 | 1,466 |
/*
* Copyright (C) 2017 HAT Data Exchange Ltd
* SPDX-License-Identifier: AGPL-3.0
*
* This file is part of the Hub of All Things project (HAT).
*
* HAT is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License
* as published by the Free Software Foundation, version 3 of
* the License.
*
* HAT is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General
* Public License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*
* Written by Andrius Aucinas <[email protected]>
* 2 / 2017
*/
package org.hatdex.hat.resourceManagement.models
import java.util.UUID
import org.joda.time.DateTime
import play.api.libs.json.Json
import play.api.libs.json.OFormat
case class DatabaseInstance(
id: UUID,
name: String,
password: String)
case class DatabaseServer(
id: Int,
host: String,
port: Int,
dateCreated: DateTime,
databases: Seq[DatabaseInstance])
case class HatKeys(
privateKey: String,
publicKey: String)
case class HatSignup(
id: UUID,
fullName: String,
username: String,
email: String,
pass: String,
dbPass: String,
created: Boolean,
registerTime: DateTime,
database: Option[DatabaseInstance],
databaseServer: Option[DatabaseServer],
keys: Option[HatKeys])
object HatSignup {
import play.api.libs.json.JodaWrites._
import play.api.libs.json.JodaReads._
implicit val databaseInstanceFormat: OFormat[DatabaseInstance] = Json.format[DatabaseInstance]
implicit val databaseServerFormat: OFormat[DatabaseServer] = Json.format[DatabaseServer]
implicit val hatKeysFormat: OFormat[HatKeys] = Json.format[HatKeys]
implicit val hatSignupFormat: OFormat[HatSignup] = Json.format[HatSignup]
}
| Hub-of-all-Things/HAT2.0 | hat/app/org/hatdex/hat/resourceManagement/models/HatSignup.scala | Scala | agpl-3.0 | 2,030 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.mqtt
import scala.reflect.ClassTag
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.api.java.{JavaReceiverInputDStream, JavaStreamingContext, JavaDStream}
import org.apache.spark.streaming.dstream.{ReceiverInputDStream, DStream}
object MQTTUtils {
/**
* Create an input stream that receives messages pushed by a MQTT publisher.
* @param ssc StreamingContext object
* @param brokerUrl Url of remote MQTT publisher
* @param topic Topic name to subscribe to
* @param storageLevel RDD storage level. Defaults to StorageLevel.MEMORY_AND_DISK_SER_2.
*/
def createStream(
ssc: StreamingContext,
brokerUrl: String,
topic: String,
storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2
): ReceiverInputDStream[String] = {
new MQTTInputDStream(ssc, brokerUrl, topic, storageLevel)
}
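  // Usage sketch (broker URL and topic are illustrative assumptions, not from the original source):
  //   val lines = MQTTUtils.createStream(ssc, "tcp://localhost:1883", "sensor/readings")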
/**
* Create an input stream that receives messages pushed by a MQTT publisher.
* Storage level of the data will be the default StorageLevel.MEMORY_AND_DISK_SER_2.
* @param jssc JavaStreamingContext object
* @param brokerUrl Url of remote MQTT publisher
* @param topic Topic name to subscribe to
*/
def createStream(
jssc: JavaStreamingContext,
brokerUrl: String,
topic: String
): JavaReceiverInputDStream[String] = {
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[String]]
createStream(jssc.ssc, brokerUrl, topic)
}
/**
* Create an input stream that receives messages pushed by a MQTT publisher.
* @param jssc JavaStreamingContext object
* @param brokerUrl Url of remote MQTT publisher
* @param topic Topic name to subscribe to
* @param storageLevel RDD storage level.
*/
def createStream(
jssc: JavaStreamingContext,
brokerUrl: String,
topic: String,
storageLevel: StorageLevel
): JavaReceiverInputDStream[String] = {
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[String]]
createStream(jssc.ssc, brokerUrl, topic, storageLevel)
}
}
| andrewor14/iolap | external/mqtt/src/main/scala/org/apache/spark/streaming/mqtt/MQTTUtils.scala | Scala | apache-2.0 | 2,968 |