code (string, 5-1M chars) | repo_name (string, 5-109 chars) | path (string, 6-208 chars) | language (1 class) | license (15 classes) | size (int64, 5-1M)
---|---|---|---|---|---|
package com.avsystem.scex.parsing
import scala.annotation.nowarn
import scala.collection.immutable.SortedMap
/**
* Created: 24-10-2013
* Author: ghik
*/
trait PositionMapping {
def apply(pos: Int): Int
def reverse: PositionMapping
def compose(other: PositionMapping): PositionMapping =
if (this eq EmptyPositionMapping) other
else if (other eq EmptyPositionMapping) this
else ComposedPositionMapping(this, other)
def andThen(other: PositionMapping): PositionMapping =
other compose this
}
case class ShiftInfo(totalPrevShift: Int, addedLeft: Int, removedLeft: Int, addedRight: Int, removedRight: Int) {
def update(amount: Int, binding: Binding): ShiftInfo =
if (amount > 0 && binding == Binding.Left)
copy(addedLeft = addedLeft + amount)
else if (amount < 0 && binding == Binding.Left)
copy(removedLeft = removedLeft - amount)
else if (amount > 0 && binding == Binding.Right)
copy(addedRight = addedRight + amount)
else if (amount < 0 && binding == Binding.Right)
copy(removedRight = removedRight - amount)
else this
def totalShift: Int =
totalPrevShift + addedLeft - removedLeft + addedRight - removedRight
}
object ShiftInfo {
def empty(totalPrevShift: Int): ShiftInfo =
new ShiftInfo(totalPrevShift, 0, 0, 0, 0)
def apply(totalPrevShift: Int, amount: Int, binding: Binding): ShiftInfo =
empty(totalPrevShift).update(amount, binding)
def apply(totalPrevShift: Int, added: Int, removed: Int, binding: Binding): ShiftInfo =
empty(totalPrevShift).update(added, binding).update(-removed, binding)
}
class ShiftInfoPositionMapping(
private val shiftMapping: SortedMap[Int, ShiftInfo],
private val reverseShiftMapping: SortedMap[Int, ShiftInfo]
) extends PositionMapping {
@nowarn("msg=deprecated")
def apply(pos: Int): Int = shiftMapping.to(pos).lastOption match {
case Some((offset, si)) =>
// removedleft|removedright
// addedleft|addedright
//
// All 'removedleft' positions map to the first position of 'addedleft', or to the last position before it if 'addedleft' is empty.
// All 'removedright' positions map to the first position of 'addedright', or to the first position after it if 'addedright' is empty.
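// Hypothetical worked example (not from the original source): suppose the entry at offset 10
// has totalPrevShift = 0, removedLeft = 2, addedLeft = 5, removedRight = 0, addedRight = 0.
// Then totalShift = 3; the removed positions 10 and 11 both map to 10 (the start of the added
// text), while position 12 maps to 12 + 3 = 15.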
val relpos = pos - offset
val reloffset = offset + si.totalPrevShift
if (relpos < si.removedLeft)
reloffset - (if (si.addedLeft == 0 && reloffset > 0) 1 else 0)
else if (relpos < si.removedLeft + si.removedRight)
reloffset + si.addedLeft
else
pos + si.totalShift
case None =>
pos
}
def reverse: PositionMapping =
new ShiftInfoPositionMapping(reverseShiftMapping, shiftMapping)
override def equals(other: Any): Boolean = other match {
case op: ShiftInfoPositionMapping => shiftMapping == op.shiftMapping
case _ => false
}
override lazy val hashCode: Int =
shiftMapping.hashCode()
override def toString: String =
s"PositionMapping($shiftMapping)"
}
case class SingleShiftPositionMapping(amount: Int) extends PositionMapping {
def apply(pos: Int): Int = pos + amount
def reverse: PositionMapping = SingleShiftPositionMapping(-amount)
}
case class ComposedPositionMapping(left: PositionMapping, right: PositionMapping) extends PositionMapping {
def apply(pos: Int): Int = left(right(pos))
def reverse: PositionMapping = ComposedPositionMapping(right.reverse, left.reverse)
}
object EmptyPositionMapping extends PositionMapping {
def apply(pos: Int): Int = pos
def reverse: PositionMapping = this
}
| AVSystem/scex | scex-core/src/main/scala/com/avsystem/scex/parsing/PositionMapping.scala | Scala | mit | 3,533 |
/*
* Copyright 2013-2015 Websudos, Limited.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Explicit consent must be obtained from the copyright owner, Outworkers Limited before any redistribution is made.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.websudos.phantom.builder.query
import com.datastax.driver.core.{ConsistencyLevel, Session}
import com.websudos.phantom.CassandraTable
import com.websudos.phantom.builder.{ConsistencyBound, QueryBuilder, Specified, Unspecified}
import com.websudos.phantom.connectors.KeySpace
class TruncateQuery[
Table <: CassandraTable[Table, _],
Record,
Status <: ConsistencyBound
](table: Table, val qb: CQLQuery, override val options: QueryOptions) extends ExecutableStatement {
def consistencyLevel_=(level: ConsistencyLevel)(implicit session: Session): TruncateQuery[Table, Record, Specified] = {
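// With native protocol v3 or newer the consistency level is attached to the statement options;
// on older protocol versions it is serialized into the CQL query string instead.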
if (session.v3orNewer) {
new TruncateQuery(table, qb, options.consistencyLevel_=(level))
} else {
new TruncateQuery(table, QueryBuilder.consistencyLevel(qb, level.toString), options)
}
}
}
object TruncateQuery {
type Default[T <: CassandraTable[T, _], R] = TruncateQuery[T, R, Unspecified]
def apply[T <: CassandraTable[T, _], R](table: T)(implicit keySpace: KeySpace): TruncateQuery.Default[T, R] = {
new TruncateQuery(
table,
QueryBuilder.truncate(QueryBuilder.keyspace(keySpace.name, table.tableName).queryString),
QueryOptions.empty
)
}
}
| levinson/phantom | phantom-dsl/src/main/scala/com/websudos/phantom/builder/query/TruncateQuery.scala | Scala | bsd-2-clause | 2,710 |
package com.zobot.client.packet.definitions.clientbound.play
import com.zobot.client.packet.Packet
case class UnlockRecipes(count: Int, entityIds: Any) extends Packet {
override lazy val packetId = 0x32
override lazy val packetData: Array[Byte] =
fromVarInt(count) ++
fromAny(entityIds)
}
| BecauseNoReason/zobot | src/main/scala/com/zobot/client/packet/definitions/clientbound/play/UnlockRecipes.scala | Scala | mit | 304 |
package functional
import java.sql.Date.{valueOf => date}
import play.api.{Application => PlayApp}
import play.api.test._
import play.api.test.Helpers._
import play.api.i18n.MessagesApi
import play.api.i18n.{Lang, Messages, MessagesImpl, MessagesProvider}
import java.time.Instant
import play.api.inject.guice.GuiceApplicationBuilder
import helpers.InjectorSupport
import play.api.db.Database
import views.Titles
import helpers.Formatter
import helpers.UrlHelper
import helpers.UrlHelper._
import helpers.PasswordHash
import constraints.FormConstraints
import play.api.test._
import play.api.test.Helpers._
import java.sql.Connection
import java.util.concurrent.TimeUnit
import helpers.Helper._
import org.specs2.mutable.Specification
import play.api.test.{Helpers, TestServer}
import play.api.i18n.{Lang, Messages}
import play.api.test.TestServer
import org.openqa.selenium.By
import models._
import com.ruimo.scoins.Scoping._
import SeleniumHelpers.htmlUnit
import SeleniumHelpers.FirefoxJa
class UserGroupMaintenanceSpec extends Specification with InjectorSupport {
"User group maintenance" should {
"Empty messages should be shown when no user group exists" in new WithBrowser(
WebDriverFactory(CHROME), appl(inMemoryDatabase())
) {
inject[Database].withConnection { implicit conn =>
val currencyInfo = inject[CurrencyRegistry]
val localeInfo = inject[LocaleInfoRepo]
import localeInfo.{En, Ja}
implicit val lang = Lang("ja")
implicit val storeUserRepo = inject[StoreUserRepo]
val Messages = inject[MessagesApi]
implicit val mp: MessagesProvider = new MessagesImpl(lang, Messages)
val adminUser = loginWithTestUser(browser)
browser.goTo(
controllers.routes.UserGroupMaintenance.edit().url.addParm("lang", lang.code).toString
)
browser.waitUntil(
browser.find(".emptyMessage")
).text === Messages("recordEmpty")
}
}
"Create group" in new WithBrowser(
WebDriverFactory(CHROME), appl(inMemoryDatabase())
) {
inject[Database].withConnection { implicit conn =>
val currencyInfo = inject[CurrencyRegistry]
val localeInfo = inject[LocaleInfoRepo]
import localeInfo.{En, Ja}
implicit val lang = Lang("ja")
implicit val storeUserRepo = inject[StoreUserRepo]
val Messages = inject[MessagesApi]
implicit val mp: MessagesProvider = new MessagesImpl(lang, Messages)
val adminUser = loginWithTestUser(browser)
browser.goTo(
controllers.routes.UserGroupMaintenance.startCreate().url.addParm("lang", lang.code).toString
)
browser.waitUntil(browser.find("#groupName")).fill().`with`("group001")
browser.find(".createButton").click()
doWith(browser.waitUntil(browser.find(".userGroupTable"))) { tbl =>
tbl.find("tr.body").size === 1
tbl.find("tr.body .groupName a").text === "group001"
}
browser.goTo(
controllers.routes.UserGroupMaintenance.startCreate().url.addParm("lang", lang.code).toString
)
browser.waitUntil(browser.find("#groupName")).fill().`with`("group002")
browser.find(".createButton").click()
doWith(browser.waitUntil(browser.find(".userGroupTable"))) { tbl =>
tbl.find("tr.body").size === 2
tbl.find("tr.body .groupName a").index(0).text === "group001"
tbl.find("tr.body .groupName a").index(1).text === "group002"
}
// remove group002
browser.find(".removeUserGroupBtn").index(1).click()
// cancel button
browser.waitUntil(browser.find(".ui-dialog-buttonset .ui-button.ui-widget").index(1)).click()
// remove group002
browser.waitUntil(browser.find(".removeUserGroupBtn")).index(1).click()
// delete button
browser.waitUntil(browser.find(".ui-dialog-buttonset .ui-button.ui-widget").index(0)).click()
doWith(browser.waitUntil(browser.find(".userGroupTable"))) { tbl =>
browser.waitUntil(tbl.find("tr.body").size == 1)
tbl.find("tr.body .groupName a").index(0).text === "group001"
}
}
}
"Create group" in new WithBrowser(
WebDriverFactory(CHROME), appl(inMemoryDatabase())
) {
inject[Database].withConnection { implicit conn =>
val currencyInfo = inject[CurrencyRegistry]
val localeInfo = inject[LocaleInfoRepo]
import localeInfo.{En, Ja}
implicit val lang = Lang("ja")
implicit val storeUserRepo = inject[StoreUserRepo]
val Messages = inject[MessagesApi]
implicit val mp: MessagesProvider = new MessagesImpl(lang, Messages)
val adminUser = loginWithTestUser(browser)
browser.goTo(
controllers.routes.UserGroupMaintenance.startCreate().url.addParm("lang", lang.code).toString
)
browser.waitUntil(browser.find("#groupName")).fill().`with`("group001")
browser.find(".createButton").click()
doWith(browser.waitUntil(browser.find(".userGroupTable"))) { tbl =>
tbl.find("tr.body").size === 1
tbl.find("tr.body .groupName a").text === "group001"
}
browser.goTo(
controllers.routes.UserGroupMaintenance.startCreate().url.addParm("lang", lang.code).toString
)
browser.waitUntil(browser.find("#groupName")).fill().`with`("group002")
browser.find(".createButton").click()
doWith(browser.waitUntil(browser.find(".userGroupTable"))) { tbl =>
tbl.find("tr.body").size === 2
tbl.find("tr.body .groupName a").index(0).text === "group001"
tbl.find("tr.body .groupName a").index(1).text === "group002"
}
val user1 = createNormalUser(
browser, "11111111", "password01", "[email protected]", "firstName01", "lastName01", "company01"
)
val user2 = createNormalUser(
browser, "22222222", "password02", "[email protected]", "firstName02", "lastName02", "company02"
)
val user3 = createNormalUser(
browser, "33333333", "password03", "[email protected]", "firstName03", "lastName03", "company03"
)
browser.goTo(
controllers.routes.UserGroupMaintenance.edit().url.addParm("lang", lang.code).toString
)
doWith(browser.waitUntil(browser.find(".userGroupTable"))) { tbl =>
tbl.find("tr.body .groupName a").index(0).click()
}
browser.waitUntil(browser.find(".userGroup .groupName")).text === "group001"
browser.waitUntil(browser.find(".emptyMessage")).text === Messages("recordEmpty")
browser.switchTo(browser.waitUntil(browser.find(".userListForMemberFrame")))
browser.find("tr.body .userName").index(0).text === "11111111"
browser.find("tr.body button").index(0).click()
browser.waitUntil(browser.find(".userGroupMemberTable tr.body").size == 1)
browser.find(".userGroupMemberTable tr.body .userName").text === "11111111"
browser.switchTo(browser.waitUntil(browser.find(".userListForMemberFrame")))
browser.find("tr.body .userName").index(1).text === "22222222"
browser.find("tr.body button").index(1).click()
browser.waitUntil(browser.find(".userGroupMemberTable tr.body").size == 2)
browser.find(".userGroupMemberTable tr.body .userName").index(0).text === "11111111"
browser.find(".userGroupMemberTable tr.body .userName").index(1).text === "22222222"
browser.find(".userGroupMemberTable .removeUserGroupMemberBtn").index(0).click()
browser.waitUntil(browser.find(".userGroupMemberTable tr.body").size == 1)
browser.find(".userGroupMemberTable tr.body .userName").index(0).text === "22222222"
}
}
}
}
| ruimo/store2 | test/functional/UserGroupMaintenanceSpec.scala | Scala | apache-2.0 | 7,844 |
package com.amichalo.mooolelo.api.util
import akka.http.scaladsl.server.PathMatchers.{Segment, LongNumber}
import akka.http.scaladsl.server._
import com.amichalo.mooolelo.domain.{ServiceGroup, ServiceType, ServiceId}
trait PathMatchers {
val ServiceIdMatcher: PathMatcher1[ServiceId] = {
LongNumber map { value => ServiceId(value) }
}
val ServiceTypeMatcher: PathMatcher1[ServiceType] = {
Segment map { value => ServiceType(value) }
}
val GroupMatcher: PathMatcher1[ServiceGroup] = {
Segment map { value => ServiceGroup(value) }
}
}
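// Hedged usage sketch (hypothetical route, not part of the original file): a PathMatcher1
// defined above plugs directly into a `path` directive to extract a typed value.
object PathMatchersUsageExample extends PathMatchers {
  import akka.http.scaladsl.server.Directives._
  import akka.http.scaladsl.server.Route

  // GET /services/42 extracts ServiceId(42) and echoes it back
  val route: Route = path("services" / ServiceIdMatcher) { id =>
    complete(id.toString)
  }
}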
| amichalo/mooolelo | src/main/scala/com/amichalo/mooolelo/api/util/PathMatchers.scala | Scala | apache-2.0 | 563 |
package lila.round
import reactivemongo.bson._
import lila.db.BSON.BSONJodaDateTimeHandler
import lila.db.Implicits._
import org.joda.time.DateTime
import chess.format.UciMove
import chess.Pos
import Forecast.Step
import lila.game.{ Pov, Game }
final class ForecastApi(coll: Coll) {
private implicit val PosBSONHandler = new BSONHandler[BSONString, Pos] {
def read(bsonStr: BSONString): Pos = Pos.posAt(bsonStr.value) err s"No such pos: ${bsonStr.value}"
def write(x: Pos) = BSONString(x.key)
}
private implicit val stepBSONHandler = Macros.handler[Step]
private implicit val forecastBSONHandler = Macros.handler[Forecast]
import Forecast._
def save(pov: Pov, steps: Forecast.Steps): Funit = firstStep(steps) match {
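// An empty forecast clears any stored one; a non-empty forecast is accepted only when its
// first ply is the move immediately following the current game position, otherwise it is
// rejected as out of sync.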
case None => coll.remove(BSONDocument("_id" -> pov.fullId)).void
case Some(step) if pov.game.turns == step.ply - 1 => coll.update(
BSONDocument("_id" -> pov.fullId),
Forecast(
_id = pov.fullId,
steps = steps,
date = DateTime.now).truncate,
upsert = true).void
case _ => fufail(Forecast.OutOfSync)
}
def loadForDisplay(pov: Pov): Fu[Option[Forecast]] =
pov.forecastable ?? coll.find(BSONDocument("_id" -> pov.fullId)).one[Forecast] flatMap {
case None => fuccess(none)
case Some(fc) =>
if (firstStep(fc.steps).exists(_.ply != pov.game.turns + 1)) clearPov(pov) inject none
else fuccess(fc.some)
}
def loadForPlay(pov: Pov): Fu[Option[Forecast]] =
pov.game.forecastable ?? coll.find(BSONDocument("_id" -> pov.fullId)).one[Forecast] flatMap {
case None => fuccess(none)
case Some(fc) =>
if (firstStep(fc.steps).exists(_.ply != pov.game.turns)) clearPov(pov) inject none
else fuccess(fc.some)
}
def nextMove(g: Game, last: chess.Move): Fu[Option[UciMove]] = g.forecastable ?? {
loadForPlay(Pov player g) flatMap {
case None => fuccess(none)
case Some(fc) => fc(g, last) match {
case Some((newFc, uciMove)) if newFc.steps.nonEmpty =>
coll.update(BSONDocument("_id" -> fc._id), newFc) inject uciMove.some
case Some((newFc, uciMove)) => clearPov(Pov player g) inject uciMove.some
case _ => clearPov(Pov player g) inject none
}
}
}
private def firstStep(steps: Forecast.Steps) = steps.headOption.flatMap(_.headOption)
def clearGame(g: Game) = coll.remove(BSONDocument(
"_id" -> BSONDocument("$in" -> chess.Color.all.map(g.fullIdOf))
)).void
def clearPov(pov: Pov) = coll.remove(BSONDocument("_id" -> pov.fullId)).void
}
| r0k3/lila | modules/round/src/main/ForecastApi.scala | Scala | mit | 2,591 |
package org.jetbrains.plugins.scala
package annotator
package template
import org.jetbrains.plugins.scala.annotator.AnnotatorPart
import com.intellij.lang.annotation.AnnotationHolder
import lang.psi.api.toplevel.typedef.ScTrait
object TraitHasImplicitBound extends AnnotatorPart[ScTrait] {
def kind = classOf[ScTrait]
def annotate(definition: ScTrait, holder: AnnotationHolder, typeAware: Boolean) {
val contextBoundElements = definition.typeParameters.flatMap(p => p.contextBoundTypeElement)
for (te <- contextBoundElements) {
val message = "Traits cannot have type parameters with context bounds"
holder.createErrorAnnotation(te, message)
}
val viewBoundElements = definition.typeParameters.flatMap(p => p.viewTypeElement)
for (te <- viewBoundElements) {
val message = "Traits cannot have type parameters with view bounds"
holder.createErrorAnnotation(te, message)
}
}
}
| consulo/consulo-scala | src/org/jetbrains/plugins/scala/annotator/template/TraitHasImplicitBound.scala | Scala | apache-2.0 | 929 |
/*
* The MIT License (MIT)
* <p>
* Copyright (c) 2020
* <p>
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* <p>
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
* <p>
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package io.techcode.streamy.gelf.component
import akka.NotUsed
import akka.stream.scaladsl.{Flow, Framing}
import akka.util.ByteString
import io.techcode.streamy.component.SourceTransformer
import io.techcode.streamy.event.StreamEvent
import io.techcode.streamy.util.json.{Json, JsonParser}
import io.techcode.streamy.util.parser.ByteStringParser
/**
* Gelf transformer companion.
*/
object GelfTransformer {
// Default configuration
val DefaultConfig: Config = Config()
// Null byte delimiter
private val NullByteDelimiter: ByteString = ByteString("\u0000")
/**
* Creates a gelf flow that transforms incoming [[ByteString]] elements into [[StreamEvent]]s.
* This parser is compliant with Gelf protocol.
*
* @param conf flow configuration.
* @return new gelf flow compliant with Gelf protocol.
*/
def parser(conf: Config = DefaultConfig): Flow[ByteString, StreamEvent, NotUsed] =
Framing.delimiter(NullByteDelimiter, conf.maxSize, allowTruncation = true)
.via(Flow.fromGraph(new SourceTransformer {
override def factory(): ByteStringParser[Json] = JsonParser.byteStringParser()
}))
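// A minimal, hedged usage sketch (illustrative only; `byteStrings` is an assumed
// Source[ByteString, NotUsed], e.g. framed data read from a TCP connection):
//   val events = byteStrings.via(GelfTransformer.parser(Config(maxSize = 8192)))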
// Configuration
case class Config(
maxSize: Int = Int.MaxValue
)
}
| amannocci/streamy | plugin-gelf/src/main/scala/io/techcode/streamy/gelf/component/GelfTransformer.scala | Scala | mit | 2,365 |
/*
* Copyright 2018 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package support.steps
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import cucumber.api.scala.{EN, ScalaDsl}
import org.scalatest.Matchers
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import play.api.libs.ws.ahc.{AhcConfigBuilder, AhcWSClient, AhcWSClientConfig}
import play.api.libs.ws.{WS, WSResponse}
import scala.concurrent.Future
class HealthCheckSteps extends ScalaDsl with EN with Stubs with ScalaFutures with Matchers with IntegrationPatience {
val baseUrl = Env.host
lazy implicit val wsClient = {
implicit val as = ActorSystem()
implicit val mat = ActorMaterializer()
val config = new AhcWSClientConfig()
val builder = new AhcConfigBuilder(config)
val ahcBuilder = builder.configure()
val ahcConfig = ahcBuilder.build()
new AhcWSClient(ahcConfig)
}
var pingResponse: Future[WSResponse] = null
When("""^I ping the microservice service using the endpoint '/ping/ping'$"""){ () =>
pingResponse = WS.clientUrl(s"$baseUrl/ping/ping").get()
}
Then("""^I should get a successful response$"""){ () =>
pingResponse.futureValue.status should be(200)
}
}
| hmrc/worldpay-downloader | test/support/steps/HealthCheckSteps.scala | Scala | apache-2.0 | 1,764 |
package com.avsystem.scex
package compiler
/**
* Created: 20-10-2014
* Author: ghik
*/
class ValueRoot[V](val value: V)
| AVSystem/scex | scex-core/src/test/scala/com/avsystem/scex/compiler/ValueRoot.scala | Scala | mit | 124 |
package com.zobot.client.packet.definitions.serverbound.play
import com.zobot.client.packet.Packet
case class ClientSettings(locale: String, viewDistance: Any, chatMode: Int, chatColors: Any, displayedSkinParts: Any, mainHand: Int) extends Packet {
override lazy val packetId = 0x04
override lazy val packetData: Array[Byte] =
fromVarString(locale) ++
fromAny(viewDistance) ++
fromVarInt(chatMode) ++
fromAny(chatColors) ++
fromAny(displayedSkinParts) ++
fromVarInt(mainHand)
}
| BecauseNoReason/zobot | src/main/scala/com/zobot/client/packet/definitions/serverbound/play/ClientSettings.scala | Scala | mit | 509 |
package ch.epfl.scala.index
package server
package routes
import com.softwaremill.session.SessionDirectives._
import com.softwaremill.session.SessionOptions._
import com.softwaremill.session.CsrfDirectives._
import com.softwaremill.session.CsrfOptions._
import akka.http.scaladsl._
import model._
import Uri.Query
import StatusCodes.TemporaryRedirect
import headers.Referer
import server.Directives._
class OAuth2(github: Github, session: GithubUserSession) {
import session._
val routes =
get {
path("login") {
headerValueByType[Referer]() { referer =>
redirect(Uri("https://github.com/login/oauth/authorize").withQuery(
Query(
"client_id" -> github.clientId,
"scope" -> "read:org",
"state" -> referer.value
)),
TemporaryRedirect)
}
} ~
path("logout") {
headerValueByType[Referer]() { referer =>
requiredSession(refreshable, usingCookies) { _ =>
invalidateSession(refreshable, usingCookies) { ctx =>
ctx.complete(
HttpResponse(
status = TemporaryRedirect,
headers = headers.Location(Uri(referer.value)) :: Nil,
entity = HttpEntity.Empty
)
)
}
}
}
} ~
pathPrefix("callback") {
path("done") {
complete("OK")
} ~
pathEnd {
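// Exchange the OAuth2 authorization code for the user's GitHub state, persist it in the
// session, issue a fresh CSRF token, and redirect back to the URL carried in `state`
// (defaulting to the root page).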
parameters('code, 'state.?) { (code, state) =>
onSuccess(github.getUserStateWithOauth2(code)) { userState =>
setSession(refreshable, usingCookies, session.addUser(userState)) {
setNewCsrfToken(checkHeader) { ctx =>
ctx.complete(
HttpResponse(
status = TemporaryRedirect,
headers = headers.Location(Uri(state.getOrElse("/"))) :: Nil,
entity = HttpEntity.Empty
)
)
}
}
}
}
}
}
}
}
| adamwy/scaladex | server/src/main/scala/ch.epfl.scala.index.server/routes/Oauth2.scala | Scala | bsd-3-clause | 2,278 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.nio.ByteBuffer
import java.util.concurrent.RejectedExecutionException
import scala.language.existentials
import scala.util.control.NonFatal
import org.apache.spark._
import org.apache.spark.TaskState.TaskState
import org.apache.spark.serializer.SerializerInstance
import org.apache.spark.util.{ThreadUtils, Utils}
/**
* Runs a thread pool that deserializes and remotely fetches (if necessary) task results.
* TaskResultGetter uses a thread pool to process the task results sent back by the executors running on the workers.
*/
private[spark] class TaskResultGetter(sparkEnv: SparkEnv, scheduler: TaskSchedulerImpl)
extends Logging {
// Process the task results sent by executors on the workers through a thread pool (4 threads by default)
private val THREADS = sparkEnv.conf.getInt("spark.resultGetter.threads", 4)
private val getTaskResultExecutor = ThreadUtils.newDaemonFixedThreadPool(
THREADS, "task-result-getter")
// ThreadLocal gives each thread its own copy of the variable, so every thread can use its own serializer instance
protected val serializer = new ThreadLocal[SerializerInstance] {
override def initialValue(): SerializerInstance = {
sparkEnv.closureSerializer.newInstance()
}
}
/**
* Handles a successfully executed task: this is where the driver processes the result of a completed task.
*/
def enqueueSuccessfulTask(
taskSetManager: TaskSetManager, tid: Long, serializedData: ByteBuffer) {
getTaskResultExecutor.execute(new Runnable { // fetch and process the result on the thread pool
override def run(): Unit = Utils.logUncaughtExceptions {
try {
val (result, size) = serializer.get().deserialize[TaskResult[_]](serializedData) match {
case directResult: DirectTaskResult[_] => // the result carries the computed value directly
// make sure the result size is within the allowed limit
if (!taskSetManager.canFetchMoreResults(serializedData.limit())) {
return
}
// deserialize "value" without holding any lock so that it won't block other threads.
// We should call it here, so that when it's called again in
// "TaskSetManager.handleSuccessfulTask", it does not need to deserialize the value.
directResult.value()
(directResult, serializedData.limit())
// an indirect result: the value must be fetched over the network from a remote worker
case IndirectTaskResult(blockId, size) =>
// make sure the result size is within the allowed limit
if (!taskSetManager.canFetchMoreResults(size)) {
// dropped by executor if size is larger than maxResultSize
// remove the result block from the remote worker
sparkEnv.blockManager.master.removeBlock(blockId)
return
}
logDebug("Fetching indirect task result for TID %s".format(tid))
// mark the task in the TaskSet as fetching its result
scheduler.handleTaskGettingResult(taskSetManager, tid)
// fetch the computed result from the remote BlockManager
val serializedTaskResult = sparkEnv.blockManager.getRemoteBytes(blockId)
if (!serializedTaskResult.isDefined) {
/* We won't be able to get the task result if the machine that ran the task failed
* between when the task ended and when we tried to fetch the result, or if the
* block manager had to flush the result. */
scheduler.handleFailedTask(
taskSetManager, tid, TaskState.FINISHED, TaskResultLost)
return
}
// deserialize the fetched result
val deserializedResult = serializer.get().deserialize[DirectTaskResult[_]](
serializedTaskResult.get)
// delete the remote copy of the result
sparkEnv.blockManager.master.removeBlock(blockId)
(deserializedResult, size)
}
result.metrics.setResultSize(size)
// mark the task in the TaskSet as successfully completed
scheduler.handleSuccessfulTask(taskSetManager, tid, result)
} catch {
case cnf: ClassNotFoundException =>
val loader = Thread.currentThread.getContextClassLoader
taskSetManager.abort("ClassNotFound with classloader: " + loader)
// Matching NonFatal so we don't catch the ControlThrowable from the "return" above.
case NonFatal(ex) =>
logError("Exception while getting task result", ex)
taskSetManager.abort("Exception while getting task result: %s".format(ex))
}
}
})
}
/**
* Enqueues a failed task; each result is handled by a daemon thread pool, which by default consists of 4 threads.
*/
def enqueueFailedTask(taskSetManager: TaskSetManager, tid: Long, taskState: TaskState,
serializedData: ByteBuffer) {
var reason : TaskEndReason = UnknownReason
try {
getTaskResultExecutor.execute(new Runnable {
override def run(): Unit = Utils.logUncaughtExceptions {
val loader = Utils.getContextOrSparkClassLoader
try {
if (serializedData != null && serializedData.limit() > 0) {
reason = serializer.get().deserialize[TaskEndReason](
serializedData, loader)
}
} catch {
case cnd: ClassNotFoundException =>
// Log an error but keep going here -- the task failed, so not catastrophic
// if we can't deserialize the reason.
logError(
"Could not deserialize TaskEndReason: ClassNotFound with classloader " + loader)
case ex: Exception => {}
}
// notify the scheduler of the failure so the task can be rescheduled
scheduler.handleFailedTask(taskSetManager, tid, taskState, reason)
}
})
} catch {
case e: RejectedExecutionException if sparkEnv.isStopped =>
// ignore it
}
}
def stop() {
getTaskResultExecutor.shutdownNow()
}
}
| tophua/spark1.52 | core/src/main/scala/org/apache/spark/scheduler/TaskResultGetter.scala | Scala | apache-2.0 | 7,415 |
// This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
package ducttape.hyperdag.meta
import ducttape.hyperdag.PackedVertex
import ducttape.hyperdag.HyperEdge
import ducttape.hyperdag.UnpackedVertex
/**
* All incoming metaedges to a vertex will be assigned exactly one of their member hyperedges.
* This is represented by the sequence "edges". The parentRealizations are actually parallel
* with the number of incoming metaedges for a vertex. The next sequence is parallel with the
* number of edges in the active hyperedge for our current path (the one in this unpacking).
* TODO: Document an example of how to iterate over this easily with zip()
*
* Like UnpackedVertex, this interface explicitly avoids giving unpacked vertices as
* parents so that we can eventually discard more of the explored space
* the prevState can store information such as "what realizations does my parent have?"
* NOTE: It's actually the incoming edges that are meta -- not the vertex itself
*
* parentRealizations is parallel (i.e. has the same array length) with "edges", which is
* the hyperedges associated with this metaedge
* the second layer of sequences is parallel with the plain edges inside each hyperedge (Seq[Seq[D]]).
* the inner-most sequence represents a realization (Seq[D]).
*
* The incoming hyperedges for this UnpackedMetaVertex (edges) are parallel with the the outer-most
* sequence of parentRealizations. This allows us to access each parent vertex's active realization
* using the following pattern:
* val v: UnpackedMetaVertex = ...
* v.edges.zip(v.parentRealizations).foreach { case (hyperedgeElements: HyperEdge[H,E], parentReals: Seq[Seq[D]]) =>
* hyperedgeElements.zip(parentReals).foreach { case (e, parentReal) =>
* System.out.println("Parent realization of edge " + e + "is " + parentReal)
* }
* }
*
* For readability, you could mentally typedef:
* type Realization = Seq[D]
* type HyperEdgeParentRealizations = Seq[Realization]
* type MetaEdgeParentRealizations = Seq[Seq[Realization]]
*
* dual: The "dualistic" representation of this meta vertex in underlying HyperDag (not MetaHyperDag)
* used to represent this MetaHyperDag.
*
* see [[ducttape.hyperdag.meta.MetaHyperDag]] for definitions of generic types
*/
class UnpackedMetaVertex[V,H,E,D](val packed: PackedVertex[V],
val edges: Seq[HyperEdge[H,E]],
val realization: Seq[D],
val parentRealizations: Seq[Seq[Seq[D]]],
private[hyperdag] val dual: UnpackedVertex[V,H,E,D]) {
// TODO: More smearing of hash codes
override def hashCode() = packed.id ^ realization.hashCode
override def equals(that: Any) = that match {
case other: UnpackedMetaVertex[_,_,_,_] => (other.packed.id == this.packed.id) && (other.realization == this.realization)
}
override def toString() = s"${packed}/${realization.mkString("+")}"
}
| jhclark/ducttape | src/main/scala/ducttape/hyperdag/meta/UnpackedMetaVertex.scala | Scala | mpl-2.0 | 3,140 |
/*
* Copyright 2013 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
package headers
import cats.parse.Parser
import org.http4s.ServerSentEvent._
import org.http4s.internal.CharPredicate
import org.typelevel.ci._
final case class `Last-Event-Id`(id: EventId)
object `Last-Event-Id` {
def parse(s: String): ParseResult[`Last-Event-Id`] =
ParseResult.fromParser(parser, "Invalid Last-Event-Id header")(s)
private[http4s] val parser = Parser.charsWhile0(CharPredicate.All -- "\n\r").map { (id: String) =>
`Last-Event-Id`(ServerSentEvent.EventId(id))
}
implicit val headerInstance: Header[`Last-Event-Id`, Header.Single] =
Header.create(
ci"Last-Event-Id",
_.id.value,
parse,
)
}
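// Hedged usage sketch (illustrative value only): parsing a raw header value yields either a
// ParseFailure or the typed header, e.g. `Last-Event-Id`.parse("42") == Right(`Last-Event-Id`(EventId("42"))).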
| http4s/http4s | core/shared/src/main/scala/org/http4s/headers/Last-Event-Id.scala | Scala | apache-2.0 | 1,269 |
package spatutorial.client.components
import diode.react.ReactPot._
import diode.react._
import diode.data.Pot
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.prefix_<^._
import spatutorial.client.components.Bootstrap._
import spatutorial.client.services.UpdateMotd
/**
* This is a simple component demonstrating how to display async data coming from the server
*/
object Motd {
// create the React component for holding the Message of the Day
val Motd = ReactComponentB[ModelProxy[Pot[String]]]("Motd")
.render_P { proxy =>
Panel(Panel.Props("Message of the day"),
// render messages depending on the state of the Pot
proxy().renderPending(_ > 500, _ => <.p("Loading...")),
proxy().renderFailed(ex => <.p("Failed to load")),
proxy().render(m => <.p(m)),
Button(Button.Props(proxy.dispatch(UpdateMotd()), CommonStyle.danger), Icon.refresh, " Update")
)
}
.componentDidMount(scope =>
// update only if Motd is empty
Callback.when(scope.props.value.isEmpty)(scope.props.dispatch(UpdateMotd()))
)
.build
def apply(proxy: ModelProxy[Pot[String]]) = Motd(proxy)
}
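// Hedged usage sketch (assumes a diode AppCircuit whose root model exposes a `motd: Pot[String]`
// field, as in the rest of this tutorial; names are illustrative):
//   val motdWrapper = AppCircuit.connect(_.motd)
//   motdWrapper(proxy => Motd(proxy))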
| vmunier/scalajs-spa-tutorial | client/src/main/scala/spatutorial/client/components/Motd.scala | Scala | apache-2.0 | 1,176 |
package com.github.gdefacci.briscola.presentation.competition
import com.github.gdefacci.briscola.{ competition => model }
import com.github.gdefacci.briscola.web.util.ArgonautHelper.{ enumDecoder, fromMap }
import argonaut.DecodeJson
import argonaut.DecodeResult
object CompetitionJsonDecoders {
import com.github.gdefacci.briscola.presentation.CommonJsonDecoders.SeqDecodeJson
import com.github.gdefacci.briscola.presentation.game.GameJsonDecoders._
implicit lazy val matchKindDecoder: DecodeJson[model.MatchKind] = {
lazy val singleMatchDecoder = fromMap[String, model.MatchKind](Map("single-match" -> model.SingleMatch), s"invalid MatchKind")
lazy val numberOfGamesMatchKindEncoder = DecodeJson.derive[model.NumberOfGamesMatchKind].map[model.MatchKind](p => p)
lazy val targetPointsMatchKindEncoder = DecodeJson.derive[model.TargetPointsMatchKind].map[model.MatchKind](p => p)
singleMatchDecoder ||| numberOfGamesMatchKindEncoder ||| targetPointsMatchKindEncoder
}
implicit lazy val competitionStartDeadlineDecoder: DecodeJson[model.CompetitionStartDeadline] = {
lazy val allPlayersDecoder = fromMap[String, model.CompetitionStartDeadline](Map("all-players" -> model.CompetitionStartDeadline.AllPlayers), s"invalid MatchKind")
lazy val onPlayerCountEncoder = DecodeJson.derive[model.CompetitionStartDeadline.OnPlayerCount].map[model.CompetitionStartDeadline](p => p)
allPlayersDecoder ||| onPlayerCountEncoder
}
// implicit def seqDecodeJson[T]() = DecodeJson[Seq[T]]
// implicit def SeqDecodeJson[A](implicit e: DecodeJson[A]): DecodeJson[Seq[A]] = DecodeJson.CanBuildFromDecodeJson[A, Seq]
implicit lazy val teamPlayerDecoder: DecodeJson[Input.TeamPlayer] = DecodeJson.derive[Input.TeamPlayer]
implicit lazy val teamInfoDecoder: DecodeJson[Input.TeamInfo] = DecodeJson.derive[Input.TeamInfo]
implicit lazy val teamDecoder: DecodeJson[Input.Team] = DecodeJson.derive[Input.Team]
implicit lazy val teamsDecoder: DecodeJson[Input.Teams] = DecodeJson.derive[Input.Teams]
implicit lazy val gamePlayersDecoder = {
lazy val playersDecoder: DecodeJson[Input.GamePlayers] = DecodeJson[Input.GamePlayers] { j =>
j.as[Set[org.obl.raz.Path]].map(Input.Players(_): Input.GamePlayers)
}
lazy val teamPlayersDecoder: DecodeJson[Input.GamePlayers] = DecodeJson.derive[Input.TeamPlayers].map[Input.GamePlayers](p => p)
playersDecoder ||| teamPlayersDecoder
}
implicit lazy val competitionDecoder: DecodeJson[Input.Competition] = DecodeJson.derive[Input.Competition]
}
| gdefacci/briscola | ddd-briscola-web/src/main/scala/com/github/gdefacci/briscola/presentation/competition/CompetitionJsonDecoders.scala | Scala | bsd-3-clause | 2,560 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.tree.impl
import scala.annotation.tailrec
import scala.collection.mutable
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.classification.DecisionTreeClassificationModel
import org.apache.spark.ml.feature.{Instance, LabeledPoint}
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.ml.tree._
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.mllib.tree.{DecisionTreeSuite => OldDTSuite, EnsembleTestHelper}
import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo, QuantileStrategy, Strategy => OldStrategy}
import org.apache.spark.mllib.tree.impurity.{Entropy, Gini, GiniCalculator, Variance}
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.util.collection.OpenHashMap
/**
* Test suite for [[RandomForest]].
*/
class RandomForestSuite extends SparkFunSuite with MLlibTestSparkContext {
import RandomForestSuite.mapToVec
/////////////////////////////////////////////////////////////////////////////
// Tests for split calculation
/////////////////////////////////////////////////////////////////////////////
test("Binary classification with continuous features: split calculation") {
val arr = OldDTSuite.generateOrderedLabeledPointsWithLabel1().map(_.asML.toInstance)
assert(arr.length === 1000)
val rdd = sc.parallelize(arr)
val strategy = new OldStrategy(OldAlgo.Classification, Gini, 3, 2, 100)
val metadata = DecisionTreeMetadata.buildMetadata(rdd, strategy)
assert(!metadata.isUnordered(featureIndex = 0))
val splits = RandomForest.findSplits(rdd, metadata, seed = 42)
assert(splits.length === 2)
assert(splits(0).length === 99)
}
test("Binary classification with binary (ordered) categorical features: split calculation") {
val arr = OldDTSuite.generateCategoricalDataPoints().map(_.asML.toInstance)
assert(arr.length === 1000)
val rdd = sc.parallelize(arr)
val strategy = new OldStrategy(OldAlgo.Classification, Gini, maxDepth = 2, numClasses = 2,
maxBins = 100, categoricalFeaturesInfo = Map(0 -> 2, 1 -> 2))
val metadata = DecisionTreeMetadata.buildMetadata(rdd, strategy)
val splits = RandomForest.findSplits(rdd, metadata, seed = 42)
assert(!metadata.isUnordered(featureIndex = 0))
assert(!metadata.isUnordered(featureIndex = 1))
assert(splits.length === 2)
// no splits pre-computed for ordered categorical features
assert(splits(0).length === 0)
}
test("Binary classification with 3-ary (ordered) categorical features," +
" with no samples for one category: split calculation") {
val arr = OldDTSuite.generateCategoricalDataPoints().map(_.asML.toInstance)
assert(arr.length === 1000)
val rdd = sc.parallelize(arr)
val strategy = new OldStrategy(OldAlgo.Classification, Gini, maxDepth = 2, numClasses = 2,
maxBins = 100, categoricalFeaturesInfo = Map(0 -> 3, 1 -> 3))
val metadata = DecisionTreeMetadata.buildMetadata(rdd, strategy)
assert(!metadata.isUnordered(featureIndex = 0))
assert(!metadata.isUnordered(featureIndex = 1))
val splits = RandomForest.findSplits(rdd, metadata, seed = 42)
assert(splits.length === 2)
// no splits pre-computed for ordered categorical features
assert(splits(0).length === 0)
}
test("find splits for a continuous feature") {
// find splits for normal case
{
val fakeMetadata = new DecisionTreeMetadata(1, 200000, 200000.0, 0, 0,
Map(), Set(),
Array(6), Gini, QuantileStrategy.Sort,
0, 0, 0.0, 0.0, 0, 0
)
val featureSamples = Array.fill(10000)((1.0, math.random)).filter(_._2 != 0.0)
val splits = RandomForest.findSplitsForContinuousFeature(featureSamples, fakeMetadata, 0)
assert(splits.length === 5)
assert(fakeMetadata.numSplits(0) === 5)
assert(fakeMetadata.numBins(0) === 6)
// check returned splits are distinct
assert(splits.distinct.length === splits.length)
}
// SPARK-16957: Use midpoints for split values.
{
val fakeMetadata = new DecisionTreeMetadata(1, 8, 8.0, 0, 0,
Map(), Set(),
Array(3), Gini, QuantileStrategy.Sort,
0, 0, 0.0, 0.0, 0, 0
)
// possibleSplits <= numSplits
{
val featureSamples = Array(0, 1, 0, 0, 1, 0, 1, 1)
.map(x => (1.0, x.toDouble)).filter(_._2 != 0.0)
val splits = RandomForest.findSplitsForContinuousFeature(featureSamples, fakeMetadata, 0)
val expectedSplits = Array((0.0 + 1.0) / 2)
assert(splits === expectedSplits)
}
// possibleSplits > numSplits
{
val featureSamples = Array(0, 0, 1, 1, 2, 2, 3, 3)
.map(x => (1.0, x.toDouble)).filter(_._2 != 0.0)
val splits = RandomForest.findSplitsForContinuousFeature(featureSamples, fakeMetadata, 0)
val expectedSplits = Array((0.0 + 1.0) / 2, (2.0 + 3.0) / 2)
assert(splits === expectedSplits)
}
}
// find splits should not return identical splits
// when there are not enough split candidates, reduce the number of splits in metadata
{
val fakeMetadata = new DecisionTreeMetadata(1, 12, 12.0, 0, 0,
Map(), Set(),
Array(5), Gini, QuantileStrategy.Sort,
0, 0, 0.0, 0.0, 0, 0
)
val featureSamples = Array(1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3).map(x => (1.0, x.toDouble))
val splits = RandomForest.findSplitsForContinuousFeature(featureSamples, fakeMetadata, 0)
val expectedSplits = Array((1.0 + 2.0) / 2, (2.0 + 3.0) / 2)
assert(splits === expectedSplits)
// check returned splits are distinct
assert(splits.distinct.length === splits.length)
}
// find splits when most samples close to the minimum
{
val fakeMetadata = new DecisionTreeMetadata(1, 18, 18.0, 0, 0,
Map(), Set(),
Array(3), Gini, QuantileStrategy.Sort,
0, 0, 0.0, 0.0, 0, 0
)
val featureSamples =
Array(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 4, 5).map(x => (1.0, x.toDouble))
val splits = RandomForest.findSplitsForContinuousFeature(featureSamples, fakeMetadata, 0)
val expectedSplits = Array((2.0 + 3.0) / 2, (3.0 + 4.0) / 2)
assert(splits === expectedSplits)
}
// find splits when most samples close to the maximum
{
val fakeMetadata = new DecisionTreeMetadata(1, 17, 17.0, 0, 0,
Map(), Set(),
Array(2), Gini, QuantileStrategy.Sort,
0, 0, 0.0, 0.0, 0, 0
)
val featureSamples =
Array(0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2).map(x => (1.0, x.toDouble))
val splits = RandomForest.findSplitsForContinuousFeature(featureSamples, fakeMetadata, 0)
val expectedSplits = Array((1.0 + 2.0) / 2)
assert(splits === expectedSplits)
}
// find splits for arbitrarily scaled data
{
val fakeMetadata = new DecisionTreeMetadata(1, 0, 0.0, 0, 0,
Map(), Set(),
Array(6), Gini, QuantileStrategy.Sort,
0, 0, 0.0, 0.0, 0, 0
)
val featureSamplesUnitWeight = Array.fill(10)((1.0, math.random))
val featureSamplesSmallWeight = featureSamplesUnitWeight.map { case (w, x) => (w * 0.001, x)}
val featureSamplesLargeWeight = featureSamplesUnitWeight.map { case (w, x) => (w * 1000, x)}
val splitsUnitWeight = RandomForest
.findSplitsForContinuousFeature(featureSamplesUnitWeight, fakeMetadata, 0)
val splitsSmallWeight = RandomForest
.findSplitsForContinuousFeature(featureSamplesSmallWeight, fakeMetadata, 0)
val splitsLargeWeight = RandomForest
.findSplitsForContinuousFeature(featureSamplesLargeWeight, fakeMetadata, 0)
assert(splitsUnitWeight === splitsSmallWeight)
assert(splitsUnitWeight === splitsLargeWeight)
}
// find splits when most weight is close to the minimum
{
val fakeMetadata = new DecisionTreeMetadata(1, 0, 0.0, 0, 0,
Map(), Set(),
Array(3), Gini, QuantileStrategy.Sort,
0, 0, 0.0, 0.0, 0, 0
)
val featureSamples = Array((10, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6)).map {
case (w, x) => (w.toDouble, x.toDouble)
}
val splits = RandomForest.findSplitsForContinuousFeature(featureSamples, fakeMetadata, 0)
assert(splits === Array(1.5, 2.5, 3.5, 4.5, 5.5))
}
}
test("train with empty arrays") {
val lp = LabeledPoint(1.0, Vectors.dense(Array.empty[Double])).toInstance
val data = Array.fill(5)(lp)
val rdd = sc.parallelize(data)
val strategy = new OldStrategy(OldAlgo.Regression, Gini, maxDepth = 2,
maxBins = 5)
withClue("DecisionTree requires number of features > 0," +
" but was given an empty features vector") {
intercept[IllegalArgumentException] {
RandomForest.run(rdd, strategy, 1, "all", 42L, instr = None)
}
}
}
test("train with constant features") {
val instance = LabeledPoint(1.0, Vectors.dense(0.0, 0.0, 0.0)).toInstance
val data = Array.fill(5)(instance)
val rdd = sc.parallelize(data)
val strategy = new OldStrategy(
OldAlgo.Classification,
Gini,
maxDepth = 2,
numClasses = 2,
maxBins = 5,
categoricalFeaturesInfo = Map(0 -> 1, 1 -> 5))
val Array(tree) = RandomForest.run(rdd, strategy, 1, "all", 42L, instr = None)
assert(tree.rootNode.impurity === -1.0)
assert(tree.depth === 0)
assert(tree.rootNode.prediction === instance.label)
// Test with no categorical features
val strategy2 = new OldStrategy(
OldAlgo.Regression,
Variance,
maxDepth = 2,
maxBins = 5)
val Array(tree2) = RandomForest.run(rdd, strategy2, 1, "all", 42L, instr = None)
assert(tree2.rootNode.impurity === -1.0)
assert(tree2.depth === 0)
assert(tree2.rootNode.prediction === instance.label)
}
test("Multiclass classification with unordered categorical features: split calculations") {
val arr = OldDTSuite.generateCategoricalDataPoints().map(_.asML.toInstance)
assert(arr.length === 1000)
val rdd = sc.parallelize(arr)
val strategy = new OldStrategy(
OldAlgo.Classification,
Gini,
maxDepth = 2,
numClasses = 100,
maxBins = 100,
categoricalFeaturesInfo = Map(0 -> 3, 1 -> 3))
val metadata = DecisionTreeMetadata.buildMetadata(rdd, strategy)
assert(metadata.isUnordered(featureIndex = 0))
assert(metadata.isUnordered(featureIndex = 1))
val splits = RandomForest.findSplits(rdd, metadata, seed = 42)
assert(splits.length === 2)
assert(splits(0).length === 3)
assert(metadata.numSplits(0) === 3)
assert(metadata.numBins(0) === 3)
assert(metadata.numSplits(1) === 3)
assert(metadata.numBins(1) === 3)
// Expecting 2^2 - 1 = 3 splits per feature
def checkCategoricalSplit(s: Split, featureIndex: Int, leftCategories: Array[Double]): Unit = {
assert(s.featureIndex === featureIndex)
assert(s.isInstanceOf[CategoricalSplit])
val s0 = s.asInstanceOf[CategoricalSplit]
assert(s0.leftCategories === leftCategories)
assert(s0.numCategories === 3) // for this unit test
}
// Feature 0
checkCategoricalSplit(splits(0)(0), 0, Array(0.0))
checkCategoricalSplit(splits(0)(1), 0, Array(1.0))
checkCategoricalSplit(splits(0)(2), 0, Array(0.0, 1.0))
// Feature 1
checkCategoricalSplit(splits(1)(0), 1, Array(0.0))
checkCategoricalSplit(splits(1)(1), 1, Array(1.0))
checkCategoricalSplit(splits(1)(2), 1, Array(0.0, 1.0))
}
test("Multiclass classification with ordered categorical features: split calculations") {
val arr = OldDTSuite.generateCategoricalDataPointsForMulticlassForOrderedFeatures()
.map(_.asML.toInstance)
assert(arr.length === 3000)
val rdd = sc.parallelize(arr)
val strategy = new OldStrategy(OldAlgo.Classification, Gini, maxDepth = 2, numClasses = 100,
maxBins = 100, categoricalFeaturesInfo = Map(0 -> 10, 1 -> 10))
// 2^(10-1) - 1 > 100, so categorical features will be ordered
val metadata = DecisionTreeMetadata.buildMetadata(rdd, strategy)
assert(!metadata.isUnordered(featureIndex = 0))
assert(!metadata.isUnordered(featureIndex = 1))
val splits = RandomForest.findSplits(rdd, metadata, seed = 42)
assert(splits.length === 2)
// no splits pre-computed for ordered categorical features
assert(splits(0).length === 0)
}
/////////////////////////////////////////////////////////////////////////////
// Tests of other algorithm internals
/////////////////////////////////////////////////////////////////////////////
test("extract categories from a number for multiclass classification") {
val l = RandomForest.extractMultiClassCategories(13, 10)
assert(l.length === 3)
assert(Seq(3.0, 2.0, 0.0) === l)
}
test("Avoid aggregation on the last level") {
val arr = Array(
LabeledPoint(0.0, Vectors.dense(1.0, 0.0, 0.0)),
LabeledPoint(1.0, Vectors.dense(0.0, 1.0, 1.0)),
LabeledPoint(0.0, Vectors.dense(2.0, 0.0, 0.0)),
LabeledPoint(1.0, Vectors.dense(0.0, 2.0, 1.0)))
val input = sc.parallelize(arr.map(_.toInstance))
val strategy = new OldStrategy(algo = OldAlgo.Classification, impurity = Gini, maxDepth = 1,
numClasses = 2, categoricalFeaturesInfo = Map(0 -> 3))
val metadata = DecisionTreeMetadata.buildMetadata(input, strategy)
val splits = RandomForest.findSplits(input, metadata, seed = 42)
val bcSplits = input.sparkContext.broadcast(splits)
val treeInput = TreePoint.convertToTreeRDD(input, splits, metadata)
val baggedInput = BaggedPoint.convertToBaggedRDD(treeInput, 1.0, 1, withReplacement = false)
val topNode = LearningNode.emptyNode(nodeIndex = 1)
assert(topNode.isLeaf === false)
assert(topNode.stats === null)
val nodesForGroup = Map(0 -> Array(topNode))
val treeToNodeToIndexInfo = Map(0 -> Map(
topNode.id -> new RandomForest.NodeIndexInfo(0, None)
))
val nodeStack = new mutable.ListBuffer[(Int, LearningNode)]
RandomForest.findBestSplits(baggedInput, metadata, Map(0 -> topNode),
nodesForGroup, treeToNodeToIndexInfo, bcSplits, nodeStack)
bcSplits.destroy()
// don't enqueue leaf nodes into node queue
assert(nodeStack.isEmpty)
// set impurity and predict for topNode
assert(topNode.stats !== null)
assert(topNode.stats.impurity > 0.0)
// set impurity and predict for child nodes
assert(topNode.leftChild.get.toNode.prediction === 0.0)
assert(topNode.rightChild.get.toNode.prediction === 1.0)
assert(topNode.leftChild.get.stats.impurity === 0.0)
assert(topNode.rightChild.get.stats.impurity === 0.0)
}
test("Avoid aggregation if impurity is 0.0") {
val arr = Array(
LabeledPoint(0.0, Vectors.dense(1.0, 0.0, 0.0)),
LabeledPoint(1.0, Vectors.dense(0.0, 1.0, 1.0)),
LabeledPoint(0.0, Vectors.dense(2.0, 0.0, 0.0)),
LabeledPoint(1.0, Vectors.dense(0.0, 2.0, 1.0)))
val input = sc.parallelize(arr.map(_.toInstance))
val strategy = new OldStrategy(algo = OldAlgo.Classification, impurity = Gini, maxDepth = 5,
numClasses = 2, categoricalFeaturesInfo = Map(0 -> 3))
val metadata = DecisionTreeMetadata.buildMetadata(input, strategy)
val splits = RandomForest.findSplits(input, metadata, seed = 42)
val bcSplits = input.sparkContext.broadcast(splits)
val treeInput = TreePoint.convertToTreeRDD(input, splits, metadata)
val baggedInput = BaggedPoint.convertToBaggedRDD(treeInput, 1.0, 1, withReplacement = false)
val topNode = LearningNode.emptyNode(nodeIndex = 1)
assert(topNode.isLeaf === false)
assert(topNode.stats === null)
val nodesForGroup = Map(0 -> Array(topNode))
val treeToNodeToIndexInfo = Map(0 -> Map(
topNode.id -> new RandomForest.NodeIndexInfo(0, None)
))
val nodeStack = new mutable.ListBuffer[(Int, LearningNode)]
RandomForest.findBestSplits(baggedInput, metadata, Map(0 -> topNode),
nodesForGroup, treeToNodeToIndexInfo, bcSplits, nodeStack)
bcSplits.destroy()
// don't enqueue a node into node queue if its impurity is 0.0
assert(nodeStack.isEmpty)
// set impurity and predict for topNode
assert(topNode.stats !== null)
assert(topNode.stats.impurity > 0.0)
// set impurity and predict for child nodes
assert(topNode.leftChild.get.toNode.prediction === 0.0)
assert(topNode.rightChild.get.toNode.prediction === 1.0)
assert(topNode.leftChild.get.stats.impurity === 0.0)
assert(topNode.rightChild.get.stats.impurity === 0.0)
}
test("Use soft prediction for binary classification with ordered categorical features") {
// The following dataset is set up such that the best split is {1} vs. {0, 2}.
// If the hard prediction is used to order the categories, then {0} vs. {1, 2} is chosen.
val arr = Array(
LabeledPoint(0.0, Vectors.dense(0.0)),
LabeledPoint(0.0, Vectors.dense(0.0)),
LabeledPoint(0.0, Vectors.dense(0.0)),
LabeledPoint(1.0, Vectors.dense(0.0)),
LabeledPoint(0.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(2.0)),
LabeledPoint(0.0, Vectors.dense(2.0)),
LabeledPoint(0.0, Vectors.dense(2.0)),
LabeledPoint(1.0, Vectors.dense(2.0)))
val input = sc.parallelize(arr.map(_.toInstance))
// Must set maxBins s.t. the feature will be treated as an ordered categorical feature.
val strategy = new OldStrategy(algo = OldAlgo.Classification, impurity = Gini, maxDepth = 1,
numClasses = 2, categoricalFeaturesInfo = Map(0 -> 3), maxBins = 3)
val model = RandomForest.run(input, strategy, numTrees = 1, featureSubsetStrategy = "all",
seed = 42, instr = None, prune = false).head
model.rootNode match {
case n: InternalNode => n.split match {
case s: CategoricalSplit =>
assert(s.leftCategories === Array(1.0))
case _ => fail("model.rootNode.split was not a CategoricalSplit")
}
case _ => fail("model.rootNode was not an InternalNode")
}
}
test("Second level node building with vs. without groups") {
val arr = OldDTSuite.generateOrderedLabeledPoints().map(_.asML.toInstance)
assert(arr.length === 1000)
val rdd = sc.parallelize(arr)
// For tree with 1 group
val strategy1 =
new OldStrategy(OldAlgo.Classification, Entropy, 3, 2, 100, maxMemoryInMB = 1000)
// For tree with multiple groups
val strategy2 =
new OldStrategy(OldAlgo.Classification, Entropy, 3, 2, 100, maxMemoryInMB = 0)
val tree1 = RandomForest.run(rdd, strategy1, numTrees = 1, featureSubsetStrategy = "all",
seed = 42, instr = None).head
val tree2 = RandomForest.run(rdd, strategy2, numTrees = 1, featureSubsetStrategy = "all",
seed = 42, instr = None).head
def getChildren(rootNode: Node): Array[InternalNode] = rootNode match {
case n: InternalNode =>
assert(n.leftChild.isInstanceOf[InternalNode])
assert(n.rightChild.isInstanceOf[InternalNode])
Array(n.leftChild.asInstanceOf[InternalNode], n.rightChild.asInstanceOf[InternalNode])
case _ => fail("rootNode was not an InternalNode")
}
// Single group second level tree construction.
val children1 = getChildren(tree1.rootNode)
val children2 = getChildren(tree2.rootNode)
// Verify whether the splits obtained using single group and multiple group level
// construction strategies are the same.
for (i <- 0 until 2) {
assert(children1(i).gain > 0)
assert(children2(i).gain > 0)
assert(children1(i).split === children2(i).split)
assert(children1(i).impurity === children2(i).impurity)
assert(children1(i).impurityStats.stats === children2(i).impurityStats.stats)
assert(children1(i).leftChild.impurity === children2(i).leftChild.impurity)
assert(children1(i).rightChild.impurity === children2(i).rightChild.impurity)
assert(children1(i).prediction === children2(i).prediction)
}
}
def binaryClassificationTestWithContinuousFeaturesAndSubsampledFeatures(
strategy: OldStrategy): Unit = {
val numFeatures = 50
val arr = EnsembleTestHelper.generateOrderedLabeledPoints(numFeatures, 1000)
val rdd = sc.parallelize(arr).map(_.asML.toInstance)
    // Select feature subsets for the top nodes and verify them via the assertions below.
def checkFeatureSubsetStrategy(
numTrees: Int,
featureSubsetStrategy: String,
numFeaturesPerNode: Int): Unit = {
val seeds = Array(123, 5354, 230, 349867, 23987)
val maxMemoryUsage: Long = 128 * 1024L * 1024L
val metadata =
DecisionTreeMetadata.buildMetadata(rdd, strategy, numTrees, featureSubsetStrategy)
seeds.foreach { seed =>
val failString = s"Failed on test with:" +
s"numTrees=$numTrees, featureSubsetStrategy=$featureSubsetStrategy," +
s" numFeaturesPerNode=$numFeaturesPerNode, seed=$seed"
val nodeStack = new mutable.ListBuffer[(Int, LearningNode)]
val topNodes: Array[LearningNode] = new Array[LearningNode](numTrees)
Range(0, numTrees).foreach { treeIndex =>
topNodes(treeIndex) = LearningNode.emptyNode(nodeIndex = 1)
nodeStack.prepend((treeIndex, topNodes(treeIndex)))
}
val rng = new scala.util.Random(seed = seed)
val (nodesForGroup: Map[Int, Array[LearningNode]],
treeToNodeToIndexInfo: Map[Int, Map[Int, RandomForest.NodeIndexInfo]]) =
RandomForest.selectNodesToSplit(nodeStack, maxMemoryUsage, metadata, rng)
assert(nodesForGroup.size === numTrees, failString)
assert(nodesForGroup.values.forall(_.length == 1), failString) // 1 node per tree
if (numFeaturesPerNode == numFeatures) {
// featureSubset values should all be None
assert(treeToNodeToIndexInfo.values.forall(_.values.forall(_.featureSubset.isEmpty)),
failString)
} else {
// Check number of features.
assert(treeToNodeToIndexInfo.values.forall(_.values.forall(
_.featureSubset.get.length === numFeaturesPerNode)), failString)
}
}
}
checkFeatureSubsetStrategy(numTrees = 1, "auto", numFeatures)
checkFeatureSubsetStrategy(numTrees = 1, "all", numFeatures)
checkFeatureSubsetStrategy(numTrees = 1, "sqrt", math.sqrt(numFeatures).ceil.toInt)
checkFeatureSubsetStrategy(numTrees = 1, "log2",
(math.log(numFeatures) / math.log(2)).ceil.toInt)
checkFeatureSubsetStrategy(numTrees = 1, "onethird", (numFeatures / 3.0).ceil.toInt)
val realStrategies = Array(".1", ".10", "0.10", "0.1", "0.9", "1.0")
for (strategy <- realStrategies) {
val expected = (strategy.toDouble * numFeatures).ceil.toInt
checkFeatureSubsetStrategy(numTrees = 1, strategy, expected)
}
val integerStrategies = Array("1", "10", "100", "1000", "10000")
for (strategy <- integerStrategies) {
val expected = if (strategy.toInt < numFeatures) strategy.toInt else numFeatures
checkFeatureSubsetStrategy(numTrees = 1, strategy, expected)
}
val invalidStrategies = Array("-.1", "-.10", "-0.10", ".0", "0.0", "1.1", "0")
for (invalidStrategy <- invalidStrategies) {
intercept[IllegalArgumentException]{
val metadata =
DecisionTreeMetadata.buildMetadata(rdd, strategy, numTrees = 1, invalidStrategy)
}
}
checkFeatureSubsetStrategy(numTrees = 2, "all", numFeatures)
checkFeatureSubsetStrategy(numTrees = 2, "auto", math.sqrt(numFeatures).ceil.toInt)
checkFeatureSubsetStrategy(numTrees = 2, "sqrt", math.sqrt(numFeatures).ceil.toInt)
checkFeatureSubsetStrategy(numTrees = 2, "log2",
(math.log(numFeatures) / math.log(2)).ceil.toInt)
checkFeatureSubsetStrategy(numTrees = 2, "onethird", (numFeatures / 3.0).ceil.toInt)
for (strategy <- realStrategies) {
val expected = (strategy.toDouble * numFeatures).ceil.toInt
checkFeatureSubsetStrategy(numTrees = 2, strategy, expected)
}
for (strategy <- integerStrategies) {
val expected = if (strategy.toInt < numFeatures) strategy.toInt else numFeatures
checkFeatureSubsetStrategy(numTrees = 2, strategy, expected)
}
for (invalidStrategy <- invalidStrategies) {
intercept[IllegalArgumentException]{
val metadata =
DecisionTreeMetadata.buildMetadata(rdd, strategy, numTrees = 2, invalidStrategy)
}
}
}
test("Binary classification with continuous features: subsampling features") {
val categoricalFeaturesInfo = Map.empty[Int, Int]
val strategy = new OldStrategy(algo = OldAlgo.Classification, impurity = Gini, maxDepth = 2,
numClasses = 2, categoricalFeaturesInfo = categoricalFeaturesInfo)
binaryClassificationTestWithContinuousFeaturesAndSubsampledFeatures(strategy)
}
test("Binary classification with continuous features and node Id cache: subsampling features") {
val categoricalFeaturesInfo = Map.empty[Int, Int]
val strategy = new OldStrategy(algo = OldAlgo.Classification, impurity = Gini, maxDepth = 2,
numClasses = 2, categoricalFeaturesInfo = categoricalFeaturesInfo,
useNodeIdCache = true)
binaryClassificationTestWithContinuousFeaturesAndSubsampledFeatures(strategy)
}
test("computeFeatureImportance, featureImportances") {
/* Build tree for testing, with this structure:
grandParent
left2 parent
left right
*/
val leftImp = new GiniCalculator(Array(3.0, 2.0, 1.0), 6L)
val left = new LeafNode(0.0, leftImp.calculate(), leftImp)
val rightImp = new GiniCalculator(Array(1.0, 2.0, 5.0), 8L)
val right = new LeafNode(2.0, rightImp.calculate(), rightImp)
val parent = TreeTests.buildParentNode(left, right, new ContinuousSplit(0, 0.5))
val parentImp = parent.impurityStats
val left2Imp = new GiniCalculator(Array(1.0, 6.0, 1.0), 8L)
val left2 = new LeafNode(0.0, left2Imp.calculate(), left2Imp)
val grandParent = TreeTests.buildParentNode(left2, parent, new ContinuousSplit(1, 1.0))
val grandImp = grandParent.impurityStats
// Test feature importance computed at different subtrees.
def testNode(node: Node, expected: Map[Int, Double]): Unit = {
val map = new OpenHashMap[Int, Double]()
TreeEnsembleModel.computeFeatureImportance(node, map)
assert(mapToVec(map.toMap) ~== mapToVec(expected) relTol 0.01)
}
// Leaf node
testNode(left, Map.empty[Int, Double])
// Internal node with 2 leaf children
val feature0importance = parentImp.calculate() * parentImp.count -
(leftImp.calculate() * leftImp.count + rightImp.calculate() * rightImp.count)
testNode(parent, Map(0 -> feature0importance))
// Full tree
val feature1importance = grandImp.calculate() * grandImp.count -
(left2Imp.calculate() * left2Imp.count + parentImp.calculate() * parentImp.count)
testNode(grandParent, Map(0 -> feature0importance, 1 -> feature1importance))
// Forest consisting of (full tree) + (internal node with 2 leafs)
val trees = Array(parent, grandParent).map { root =>
new DecisionTreeClassificationModel(root, numFeatures = 2, numClasses = 3)
.asInstanceOf[DecisionTreeModel]
}
val importances: Vector = TreeEnsembleModel.featureImportances(trees, 2)
val tree2norm = feature0importance + feature1importance
val expected = Vectors.dense((1.0 + feature0importance / tree2norm) / 2.0,
(feature1importance / tree2norm) / 2.0)
assert(importances ~== expected relTol 0.01)
}
test("normalizeMapValues") {
val map = new OpenHashMap[Int, Double]()
map(0) = 1.0
map(2) = 2.0
TreeEnsembleModel.normalizeMapValues(map)
val expected = Map(0 -> 1.0 / 3.0, 2 -> 2.0 / 3.0)
assert(mapToVec(map.toMap) ~== mapToVec(expected) relTol 0.01)
}
///////////////////////////////////////////////////////////////////////////////
// Tests for pruning of redundant subtrees (generated by a split improving the
// impurity measure, but always leading to the same prediction).
///////////////////////////////////////////////////////////////////////////////
test("SPARK-3159 tree model redundancy - classification") {
// The following dataset is set up such that splitting over feature_1 for points having
    // feature_0 = 0 improves the impurity measure, even though the prediction will always be 0
// in both branches.
val arr = Array(
Instance(0.0, 1.0, Vectors.dense(0.0, 1.0)),
Instance(1.0, 1.0, Vectors.dense(0.0, 1.0)),
Instance(0.0, 1.0, Vectors.dense(0.0, 0.0)),
Instance(1.0, 1.0, Vectors.dense(1.0, 0.0)),
Instance(0.0, 1.0, Vectors.dense(1.0, 0.0)),
Instance(1.0, 1.0, Vectors.dense(1.0, 1.0))
)
val rdd = sc.parallelize(arr)
val numClasses = 2
val strategy = new OldStrategy(algo = OldAlgo.Classification, impurity = Gini, maxDepth = 4,
numClasses = numClasses, maxBins = 32)
val prunedTree = RandomForest.run(rdd, strategy, numTrees = 1, featureSubsetStrategy = "auto",
seed = 42, instr = None).head
val unprunedTree = RandomForest.run(rdd, strategy, numTrees = 1, featureSubsetStrategy = "auto",
seed = 42, instr = None, prune = false).head
assert(prunedTree.numNodes === 5)
assert(unprunedTree.numNodes === 7)
assert(RandomForestSuite.getSumLeafCounters(List(prunedTree.rootNode)) === arr.size)
}
test("SPARK-3159 tree model redundancy - regression") {
// The following dataset is set up such that splitting over feature_0 for points having
    // feature_1 = 1 improves the impurity measure, even though the prediction will always be 0.5
// in both branches.
val arr = Array(
Instance(0.0, 1.0, Vectors.dense(0.0, 1.0)),
Instance(1.0, 1.0, Vectors.dense(0.0, 1.0)),
Instance(0.0, 1.0, Vectors.dense(0.0, 0.0)),
Instance(0.0, 1.0, Vectors.dense(1.0, 0.0)),
Instance(1.0, 1.0, Vectors.dense(1.0, 1.0)),
Instance(0.0, 1.0, Vectors.dense(1.0, 1.0)),
Instance(0.5, 1.0, Vectors.dense(1.0, 1.0))
)
val rdd = sc.parallelize(arr)
val strategy = new OldStrategy(algo = OldAlgo.Regression, impurity = Variance, maxDepth = 4,
numClasses = 0, maxBins = 32)
val prunedTree = RandomForest.run(rdd, strategy, numTrees = 1, featureSubsetStrategy = "auto",
seed = 42, instr = None).head
val unprunedTree = RandomForest.run(rdd, strategy, numTrees = 1, featureSubsetStrategy = "auto",
seed = 42, instr = None, prune = false).head
assert(prunedTree.numNodes === 3)
assert(unprunedTree.numNodes === 5)
assert(RandomForestSuite.getSumLeafCounters(List(prunedTree.rootNode)) === arr.size)
}
test("weights at arbitrary scale") {
val arr = EnsembleTestHelper.generateOrderedLabeledPoints(3, 10)
val rddWithUnitWeights = sc.parallelize(arr.map(_.asML.toInstance))
val rddWithSmallWeights = rddWithUnitWeights.map { inst =>
Instance(inst.label, 0.001, inst.features)
}
val rddWithBigWeights = rddWithUnitWeights.map { inst =>
Instance(inst.label, 1000, inst.features)
}
val strategy = new OldStrategy(OldAlgo.Classification, Gini, 3, 2)
val unitWeightTrees = RandomForest.run(rddWithUnitWeights, strategy, 3, "all", 42L, None)
val smallWeightTrees = RandomForest.run(rddWithSmallWeights, strategy, 3, "all", 42L, None)
unitWeightTrees.zip(smallWeightTrees).foreach { case (unitTree, smallWeightTree) =>
TreeTests.checkEqual(unitTree, smallWeightTree)
}
val bigWeightTrees = RandomForest.run(rddWithBigWeights, strategy, 3, "all", 42L, None)
unitWeightTrees.zip(bigWeightTrees).foreach { case (unitTree, bigWeightTree) =>
TreeTests.checkEqual(unitTree, bigWeightTree)
}
}
test("minWeightFraction and minInstancesPerNode") {
val data = Array(
Instance(0.0, 1.0, Vectors.dense(0.0)),
Instance(0.0, 1.0, Vectors.dense(0.0)),
Instance(0.0, 1.0, Vectors.dense(0.0)),
Instance(0.0, 1.0, Vectors.dense(0.0)),
Instance(1.0, 0.1, Vectors.dense(1.0))
)
val rdd = sc.parallelize(data)
val strategy = new OldStrategy(OldAlgo.Classification, Gini, 3, 2,
minWeightFractionPerNode = 0.5)
val Array(tree1) = RandomForest.run(rdd, strategy, 1, "all", 42L, None)
assert(tree1.depth === 0)
strategy.minWeightFractionPerNode = 0.0
val Array(tree2) = RandomForest.run(rdd, strategy, 1, "all", 42L, None)
assert(tree2.depth === 1)
strategy.minInstancesPerNode = 2
val Array(tree3) = RandomForest.run(rdd, strategy, 1, "all", 42L, None)
assert(tree3.depth === 0)
strategy.minInstancesPerNode = 1
val Array(tree4) = RandomForest.run(rdd, strategy, 1, "all", 42L, None)
assert(tree4.depth === 1)
}
}
private object RandomForestSuite {
def mapToVec(map: Map[Int, Double]): Vector = {
val size = (map.keys.toSeq :+ 0).max + 1
val (indices, values) = map.toSeq.sortBy(_._1).unzip
Vectors.sparse(size, indices.toArray, values.toArray)
}
@tailrec
private def getSumLeafCounters(nodes: List[Node], acc: Long = 0): Long = {
if (nodes.isEmpty) {
acc
}
else {
nodes.head match {
case i: InternalNode => getSumLeafCounters(i.leftChild :: i.rightChild :: nodes.tail, acc)
case l: LeafNode => getSumLeafCounters(nodes.tail, acc + l.impurityStats.rawCount)
}
}
}
}
| shaneknapp/spark | mllib/src/test/scala/org/apache/spark/ml/tree/impl/RandomForestSuite.scala | Scala | apache-2.0 | 34,428 |
package com.twitter.util.tunable.linter
import com.twitter.app.App
import com.twitter.util.{Throw, Return}
import com.twitter.util.tunable.JsonTunableMapper
import java.nio.file.Paths
object ConfigurationLinter extends App {
def main(): Unit = {
var allSucceeded = true
println("\nValidating TunableMap configuration files...")
println("------------------------------------------------------------")
args.foreach { path =>
println(s"File: $path\n")
val url = Paths.get(path).toUri().toURL()
JsonTunableMapper().parse(url) match {
case Return(map) =>
println(s"Parsed as: ${map.contentString}")
case Throw(exc) =>
allSucceeded = false
println(s"Exception occurred: $exc")
}
println("------------------------------------------------------------")
}
if (allSucceeded) {
println("All configurations valid!")
} else {
exitOnError("One or more configurations failed to be parsed, see above for exceptions.")
}
}
}
| twitter/util | util-tunable/src/main/scala/com/twitter/util/tunable/linter/ConfigurationLinter.scala | Scala | apache-2.0 | 1,040 |
package scutil.lang
import minitest._
import scutil.core.implicits._
object EitherTest extends SimpleTestSuite {
test("Either should do successful ap") {
val func:Either[String,Int=>Int] = Either right (_ * 2)
val value:Either[String,Int] = Either right 7
assertEquals(
func ap value,
Right(14)
)
}
test("Either should abort function-first in ap") {
val func:Either[String,Int=>Int] = Either left "bug"
val value:Either[String,Int] = Either left "error"
assertEquals(
func ap value,
Left("bug")
)
}
test("Either should convert to Try") {
val func:Either[Throwable,Int] = Either right 1
assertEquals(
func.toTry,
scala.util.Success(1)
)
}
}
| ritschwumm/scutil | modules/core/src/test/scala/scutil/lang/EitherTest.scala | Scala | bsd-2-clause | 693 |
package ru.pavlenov.scala.utils
/**
* ⓭ + 44
 * What is it? by Pavlenov Semen 05.07.14.
* ${TITLE}
* ${URL}
*
* ${GIVEN}
* ${RETURN}
*/
object Comb {
/**
   * Compute the n-th Catalan number via the recurrence
   * C[n+1] = (2*(2*n+1)/(n+2)) * C[n]
*
* @param n
* @return
*/
def catalan(n: Int): BigInt = {
var c: BigInt = 1
for (i <- 1 until n) c = (c * (4 * i + 2)) / (i + 2)
c
}
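  // Worked example for the recurrence above (standard Catalan values, shown for illustration):
  //   catalan(3) == 5, catalan(5) == 42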
/**
   * Number of combinations without repetition (n distinct elements taken k at a time)
   * http://www.webmath.ru/web/prog21_1.php
   *
   * C(n,k) = n! / (k! * (n-k)!)
   *
   * Combinations of n elements taken k at a time are the selections of k of the given n elements
   * that differ in at least one element
   * (unlike arrangements, combinations do not take the order of the elements into account).
*
* @param n
* @param k
* @return
*/
def choose(n: Int, k: Int): BigInt = {
// Use symmetry of Pascal's triangle
val j = if (k > n - k) n - k else k
var result: BigInt = 1
for (i <- 1 to j) {
result *= (n - (j - i))
result /= i
}
result
}
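  // Worked example for the formula above (binomial coefficients, shown for illustration):
  //   choose(5, 2) == 10, choose(52, 5) == 2598960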
def chooseLog10(n: Int, k: Int): Double = factLog10(n) - ( factLog10(k) + factLog10(n-k) )
/**
   * Number of arrangements (partial permutations) without repetition of n elements taken k at a time
   * http://www.webmath.ru/web/prog27_1.php
   *
   * A(n,k) = n! / (n - k)!
   *
   * Arrangements of n elements taken k at a time (k < n) are the selections of k of the given
   * n elements that differ either in the elements themselves or in the order of the elements.
*
* @param n
* @param k
* @return
*/
def assign(n: Int, k: Int): BigInt = fact(n) / fact(n - k)
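  // Worked example for the formula above (ordered selections, shown for illustration):
  //   assign(5, 2) == 5! / 3! == 20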
/**
   * Counts the number of all possible subsets of a set of size n
   * example: {1, 2, 3} => 2^3=8, {∅, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}}
* https://math.stackexchange.com/questions/236659/number-of-subsets-of-a-set-having-r-elements/236671#236671?newreg=053c78da5ac04ea5bb19a6cb5682ae19
*
* @param n
* @return
*/
def countSubset(n: Int): BigInt = BigInt(2).pow(n)
/**
   * Compute the factorial of a number
*
* @param n
* @return
*/
def fact(n: Int): BigInt = (1 to n).map(BigInt.int2bigInt).product
def factLog10(n: Int): Double = (1 to n).map(math.log10(_)).sum
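  // Worked example (shown for illustration): fact(5) == 120 and factLog10(5) == log10(120) ≈ 2.079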
/**
   * Swap the indices and values of a 1-based permutation array
* @param a
* @return
*/
def inversePermutation(a: Array[Int]) = {
val res = new Array[Int](a.length)
for (i <- 0 until a.length) res(a(i) - 1) = i + 1
res
}
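  // Worked example (shown for illustration): inversePermutation(Array(3, 1, 2)) == Array(2, 3, 1),
  // i.e. position j of the result holds the 1-based index at which value j occurs in the input.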
/**
   * Permute the values of one array according to the values of the second, used as 1-based indices
* @param sourceValue
* @param sourceIndex
* @return
*/
def applyPermutation(sourceValue: Array[Int], sourceIndex: Array[Int]) = {
val res = new Array[Int](sourceIndex.length)
var pos = 0
for (i <- sourceIndex) {
res(pos) = sourceValue(i-1)
pos += 1
}
res
}
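  // Worked example (shown for illustration): applyPermutation(Array(10, 20, 30), Array(3, 1, 2)) == Array(30, 10, 20),
  // i.e. sourceIndex holds 1-based positions into sourceValue.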
def reverse(a: Array[Int], i: Int, j: Int) = a.take(i) ++ a.drop(i).take(j-i).reverse ++ a.drop(j)
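  // Worked example (shown for illustration): reverse(Array(1, 2, 3, 4, 5), 1, 4) == Array(1, 4, 3, 2, 5),
  // i.e. the half-open slice [i, j) is reversed.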
def reversalDistance(a: Array[Int], b: Array[Int]) = {
val b1 = inversePermutation(b)
var a1 = applyPermutation(b1, a)
val l = a.length
var dist = 0
for (j <- l to 1 by -1; i <- 0 until l) if (a1(i) == j && (j - i) > 1) {
a1 = reverse(a1, i, j)
// println(a1.mkString(" "))
println(i+1 + " " + j)
dist += 1
}
dist
}
}
| laser13/rosalind | src/scala/ru/pavlenov/scala/utils/Comb.scala | Scala | apache-2.0 | 4,091 |
package io.pivotal.gemfire.spark.connector.internal
import java.net.InetAddress
import com.gemstone.gemfire.cache.client.{ClientCache, ClientCacheFactory, ClientRegionShortcut}
import com.gemstone.gemfire.cache.execute.{FunctionException, FunctionService}
import com.gemstone.gemfire.cache.query.Query
import com.gemstone.gemfire.cache.{Region, RegionService}
import com.gemstone.gemfire.internal.cache.execute.InternalExecution
import io.pivotal.gemfire.spark.connector.internal.oql.QueryResultCollector
import io.pivotal.gemfire.spark.connector.internal.rdd.GemFireRDDPartition
import org.apache.spark.{SparkEnv, Logging}
import io.pivotal.gemfire.spark.connector.GemFireConnection
import io.pivotal.gemfire.spark.connector.internal.gemfirefunctions._
import java.util.{Set => JSet, List => JList }
/**
* Default GemFireConnection implementation. The instance of this should be
* created by DefaultGemFireConnectionFactory
* @param locators pairs of host/port of locators
* @param gemFireProps The initial gemfire properties to be used.
*/
private[connector] class DefaultGemFireConnection (
locators: Seq[(String, Int)], gemFireProps: Map[String, String] = Map.empty)
extends GemFireConnection with Logging {
private val clientCache = initClientCache()
/** Register GemFire functions to the GemFire cluster */
FunctionService.registerFunction(RetrieveRegionMetadataFunction.getInstance())
FunctionService.registerFunction(RetrieveRegionFunction.getInstance())
private def initClientCache() : ClientCache = {
try {
val ccf = getClientCacheFactory
ccf.create()
} catch {
case e: Exception =>
logError(s"""Failed to init ClientCache, locators=${locators.mkString(",")}, Error: $e""")
throw new RuntimeException(e)
}
}
private def getClientCacheFactory: ClientCacheFactory = {
import io.pivotal.gemfire.spark.connector.map2Properties
val ccf = new ClientCacheFactory(gemFireProps)
ccf.setPoolReadTimeout(30000)
val servers = LocatorHelper.getAllGemFireServers(locators)
if (servers.isDefined && servers.get.size > 0) {
val sparkIp = System.getenv("SPARK_LOCAL_IP")
val hostName = if (sparkIp != null) InetAddress.getByName(sparkIp).getCanonicalHostName
else InetAddress.getLocalHost.getCanonicalHostName
val executorId = SparkEnv.get.executorId
val pickedServers = LocatorHelper.pickPreferredGemFireServers(servers.get, hostName, executorId)
      logInfo(s"""Init ClientCache: servers=${pickedServers.mkString(",")}, host=$hostName executor=$executorId props=$gemFireProps""")
      logDebug(s"""Init ClientCache: all-servers=${servers.get.mkString(",")}""")
pickedServers.foreach{ case (host, port) => ccf.addPoolServer(host, port) }
} else {
logInfo(s"""Init ClientCache: locators=${locators.mkString(",")}, props=$gemFireProps""")
locators.foreach { case (host, port) => ccf.addPoolLocator(host, port) }
}
ccf
}
/** close the clientCache */
override def close(): Unit =
if (! clientCache.isClosed) clientCache.close()
/** ----------------------------------------- */
/** implementation of GemFireConnection trait */
/** ----------------------------------------- */
override def getQuery(queryString: String): Query =
clientCache.asInstanceOf[RegionService].getQueryService.newQuery(queryString)
override def validateRegion[K, V](regionPath: String): Unit = {
val md = getRegionMetadata[K, V](regionPath)
if (! md.isDefined) throw new RuntimeException(s"The region named $regionPath was not found")
}
def getRegionMetadata[K, V](regionPath: String): Option[RegionMetadata] = {
import scala.collection.JavaConversions.setAsJavaSet
val region = getRegionProxy[K, V](regionPath)
val set0: JSet[Integer] = Set[Integer](0)
val exec = FunctionService.onRegion(region).asInstanceOf[InternalExecution].withBucketFilter(set0)
exec.setWaitOnExceptionFlag(true)
try {
val collector = exec.execute(RetrieveRegionMetadataFunction.ID)
val r = collector.getResult.asInstanceOf[JList[RegionMetadata]]
logDebug(r.get(0).toString)
Some(r.get(0))
} catch {
case e: FunctionException =>
if (e.getMessage.contains(s"The region named /$regionPath was not found")) None
else throw e
}
}
def getRegionProxy[K, V](regionPath: String): Region[K, V] = {
val region1: Region[K, V] = clientCache.getRegion(regionPath).asInstanceOf[Region[K, V]]
if (region1 != null) region1
else DefaultGemFireConnection.regionLock.synchronized {
val region2 = clientCache.getRegion(regionPath).asInstanceOf[Region[K, V]]
if (region2 != null) region2
else clientCache.createClientRegionFactory[K, V](ClientRegionShortcut.PROXY).create(regionPath)
}
}
override def getRegionData[K, V](regionPath: String, whereClause: Option[String], split: GemFireRDDPartition): Iterator[(K, V)] = {
val region = getRegionProxy[K, V](regionPath)
val desc = s"""RDD($regionPath, "${whereClause.getOrElse("")}", ${split.index})"""
val args : Array[String] = Array[String](whereClause.getOrElse(""), desc)
val collector = new StructStreamingResultCollector(desc)
// RetrieveRegionResultCollector[(K, V)]
import scala.collection.JavaConversions.setAsJavaSet
val exec = FunctionService.onRegion(region).withArgs(args).withCollector(collector).asInstanceOf[InternalExecution]
.withBucketFilter(split.bucketSet.map(Integer.valueOf))
exec.setWaitOnExceptionFlag(true)
exec.execute(RetrieveRegionFunction.ID)
collector.getResult.map{objs: Array[Object] => (objs(0).asInstanceOf[K], objs(1).asInstanceOf[V])}
}
override def executeQuery(regionPath: String, bucketSet: Set[Int], queryString: String) = {
import scala.collection.JavaConversions.setAsJavaSet
FunctionService.registerFunction(QueryFunction.getInstance())
val collector = new QueryResultCollector
val region = getRegionProxy(regionPath)
val args: Array[String] = Array[String](queryString, bucketSet.toString)
val exec = FunctionService.onRegion(region).withCollector(collector).asInstanceOf[InternalExecution]
.withBucketFilter(bucketSet.map(Integer.valueOf))
.withArgs(args)
exec.execute(QueryFunction.ID)
collector.getResult
}
}
private[connector] object DefaultGemFireConnection {
/** a lock object only used by getRegionProxy...() */
private val regionLock = new Object
}
/** The purpose of this class is making unit test DefaultGemFireConnectionManager easier */
class DefaultGemFireConnectionFactory {
def newConnection(locators: Seq[(String, Int)], gemFireProps: Map[String, String] = Map.empty) =
new DefaultGemFireConnection(locators, gemFireProps)
}
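// Example usage sketch (host, port and region name below are illustrative placeholders):
//   val conn = new DefaultGemFireConnectionFactory().newConnection(Seq(("localhost", 10334)))
//   conn.validateRegion[String, String]("exampleRegion")
//   conn.close()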
| ysung-pivotal/incubator-geode | gemfire-spark-connector/gemfire-spark-connector/src/main/scala/io/pivotal/gemfire/spark/connector/internal/DefaultGemFireConnection.scala | Scala | apache-2.0 | 6,813 |
package twitter4s.response
import http.client.request.{CompletionEvaluation, HttpRequest}
import http.client.response.HttpResponse
import play.api.libs.json.JsSuccess
/** Keeps scrolling so long as the next_cursor is greater than zero.
*/
object TwitterEmptyNextCursorCompletionEvaluation extends CompletionEvaluation {
override def apply(request: HttpRequest, response: HttpResponse): Boolean = {
    (response.json \ "next_cursor").validate[Long] match {
case s: JsSuccess[Long] if s.get > 0L ⇒ false
case _ ⇒ true
}
}
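  // For illustration: a response whose JSON contains "next_cursor": 1234 yields false (keep
  // paginating), while "next_cursor": 0 or a missing field yields true (pagination complete).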
}
| SocialOrra/social4s | twitter4s/src/main/scala/twitter4s/response/TwitterEmptyNextCursorCompletionEvaluation.scala | Scala | apache-2.0 | 581 |
/*
* Seldon -- open source prediction engine
* =======================================
* Copyright 2011-2015 Seldon Technologies Ltd and Rummble Ltd (http://www.seldon.io/)
*
**********************************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************************************
*/
package io.seldon.spark.topics
import org.apache.log4j.Logger
import org.apache.log4j.Level
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.joda.time.format.DateTimeFormat
import java.sql.ResultSet
import scala.collection.mutable.ListBuffer
import io.seldon.spark.SparkUtils
import org.apache.spark.mllib.feature.HashingTF
import org.apache.spark.mllib.feature.IDF
import org.joda.time.Duration
import org.joda.time.LocalDateTime
import org.apache.spark.mllib.linalg.SparseVector
import org.apache.spark.mllib.linalg.Vectors
import io.seldon.spark.rdd.FileUtils
import io.seldon.spark.rdd.DataSourceMode
import scala.collection.mutable.ListMap
case class NextActionConfig(
local : Boolean = false,
client : String = "",
inputPath : String = "",
outputPath : String = "",
awsKey : String = "",
jdbc : String = "",
tagAttr : String = "tag",
startDay : Int = 0,
days : Int = 1,
awsSecret : String = "",
maxNumActionsPerUser : Int = 500,
actionNumToStart : Int = 2,
maxUserHistoryFeatures : Int = 20,
minTermDocFreq : Int = 100)
class CreateNextItemFeatures(private val sc : SparkContext,config : NextActionConfig) {
def parseJsonActions(path : String) = {
val rdd = sc.textFile(path).flatMap{line =>
import org.json4s._
import org.json4s.jackson.JsonMethods._
implicit val formats = DefaultFormats
val formatter = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'");
val json = parse(line)
      val user = (json \ "userid").extract[Int]
      val item = (json \ "itemid").extract[Int]
      val value = (json \ "value").extract[Float]
      val dateUtc = (json \ "timestamp_utc").extract[String]
if (value > 3)
{
val date1 = org.joda.time.format.ISODateTimeFormat.dateTimeParser.withZoneUTC.parseDateTime(dateUtc)
Seq((item,(user,date1.getMillis())))
}
else
None
}
rdd
}
def getItemTagsFromDb(jdbc : String,attr : String) =
{
//val sql = "select i.item_id,i.client_item_id,unix_timestamp(first_op),tags.value as tags from items i join item_map_"+table+" tags on (i.item_id=tags.item_id and tags.attr_id="+tagAttrId.toString()+") where i.item_id>? and i.item_id<?"
val sql = "select * from (SELECT i.item_id,i.client_item_id,unix_timestamp(first_op),CASE WHEN imi.value IS NOT NULL THEN cast(imi.value as char) WHEN imd.value IS NOT NULL THEN cast(imd.value as char) WHEN imb.value IS NOT NULL THEN cast(imb.value as char) WHEN imboo.value IS NOT NULL THEN cast(imboo.value as char) WHEN imt.value IS NOT NULL THEN imt.value WHEN imdt.value IS NOT NULL THEN cast(imdt.value as char) WHEN imv.value IS NOT NULL THEN imv.value WHEN e.value_name IS NOT NULL THEN e.value_name END" +
" tags FROM items i INNER JOIN item_attr a ON a.name in ('"+attr+"') and i.type=a.item_type LEFT JOIN item_map_int imi ON i.item_id=imi.item_id AND a.attr_id=imi.attr_id LEFT JOIN item_map_double imd ON i.item_id=imd.item_id AND a.attr_id=imd.attr_id LEFT JOIN item_map_enum ime ON i.item_id=ime.item_id AND a.attr_id=ime.attr_id LEFT JOIN item_map_bigint imb ON i.item_id=imb.item_id AND a.attr_id=imb.attr_id LEFT JOIN item_map_boolean imboo ON i.item_id=imboo.item_id AND a.attr_id=imboo.attr_id LEFT JOIN item_map_text imt ON i.item_id=imt.item_id AND a.attr_id=imt.attr_id LEFT JOIN item_map_datetime imdt ON i.item_id=imdt.item_id AND a.attr_id=imdt.attr_id LEFT JOIN item_map_varchar imv ON i.item_id=imv.item_id AND a.attr_id=imv.attr_id LEFT JOIN item_attr_enum e ON ime.attr_id =e.attr_id AND ime.value_id=e.value_id " +
" where i.item_id>? and i.item_id<? order by imv.pos) t where not t.tags is null"
val rdd = new org.apache.spark.rdd.JdbcRDD(
sc,
() => {
Class.forName("com.mysql.jdbc.Driver")
java.sql.DriverManager.getConnection(jdbc)
},
sql,
0, 999999999, 1,
(row : ResultSet) => (row.getInt("item_id"),row.getString("tags"))
)
rdd
}
def run()
{
val actionsGlob = config.inputPath + "/" + config.client+"/actions/"+SparkUtils.getS3UnixGlob(config.startDay,config.days)+"/*"
println("loading actions from "+actionsGlob)
val rddActions = parseJsonActions(actionsGlob)
val rddItems = getItemTagsFromDb(config.jdbc, config.tagAttr)
val rddCombined = rddActions.join(rddItems)
val countCombined = rddCombined.count()
println("actions with tags count is "+countCombined.toString())
val maxNumActions = config.maxNumActionsPerUser
val actionNumToStart = config.actionNumToStart
println("max actions "+maxNumActions.toString()+" start at "+actionNumToStart.toString())
    // create features for the current item and the user's history of viewed items
val rddFeatures = rddCombined.map{ case (item,((user,time),tags)) => (user,(item,time,tags))}.groupByKey()
.flatMapValues{v =>
val buf = new ListBuffer[String]()
        val sorted = v.toArray.sortBy(_._2) // _._2 is the timestamp
var userHistory = ListBuffer[String]()
var c = 0
for ((item,t,tags) <- sorted)
{
if (c <= maxNumActions)
{
if (c >= actionNumToStart)
{
var line = new StringBuilder()
for(tag <- tags.split(","))
{
            val tagToken = tag.trim().toLowerCase().replaceAll("[ :;'\",]", "_")
// create a set of item tag features for each tag in current item
if (tagToken.size > 0)
{
line ++= " i_"
line ++= tagToken
}
}
          // create a set of user tag features for each tag in user history
for (tag <- userHistory)
{
if (tag.size > 0)
{
line ++= " u_"
line ++= tag
}
}
buf.append(line.toString().trim())
}
// add all tags from current item to user history
userHistory.clear()
for (tag <- tags.split(","))
{
            val tagToken = tag.trim().toLowerCase().replaceAll("[ :;'\",]", "_")
if (tagToken.size > 0)
userHistory.append(tagToken)
}
c += 1
}
}
buf
}
val countFeatures = rddFeatures.count()
println("Count of rddFeatures "+countFeatures.toString())
val featuresIter = rddFeatures.map(_._2.split(" ").toSeq)
val hashingTF = new HashingTF()
val tf = hashingTF.transform(featuresIter)
val idf = new IDF(config.minTermDocFreq).fit(tf)
val tfidf = idf.transform(tf)
val featuresWithTfidf = rddFeatures.zip(tfidf)
val maxUserHistoryFeatures = config.maxUserHistoryFeatures
    // build the output feature lines (item "|i" and user "|u" namespaces), keeping up to
    // maxUserHistoryFeatures tags per namespace ordered by tf-idf
val rddFeatureIds = featuresWithTfidf.map{case ((user,features),tfidfVec) =>
var line = new StringBuilder()
val hashingTF = new HashingTF()
var fset = Set[String]()
val utfidfMap = scala.collection.mutable.Map[String,Double]()
val itfidfMap = scala.collection.mutable.Map[String,Double]()
for (feature <- features.split(" "))
{
if (!fset.contains(feature))
{
val id = hashingTF.indexOf(feature)
val tfidf = tfidfVec(id)
if (feature.startsWith("i_"))
{
itfidfMap.put(feature, tfidf)
}
else
{
utfidfMap.put(feature, tfidf)
}
fset += feature
}
}
line ++= "1"
val itfidfSorted = ListMap(itfidfMap.toSeq.sortWith(_._2 < _._2):_*)
var c = 0
line ++= " |i "
for((feature,tfidf) <- itfidfSorted)
{
if (c <= maxUserHistoryFeatures)
{
line ++= " "+feature+":"+tfidf
c += 1
}
}
val tfidfSorted = ListMap(utfidfMap.toSeq.sortWith(_._2 < _._2):_*)
c = 0
line ++= " |u "
for((feature,tfidf) <- tfidfSorted)
{
if (c <= maxUserHistoryFeatures)
{
line ++= " "+feature+":"+tfidf
c += 1
}
}
line.toString().trim()
}
val outPath = config.outputPath + "/" + config.client + "/features/"+config.startDay
rddFeatureIds.saveAsTextFile(outPath)
val rddTags = rddFeatures.flatMap{case (user,features) => features.split(" ")}.distinct
val bcIDF = rddTags.context.broadcast(idf)
val tagIDFs = rddTags.map { tag =>
val idf = bcIDF.value
val hashingTF = new HashingTF()
val id = hashingTF.indexOf(tag)
val vec = Vectors.sparse(id+1, Seq((id,1.0)))
val idfVec = idf.transform(vec)
(tag,idfVec(id))
}.map{case (tag,idf) => tag+","+idf}
val idfOutPath = config.outputPath + "/" + config.client + "/idf/"+config.startDay
FileUtils.outputModelToFile(tagIDFs, idfOutPath, DataSourceMode.fromString(idfOutPath), "idf.csv")
}
}
object CreateNextItemFeatures
{
def main(args: Array[String])
{
Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)
val parser = new scopt.OptionParser[NextActionConfig]("CreateActionFeatures") {
head("CreateActionFeatures", "1.x")
opt[Unit]('l', "local") action { (_, c) => c.copy(local = true) } text("debug mode - use local Master")
opt[String]('c', "client") required() valueName("<client>") action { (x, c) => c.copy(client = x) } text("client name (will be used as db and folder suffix)")
opt[String]('i', "input-path") required() valueName("path url") action { (x, c) => c.copy(inputPath = x) } text("path prefix for input")
opt[String]('o', "output-path") required() valueName("path url") action { (x, c) => c.copy(outputPath = x) } text("path prefix for output")
opt[String]('a', "awskey") valueName("aws access key") action { (x, c) => c.copy(awsKey = x) } text("aws key")
opt[String]('s', "awssecret") valueName("aws secret") action { (x, c) => c.copy(awsSecret = x) } text("aws secret")
opt[String]('j', "jdbc") required() valueName("<JDBC URL>") action { (x, c) => c.copy(jdbc = x) } text("jdbc url (to get dimension for all items)")
opt[String]('t', "tagAttr") required() action { (x, c) =>c.copy(tagAttr = x) } text("tag attribute in database")
opt[Int]('r', "numdays") required() action { (x, c) =>c.copy(days = x) } text("number of days in past to get actions for")
opt[Int]("start-day") required() action { (x, c) =>c.copy(startDay = x) } text("start day in unix time")
opt[Int]("maxNumActionsPerUser") action { (x, c) =>c.copy(maxNumActionsPerUser = x) } text("max number of actions a user must have")
opt[Int]("maxUserHistoryFeatures") action { (x, c) =>c.copy(maxUserHistoryFeatures = x) } text("max number of features from user history to include")
opt[Int]("actionNumToStart") action { (x, c) =>c.copy(actionNumToStart = x) } text("wait until this number of actions for a user before creating features")
}
parser.parse(args, NextActionConfig()) map { config =>
val conf = new SparkConf()
.setAppName("CreateActionFeatures")
if (config.local)
conf.setMaster("local")
.set("spark.executor.memory", "13g")
val sc = new SparkContext(conf)
try
{
sc.hadoopConfiguration.set("fs.s3.impl", "org.apache.hadoop.fs.s3native.NativeS3FileSystem")
if (config.awsKey.nonEmpty && config.awsSecret.nonEmpty)
{
sc.hadoopConfiguration.set("fs.s3n.awsAccessKeyId", config.awsKey)
sc.hadoopConfiguration.set("fs.s3n.awsSecretAccessKey", config.awsSecret)
}
val cByd = new CreateNextItemFeatures(sc,config)
cByd.run()
}
finally
{
println("Shutting down job")
sc.stop()
}
} getOrElse
{
}
// set up environment
}
} | Seichis/seldon-server | offline-jobs/spark/src/main/scala/io/seldon/spark/topics/CreateNextItemFeatures.scala | Scala | apache-2.0 | 13,276 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.datacompaction
import java.sql.Date
import org.apache.spark.sql.Row
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
class MajorCompactionWithMeasureSortColumns extends QueryTest with BeforeAndAfterAll {
val csvFilePath = s"$resourcesPath/compaction/nodictionary_compaction.csv"
val backupDateFormat = CarbonProperties.getInstance()
.getProperty(CarbonCommonConstants.CARBON_DATE_FORMAT,
CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT)
override def beforeAll: Unit = {
sql("drop table if exists store")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT,
CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT)
}
override def afterAll {
sql("drop table if exists store")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, backupDateFormat)
}
test("test major compaction with measure sort columns") {
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_MAJOR_COMPACTION_SIZE, "1024")
val createStoreTableSql =
s"""
| CREATE TABLE IF NOT EXISTS store(
| code1 STRING,
| code2 STRING,
| country_code STRING,
| category_id INTEGER,
| product_id LONG,
| date DATE,
| count1 LONG,
| count2 LONG,
| count3 LONG
| )
| STORED AS carbondata
| TBLPROPERTIES(
| 'SORT_COLUMNS'='code1, code2, country_code, date, category_id, product_id',
| 'SORT_SCOPE'='LOCAL_SORT',
| 'CACHE_LEVEL'='BLOCKLET'
| )
""".stripMargin
sql(createStoreTableSql)
sql(
s"""
| LOAD DATA LOCAL INPATH '$csvFilePath'
| INTO TABLE store
| OPTIONS('HEADER'='true', 'COMPLEX_DELIMITER_LEVEL_1'='#')
""".stripMargin).collect()
sql(
s"""
| LOAD DATA LOCAL INPATH '$csvFilePath'
| INTO TABLE store
| OPTIONS('HEADER'='true', 'COMPLEX_DELIMITER_LEVEL_1'='#')
""".stripMargin).collect()
val csvRows = sqlContext.sparkSession.read.option("header", "true")
.csv(csvFilePath).orderBy("code1")
sql("ALTER TABLE store COMPACT 'MAJOR'")
val answer = sql("select * from store ").orderBy("code1")
assert(csvRows.count() == answer.distinct().count())
if (!sqlContext.sparkContext.version.startsWith("3.1")) {
checkAnswer(answer.distinct(),
Seq(Row("51job, Inc.", "21695-534", "FR", 610, 60, Date.valueOf("2017-11-27"), 4483, 0,
510), Row("Intercontinental Exchange Inc.", "22100-020", "TH", 87, 4,
Date.valueOf("2017-10-16"), 2, 647, 69630)))
} else {
checkAnswer(answer.distinct(),
Seq(Row("Intercontinental Exchange Inc.", "22100-020", "TH", 87, 4,
Date.valueOf("2017-10-16"), 2, 647, 69630), Row("51job, Inc.", "21695-534", "FR", 610,
60, Date.valueOf("2017-11-27"), 4483, 0, 510)))
}
sql("drop table store")
}
}
| zzcclp/carbondata | integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionWithMeasureSortColumns.scala | Scala | apache-2.0 | 4,010 |
package gitbucket.core.controller
import java.net.URI
import com.nimbusds.oauth2.sdk.id.State
import com.nimbusds.openid.connect.sdk.Nonce
import gitbucket.core.helper.xml
import gitbucket.core.model.Account
import gitbucket.core.service._
import gitbucket.core.util.Implicits._
import gitbucket.core.util.SyntaxSugars._
import gitbucket.core.util._
import org.scalatra.Ok
import org.scalatra.forms._
class IndexController
extends IndexControllerBase
with RepositoryService
with ActivityService
with AccountService
with RepositorySearchService
with IssuesService
with LabelsService
with MilestonesService
with PrioritiesService
with UsersAuthenticator
with ReferrerAuthenticator
with AccessTokenService
with AccountFederationService
with OpenIDConnectService
trait IndexControllerBase extends ControllerBase {
self: RepositoryService
with ActivityService
with AccountService
with RepositorySearchService
with UsersAuthenticator
with ReferrerAuthenticator
with AccessTokenService
with AccountFederationService
with OpenIDConnectService =>
case class SignInForm(userName: String, password: String, hash: Option[String])
val signinForm = mapping(
"userName" -> trim(label("Username", text(required))),
"password" -> trim(label("Password", text(required))),
"hash" -> trim(optional(text()))
)(SignInForm.apply)
// val searchForm = mapping(
// "query" -> trim(text(required)),
// "owner" -> trim(text(required)),
// "repository" -> trim(text(required))
// )(SearchForm.apply)
//
// case class SearchForm(query: String, owner: String, repository: String)
case class OidcContext(state: State, nonce: Nonce, redirectBackURI: String)
get("/") {
context.loginAccount
.map { account =>
val visibleOwnerSet: Set[String] = Set(account.userName) ++ getGroupsByUserName(account.userName)
gitbucket.core.html.index(
getRecentActivitiesByOwners(visibleOwnerSet),
getVisibleRepositories(Some(account), withoutPhysicalInfo = true),
showBannerToCreatePersonalAccessToken = hasAccountFederation(account.userName) && !hasAccessToken(
account.userName
)
)
}
.getOrElse {
gitbucket.core.html.index(
getRecentActivities(),
getVisibleRepositories(None, withoutPhysicalInfo = true),
showBannerToCreatePersonalAccessToken = false
)
}
}
get("/signin") {
val redirect = params.get("redirect")
if (redirect.isDefined && redirect.get.startsWith("/")) {
flash += Keys.Flash.Redirect -> redirect.get
}
gitbucket.core.html.signin(flash.get("userName"), flash.get("password"), flash.get("error"))
}
post("/signin", signinForm) { form =>
authenticate(context.settings, form.userName, form.password) match {
case Some(account) =>
flash.get(Keys.Flash.Redirect) match {
case Some(redirectUrl: String) => signin(account, redirectUrl + form.hash.getOrElse(""))
case _ => signin(account)
}
case None =>
flash += "userName" -> form.userName
flash += "password" -> form.password
flash += "error" -> "Sorry, your Username and/or Password is incorrect. Please try again."
redirect("/signin")
}
}
/**
* Initiate an OpenID Connect authentication request.
*/
post("/signin/oidc") {
context.settings.oidc.map { oidc =>
val redirectURI = new URI(s"$baseUrl/signin/oidc")
val authenticationRequest = createOIDCAuthenticationRequest(oidc.issuer, oidc.clientID, redirectURI)
val redirectBackURI = flash.get(Keys.Flash.Redirect) match {
case Some(redirectBackURI: String) => redirectBackURI + params.getOrElse("hash", "")
case _ => "/"
}
session.setAttribute(
Keys.Session.OidcContext,
OidcContext(authenticationRequest.getState, authenticationRequest.getNonce, redirectBackURI)
)
redirect(authenticationRequest.toURI.toString)
} getOrElse {
NotFound()
}
}
/**
* Handle an OpenID Connect authentication response.
*/
get("/signin/oidc") {
context.settings.oidc.map { oidc =>
val redirectURI = new URI(s"$baseUrl/signin/oidc")
session.get(Keys.Session.OidcContext) match {
case Some(context: OidcContext) =>
authenticate(params, redirectURI, context.state, context.nonce, oidc) map { account =>
signin(account, context.redirectBackURI)
} orElse {
flash += "error" -> "Sorry, authentication failed. Please try again."
session.invalidate()
redirect("/signin")
}
case _ =>
flash += "error" -> "Sorry, something wrong. Please try again."
session.invalidate()
redirect("/signin")
}
} getOrElse {
NotFound()
}
}
get("/signout") {
session.invalidate
redirect("/")
}
get("/activities.atom") {
contentType = "application/atom+xml; type=feed"
xml.feed(getRecentActivities())
}
post("/sidebar-collapse") {
if (params("collapse") == "true") {
session.setAttribute("sidebar-collapse", "true")
} else {
session.setAttribute("sidebar-collapse", null)
}
Ok()
}
/**
* Set account information into HttpSession and redirect.
*/
private def signin(account: Account, redirectUrl: String = "/") = {
session.setAttribute(Keys.Session.LoginAccount, account)
updateLastLoginDate(account.userName)
if (LDAPUtil.isDummyMailAddress(account)) {
redirect("/" + account.userName + "/_edit")
}
if (redirectUrl.stripSuffix("/") == request.getContextPath) {
redirect("/")
} else {
redirect(redirectUrl)
}
}
/**
* JSON API for collaborator completion.
*/
get("/_user/proposals")(usersOnly {
contentType = formats("json")
val user = params("user").toBoolean
val group = params("group").toBoolean
org.json4s.jackson.Serialization.write(
Map(
"options" -> (
getAllUsers(false)
.withFilter { t =>
(user, group) match {
case (true, true) => true
case (true, false) => !t.isGroupAccount
case (false, true) => t.isGroupAccount
case (false, false) => false
}
}
.map { t =>
Map(
"label" -> s"<b>@${StringUtil.escapeHtml(t.userName)}</b> ${StringUtil.escapeHtml(t.fullName)}",
"value" -> t.userName
)
}
)
)
)
})
/**
* JSON API for checking user or group existence.
* Returns a single string which is any of "group", "user" or "".
*/
post("/_user/existence")(usersOnly {
getAccountByUserNameIgnoreCase(params("userName")).map { account =>
if (account.isGroupAccount) "group" else "user"
} getOrElse ""
})
  // TODO Move to RepositoryViewerController?
get("/:owner/:repository/search")(referrersOnly { repository =>
defining(params.getOrElse("q", "").trim, params.getOrElse("type", "code")) {
case (query, target) =>
val page = try {
val i = params.getOrElse("page", "1").toInt
if (i <= 0) 1 else i
} catch {
case e: NumberFormatException => 1
}
target.toLowerCase match {
case "issue" =>
gitbucket.core.search.html.issues(
if (query.nonEmpty) searchIssues(repository.owner, repository.name, query) else Nil,
query,
page,
repository
)
case "wiki" =>
gitbucket.core.search.html.wiki(
if (query.nonEmpty) searchWikiPages(repository.owner, repository.name, query) else Nil,
query,
page,
repository
)
case _ =>
gitbucket.core.search.html.code(
if (query.nonEmpty) searchFiles(repository.owner, repository.name, query) else Nil,
query,
page,
repository
)
}
}
})
get("/search") {
val query = params.getOrElse("query", "").trim.toLowerCase
val visibleRepositories =
getVisibleRepositories(context.loginAccount, repositoryUserName = None, withoutPhysicalInfo = true)
val repositories = visibleRepositories.filter { repository =>
repository.name.toLowerCase.indexOf(query) >= 0 || repository.owner.toLowerCase.indexOf(query) >= 0
}
gitbucket.core.search.html.repositories(query, repositories, visibleRepositories)
}
}
| x-way/gitbucket | src/main/scala/gitbucket/core/controller/IndexController.scala | Scala | apache-2.0 | 8,774 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.qscript.qsu
import slamdata.Predef._
import quasar.{RenderTree, RenderTreeT, RenderedTree}
import quasar.common.{JoinType, SortDir}
import quasar.contrib.pathy.AFile
import quasar.ejson.{EJson, Fixed}
import quasar.ejson.implicits._
import quasar.contrib.matryoshka._
import quasar.fp.ski.{ι, κ}
import quasar.fp._
import quasar.qscript._
import quasar.qscript.provenance.JoinKeys
import matryoshka.{Hole => _, birecursiveIso => _, _} // {delayEqual, equalTEqual, delayShow, showTShow, BirecursiveT, Delay, Embed, EqualT, ShowT}
import matryoshka.data._
import matryoshka.patterns.{CoEnv, EnvT}
import monocle.{Iso, PTraversal, Prism}
import pathy.Path
import scalaz.{Applicative, Bitraverse, Cofree, Enum, Equal, Forall, Free, Functor, Id, Order, Scalaz, Show, Traverse, \/, \/-, NonEmptyList => NEL}
import scalaz.std.anyVal._
import scalaz.std.list._
import scalaz.std.tuple._
import scalaz.syntax.equal._
import scalaz.syntax.show._
import scalaz.syntax.std.option._
sealed trait QScriptUniform[T[_[_]], A] extends Product with Serializable
object QScriptUniform {
implicit def traverse[T[_[_]]]: Traverse[QScriptUniform[T, ?]] = new Traverse[QScriptUniform[T, ?]] {
// we need both apply and traverse syntax, which conflict
import Scalaz._
def traverseImpl[G[_]: Applicative, A, B](qsu: QScriptUniform[T, A])(f: A => G[B])
: G[QScriptUniform[T, B]] = qsu match {
case AutoJoin2(left, right, combiner) =>
(f(left) |@| f(right))(AutoJoin2(_, _, combiner))
case AutoJoin3(left, center, right, combiner) =>
(f(left) |@| f(center) |@| f(right))(AutoJoin3(_, _, _, combiner))
case QSAutoJoin(left, right, keys, combiner) =>
(f(left) |@| f(right))(QSAutoJoin(_, _, keys, combiner))
case GroupBy(left, right) =>
(f(left) |@| f(right))(GroupBy(_, _))
case DimEdit(source, dtrans) =>
f(source).map(DimEdit(_, dtrans))
case LPJoin(left, right, condition, joinType, leftRef, rightRef) =>
(f(left) |@| f(right) |@| f(condition))(LPJoin(_, _, _, joinType, leftRef, rightRef))
case ThetaJoin(left, right, condition, joinType, combiner) =>
(f(left) |@| f(right))(ThetaJoin(_, _, condition, joinType, combiner))
case Unary(source, mf) =>
f(source).map(Unary(_, mf))
case Map(source, fm) =>
f(source).map(Map(_, fm))
case Read(path) => (Read(path): QScriptUniform[T, B]).point[G]
case Transpose(source, retain, rotations) =>
f(source).map(Transpose(_, retain, rotations))
case LeftShift(source, struct, idStatus, onUndefined, repair, rot) =>
f(source).map(LeftShift(_, struct, idStatus, onUndefined, repair, rot))
case MultiLeftShift(source, shifts, onUndefined, repair) =>
f(source).map(MultiLeftShift(_, shifts, onUndefined, repair))
case LPReduce(source, reduce) =>
f(source).map(LPReduce(_, reduce))
case QSReduce(source, buckets, reducers, repair) =>
f(source).map(QSReduce(_, buckets, reducers, repair))
case Distinct(source) =>
f(source).map(Distinct(_))
case LPSort(source, order) =>
val T = Bitraverse[(?, ?)].leftTraverse[SortDir]
val source2G = f(source)
val orders2G = order.traverse(p => T.traverse(p)(f))
(source2G |@| orders2G)(LPSort(_, _))
case QSSort(source, buckets, order) =>
f(source).map(QSSort(_, buckets, order))
case Union(left, right) =>
(f(left) |@| f(right))(Union(_, _))
case Subset(from, op, count) =>
(f(from) |@| f(count))(Subset(_, op, _))
case LPFilter(source, predicate) =>
(f(source) |@| f(predicate))(LPFilter(_, _))
case QSFilter(source, predicate) =>
f(source).map(QSFilter(_, predicate))
case JoinSideRef(id) => (JoinSideRef(id): QScriptUniform[T, B]).point[G]
case Unreferenced() => (Unreferenced(): QScriptUniform[T, B]).point[G]
}
}
@SuppressWarnings(Array("org.wartremover.warts.Recursion"))
implicit def show[T[_[_]]: ShowT]: Delay[Show, QScriptUniform[T, ?]] =
new Delay[Show, QScriptUniform[T, ?]] {
def apply[A](a: Show[A]) = {
implicit val showA = a
Show.shows {
case AutoJoin2(left, right, combiner) =>
s"AutoJoin2(${left.shows}, ${right.shows}, ${combiner.shows})"
case AutoJoin3(left, center, right, combiner) =>
s"AutoJoin3(${left.shows}, ${center.shows}, ${right.shows}, ${combiner.shows})"
case QSAutoJoin(left, right, keys, combiner) =>
s"QSAutoJoin(${left.shows}, ${right.shows}, ${keys.shows}, ${combiner.shows})"
case GroupBy(left, right) =>
s"GroupBy(${left.shows}, ${right.shows})"
case DimEdit(source, dtrans) =>
s"DimEdit(${source.shows}, ${dtrans.shows})"
case LPJoin(left, right, condition, joinType, leftRef, rightRef) =>
s"LPJoin(${left.shows}, ${right.shows}, ${condition.shows}, ${joinType.shows}, ${leftRef.shows}, ${rightRef.shows})"
case ThetaJoin(left, right, condition, joinType, combiner) =>
s"ThetaJoin(${left.shows}, ${right.shows}, ${condition.shows}, ${joinType.shows}, ${combiner.shows})"
case Unary(source, mf) =>
s"Unary(${source.shows}, ${mf.shows})"
case Map(source, fm) =>
s"Map(${source.shows}, ${fm.shows})"
case Read(path) =>
s"Read(${Path.posixCodec.printPath(path)})"
case Transpose(source, retain, rotations) =>
s"Transpose(${source.shows}, ${retain.shows}, ${rotations.shows})"
case LeftShift(source, struct, idStatus, onUndefined, repair, rot) =>
s"LeftShift(${source.shows}, ${struct.linearize.shows}, ${idStatus.shows}, ${onUndefined.shows}, ${repair.shows}, ${rot.shows})"
case MultiLeftShift(source, shifts, onUndefined, repair) =>
s"MultiLeftShift(${source.shows}, ${shifts.shows}, ${onUndefined.shows}, ${repair.shows})"
case LPReduce(source, reduce) =>
s"LPReduce(${source.shows}, ${reduce.shows})"
case QSReduce(source, buckets, reducers, repair) =>
s"QSReduce(${source.shows}, ${buckets.shows}, ${reducers.shows}, ${repair.shows})"
case Distinct(source) =>
s"Distinct(${source.shows})"
case LPSort(source, order) =>
s"LPSort(${source.shows}, ${order.shows})"
case QSSort(source, buckets, order) =>
s"QSSort(${source.shows}, ${buckets.shows}, ${order.shows})"
case Union(left, right) =>
s"Union(${left.shows}, ${right.shows})"
case Subset(from, op, count) =>
s"Subset(${from.shows}, ${op.shows}, ${count.shows})"
case LPFilter(source, predicate) =>
s"LPFilter(${source.shows}, ${predicate.shows})"
case QSFilter(source, predicate) =>
s"QSFilter(${source.shows}, ${predicate.shows})"
case JoinSideRef(id) =>
s"JoinSideRef(${id.shows})"
case Unreferenced() =>
"⊥"
}
}
}
@SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements"))
implicit def renderTree[T[_[_]]: RenderTreeT: ShowT]
: Delay[RenderTree, QScriptUniform[T, ?]] = ???
@SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements"))
implicit def equal[T[_[_]]: BirecursiveT: EqualT]
: Delay[Equal, QScriptUniform[T, ?]] = ???
final case class AutoJoin2[T[_[_]], A](
left: A,
right: A,
combiner: FreeMapA[T, JoinSide]) extends QScriptUniform[T, A]
final case class AutoJoin3[T[_[_]], A](
left: A,
center: A,
right: A,
combiner: FreeMapA[T, JoinSide3]) extends QScriptUniform[T, A]
final case class QSAutoJoin[T[_[_]], A](
left: A,
right: A,
keys: JoinKeys[QIdAccess[T]],
combiner: JoinFunc[T]) extends QScriptUniform[T, A]
final case class GroupBy[T[_[_]], A](
left: A,
right: A) extends QScriptUniform[T, A]
final case class DimEdit[T[_[_]], A](
source: A,
trans: DTrans[T]) extends QScriptUniform[T, A]
sealed trait DTrans[T[_[_]]] extends Product with Serializable
object DTrans {
final case class Squash[T[_[_]]]() extends DTrans[T]
final case class Group[T[_[_]]](getKey: FreeMap[T]) extends DTrans[T]
implicit def show[T[_[_]]: ShowT]: Show[DTrans[T]] =
Show.shows[DTrans[T]] {
case Squash() => "Squash"
case Group(k) => s"Group(${k.shows})"
}
}
// LPish
final case class LPJoin[T[_[_]], A](
left: A,
right: A,
condition: A,
joinType: JoinType,
leftRef: Symbol,
rightRef: Symbol) extends QScriptUniform[T, A]
// QScriptish
final case class ThetaJoin[T[_[_]], A](
left: A,
right: A,
condition: JoinFunc[T],
joinType: JoinType,
combiner: JoinFunc[T]) extends QScriptUniform[T, A]
/**
* This is a non-free (as in monad) variant of Map. We need it
* in ReadLP so that graph compaction is defined, which is required
* because compaction utilizes an `SMap[QSU[Symbol], Symbol]`, which
* isn't valid when the `QSU`s inside the keys are libre.
*/
final case class Unary[T[_[_]], A](
source: A,
mf: MapFunc[T, Hole]) extends QScriptUniform[T, A]
final case class Map[T[_[_]], A](
source: A,
fm: FreeMap[T]) extends QScriptUniform[T, A]
final case class Read[T[_[_]], A](path: AFile) extends QScriptUniform[T, A]
// LPish
final case class Transpose[T[_[_]], A](
source: A,
retain: Retain,
rotations: Rotation) extends QScriptUniform[T, A]
sealed trait Retain extends Product with Serializable {
def fold[A](ids: => A, vals: => A): A = this match {
case Retain.Identities => ids
case Retain.Values => vals
}
}
object Retain {
case object Identities extends Retain
case object Values extends Retain
implicit val enum: Enum[Retain] =
new Enum[Retain] {
def succ(r: Retain) =
r match {
case Identities => Values
case Values => Identities
}
def pred(r: Retain) =
r match {
case Identities => Values
case Values => Identities
}
override val min = Some(Identities)
override val max = Some(Values)
def order(x: Retain, y: Retain) =
Order[Int].order(toInt(x), toInt(y))
val toInt: Retain => Int = {
case Identities => 0
case Values => 1
}
}
implicit val show: Show[Retain] =
Show.showFromToString
}
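  // For illustration, given the definitions above:
  //   Retain.Identities.fold(ids = "identities", vals = "values") evaluates to "identities",
  //   and Retain.Values is the successor of Retain.Identities in the Enum instance.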
sealed trait Rotation extends Product with Serializable
object Rotation {
case object FlattenArray extends Rotation
case object ShiftArray extends Rotation
case object FlattenMap extends Rotation
case object ShiftMap extends Rotation
implicit val enum: Enum[Rotation] =
new Enum[Rotation] {
def succ(r: Rotation) =
r match {
case FlattenArray => ShiftArray
case ShiftArray => FlattenMap
case FlattenMap => ShiftMap
case ShiftMap => FlattenArray
}
def pred(r: Rotation) =
r match {
case FlattenArray => ShiftMap
case ShiftArray => FlattenArray
case FlattenMap => ShiftArray
case ShiftMap => FlattenMap
}
override val min = Some(FlattenArray)
override val max = Some(ShiftMap)
def order(x: Rotation, y: Rotation) =
Order[Int].order(toInt(x), toInt(y))
val toInt: Rotation => Int = {
case FlattenArray => 0
case ShiftArray => 1
case FlattenMap => 2
case ShiftMap => 3
}
}
implicit val show: Show[Rotation] =
Show.showFromToString
}
sealed trait ShiftTarget[T[_[_]]] extends Product with Serializable
object ShiftTarget {
final case class LeftTarget[T[_[_]]]() extends ShiftTarget[T]
final case class RightTarget[T[_[_]]]() extends ShiftTarget[T]
final case class AccessLeftTarget[T[_[_]]](access: QAccess[T, Hole]) extends ShiftTarget[T]
implicit def equalShiftTarget[T[_[_]]: BirecursiveT: EqualT]: Equal[ShiftTarget[T]] = Equal.equal {
case (AccessLeftTarget(access1), AccessLeftTarget(access2)) => access1 ≟ access2
case (LeftTarget(), LeftTarget()) => true
case (RightTarget(), RightTarget()) => true
case _ => false
}
implicit def showShiftTarget[T[_[_]]: ShowT]: Show[ShiftTarget[T]] = Show.shows {
case LeftTarget() => "LeftTarget"
case RightTarget() => "RightTarget"
case AccessLeftTarget(access) => s"AccessLeftTarget(${access.shows})"
}
implicit def renderShiftTarget[T[_[_]]: RecursiveT: RenderTreeT: ShowT]: RenderTree[ShiftTarget[T]] = RenderTree.make {
case LeftTarget() =>
RenderedTree("ShiftTarget" :: Nil, "LeftTarget".some, Nil)
case RightTarget() =>
RenderedTree("ShiftTarget" :: Nil, "RightTarget".some, Nil)
case AccessLeftTarget(access) =>
RenderedTree("ShiftTarget" :: Nil, "AccessLeftTarget".some, RenderTree[QAccess[T, Hole]].render(access) :: Nil)
}
}
// QScriptish
final case class LeftShift[T[_[_]], A](
source: A,
struct: RecFreeMap[T],
idStatus: IdStatus,
onUndefined: OnUndefined,
repair: FreeMapA[T, ShiftTarget[T]],
rot: Rotation) extends QScriptUniform[T, A]
// shifting multiple structs on the same source;
// horizontal composition of LeftShifts
final case class MultiLeftShift[T[_[_]], A](
source: A,
shifts: List[(FreeMap[T], IdStatus, Rotation)],
onUndefined: OnUndefined,
    repair: FreeMapA[T, QAccess[T, Hole] \/ Int]) extends QScriptUniform[T, A]
// LPish
final case class LPReduce[T[_[_]], A](
source: A,
reduce: ReduceFunc[Unit]) extends QScriptUniform[T, A]
// QScriptish
final case class QSReduce[T[_[_]], A](
source: A,
buckets: List[FreeMapA[T, QAccess[T, Hole]]],
reducers: List[ReduceFunc[FreeMap[T]]],
repair: FreeMapA[T, ReduceIndex]) extends QScriptUniform[T, A]
final case class Distinct[T[_[_]], A](source: A) extends QScriptUniform[T, A]
// LPish
final case class LPSort[T[_[_]], A](
source: A,
order: NEL[(A, SortDir)]) extends QScriptUniform[T, A]
// QScriptish
final case class QSSort[T[_[_]], A](
source: A,
buckets: List[FreeMapA[T, QAccess[T, Hole]]],
order: NEL[(FreeMap[T], SortDir)]) extends QScriptUniform[T, A]
final case class Union[T[_[_]], A](left: A, right: A) extends QScriptUniform[T, A]
final case class Subset[T[_[_]], A](
from: A,
op: SelectionOp,
count: A) extends QScriptUniform[T, A]
// LPish
final case class LPFilter[T[_[_]], A](
source: A,
predicate: A) extends QScriptUniform[T, A]
// QScriptish
final case class QSFilter[T[_[_]], A](
source: A,
predicate: FreeMap[T]) extends QScriptUniform[T, A]
final case class Unreferenced[T[_[_]], A]() extends QScriptUniform[T, A]
final case class JoinSideRef[T[_[_]], A](id: Symbol) extends QScriptUniform[T, A]
final class Optics[T[_[_]]] private () extends QSUTTypes[T] {
def autojoin2[A]: Prism[QScriptUniform[A], (A, A, FreeMapA[JoinSide])] =
Prism.partial[QScriptUniform[A], (A, A, FreeMapA[JoinSide])] {
case AutoJoin2(left, right, func) => (left, right, func)
} { case (left, right, func) => AutoJoin2(left, right, func) }
def autojoin3[A]: Prism[QScriptUniform[A], (A, A, A, FreeMapA[JoinSide3])] =
Prism.partial[QScriptUniform[A], (A, A, A, FreeMapA[JoinSide3])] {
case AutoJoin3(left, center, right, func) => (left, center, right, func)
} { case (left, center, right, func) => AutoJoin3(left, center, right, func) }
def dimEdit[A]: Prism[QScriptUniform[A], (A, DTrans[T])] =
Prism.partial[QScriptUniform[A], (A, DTrans[T])] {
case DimEdit(a, dt) => (a, dt)
} { case (a, dt) => DimEdit(a, dt) }
def distinct[A]: Prism[QScriptUniform[A], A] =
Prism.partial[QScriptUniform[A], A] {
case Distinct(a) => a
} (Distinct(_))
def groupBy[A]: Prism[QScriptUniform[A], (A, A)] =
Prism.partial[QScriptUniform[A], (A, A)] {
case GroupBy(l, r) => (l, r)
} { case (l, r) => GroupBy(l, r) }
def joinSideRef[A]: Prism[QScriptUniform[A], Symbol] =
Prism.partial[QScriptUniform[A], Symbol] {
case JoinSideRef(s) => s
} (JoinSideRef(_))
def leftShift[A]: Prism[QScriptUniform[A], (A, RecFreeMap, IdStatus, OnUndefined, FreeMapA[ShiftTarget[T]], Rotation)] =
Prism.partial[QScriptUniform[A], (A, RecFreeMap, IdStatus, OnUndefined, FreeMapA[ShiftTarget[T]], Rotation)] {
case LeftShift(s, fm, ids, ou, jf, rot) => (s, fm, ids, ou, jf, rot)
} { case (s, fm, ids, ou, jf, rot) => LeftShift(s, fm, ids, ou, jf, rot) }
  def multiLeftShift[A]: Prism[QScriptUniform[A], (A, List[(FreeMap, IdStatus, Rotation)], OnUndefined, FreeMapA[QAccess[Hole] \/ Int])] =
    Prism.partial[QScriptUniform[A], (A, List[(FreeMap, IdStatus, Rotation)], OnUndefined, FreeMapA[QAccess[Hole] \/ Int])] {
case MultiLeftShift(s, ss, ou, map) => (s, ss, ou, map)
} { case (s, ss, ou, map) => MultiLeftShift(s, ss, ou, map) }
def lpFilter[A]: Prism[QScriptUniform[A], (A, A)] =
Prism.partial[QScriptUniform[A], (A, A)] {
case LPFilter(s, p) => (s, p)
} { case (s, p) => LPFilter(s, p) }
def lpJoin[A]: Prism[QScriptUniform[A], (A, A, A, JoinType, Symbol, Symbol)] =
Prism.partial[QScriptUniform[A], (A, A, A, JoinType, Symbol, Symbol)] {
case LPJoin(l, r, c, t, lr, rr) => (l, r, c, t, lr, rr)
} { case (l, r, c, t, lr, rr) => LPJoin(l, r, c, t, lr, rr) }
def lpReduce[A]: Prism[QScriptUniform[A], (A, ReduceFunc[Unit])] =
Prism.partial[QScriptUniform[A], (A, ReduceFunc[Unit])] {
case LPReduce(a, rf) => (a, rf)
} { case (a, rf) => LPReduce(a, rf) }
def lpSort[A]: Prism[QScriptUniform[A], (A, NEL[(A, SortDir)])] =
Prism.partial[QScriptUniform[A], (A, NEL[(A, SortDir)])] {
case LPSort(a, keys) => (a, keys)
} { case (a, keys) => LPSort(a, keys) }
def unary[A]: Prism[QScriptUniform[A], (A, MapFunc[Hole])] =
Prism.partial[QScriptUniform[A], (A, MapFunc[Hole])] {
case Unary(a, mf) => (a, mf)
} { case (a, mf) => Unary(a, mf) }
def map[A]: Prism[QScriptUniform[A], (A, FreeMap)] =
Prism.partial[QScriptUniform[A], (A, FreeMap)] {
case Map(a, fm) => (a, fm)
} { case (a, fm) => Map(a, fm) }
def qsAutoJoin[A]: Prism[QScriptUniform[A], (A, A, JoinKeys[QIdAccess], JoinFunc)] =
Prism.partial[QScriptUniform[A], (A, A, JoinKeys[QIdAccess], JoinFunc)] {
case QSAutoJoin(l, r, ks, c) => (l, r, ks, c)
} { case (l, r, ks, c) => QSAutoJoin(l, r, ks, c) }
def qsFilter[A]: Prism[QScriptUniform[A], (A, FreeMap)] =
Prism.partial[QScriptUniform[A], (A, FreeMap)] {
case QSFilter(a, p) => (a, p)
} { case (a, p) => QSFilter(a, p) }
def qsReduce[A]: Prism[QScriptUniform[A], (A, List[FreeAccess[Hole]], List[ReduceFunc[FreeMap]], FreeMapA[ReduceIndex])] =
Prism.partial[QScriptUniform[A], (A, List[FreeAccess[Hole]], List[ReduceFunc[FreeMap]], FreeMapA[ReduceIndex])] {
case QSReduce(a, bs, rfs, rep) => (a, bs, rfs, rep)
} { case (a, bs, rfs, rep) => QSReduce(a, bs, rfs, rep) }
def qsSort[A]: Prism[QScriptUniform[A], (A, List[FreeAccess[Hole]], NEL[(FreeMap, SortDir)])] =
Prism.partial[QScriptUniform[A], (A, List[FreeAccess[Hole]], NEL[(FreeMap, SortDir)])] {
case QSSort(a, buckets, keys) => (a, buckets, keys)
} { case (a, buckets, keys) => QSSort(a, buckets, keys) }
def read[A]: Prism[QScriptUniform[A], AFile] =
Prism.partial[QScriptUniform[A], AFile] {
case Read(f) => f
} (Read(_))
def subset[A]: Prism[QScriptUniform[A], (A, SelectionOp, A)] =
Prism.partial[QScriptUniform[A], (A, SelectionOp, A)] {
case Subset(f, op, c) => (f, op, c)
} { case (f, op, c) => Subset(f, op, c) }
def thetaJoin[A]: Prism[QScriptUniform[A], (A, A, JoinFunc, JoinType, JoinFunc)] =
Prism.partial[QScriptUniform[A], (A, A, JoinFunc, JoinType, JoinFunc)] {
case ThetaJoin(l, r, c, t, b) => (l, r, c, t, b)
} { case (l, r, c, t, b) => ThetaJoin(l, r, c, t, b) }
def transpose[A]: Prism[QScriptUniform[A], (A, Retain, Rotation)] =
Prism.partial[QScriptUniform[A], (A, Retain, Rotation)] {
case Transpose(a, ret, rot) => (a, ret, rot)
} { case (a, ret, rot) => Transpose(a, ret, rot) }
def union[A]: Prism[QScriptUniform[A], (A, A)] =
Prism.partial[QScriptUniform[A], (A, A)] {
case Union(l, r) => (l, r)
} { case (l, r) => Union(l, r) }
def unreferenced[A]: Prism[QScriptUniform[A], Unit] =
Prism.partial[QScriptUniform[A], Unit] {
case Unreferenced() => ()
} (κ(Unreferenced()))
def holes[A, B]: PTraversal[QScriptUniform[A], QScriptUniform[B], A, B] =
PTraversal.fromTraverse[QScriptUniform, A, B]
}
object Optics {
def apply[T[_[_]]]: Optics[T] = new Optics[T]
}
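  // Illustrative usage sketch (hypothetical `Fix` and `someNode`; assumes the usual recursion-scheme
  // instances are in scope). The prisms above can both match and rebuild QScriptUniform nodes:
  //   val O = Optics[Fix]
  //   O.union.getOption(someNode)       // Some((left, right)) when someNode is a Union
  //   O.union.reverseGet((left, right)) // rebuilds a Union node from its parts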
sealed abstract class Dsl[T[_[_]]: BirecursiveT, F[_]: Functor, A] extends QSUTTypes[T] {
import Scalaz._
val iso: Iso[A, F[QScriptUniform[A]]]
def lifting[S, A]: Prism[S, A] => Prism[F[S], F[A]]
type Bin[A] = (A, A) => Binary[T, A]
type Tri[A] = (A, A, A) => Ternary[T, A]
private val O = Optics[T]
def mfc[A] = PrismNT.inject[MapFuncCore, MapFunc].asPrism[A]
private def composeLifting[G[_]](optic: Prism[QScriptUniform[A], G[A]]) =
iso composePrism lifting[QScriptUniform[A], G[A]](optic)
def _autojoin2: Prism[A, F[(A, A, FreeMapA[JoinSide])]] = {
type G[A] = (A, A, FreeMapA[JoinSide])
composeLifting[G](O.autojoin2[A])
}
def _autojoin3: Prism[A, F[(A, A, A, FreeMapA[JoinSide3])]] = {
type G[A] = (A, A, A, FreeMapA[JoinSide3])
composeLifting[G](O.autojoin3[A])
}
def autojoin2(input: F[(A, A, Forall.CPS[Bin])]): A =
_autojoin2(input.map {
case (left, right, combiner) =>
(left, right,
Free.liftF(mfc(Forall[Bin](combiner)[JoinSide](LeftSide, RightSide))))
})
def autojoin3(input: F[(A, A, A, Forall.CPS[Tri])]): A =
_autojoin3(input.map {
case (left, center, right, combiner) =>
(left, center, right,
Free.liftF(mfc(Forall[Tri](combiner)[JoinSide3](LeftSide3, Center, RightSide3))))
})
def dimEdit: Prism[A, F[(A, DTrans[T])]] =
composeLifting[(?, DTrans[T])](O.dimEdit[A])
def distinct: Prism[A, F[A]] =
composeLifting[Id](O.distinct[A])
def groupBy: Prism[A, F[(A, A)]] = {
type G[A] = (A, A)
composeLifting[G](O.groupBy[A])
}
def joinSideRef: Prism[A, F[Symbol]] = {
type G[A] = Symbol
composeLifting[G](O.joinSideRef[A])
}
def leftShift: Prism[A, F[(A, RecFreeMap, IdStatus, OnUndefined, FreeMapA[ShiftTarget[T]], Rotation)]] = {
composeLifting[(?, RecFreeMap, IdStatus, OnUndefined, FreeMapA[ShiftTarget[T]], Rotation)](O.leftShift[A])
}
  def multiLeftShift: Prism[A, F[(A, List[(FreeMap, IdStatus, Rotation)], OnUndefined, FreeMapA[QAccess[Hole] \/ Int])]] = {
    composeLifting[(?, List[(FreeMap, IdStatus, Rotation)], OnUndefined, FreeMapA[QAccess[Hole] \/ Int])](O.multiLeftShift[A])
}
def lpFilter: Prism[A, F[(A, A)]] = {
type G[A] = (A, A)
composeLifting[G](O.lpFilter[A])
}
def lpJoin: Prism[A, F[(A, A, A, JoinType, Symbol, Symbol)]] = {
type G[A] = (A, A, A, JoinType, Symbol, Symbol)
composeLifting[G](O.lpJoin[A])
}
def lpReduce: Prism[A, F[(A, ReduceFunc[Unit])]] =
composeLifting[(?, ReduceFunc[Unit])](O.lpReduce[A])
def lpSort: Prism[A, F[(A, NEL[(A, SortDir)])]] = {
type G[A] = (A, NEL[(A, SortDir)])
composeLifting[G](O.lpSort[A])
}
def unary: Prism[A, F[(A, MapFunc[Hole])]] =
composeLifting[(?, MapFunc[Hole])](O.unary[A])
def map: Prism[A, F[(A, FreeMap)]] =
composeLifting[(?, FreeMap)](O.map[A])
def map1(pair: F[(A, MapFuncCore[Hole])]): A =
map(pair.map {
case(src, f) => (src, Free.roll(mfc(f as HoleF[T])))
})
def qsAutoJoin: Prism[A, F[(A, A, JoinKeys[QIdAccess], JoinFunc)]] = {
type G[A] = (A, A, JoinKeys[QIdAccess], JoinFunc)
composeLifting[G](O.qsAutoJoin[A])
}
def qsFilter: Prism[A, F[(A, FreeMap)]] =
composeLifting[(?, FreeMap)](O.qsFilter[A])
def qsReduce: Prism[A, F[(A, List[FreeAccess[Hole]], List[ReduceFunc[FreeMap]], FreeMapA[ReduceIndex])]] =
composeLifting[(?, List[FreeAccess[Hole]], List[ReduceFunc[FreeMap]], FreeMapA[ReduceIndex])](O.qsReduce[A])
def qsSort: Prism[A, F[(A, List[FreeAccess[Hole]], NEL[(FreeMap, SortDir)])]] =
composeLifting[(?, List[FreeAccess[Hole]], NEL[(FreeMap, SortDir)])](O.qsSort[A])
def read: Prism[A, F[AFile]] = {
type G[_] = AFile
composeLifting[G](O.read[A])
}
def subset: Prism[A, F[(A, SelectionOp, A)]] = {
type G[A] = (A, SelectionOp, A)
composeLifting[G](O.subset[A])
}
def thetaJoin: Prism[A, F[(A, A, JoinFunc, JoinType, JoinFunc)]] = {
type G[A] = (A, A, JoinFunc, JoinType, JoinFunc)
composeLifting[G](O.thetaJoin[A])
}
def transpose: Prism[A, F[(A, Retain, Rotation)]] =
composeLifting[(?, Retain, Rotation)](O.transpose[A])
def union: Prism[A, F[(A, A)]] = {
type G[A] = (A, A)
composeLifting[G](O.union[A])
}
def unreferenced: Prism[A, F[Unit]] = {
type G[_] = Unit
composeLifting[G](O.unreferenced[A])
}
}
sealed abstract class DslT[T[_[_]]: BirecursiveT] private () extends Dsl[T, Id.Id, T[QScriptUniform[T, ?]]] {
type QSU[A] = QScriptUniform[A]
private val J = Fixed[T[EJson]]
// read
def tread(file: AFile): T[QSU] =
transpose(read(file), Retain.Values, Rotation.ShiftMap)
def tread1(name: String): T[QSU] =
tread(Path.rootDir </> Path.file(name))
// undefined
val undefined: Prism[T[QSU], Unit] =
Prism[T[QSU], Unit](map.getOption(_) collect {
      case (Unreferenced(), Embed(CoEnv(\/-(MFC(MapFuncsCore.Undefined()))))) => ()
})(_ => map(unreferenced(), Free.roll(mfc[FreeMap](MapFuncsCore.Undefined()))))
// constants
val constant: Prism[T[QSU], T[EJson]] =
Prism[T[QSU], T[EJson]](map.getOption(_) collect {
      case (Unreferenced(), Embed(CoEnv(\/-(MFC(MapFuncsCore.Constant(ejs)))))) => ejs
})(ejs => map(unreferenced(), Free.roll(mfc[FreeMap](MapFuncsCore.Constant(ejs)))))
val carr: Prism[T[QSU], List[T[EJson]]] =
constant composePrism J.arr
val cbool: Prism[T[QSU], Boolean] =
constant composePrism J.bool
val cbyte: Prism[T[QSU], Byte] =
constant composePrism J.byte
val cchar: Prism[T[QSU], Char] =
constant composePrism J.char
val cdec: Prism[T[QSU], BigDecimal] =
constant composePrism J.dec
val cint: Prism[T[QSU], BigInt] =
constant composePrism J.int
val cmap: Prism[T[QSU], List[(T[EJson], T[EJson])]] =
constant composePrism J.map
val cmeta: Prism[T[QSU], (T[EJson], T[EJson])] =
constant composePrism J.meta
val cnull: Prism[T[QSU], Unit] =
constant composePrism J.nul
val cstr: Prism[T[QSU], String] =
constant composePrism J.str
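  // Illustrative usage sketch (hypothetical `T`; assumes the required BirecursiveT instance).
  // The constant prisms build a Map-over-Unreferenced node wrapping an EJson constant:
  //   val dsl = DslT[T]
  //   val forty2 = dsl.cint(BigInt(42))  // constant integer node
  //   dsl.cint.getOption(forty2)         // Some(BigInt(42))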
}
object DslT {
def apply[T[_[_]]: BirecursiveT]: DslT[T] =
new DslT {
val iso: Iso[T[QSU], QSU[T[QSU]]] = birecursiveIso[T[QSU], QSU]
def lifting[S, A]: Prism[S, A] => Prism[S, A] = ι
}
}
object AnnotatedDsl {
import Scalaz._
def apply[T[_[_]]: BirecursiveT, A]
: Dsl[T, (A, ?), Cofree[QScriptUniform[T, ?], A]] = {
type QSU[B] = QScriptUniform[T, B]
type CoQSU = Cofree[QSU, A]
new Dsl[T, (A, ?), CoQSU] {
val iso: Iso[CoQSU, (A, QSU[CoQSU])] =
birecursiveIso[CoQSU, EnvT[A, QSU, ?]]
.composeIso(envTIso[A, QSU, CoQSU])
def lifting[S, B]: Prism[S, B] => Prism[(A, S), (A, B)] =
_.second[A]
}
}
}
}
| jedesah/Quasar | connector/src/main/scala/quasar/qscript/qsu/QScriptUniform.scala | Scala | apache-2.0 | 29,101 |
package com.lucaongaro.similaria.lmdb
import scala.language.implicitConversions
import java.nio.ByteBuffer
// A class to serialize/deserialize occurrence counts together with a muted flag.
// Since counts can only be positive, the sign encodes the flag:
// a positive value means not muted, a negative value means muted.
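// Illustrative round-trip sketch: a muted count of 7 is stored as the 4-byte encoding of -7.
//   val bytes = CountMuted.countMutedToBytes( CountMuted( 7, isMuted = true ) )
//   bytes match { case CountMuted( count, muted ) => (count, muted) }  // => (7, true)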
case class CountMuted(
count: Int,
isMuted: Boolean
) {
def +( increment: Int ) = {
CountMuted( count + increment, isMuted )
}
}
object CountMuted {
def unapply( bytes: Array[Byte] ) = {
if ( bytes == null )
None
else {
val value = ByteBuffer.wrap( bytes ).getInt
Some( (value.abs, value < 0) )
}
}
  implicit def countMutedToBytes( ca: CountMuted ): Array[Byte] = {
val bb = ByteBuffer.allocate(4)
val s = if ( ca.isMuted ) -1 else 1
bb.putInt( ca.count * s ).array()
}
}
| lucaong/similaria | src/main/scala/com/lucaongaro/similaria/lmdb/CountMuted.scala | Scala | mit | 836 |
package org.bidpulse
import akka.actor.Actor
import spray.routing._
import spray.http._
import MediaTypes._
// we don't implement our route structure directly in the service actor because
// we want to be able to test it independently, without having to spin up an actor
class ServiceActor extends Actor with Service {
// the HttpService trait defines only one abstract member, which
// connects the services environment to the enclosing actor or test
def actorRefFactory = context
// this actor only runs our route, but you could add
// other things here, like request stream processing
// or timeout handling
def receive = runRoute(myRoute)
}
// this trait defines our service behavior independently from the service actor
trait Service extends HttpService {
val myRoute =
path("") {
get {
respondWithMediaType(`text/html`) { // XML is marshalled to `text/xml` by default, so we simply override here
complete {
<html>
<body>
<h1>Hello</h1>
</body>
</html>
}
}
}
}
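  // Illustrative test sketch (hypothetical spec name): because the route lives in this trait,
  // it can be exercised with spray-testkit without starting the ServiceActor:
  //   class ServiceSpec extends Specification with Specs2RouteTest with Service {
  //     def actorRefFactory = system
  //     "return the greeting page for GET /" in {
  //       Get("/") ~> myRoute ~> check { responseAs[String] must contain("Hello") }
  //     }
  //   }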
} | EugenyLoy/BidPulse | src/main/scala/org/bidpulse/Service.scala | Scala | mit | 1,114 |
package dao
import javax.inject.{Inject, Singleton}
import models.{NewEvent, Event}
import play.modules.reactivemongo.ReactiveMongoApi
import play.modules.reactivemongo.json.collection.JSONCollection
import play.api.libs.json._
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import reactivemongo.api.indexes.{IndexType, Index}
import reactivemongo.bson.BSONObjectID
import play.modules.reactivemongo.json._
import play.modules.reactivemongo.json.collection._
import formats.MongoJsonFormats._
@Singleton
class EventDao @Inject()(val reactiveMongoApi: ReactiveMongoApi) {
lazy val db = reactiveMongoApi.db
def collection: JSONCollection = db.collection[JSONCollection]("events")
// migrate data before app startup and after injection
updateDB
def updateDB = {
collection.indexesManager.drop("storyIdViewerProfileIdUniqueIndex")
}
def newEventToEvent(newEvent: NewEvent, viewerProfileId: String, tags: List[String], ip: String): Event =
Event(newEvent.storyId, newEvent._type, tags, viewerProfileId, BSONObjectID.generate.stringify, ip = ip)
def addEvent(event: Event) = collection.insert(event)
def findLastOne(ip: String, _type: String) = collection.find(
Json.obj(
"ip" -> ip,
"type" -> _type)
).sort(Json.obj("date" -> -1)).one[Event]
}
| siz-io/siz-api | app/dao/EventDao.scala | Scala | isc | 1,313 |
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.lsp.api.commands
import org.ensime.lsp.api.types._
import spray.json._
import scalaz.deriving
object TextDocumentSyncKind {
/**
* Documents should not be synced at all.
*/
final val None = 0
/**
* Documents are synced by always sending the full content
* of the document.
*/
final val Full = 1
/**
* Documents are synced by sending the full content on open.
* After that only incremental updates to the document are
* send.
*/
final val Incremental = 2
}
object MessageType {
/** An error message. */
final val Error = 1
/** A warning message. */
final val Warning = 2
/** An information message. */
final val Info = 3
/** A log message. */
final val Log = 4
}
sealed trait Message
sealed trait ServerCommand extends Message
sealed trait ClientCommand extends Message
sealed trait Response extends Message
sealed trait ResultResponse extends Response
sealed trait Notification extends Message
/**
* Parameters and types used in the `initialize` message.
*/
@deriving(JsReader, JsWriter)
final case class InitializeParams(
// The process Id of the parent process that started the server.
processId: Long,
//The rootPath of the workspace. Is null if no folder is open.
rootPath: String,
//The capabilities provided by the client (editor)
capabilities: ClientCapabilities)
extends ServerCommand
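// Illustrative wire-level sketch: this corresponds to the LSP `initialize` request, whose params
// might look roughly like {"processId": 1234, "rootPath": "/home/user/project", "capabilities": {}}
// (exact field names depend on the derived JsReader/JsWriter instances).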
final case class InitializeError(retry: Boolean)
@deriving(JsReader, JsWriter)
final case class ClientCapabilities()
@deriving(JsReader, JsWriter)
final case class ServerCapabilities(
//Defines how text documents are synced.
textDocumentSync: Int = TextDocumentSyncKind.Full,
//The server provides hover support.
hoverProvider: Boolean = false,
//The server provides completion support.
completionProvider: Option[CompletionOptions],
//The server provides signature help support.
signatureHelpProvider: Option[SignatureHelpOptions] = None,
//The server provides goto definition support.
definitionProvider: Boolean = false,
///The server provides find references support.
referencesProvider: Boolean = false,
//The server provides document highlight support.
documentHighlightProvider: Boolean = false,
//The server provides document symbol support.
documentSymbolProvider: Boolean = false,
//The server provides workspace symbol support.
workspaceSymbolProvider: Boolean = false,
//The server provides code actions.
codeActionProvider: Boolean = false,
//The server provides code lens.
codeLensProvider: Option[CodeLensOptions] = None,
//The server provides document formatting.
documentFormattingProvider: Boolean = false,
//The server provides document range formatting.
documentRangeFormattingProvider: Boolean = false,
//The server provides document formatting on typing.
documentOnTypeFormattingProvider: Option[DocumentOnTypeFormattingOptions] =
None,
//The server provides rename support.
renameProvider: Boolean = false
)
@deriving(JsReader, JsWriter)
final case class CompletionOptions(resolveProvider: Boolean,
triggerCharacters: Seq[String])
@deriving(JsReader, JsWriter)
final case class SignatureHelpOptions(triggerCharacters: Seq[String])
@deriving(JsReader, JsWriter)
final case class CodeLensOptions(resolveProvider: Boolean = false)
@deriving(JsReader, JsWriter)
final case class DocumentOnTypeFormattingOptions(
firstTriggerCharacter: String,
moreTriggerCharacters: Seq[String]
)
@deriving(JsReader, JsWriter)
final case class CompletionList(isIncomplete: Boolean,
items: Seq[CompletionItem])
extends ResultResponse
@deriving(JsReader, JsWriter)
final case class InitializeResult(capabilities: ServerCapabilities)
extends ResultResponse
@deriving(JsReader, JsWriter)
final case class Shutdown() extends ServerCommand
@deriving(JsWriter)
final case class ShutdownResult() extends ResultResponse
@deriving(JsReader, JsWriter)
final case class ShowMessageRequestParams(
//The message type. @see MessageType
tpe: Long,
//The actual message
message: String,
//The message action items to present.
actions: Seq[MessageActionItem])
extends ClientCommand
/**
* A short title like 'Retry', 'Open Log' etc.
*/
@deriving(JsReader, JsWriter)
final case class MessageActionItem(title: String)
@deriving(JsReader, JsWriter)
final case class TextDocumentPositionParams(
textDocument: TextDocumentIdentifier,
position: Position
)
@deriving(JsReader, JsWriter)
final case class DocumentSymbolParams(textDocument: TextDocumentIdentifier)
extends ServerCommand
final case class TextDocumentCompletionRequest(
params: TextDocumentPositionParams
) extends ServerCommand
object TextDocumentCompletionRequest {
implicit val jsWriter: JsWriter[TextDocumentCompletionRequest] =
JsWriter[TextDocumentPositionParams].contramap(_.params)
implicit val jsReader: JsReader[TextDocumentCompletionRequest] =
JsReader[TextDocumentPositionParams].map(TextDocumentCompletionRequest(_))
}
final case class TextDocumentDefinitionRequest(
params: TextDocumentPositionParams
) extends ServerCommand
object TextDocumentDefinitionRequest {
implicit val jsWriter: JsWriter[TextDocumentDefinitionRequest] =
JsWriter[TextDocumentPositionParams].contramap(_.params)
implicit val jsReader: JsReader[TextDocumentDefinitionRequest] =
JsReader[TextDocumentPositionParams].map(TextDocumentDefinitionRequest(_))
}
final case class TextDocumentHoverRequest(params: TextDocumentPositionParams)
extends ServerCommand
object TextDocumentHoverRequest {
implicit val jsWriter: JsWriter[TextDocumentHoverRequest] =
JsWriter[TextDocumentPositionParams].contramap(_.params)
implicit val jsReader: JsReader[TextDocumentHoverRequest] =
JsReader[TextDocumentPositionParams].map(TextDocumentHoverRequest(_))
}
@deriving(JsReader, JsWriter)
final case class Hover(contents: Seq[MarkedString], range: Option[Range])
extends ResultResponse
///////////////////////////// Notifications ///////////////////////////////
// From server to client
@deriving(JsReader, JsWriter)
final case class ShowMessageParams(tpe: Int, message: String)
extends Notification
@deriving(JsReader, JsWriter)
final case class LogMessageParams(tpe: Int, message: String)
extends Notification
@deriving(JsReader, JsWriter)
final case class PublishDiagnostics(uri: String, diagnostics: Seq[Diagnostic])
extends Notification
// from client to server
@deriving(JsReader, JsWriter)
final case class Exit() extends Notification
@deriving(JsReader, JsWriter)
final case class DidOpenTextDocumentParams(textDocument: TextDocumentItem)
extends Notification
@deriving(JsReader, JsWriter)
final case class DidChangeTextDocumentParams(
textDocument: VersionedTextDocumentIdentifier,
contentChanges: Seq[TextDocumentContentChangeEvent]
) extends Notification
@deriving(JsReader, JsWriter)
final case class DidCloseTextDocumentParams(
textDocument: TextDocumentIdentifier
) extends Notification
@deriving(JsReader, JsWriter)
final case class DidSaveTextDocumentParams(textDocument: TextDocumentIdentifier)
extends Notification
@deriving(JsReader, JsWriter)
final case class DidChangeWatchedFiles(changes: Seq[FileEvent])
extends Notification
@deriving(JsReader, JsWriter)
final case class Initialized() extends Notification
@deriving(JsReader, JsWriter)
final case class CancelRequest(id: Int) extends Notification
@deriving(JsReader, JsWriter)
final case class FileEvent(uri: String, `type`: Int)
object FileChangeType {
final val Created = 1
final val Changed = 2
final val Deleted = 3
}
final case class DocumentSymbolResult(params: Seq[SymbolInformation])
extends ResultResponse
object DocumentSymbolResult {
implicit val jsWriter: JsWriter[DocumentSymbolResult] =
JsWriter[Seq[SymbolInformation]].contramap(_.params)
implicit val jsReader: JsReader[DocumentSymbolResult] =
JsReader[Seq[SymbolInformation]].map(DocumentSymbolResult(_))
}
final case class DefinitionResult(params: Seq[Location]) extends ResultResponse
object DefinitionResult {
implicit val jsWriter: JsWriter[DefinitionResult] =
JsWriter[Seq[Location]].contramap(_.params)
implicit val jsReader: JsReader[DefinitionResult] =
JsReader[Seq[Location]].map(DefinitionResult(_))
}
| ensime/ensime-server | lsp/src/main/scala/org/ensime/lsp/api/commands.scala | Scala | gpl-3.0 | 8,934 |
package net.liftmodules
import org.specs2.mutable.Specification
object FoBoPopAPISpec extends Specification {
"FoBoPopAPISpec Specification".title
sequential
}
| karma4u101/FoBo | Popper/Popper-API/src/test/scala/net/liftmodules/fobopop/FoBoPopAPISpec.scala | Scala | apache-2.0 | 168 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui
import java.util.{Timer, TimerTask}
import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.UI._
import org.apache.spark.status.api.v1.StageData
/**
 * ConsoleProgressBar shows the progress of stages in the next line of the console. It polls the
 * status of active stages from the app state store periodically; the progress bar is shown
 * once a stage has run for at least 500ms. If multiple stages run at the same time, their
 * statuses are combined together and shown on one line.
*/
private[spark] class ConsoleProgressBar(sc: SparkContext) extends Logging {
// Carriage return
  private val CR = '\r'
// Update period of progress bar, in milliseconds
private val updatePeriodMSec = sc.getConf.get(UI_CONSOLE_PROGRESS_UPDATE_INTERVAL)
// Delay to show up a progress bar, in milliseconds
private val firstDelayMSec = 500L
// The width of terminal
private val TerminalWidth = if (!sys.env.getOrElse("COLUMNS", "").isEmpty) {
sys.env.get("COLUMNS").get.toInt
} else {
80
}
private var lastFinishTime = 0L
private var lastUpdateTime = 0L
private var lastProgressBar = ""
// Schedule a refresh thread to run periodically
private val timer = new Timer("refresh progress", true)
timer.schedule(new TimerTask{
override def run() {
refresh()
}
}, firstDelayMSec, updatePeriodMSec)
/**
* Try to refresh the progress bar in every cycle
*/
private def refresh(): Unit = synchronized {
val now = System.currentTimeMillis()
if (now - lastFinishTime < firstDelayMSec) {
return
}
val stages = sc.statusStore.activeStages()
.filter { s => now - s.submissionTime.get.getTime() > firstDelayMSec }
if (stages.length > 0) {
show(now, stages.take(3)) // display at most 3 stages in same time
}
}
/**
   * Show the progress bar in the console. The progress bar is displayed on the line
   * after your last output and keeps overwriting itself to stay on one line. Log output follows
   * the progress bar; the bar is then redrawn on the next line without overwriting the logs.
*/
private def show(now: Long, stages: Seq[StageData]) {
val width = TerminalWidth / stages.size
val bar = stages.map { s =>
val total = s.numTasks
val header = s"[Stage ${s.stageId}:"
val tailer = s"(${s.numCompleteTasks} + ${s.numActiveTasks}) / $total]"
val w = width - header.length - tailer.length
val bar = if (w > 0) {
val percent = w * s.numCompleteTasks / total
(0 until w).map { i =>
if (i < percent) "=" else if (i == percent) ">" else " "
}.mkString("")
} else {
""
}
header + bar + tailer
}.mkString("")
    // Only refresh if the bar has changed, OR at least once per minute (otherwise an idle ssh
    // connection may be closed after some time)
if (bar != lastProgressBar || now - lastUpdateTime > 60 * 1000L) {
System.err.print(CR + bar)
lastUpdateTime = now
}
lastProgressBar = bar
}
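  // Illustrative rendering (values are hypothetical): with two active stages the line could look
  // roughly like
  //   [Stage 2:=============>          (7 + 2) / 20][Stage 3:>                 (0 + 1) / 15]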
/**
* Clear the progress bar if showed.
*/
private def clear() {
if (!lastProgressBar.isEmpty) {
System.err.printf(CR + " " * TerminalWidth + CR)
lastProgressBar = ""
}
}
/**
* Mark all the stages as finished, clear the progress bar if showed, then the progress will not
* interweave with output of jobs.
*/
def finishAll(): Unit = synchronized {
clear()
lastFinishTime = System.currentTimeMillis()
}
/**
* Tear down the timer thread. The timer thread is a GC root, and it retains the entire
* SparkContext if it's not terminated.
*/
def stop(): Unit = timer.cancel()
}
| WindCanDie/spark | core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala | Scala | apache-2.0 | 4,527 |
package com.tirthal.learning.scala.functional
// See it in action -> Option-Some-None for functional error handling
// 'Some' and 'None' are both subtypes of the 'Option' type
object ErrorHandlingUsingOptionSomeNone extends App {
  // Use the 'Option' type as the return type of a function whose result may be absent (null)
  // Wrap valid results in the 'Some' type and represent nulls/failures with the 'None' type
def convertToInt(numberStr: String): Option[Int] = {
try {
Some(Integer.parseInt(numberStr.trim))
} catch {
case _: NumberFormatException => None
}
}
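  // Illustrative alternative sketch: the same conversion can be written with scala.util.Try,
  // which turns the thrown NumberFormatException into None directly:
  //   def convertToIntViaTry(numberStr: String): Option[Int] =
  //     scala.util.Try(Integer.parseInt(numberStr.trim)).toOption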
// Run it
println(convertToInt("1"))
println(convertToInt("Oops"))
println(convertToInt("100").getOrElse(0))
println(convertToInt("Oops").getOrElse(0))
val bag = List("10", "20", "Ping", "30", "Pong")
val numbers = bag.map(convertToInt)
println(numbers)
println(numbers.map(_.getOrElse(0)))
}
| tirthalpatel/Learning-Scala | ScalaQuickStart/src/main/scala/com/tirthal/learning/scala/functional/ErrorHandlingUsingOptionSomeNone.scala | Scala | mit | 890 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.network.RequestChannel
import kafka.security.authorizer.AuthorizerUtils
import kafka.utils.Logging
import org.apache.kafka.common.acl.AclOperation._
import org.apache.kafka.common.acl.AclBinding
import org.apache.kafka.common.errors._
import org.apache.kafka.common.message.CreateAclsResponseData.AclCreationResult
import org.apache.kafka.common.message._
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.requests._
import org.apache.kafka.common.resource.Resource.CLUSTER_NAME
import org.apache.kafka.common.resource.ResourceType
import org.apache.kafka.server.authorizer._
import java.util
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable
import scala.compat.java8.OptionConverters._
import scala.jdk.CollectionConverters._
/**
* Logic to handle ACL requests.
*/
class AclApis(authHelper: AuthHelper,
authorizer: Option[Authorizer],
requestHelper: RequestHandlerHelper,
name: String,
config: KafkaConfig) extends Logging {
this.logIdent = "[AclApis-%s-%s] ".format(name, config.nodeId)
private val alterAclsPurgatory =
new DelayedFuturePurgatory(purgatoryName = "AlterAcls", brokerId = config.nodeId)
def isClosed: Boolean = alterAclsPurgatory.isShutdown
def close(): Unit = alterAclsPurgatory.shutdown()
def handleDescribeAcls(request: RequestChannel.Request): Unit = {
authHelper.authorizeClusterOperation(request, DESCRIBE)
val describeAclsRequest = request.body[DescribeAclsRequest]
authorizer match {
case None =>
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new DescribeAclsResponse(new DescribeAclsResponseData()
.setErrorCode(Errors.SECURITY_DISABLED.code)
.setErrorMessage("No Authorizer is configured on the broker")
.setThrottleTimeMs(requestThrottleMs),
describeAclsRequest.version))
case Some(auth) =>
val filter = describeAclsRequest.filter
val returnedAcls = new util.HashSet[AclBinding]()
auth.acls(filter).forEach(returnedAcls.add)
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new DescribeAclsResponse(new DescribeAclsResponseData()
.setThrottleTimeMs(requestThrottleMs)
.setResources(DescribeAclsResponse.aclsResources(returnedAcls)),
describeAclsRequest.version))
}
}
def handleCreateAcls(request: RequestChannel.Request): Unit = {
authHelper.authorizeClusterOperation(request, ALTER)
val createAclsRequest = request.body[CreateAclsRequest]
authorizer match {
case None => requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
createAclsRequest.getErrorResponse(requestThrottleMs,
new SecurityDisabledException("No Authorizer is configured.")))
case Some(auth) =>
val allBindings = createAclsRequest.aclCreations.asScala.map(CreateAclsRequest.aclBinding)
val errorResults = mutable.Map[AclBinding, AclCreateResult]()
val validBindings = new ArrayBuffer[AclBinding]
allBindings.foreach { acl =>
val resource = acl.pattern
val throwable = if (resource.resourceType == ResourceType.CLUSTER && !AuthorizerUtils.isClusterResource(resource.name))
new InvalidRequestException("The only valid name for the CLUSTER resource is " + CLUSTER_NAME)
else if (resource.name.isEmpty)
new InvalidRequestException("Invalid empty resource name")
else
null
if (throwable != null) {
debug(s"Failed to add acl $acl to $resource", throwable)
errorResults(acl) = new AclCreateResult(throwable)
} else
validBindings += acl
}
val createResults = auth.createAcls(request.context, validBindings.asJava).asScala.map(_.toCompletableFuture)
def sendResponseCallback(): Unit = {
val aclCreationResults = allBindings.map { acl =>
val result = errorResults.getOrElse(acl, createResults(validBindings.indexOf(acl)).get)
val creationResult = new AclCreationResult()
result.exception.asScala.foreach { throwable =>
val apiError = ApiError.fromThrowable(throwable)
creationResult
.setErrorCode(apiError.error.code)
.setErrorMessage(apiError.message)
}
creationResult
}
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new CreateAclsResponse(new CreateAclsResponseData()
.setThrottleTimeMs(requestThrottleMs)
.setResults(aclCreationResults.asJava)))
}
alterAclsPurgatory.tryCompleteElseWatch(config.connectionsMaxIdleMs, createResults, sendResponseCallback)
}
}
def handleDeleteAcls(request: RequestChannel.Request): Unit = {
authHelper.authorizeClusterOperation(request, ALTER)
val deleteAclsRequest = request.body[DeleteAclsRequest]
authorizer match {
case None =>
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
deleteAclsRequest.getErrorResponse(requestThrottleMs,
new SecurityDisabledException("No Authorizer is configured.")))
case Some(auth) =>
val deleteResults = auth.deleteAcls(request.context, deleteAclsRequest.filters)
.asScala.map(_.toCompletableFuture).toList
def sendResponseCallback(): Unit = {
val filterResults = deleteResults.map(_.get).map(DeleteAclsResponse.filterResult).asJava
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new DeleteAclsResponse(
new DeleteAclsResponseData()
.setThrottleTimeMs(requestThrottleMs)
.setFilterResults(filterResults),
deleteAclsRequest.version))
}
alterAclsPurgatory.tryCompleteElseWatch(config.connectionsMaxIdleMs, deleteResults, sendResponseCallback)
}
}
}
| guozhangwang/kafka | core/src/main/scala/kafka/server/AclApis.scala | Scala | apache-2.0 | 6,922 |
import scala.reflect.runtime.universe._
import scala.reflect.runtime.{universe => ru}
import scala.reflect.runtime.{currentMirror => cm}
import scala.tools.reflect.{ToolBox, mkSilentFrontEnd}
object Test extends App {
val toolbox = cm.mkToolBox(options = "-deprecation", frontEnd = mkSilentFrontEnd())
toolbox.eval(reify{
object Utils {
@deprecated("test", "2.10.0")
def foo: Unit = { println("hello") }
}
Utils.foo
}.tree)
println("============compiler messages============")
toolbox.frontEnd.infos.foreach(println(_))
println("=========================================")
}
| lampepfl/dotty | tests/disabled/macro/run/toolbox_silent_reporter.scala | Scala | apache-2.0 | 613 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.physical.batch
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.plan.nodes.exec.ExecNode
import org.apache.flink.table.planner.plan.nodes.exec.batch.BatchExecLegacyTableSourceScan
import org.apache.flink.table.planner.plan.nodes.physical.common.CommonPhysicalLegacyTableSourceScan
import org.apache.flink.table.planner.plan.schema.LegacyTableSourceTable
import org.apache.flink.table.sources.StreamTableSource
import org.apache.calcite.plan._
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.metadata.RelMetadataQuery
import java.util
/**
* Batch physical RelNode to read data from an external source defined by a
* bounded [[StreamTableSource]].
*/
class BatchPhysicalLegacyTableSourceScan(
cluster: RelOptCluster,
traitSet: RelTraitSet,
tableSourceTable: LegacyTableSourceTable[_])
extends CommonPhysicalLegacyTableSourceScan(cluster, traitSet, tableSourceTable)
with BatchPhysicalRel {
override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
new BatchPhysicalLegacyTableSourceScan(cluster, traitSet, tableSourceTable)
}
override def computeSelfCost(planner: RelOptPlanner, mq: RelMetadataQuery): RelOptCost = {
val rowCnt = mq.getRowCount(this)
if (rowCnt == null) {
return null
}
val cpu = 0
val rowSize = mq.getAverageRowSize(this)
val size = rowCnt * rowSize
planner.getCostFactory.makeCost(rowCnt, cpu, size)
}
override def translateToExecNode(): ExecNode[_] = {
new BatchExecLegacyTableSourceScan(
tableSource,
getTable.getQualifiedName,
FlinkTypeFactory.toLogicalRowType(getRowType),
getRelDetailedDescription)
}
}
| aljoscha/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchPhysicalLegacyTableSourceScan.scala | Scala | apache-2.0 | 2,585 |
package filodb.coordinator
import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}
import scala.collection.immutable
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import akka.Done
import akka.actor._
import akka.cluster._
import akka.cluster.ClusterEvent._
import akka.util.Timeout
import com.typesafe.config.{Config, ConfigFactory}
import com.typesafe.scalalogging.StrictLogging
import kamon.Kamon
import monix.execution.{Scheduler, UncaughtExceptionReporter}
import filodb.core.GlobalScheduler
import filodb.core.memstore.FiloSchedulers
import filodb.core.store.MetaStore
/** The base Coordinator Extension implementation providing standard ActorSystem startup.
* The coordinator module is responsible for cluster coordination and node membership information.
* Changes to the cluster are events that can be subscribed to.
 * Commands to operate the cluster for management are provided based on role/authorization.
*
* Provides a compute scheduler (ec) and one for I/O operations.
*/
object FilodbCluster extends ExtensionId[FilodbCluster] with ExtensionIdProvider {
override def get(system: ActorSystem): FilodbCluster = super.get(system)
override def lookup: ExtensionId[_ <: Extension] = FilodbCluster
override def createExtension(system: ExtendedActorSystem): FilodbCluster = new FilodbCluster(system)
}
/**
* Coordinator Extension Id and factory for creating a basic Coordinator extension.
*/
final class FilodbCluster(val system: ExtendedActorSystem, overrideConfig: Config = ConfigFactory.empty())
extends Extension with StrictLogging {
import ActorName.{NodeGuardianName => guardianName}
import NodeProtocol._
import akka.pattern.ask
val settings = FilodbSettings.initialize(ConfigFactory.load(overrideConfig).withFallback(system.settings.config))
import settings._
implicit lazy val timeout: Timeout = DefaultTaskTimeout
private[coordinator] val _isInitialized = new AtomicBoolean(false)
private val _isJoined = new AtomicBoolean(false)
private val _isTerminated = new AtomicBoolean(false)
private val _cluster = new AtomicReference[Option[Cluster]](None)
private val _coordinatorActor = new AtomicReference[Option[ActorRef]](None)
private val _clusterActor = new AtomicReference[Option[ActorRef]](None)
implicit lazy val ec = GlobalScheduler.globalImplicitScheduler
lazy val ioPool = Scheduler.io(name = FiloSchedulers.IOSchedName,
reporter = UncaughtExceptionReporter(
logger.error("Uncaught Exception in FilodbCluster.ioPool", _)))
/** Initializes columnStore and metaStore using the factory setting from config. */
private lazy val factory = StoreFactory(settings, ioPool)
lazy val metaStore: MetaStore = factory.metaStore
lazy val memStore = factory.memStore
/** The supervisor creates nothing unless specific tasks are requested of it.
* All actions are idempotent. It manages the underlying lifecycle of all node actors.
*/
private[coordinator] lazy val guardian = system.actorOf(NodeGuardian.props(
settings, metaStore, memStore, DefaultShardAssignmentStrategy), guardianName)
def isInitialized: Boolean = _isInitialized.get
def isTerminated: Boolean = _isTerminated.get
def coordinatorActor: ActorRef = _coordinatorActor.get.getOrElse {
val actor = Await.result((guardian ? CreateCoordinator).mapTo[CoordinatorIdentity], DefaultTaskTimeout).identity
logger.info(s"NodeCoordinatorActor created: $actor")
actor
}
def cluster: Cluster = _cluster.get.getOrElse {
val c = Cluster(system)
_cluster.set(Some(c))
c.registerOnMemberUp(startListener())
logger.info(s"Filodb cluster node starting on ${c.selfAddress}")
c
}
def selfAddress: Address = cluster.selfAddress
/** The address including a `uid` of this cluster member. */
def selfUniqueAddress: UniqueAddress = cluster.selfUniqueAddress
/** Current snapshot state of the cluster. */
def state: ClusterEvent.CurrentClusterState = cluster.state
/** Join the cluster using the cluster selfAddress. Idempotent.
* INTERNAL API.
*/
def join(): Unit = join(selfAddress)
/** Join the cluster using the provided address. Idempotent.
* Used by drivers or other users.
* INTERNAL API.
*
* @param address the address from a driver to use for joining the cluster.
* The driver joins using cluster.selfAddress, executors join
* using `spark-driver-addr` configured dynamically during
* a driver's initialization.
*/
def join(address: Address): Unit = cluster join address
/** Join the cluster using the configured seed nodes. Idempotent.
* This action ensures the cluster is joined only after the `NodeCoordinatorActor` is created.
* This is so that when the NodeClusterActor discovers the joined node, it can find the coordinator right away.
* Used by FiloDB server.
*
* This is a static way to join the cluster. For a more dynamic way to join the cluster,
* see the akka-bootstrapper module.
*
* INTERNAL API.
*/
def joinSeedNodes(providerSeeds: immutable.Seq[Address]): Unit = {
val seeds = if (providerSeeds.nonEmpty) providerSeeds else SeedNodes.map(AddressFromURIString.apply)
logger.info(s"Attempting to join cluster with seed nodes $seeds")
cluster.joinSeedNodes(seeds)
}
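  // Illustrative usage sketch (hypothetical address and system name): a server node joining a
  // statically configured cluster might do
  //   val seeds = List(AddressFromURIString("akka.tcp://filo-standalone@10.0.0.1:2552"))
  //   FilodbCluster(system).joinSeedNodes(seeds)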
/** Returns true if self-node has joined the cluster and is MemberStatus.Up.
* Returns false if local node is removed from the cluster, by graceful leave
* or failure/unreachable downing.
*/
def isJoined: Boolean = _isJoined.get
/** Returns true if the node for the given `address` is unreachable and `Down`. */
def isUnreachable(address: Address): Boolean = state.unreachable.exists(m =>
m.address == address && m.status == MemberStatus.Down)
/** All roles but the `Cli` create this actor. `Server` creates
* it as a val. `Executor` creates it after calling join on cluster.
* `Driver` creates it after initializing metaStore and all executors.
*/
def clusterActor: Option[ActorRef] = _clusterActor.get
/** Returns a singleton proxy reference to the NodeClusterActor.
* Only one will exist per cluster. This should be called on every FiloDB
* Coordinator/ingestion node. The proxy can be started on every node where
* the singleton needs to be reached. If withManager is true, additionally
* creates a ClusterSingletonManager.
*
* Idempotent.
*
* @param role the FilodbClusterNode.role
*
* @param watcher an optional Test watcher
*/
def clusterSingleton(role: ClusterRole, watcher: Option[ActorRef]): ActorRef =
_clusterActor.get.getOrElse {
val e = CreateClusterSingleton(role.roleName, watcher)
val actor = Await.result((guardian ? e).mapTo[ClusterSingletonIdentity], DefaultTaskTimeout).identity
_clusterActor.set(Some(actor))
_isInitialized.set(true)
actor
}
/**
* Hook into Akka's CoordinatedShutdown sequence
* Please see https://doc.akka.io/docs/akka/current/actors.html#coordinated-shutdown for details
*/
CoordinatedShutdown(system).addTask(CoordinatedShutdown.PhaseServiceUnbind, "queryShutdown") { () =>
implicit val timeout = Timeout(15.seconds)
// Reset shuts down all ingestion and query actors on this node
// TODO: be sure that status gets updated across cluster?
(coordinatorActor ? NodeProtocol.ResetState).map(_ ⇒ Done)
}
// TODO: hook into service-stop "forcefully kill connections?" Maybe send each outstanding query "TooBad" etc.
CoordinatedShutdown(system).addTask(CoordinatedShutdown.PhaseBeforeClusterShutdown, "storeShutdown") { () =>
_isInitialized.set(false)
logger.info("Terminating: starting shutdown")
try {
metaStore.shutdown()
memStore.shutdown()
ioPool.shutdown()
} catch {
case _: Exception =>
system.terminate()
ioPool.shutdown()
}
finally _isTerminated.set(true)
Future.successful(Done)
}
/**
* Invokes CoordinatedShutdown to shut down the ActorSystem, leave the Cluster, and our hooks above
* which shuts down the stores and threadpools.
* NOTE: Depending on the setting of coordinated-shutdown.exit-jvm, this might cause the JVM to exit.
*/
protected[filodb] def shutdown(): Unit = {
CoordinatedShutdown(system).run(CoordinatedShutdown.UnknownReason)
}
  /** For use when the `akka.cluster.Member` status is needed outside an actor.
    * Creates a temporary listener actor which subscribes to `akka.cluster.MemberUp` and
    * `akka.cluster.MemberRemoved` events for the local node, and privately sets the
    * appropriate node flag internally. When the local node is removed, the listener
    * clears the flag, unsubscribes and stops itself.
    */
private def startListener(): Unit = {
cluster.subscribe(system.actorOf(Props(new Actor {
guardian ! NodeProtocol.ListenerRef(self)
def receive: Actor.Receive = {
case e: MemberUp if e.member.address == selfAddress =>
_isJoined.set(true)
case e: MemberRemoved if e.member.address == selfAddress =>
_isJoined.set(false)
cluster unsubscribe self
context stop self
}
})), InitialStateAsEvents, classOf[MemberUp], classOf[MemberRemoved])
}
}
private[filodb] trait KamonInit {
Kamon.init()
}
/** Mixin for easy usage of the FiloDBCluster Extension.
* Used by all `ClusterRole` nodes starting an ActorSystem and FiloDB Cluster nodes.
*/
private[filodb] trait FilodbClusterNode extends KamonInit with NodeConfiguration with StrictLogging {
def role: ClusterRole
/** Override to pass in additional module config. */
protected lazy val roleConfig: Config = ConfigFactory.empty
/** The `ActorSystem` used to create the FilodbCluster Akka Extension. */
final lazy val system = {
val allConfig = roleConfig.withFallback(role match {
// For CLI: leave off Cluster extension as cluster is not needed. Turn off normal shutdown for quicker exit.
case ClusterRole.Cli => ConfigFactory.parseString(
"""# akka.actor.provider=akka.remote.RemoteActorRefProvider
|akka.coordinated-shutdown.run-by-jvm-shutdown-hook=off
""".stripMargin)
case _ => ConfigFactory.parseString(s"""akka.cluster.roles=["${role.roleName}"]""")
}).withFallback(systemConfig)
ActorSystem(role.systemName, allConfig)
}
lazy val cluster = FilodbCluster(system)
implicit lazy val ec = cluster.ec
lazy val metaStore: MetaStore = cluster.metaStore
/** If `role` is `ClusterRole.Cli`, the `FilodbCluster` `isInitialized`
* flag is set here, on creation of the `NodeCoordinatorActor`. All other
* roles are marked as initialized after `NodeClusterActor` is created.
*/
lazy val coordinatorActor: ActorRef = {
val actor = cluster.coordinatorActor
role match {
case ClusterRole.Cli if actor != Actor.noSender =>
cluster._isInitialized.set(true)
case _ =>
}
actor
}
/** Returns a singleton proxy reference to the `NodeClusterActor`. */
def clusterSingleton(role: ClusterRole, watcher: Option[ActorRef]): ActorRef =
cluster.clusterSingleton(role, watcher)
def shutdown(): Unit = cluster.shutdown()
}
| filodb/FiloDB | coordinator/src/main/scala/filodb.coordinator/FilodbCluster.scala | Scala | apache-2.0 | 11,299 |
package ee.cone.c4ui
import java.net.URL
import java.util.UUID
import ee.cone.c4actor_branch.BranchTypes.BranchKey
import ee.cone.c4actor.LEvent.{delete, update}
import ee.cone.c4actor.Types.SrcId
import ee.cone.c4actor._
import ee.cone.c4actor_branch._
import ee.cone.c4assemble.Types.{Each, Values}
import ee.cone.c4assemble.{Assemble, assemble, c4assemble}
import ee.cone.c4gate.AlienProtocol.{U_FromAlienState, U_ToAlienWrite}
import ee.cone.c4gate.HttpProtocol.S_HttpRequest
import ee.cone.c4gate.LocalHttpConsumer
import ee.cone.c4di.c4
import okio.ByteString
import scala.collection.immutable.Seq
case object ToAlienPriorityKey extends TransientLens[java.lang.Long](0L)
@c4("AlienExchangeCompApp") final class ToAlienSenderImpl(
txAdd: LTxAdd,
) extends ToAlienSender {
def send(sessionKeys: Seq[String], evType: String, data: String): Context => Context = local =>
if(sessionKeys.isEmpty) local else doSend(sessionKeys, evType, data, local)
private def doSend(sessionKeys: Seq[String], evType: String, data: String, local: Context): Context = {
val priority = ToAlienPriorityKey.of(local)
val messages = sessionKeys.zipWithIndex.flatMap{
case (sessionKey,i) =>
val id = UUID.randomUUID.toString
update(U_ToAlienWrite(id,sessionKey,evType,data,priority+i))
}
//println(s"messages: $messages")
ToAlienPriorityKey.modify(_+sessionKeys.size).andThen(txAdd.add(messages))(local)
}
}
case class MessageFromAlienImpl(
srcId: String,
headers: Map[String,String],
request: S_HttpRequest
) extends BranchMessage {
def method: String = request.method match { case "" => "POST" case m => m }
def header: String => String = k => headers.getOrElse(k,"")
def body: ByteString = request.body
def deletes: Seq[LEvent[Product]] = delete(request)
}
@c4assemble("AlienExchangeCompApp") class MessageFromAlienAssembleBase {
def mapHttpReqByBranch(
key: SrcId,
req: Each[S_HttpRequest]
): Values[(BranchKey, BranchMessage)] = if(req.path != "/connection") Nil else for(
headers <- List(req.headers.flatMap(h =>
if(h.key.startsWith("x-r-")) List(h.key->h.value) else Nil
).toMap);
branchKey <- headers.get("x-r-branch");
index <- headers.get("x-r-index").map(_.toLong)
) yield branchKey -> MessageFromAlienImpl(req.srcId,headers,req)
def consumersForHandlers(
key: SrcId,
h: Each[BranchHandler]
): Values[(SrcId,LocalHttpConsumer)] =
List(WithPK(LocalHttpConsumer(h.branchKey)))
}
@c4assemble("AlienExchangeCompApp") class FromAlienBranchAssembleBase(operations: BranchOperations) {
  // a richer session may be joined
def fromAliensToSeeds(
key: SrcId,
fromAlien: Each[U_FromAlienState]
): Values[(BranchKey, BranchRel)] = {
val child = operations.toSeed(fromAlien)
List(operations.toRel(child, fromAlien.sessionKey, parentIsSession = true))
}
}
@assemble class FromAlienTaskAssembleBase(file: String) {
def mapBranchTaskByLocationHash(
key: SrcId,
task: Each[BranchTask]
): Values[(SrcId, FromAlienTask)] =
for (
fromAlien <- List(task.product).collect { case s: U_FromAlienState => s };
url <- Option(new URL(fromAlien.location))
if /*url.getHost == host && (*/ url.getFile == file || url.getPath == file
) yield task.branchKey -> FromAlienTask(
task.branchKey,
task,
fromAlien,
Option(url.getQuery).getOrElse(""),
Option(url.getRef).getOrElse("")
)
}
| conecenter/c4proto | base_lib/src/main/scala/ee/cone/c4ui/AlienExchangeImpl.scala | Scala | apache-2.0 | 3,474 |
package test
import dotty.tools.dotc.core._
import dotty.tools.dotc.core.Contexts._
import dotty.tools.dotc.core.Symbols._
import dotty.tools.dotc.core.Flags._
import Types._, Symbols._, Decorators._
import dotty.tools.dotc.printing.Texts._
import dotty.tools.dotc.reporting.ConsoleReporter
import dotty.tools.dotc.core.Decorators._
import dotty.tools.dotc.ast.tpd
import dotty.tools.dotc.Compiler
import dotty.tools.dotc
import dotty.tools.dotc.core.Phases.Phase
class DottyTest {
dotty.tools.dotc.parsing.Scanners // initialize keywords
implicit val ctx: Context = {
val base = new ContextBase
import base.settings._
val ctx = base.initialCtx.fresh
.withSetting(verbose, true)
// .withSetting(debug, true)
// .withSetting(debugTrace, true)
// .withSetting(prompt, true)
.withSetting(Ylogcp, true)
.withSetting(printtypes, true)
.withSetting(pageWidth, 90)
.withSetting(log, List("<some"))
// .withTyperState(new TyperState(new ConsoleReporter()(base.initialCtx)))
// .withSetting(uniqid, true)
println(ctx.settings)
base.definitions.init(ctx)
ctx
}
private def compilerWithChecker(phase: String)(assertion:(tpd.Tree, Context) => Unit) = new Compiler {
override def phases = {
val allPhases = super.phases
      val targetPhase = allPhases.find(p => p.name == phase)
      assert(targetPhase.isDefined)
      val phasesBefore = allPhases.takeWhile(x => !(x eq targetPhase.get))
val checker = new Phase{
def name = "assertionChecker"
override def run(implicit ctx: Context): Unit = assertion(ctx.compilationUnit.tpdTree, ctx)
}
phasesBefore:::List(targetPhase.get, checker)
}
}
def checkCompile(checkAfterPhase: String, source:String)(assertion:(tpd.Tree, Context) => Unit): Unit = {
val c = compilerWithChecker(checkAfterPhase)(assertion)
c.rootContext(ctx)
val run = c.newRun
run.compile(source)
}
def checkCompile(checkAfterPhase: String, sources:List[String])(assertion:(tpd.Tree, Context) => Unit): Unit = {
val c = compilerWithChecker(checkAfterPhase)(assertion)
c.rootContext(ctx)
val run = c.newRun
run.compile(sources)
}
def methType(names: String*)(paramTypes: Type*)(resultType: Type = defn.UnitType) =
MethodType(names.toList map (_.toTermName), paramTypes.toList, resultType)
}
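
// Hedged usage sketch (added for illustration, not part of the original file): one way the
// checkCompile helper above might be driven from a concrete test. The phase name "frontend"
// and the source snippet are assumptions, not taken from this repository.
class DottyTestUsageSketch extends DottyTest {
  def exampleCompileCheck(): Unit =
    checkCompile("frontend", "class A { def f: Int = 41 + 1 }") { (tree, compilationCtx) =>
      // The assertion receives the typed tree and the context after the target phase.
      assert(tree != null)
    }
}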
| DarkDimius/dotty | test/test/DottyTest.scala | Scala | bsd-3-clause | 2,407 |
package modules
import com.google.inject.{ AbstractModule, Provides }
import models.Thing
import models.daos._
import services.ThingService
import services.impl.ThingServiceImpl
import net.codingwell.scalaguice.ScalaModule
import play.api.Configuration
import play.api.libs.concurrent.Execution.Implicits._
class IdeenModule extends AbstractModule with ScalaModule {
/**
* Configures the module.
*/
def configure() {
bind[ThingService].to[ThingServiceImpl]
bind[ThingDao].to[ThingDaoSlick]
}
}
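
// Hedged usage note (illustration only, not part of the original file): with this module
// enabled in configuration (e.g. `play.modules.enabled += "modules.IdeenModule"` — an assumed
// setup), Guice can inject the bound service into a controller constructor, for example:
//   class ThingController @Inject() (thingService: ThingService) extends Controller { ... }
// The controller name is an assumption.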
| sne11ius/ideen | app/modules/IdeenModule.scala | Scala | gpl-3.0 | 518 |
package me.sgrouples.rogue.cc.macros
import java.util.concurrent.atomic.AtomicInteger
import me.sgrouples.rogue.cc.{Marker, NamesResolver}
import shapeless.tag
import shapeless.tag.@@
import scala.collection.mutable
trait MacroNamesResolver[T] extends NamesResolver {
private var resolved: Boolean = false
private[this] val nameId = new AtomicInteger(-1)
private[this] def nextNameId = nameId.incrementAndGet()
private[this] def resolveError(id: Int): Nothing =
throw new IllegalStateException(debugInfo(id))
private[cc] def debugInfo(id: Int): String = {
s"error macro resolving id ${id}"
}
private[this] val fields: mutable.Map[String, io.fsq.field.Field[_, _]] =
mutable.Map.empty
private[this] val _validNames: mutable.Set[String] = mutable.Set.empty[String]
protected def macroGen: MacroBsonFormat[T]
//val macroGenProvided: MacroGen[T] = implicitly[MacroGen[T]]
//implicitly[MacroGen[T]]
def resolve(): Unit = {
// _validNames ++= macroGen.validNames()
resolved = true
}
override def named[T <: io.fsq.field.Field[_, _]](
name: String
)(func: String => T): T @@ Marker = {
if (!resolved) resolve()
//if (!_validNames.contains(name)) throw new IllegalArgumentException(s"no field named ${name} found in ${this.getClass.getName}")
val field = func(name)
if (fields.contains(name))
throw new IllegalArgumentException(
s"Field with name $name is already defined"
)
fields += (name -> field)
tag[Marker][T](field)
}
override def named[T <: io.fsq.field.Field[_, _]](
func: String => T
): T @@ Marker = {
val caller = Thread.currentThread().getStackTrace()(1)
new RuntimeException().printStackTrace()
throw new IllegalArgumentException(
s"[${caller.getClassName}:L${caller.getLineNumber}] named without name not supported in macros. use @f [me.sgrouples.rogue.cc.macros.f] or provide name"
)
}
}
| sgrouples/rogue-fsqio | cc/src/main/scala/me/sgrouples/rogue/cc/macros/MacroNamesResolver.scala | Scala | apache-2.0 | 1,938 |
package controllers
import play.api.mvc.{Controller, Action}
object OptionsController extends Controller {
def options(path: String) = Action {
implicit request =>
// Access-Control headers are handled by the CORSFilter, so we just reply OK
Ok
}
}
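
// Hedged wiring note (illustration only, not part of the original file): in a Play
// "conf/routes" file this action is typically bound to every path for the OPTIONS method,
// for example:
//   OPTIONS /*path controllers.OptionsController.options(path: String)
// The exact route line is an assumption; the project may wire it differently.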
| statwarn/alerting-api | app/controllers/OptionsController.scala | Scala | mit | 270 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.builders
import java.io.{Reader, StringReader}
import cats.effect.ExitCase
import minitest.SimpleTestSuite
import minitest.laws.Checkers
import monix.eval.Task
import monix.execution.Ack
import monix.execution.Ack.Continue
import monix.execution.ExecutionModel.{AlwaysAsyncExecution, BatchedExecution, SynchronousExecution}
import monix.execution.exceptions.APIContractViolationException
import monix.execution.schedulers.TestScheduler
import monix.reactive.Observable
import monix.execution.exceptions.DummyException
import monix.reactive.observers.Subscriber
import org.scalacheck.{Gen, Prop}
import scala.collection.mutable.ArrayBuffer
import scala.util.{Failure, Random, Success}
object CharsReaderObservableSuite extends SimpleTestSuite with Checkers {
test("fromCharsReaderUnsafe yields a single subscriber observable") {
implicit val s = TestScheduler()
var errorThrown: Throwable = null
val obs = Observable.fromCharsReaderUnsafe(new StringReader(randomString()))
obs.unsafeSubscribeFn(Subscriber.empty(s))
s.tick()
obs.unsafeSubscribeFn(new Subscriber[Array[Char]] {
implicit val scheduler = s
def onNext(elem: Array[Char]): Ack =
throw new IllegalStateException("onNext")
def onComplete(): Unit =
throw new IllegalStateException("onComplete")
def onError(ex: Throwable): Unit =
errorThrown = ex
})
assert(errorThrown.isInstanceOf[APIContractViolationException])
assert(s.state.tasks.isEmpty, "should be left with no pending tasks")
}
test("fromCharsReaderUnsafe should throw if the chunkSize is zero") {
val str = randomString()
val in = new StringReader(str)
val error = intercept[IllegalArgumentException] {
Observable.fromCharsReaderUnsafe(in, 0)
()
}
assert(error.getMessage.contains("chunkSize"))
}
test("fromCharsReaderUnsafe should throw if the chunkSize is negative") {
val str = randomString()
val in = new StringReader(str)
val error = intercept[IllegalArgumentException] {
Observable.fromCharsReaderUnsafe(in, -1)
()
}
assert(error.getMessage.contains("chunkSize"))
}
test("fromCharsReaderUnsafe works for BatchedExecution") {
implicit val s = TestScheduler(BatchedExecution(1024))
val string = randomString()
val in = new StringReader(string)
val result = Observable
.fromCharsReaderUnsafe(in, 40)
.foldLeft(Array.empty[Char])(_ ++ _)
.runAsyncGetFirst
.map(_.map(arr => new String(arr)))
s.tick()
assertEquals(result.value, Some(Success(Some(string))))
assert(s.state.tasks.isEmpty, "should be left with no pending tasks")
}
test("fromCharsReaderUnsafe works for AlwaysAsyncExecution") {
implicit val s = TestScheduler(AlwaysAsyncExecution)
val string = randomString()
val in = new StringReader(string)
val result = Observable
.fromCharsReaderUnsafe(in, 40)
.foldLeft(Array.empty[Char])(_ ++ _)
.runAsyncGetFirst
.map(_.map(arr => new String(arr)))
s.tick()
assertEquals(result.value, Some(Success(Some(string))))
assert(s.state.tasks.isEmpty, "should be left with no pending tasks")
}
test("fromCharsReaderUnsafe works for SynchronousExecution") {
implicit val s = TestScheduler(SynchronousExecution)
var wasCompleted = 0
val received = ArrayBuffer.empty[Char]
val string = randomString()
val in = new StringReader(string)
val obs: Observable[Array[Char]] = Observable
.fromCharsReaderUnsafe(in)
.foldLeft(Array.empty[Char])(_ ++ _)
obs.unsafeSubscribeFn(new Subscriber[Array[Char]] {
implicit val scheduler = s
def onError(ex: Throwable): Unit =
throw new IllegalStateException("onError")
def onComplete(): Unit =
wasCompleted += 1
def onNext(elem: Array[Char]): Ack = {
received.appendAll(elem)
Continue
}
})
s.tick()
assertEquals(new String(received.toArray), string)
assertEquals(wasCompleted, 1)
assert(s.state.tasks.isEmpty, "should be left with no pending tasks")
}
test("fromCharsReader closes the file handle onComplete") {
implicit val s = TestScheduler()
var wasClosed = false
val in = randomReaderWithOnFinish(() => wasClosed = true)
val f = Observable.fromCharsReaderF(Task(in)).completedL.runToFuture
s.tick()
assertEquals(f.value, Some(Success(())))
assert(wasClosed, "Reader should have been closed")
assert(s.state.tasks.isEmpty, "should be left with no pending tasks")
}
test("fromCharsReader closes the file handle onError on first call") {
implicit val s = TestScheduler()
var wasClosed = false
val ex = DummyException("dummy")
val in = inputWithError(ex, 1, () => wasClosed = true)
val f = Observable.fromCharsReaderF(Task(in)).completedL.runToFuture
s.tick()
assertEquals(f.value, Some(Failure(ex)))
assert(wasClosed, "Reader should have been closed")
assert(s.state.tasks.isEmpty, "should be left with no pending tasks")
}
test("fromCharsReader closes the file handle onError on second call") {
implicit val s = TestScheduler()
var wasClosed = false
val ex = DummyException("dummy")
val in = inputWithError(ex, 2, () => wasClosed = true)
val f = Observable.fromCharsReaderF(Task(in)).completedL.runToFuture
s.tick()
assertEquals(f.value, Some(Failure(ex)))
assert(wasClosed, "Reader should have been closed")
assert(s.state.tasks.isEmpty, "should be left with no pending tasks")
}
test("fromCharsReader closes the file handle on cancel") {
for (_ <- 0 until 100) {
import scala.concurrent.duration._
implicit val s = TestScheduler(AlwaysAsyncExecution)
var wasClosed = false
var wasCanceled = 0
var wasStarted = false
var wasCompleted = false
val f = Observable
.fromCharsReaderF(Task.pure(
randomReaderWithOnFinish(() => wasClosed = true)
))
.flatMap { _ =>
Observable.suspend {
wasStarted = true
Observable.eval {
assert(!wasClosed, "Resource should be available")
}.delayExecution(1.second).guaranteeCase {
case ExitCase.Canceled =>
Task { wasCanceled += 1 }
case _ =>
Task { wasCompleted = true }
}
}
}
.doOnSubscriptionCancel(Task { wasCanceled += 2 })
.completedL
.runToFuture
s.tick()
f.cancel()
s.tick()
// Test needed because string could be empty
if (wasStarted) {
assertEquals(f.value, None)
assert(!wasCompleted, "Task shouldn't have completed")
assertEquals(wasCanceled, 3)
}
assert(wasClosed, "Reader should have been closed")
assertEquals(s.state.lastReportedError, null)
assert(s.state.tasks.isEmpty, "should be left with no pending tasks")
}
}
test("fromCharsReader does not block on initial execution") {
implicit val s = TestScheduler()
var didRead = false
val reader = new Reader() {
def read(cbuf: Array[Char], off: Int, len: Int): Int = {
didRead = true
-1
}
def close(): Unit = ()
}
// Should not fail without s.tick()
Observable.fromCharsReaderUnsafe(reader).foreach(_ => ())
assert(!didRead)
}
test("fromCharsReader signals an error if the chunk size is zero or negative") {
implicit val s = TestScheduler()
val str = randomString()
val in = new StringReader(str)
val f = Observable
.fromCharsReader(Task(in), 0)
.completedL
.runToFuture
s.tick()
intercept[IllegalArgumentException] {
f.value.get.get
()
}
assert(s.state.tasks.isEmpty, "should be left with no pending tasks")
}
test("fromCharsReader completes normally for files with size == 0") {
implicit val s = TestScheduler()
val in = new StringReader("")
val f = Observable
.fromCharsReader(Task(in))
.map(_.length)
.sumL
.runToFuture
s.tick()
assertEquals(f.value.get.get, 0)
}
test("fromCharsReader fills the buffer up to 'chunkSize' if possible") {
implicit val s = TestScheduler()
val gen = for {
nCharsPerLine <- Gen.choose(1, 150)
nLines <- Gen.choose(1, 250)
chunkSize <- Gen.choose(Math.floorDiv(nCharsPerLine, 2).max(1), nCharsPerLine * 2)
} yield (nCharsPerLine, nLines, chunkSize)
val prop = Prop
.forAllNoShrink(gen) { // do not shrink to avoid a zero for the chunkSize
case (nCharsPerLine, nLines, chunkSize) =>
val str = randomString(nLines, 1, nCharsPerLine, 1)
val forcedReadSize = Math.floorDiv(nCharsPerLine, 10).max(1) // avoid zero-char reads
val in = inputWithStaggeredChars(forcedReadSize, new StringReader(str))
val f = Observable
.fromCharsReader(Task(in), chunkSize)
.foldLeftL(Vector.empty[Int]) {
case (acc, charArray) => acc :+ charArray.length
}
.runToFuture
s.tick()
val resultChunkSizes = f.value.get.get
if (str.length > chunkSize) // all values except the last should be equal to the chunkSize
resultChunkSizes.init.forall(_ == chunkSize) && resultChunkSizes.last <= chunkSize
else resultChunkSizes.head <= chunkSize
}
check(prop)
}
def inputWithStaggeredChars(forcedReadSize: Int, underlying: StringReader): Reader = {
new Reader {
override def read(): Int = underlying.read()
override def read(b: Array[Char]): Int =
read(b, 0, forcedReadSize)
override def read(b: Array[Char], off: Int, len: Int): Int =
underlying.read(b, off, forcedReadSize.min(len))
override def close(): Unit = underlying.close()
}
}
def inputWithError(ex: Throwable, whenToThrow: Int, onFinish: () => Unit): Reader =
new Reader {
private[this] var callIdx = 0
def read(cbuf: Array[Char], off: Int, len: Int): Int = {
callIdx += 1
if (callIdx == whenToThrow) throw ex
else if (off < len) {
cbuf(off) = 'a'; 1
} else 0
}
override def close(): Unit =
onFinish()
}
def randomReaderWithOnFinish(onFinish: () => Unit): Reader = {
val string = randomString()
val underlying = new StringReader(string)
new Reader {
def read(cbuf: Array[Char], off: Int, len: Int): Int =
underlying.read(cbuf, off, len)
override def close(): Unit = {
underlying.close()
onFinish()
}
}
}
def randomString(
nLines: Int = 100,
nMinLines: Int = 0,
nCharsPerLine: Int = 100,
nMinCharsPerLine: Int = 0): String = {
val chars = (('a' to 'z') ++ ('A' to 'Z') ++ ('0' to '9')).toVector
val builder = new StringBuilder
val lines = Random.nextInt(nLines).max(nMinLines)
for (_ <- 0 until lines) {
val lineLength = Random.nextInt(nCharsPerLine).max(nMinCharsPerLine)
val line = for (_ <- 0 until lineLength) yield chars(Random.nextInt(chars.length))
builder.append(new String(line.toArray))
      builder.append('\n')
}
builder.toString()
}
}
| monixio/monix | monix-reactive/shared/src/test/scala/monix/reactive/internal/builders/CharsReaderObservableSuite.scala | Scala | apache-2.0 | 12,033 |
package gitbucket.core.model
trait AccountFederationComponent { self: Profile =>
import profile.api._
lazy val AccountFederations = TableQuery[AccountFederations]
class AccountFederations(tag: Tag) extends Table[AccountFederation](tag, "ACCOUNT_FEDERATION") {
val issuer = column[String]("ISSUER")
val subject = column[String]("SUBJECT")
val userName = column[String]("USER_NAME")
def * = (issuer, subject, userName) <> (AccountFederation.tupled, AccountFederation.unapply)
def byPrimaryKey(issuer: String, subject: String): Rep[Boolean] =
(this.issuer === issuer.bind) && (this.subject === subject.bind)
}
}
case class AccountFederation(issuer: String, subject: String, userName: String)
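
// Hedged usage sketch (illustration only, not part of the original file): inside code that
// mixes in AccountFederationComponent, a lookup of the linked user name by the
// (issuer, subject) pair could use the byPrimaryKey helper above. Names are assumptions.
//
//   def findUserName(issuer: String, subject: String) =
//     AccountFederations
//       .filter(_.byPrimaryKey(issuer, subject))
//       .map(_.userName)
//       .result
//       .headOption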
| McFoggy/gitbucket | src/main/scala/gitbucket/core/model/AccountFederation.scala | Scala | apache-2.0 | 728 |
package guiobjects
import communication.PlayerClient
import gamelogic.{GameAction, GameEvents, GameState}
import gui._
/**
* This represents a Player sitting at the table.
*/
class PlayerFrame(val player: String, playerClient: PlayerClient) extends Frame(UIParent) {
setWidth(200)
setHeight(50)
private val border = createTexture(layer = Artwork)
border.setAllPoints()
border.setVertexColor(
PlayerFrame.frontColor._1, PlayerFrame.frontColor._2, PlayerFrame.frontColor._3
)
border.setMode(LineMode)
border.lineWidth = 3
private val background = createTexture(layer = Artwork)
background.setAllPoints()
background.setVertexColor(
PlayerFrame.backgroundColor._1, PlayerFrame.backgroundColor._2, PlayerFrame.backgroundColor._3, 0.7
)
private val playerName = createFontString()
playerName.setPoint(TopLeft, this, TopLeft, 5, 0)
playerName.setSize(150, 25)
playerName.setText(player)
playerName.setTextColor(
PlayerFrame.frontColor._1, PlayerFrame.frontColor._2, PlayerFrame.frontColor._3
)
playerName.setJustifyH(JustifyLeft)
private val playerPoints = createFontString()
playerPoints.setPoint(TopRight, this, TopRight, -5, 0)
playerPoints.setPoint(BottomLeft, playerName, BottomRight)
playerPoints.setText(playerClient.currentGameState.points(player).toString)
playerPoints.setTextColor(
PlayerFrame.frontColor._1, PlayerFrame.frontColor._2, PlayerFrame.frontColor._3
)
playerPoints.setJustifyH(JustifyRight)
private val placeInTrick = createFontString()
placeInTrick.setPoint(BottomRight, this, BottomRight)
placeInTrick.setSize(20, 20)
placeInTrick.setText("")
placeInTrick.setTextColor(0,0,0)
/**
   * Used when hovering over the "View Last Trick" button.
*/
def setPlaceInTrickTextAndShow(t: String, visible: Boolean): Unit = {
placeInTrick.setText(t)
if (visible) placeInTrick.show() else placeInTrick.hide()
}
private val tricks = createFontString()
tricks.setPoint(BottomLeft, this, BottomLeft, 5, 0)
tricks.setPoint(TopRight, playerPoints, BottomRight)
setTrickText()
tricks.setTextColor(
PlayerFrame.frontColor._1, PlayerFrame.frontColor._2, PlayerFrame.frontColor._3
)
private def setTrickText(gameState: GameState = playerClient.currentGameState): Unit =
tricks.setText(s"${gameState.tricks.getOrElse(player, "")} / " +
s"${gameState.bets.getOrElse(player, "")}")
def colorFocus(flag: Boolean): Unit = {
if (flag) {
border.setVertexColor(
PlayerFrame.focusedColor._1, PlayerFrame.focusedColor._2, PlayerFrame.focusedColor._3
)
} else {
border.setVertexColor(
PlayerFrame.frontColor._1, PlayerFrame.frontColor._2, PlayerFrame.frontColor._3
)
}
}
registerEvent(GameEvents.onActionTaken)((_: Frame, gameState: GameState, _: GameAction) => {
playerPoints.setText(gameState.points(player).toString)
setTrickText(gameState)
try {
colorFocus(player == gameState.turnOfPlayer._1)
} catch {
case _: Throwable =>
}
if (playerClient.playerName != player) {
if (gameState.hands.isDefinedAt(player) && gameState.hands(player).nonEmpty) {
handFrame.show()
handFrame.updateCardPlaces(player)
} else {
handFrame.hide()
}
}
})
val handFrame: HandFrame = new HandFrame(player, playerClient, frameWidth = 70.0)
handFrame.removeScript(ScriptKind.OnClick)
handFrame.setParent(this)
handFrame.clearAllPoints()
handFrame.setPoint(TopRight, this, BottomRight, 0, -10)
handFrame.hide()
}
object PlayerFrame {
private val backgroundColor: (Double, Double, Double) = (0.85, 0.85, 0.85)
private val frontColor: (Double, Double, Double) = (70 / 255.0, 130 / 255.0, 1.0)
private val focusedColor: (Double, Double, Double) = (1.0, 165 / 255.0, 0.0)
} | sherpal/oh-hell-card-game | gameplaying/src/main/scala/guiobjects/PlayerFrame.scala | Scala | mit | 3,955 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.ui
import java.text.SimpleDateFormat
import java.util.Date
import java.util.concurrent.TimeUnit
import javax.servlet.http.HttpServletRequest
import scala.collection.mutable.ArrayBuffer
import scala.xml.{Node, Unparsed}
import org.apache.spark.Logging
import org.apache.spark.ui._
import org.apache.spark.ui.{UIUtils => SparkUIUtils}
/**
* A helper class to generate JavaScript and HTML for both timeline and histogram graphs.
*
* @param timelineDivId the timeline `id` used in the html `div` tag
 * @param histogramDivId the histogram `id` used in the html `div` tag
* @param data the data for the graph
* @param minX the min value of X axis
* @param maxX the max value of X axis
* @param minY the min value of Y axis
* @param maxY the max value of Y axis
* @param unitY the unit of Y axis
* @param batchInterval if `batchInterval` is not None, we will draw a line for `batchInterval` in
* the graph
*/
private[ui] class GraphUIData(
timelineDivId: String,
histogramDivId: String,
data: Seq[(Long, Double)],
minX: Long,
maxX: Long,
minY: Double,
maxY: Double,
unitY: String,
batchInterval: Option[Double] = None) {
private var dataJavaScriptName: String = _
def generateDataJs(jsCollector: JsCollector): Unit = {
val jsForData = data.map { case (x, y) =>
s"""{"x": $x, "y": $y}"""
}.mkString("[", ",", "]")
dataJavaScriptName = jsCollector.nextVariableName
jsCollector.addPreparedStatement(s"var $dataJavaScriptName = $jsForData;")
}
def generateTimelineHtml(jsCollector: JsCollector): Seq[Node] = {
jsCollector.addPreparedStatement(s"registerTimeline($minY, $maxY);")
if (batchInterval.isDefined) {
jsCollector.addStatement(
"drawTimeline(" +
s"'#$timelineDivId', $dataJavaScriptName, $minX, $maxX, $minY, $maxY, '$unitY'," +
s" ${batchInterval.get}" +
");")
} else {
jsCollector.addStatement(
s"drawTimeline('#$timelineDivId', $dataJavaScriptName, $minX, $maxX, $minY, $maxY," +
s" '$unitY');")
}
<div id={timelineDivId}></div>
}
def generateHistogramHtml(jsCollector: JsCollector): Seq[Node] = {
val histogramData = s"$dataJavaScriptName.map(function(d) { return d.y; })"
jsCollector.addPreparedStatement(s"registerHistogram($histogramData, $minY, $maxY);")
if (batchInterval.isDefined) {
jsCollector.addStatement(
"drawHistogram(" +
s"'#$histogramDivId', $histogramData, $minY, $maxY, '$unitY', ${batchInterval.get}" +
");")
} else {
jsCollector.addStatement(
s"drawHistogram('#$histogramDivId', $histogramData, $minY, $maxY, '$unitY');")
}
<div id={histogramDivId}></div>
}
}
/**
* A helper class for "scheduling delay", "processing time" and "total delay" to generate data that
* will be used in the timeline and histogram graphs.
*
* @param data (batchTime, milliseconds). "milliseconds" is something like "processing time".
*/
private[ui] class MillisecondsStatUIData(data: Seq[(Long, Long)]) {
/**
* Converting the original data as per `unit`.
*/
def timelineData(unit: TimeUnit): Seq[(Long, Double)] =
data.map(x => x._1 -> UIUtils.convertToTimeUnit(x._2, unit))
/**
* Converting the original data as per `unit`.
*/
def histogramData(unit: TimeUnit): Seq[Double] =
data.map(x => UIUtils.convertToTimeUnit(x._2, unit))
val avg: Option[Long] = if (data.isEmpty) None else Some(data.map(_._2).sum / data.size)
val formattedAvg: String = StreamingPage.formatDurationOption(avg)
val max: Option[Long] = if (data.isEmpty) None else Some(data.map(_._2).max)
}
/**
* A helper class for "input rate" to generate data that will be used in the timeline and histogram
* graphs.
*
* @param data (batchTime, event-rate).
*/
private[ui] class EventRateUIData(val data: Seq[(Long, Double)]) {
val avg: Option[Double] = if (data.isEmpty) None else Some(data.map(_._2).sum / data.size)
val formattedAvg: String = avg.map(_.formatted("%.2f")).getOrElse("-")
val max: Option[Double] = if (data.isEmpty) None else Some(data.map(_._2).max)
}
/** Page for Spark Web UI that shows statistics of a streaming job */
private[ui] class StreamingPage(parent: StreamingTab)
extends WebUIPage("") with Logging {
import StreamingPage._
private val listener = parent.listener
private val startTime = System.currentTimeMillis()
/** Render the page */
def render(request: HttpServletRequest): Seq[Node] = {
val resources = generateLoadResources()
val basicInfo = generateBasicInfo()
val content = resources ++
basicInfo ++
listener.synchronized {
generateStatTable() ++
generateBatchListTables()
}
SparkUIUtils.headerSparkPage("Streaming Statistics", content, parent, Some(5000))
}
/**
* Generate html that will load css/js files for StreamingPage
*/
private def generateLoadResources(): Seq[Node] = {
// scalastyle:off
<script src={SparkUIUtils.prependBaseUri("/static/d3.min.js")}></script>
<link rel="stylesheet" href={SparkUIUtils.prependBaseUri("/static/streaming/streaming-page.css")} type="text/css"/>
<script src={SparkUIUtils.prependBaseUri("/static/streaming/streaming-page.js")}></script>
// scalastyle:on
}
/** Generate basic information of the streaming program */
private def generateBasicInfo(): Seq[Node] = {
val timeSinceStart = System.currentTimeMillis() - startTime
<div>Running batches of
<strong>
{SparkUIUtils.formatDurationVerbose(listener.batchDuration)}
</strong>
for
<strong>
{SparkUIUtils.formatDurationVerbose(timeSinceStart)}
</strong>
since
<strong>
{SparkUIUtils.formatDate(startTime)}
</strong>
(<strong>{listener.numTotalCompletedBatches}</strong>
completed batches, <strong>{listener.numTotalReceivedRecords}</strong> records)
</div>
<br />
}
/**
* Generate a global "timeFormat" dictionary in the JavaScript to store the time and its formatted
* string. Because we cannot specify a timezone in JavaScript, to make sure the server and client
* use the same timezone, we use the "timeFormat" dictionary to format all time values used in the
* graphs.
*
* @param times all time values that will be used in the graphs.
*/
private def generateTimeMap(times: Seq[Long]): Seq[Node] = {
    val js = "var timeFormat = {};\n" + times.map { time =>
val formattedTime =
UIUtils.formatBatchTime(time, listener.batchDuration, showYYYYMMSS = false)
s"timeFormat[$time] = '$formattedTime';"
    }.mkString("\n")
<script>{Unparsed(js)}</script>
}
private def generateStatTable(): Seq[Node] = {
val batches = listener.retainedBatches
val batchTimes = batches.map(_.batchTime.milliseconds)
val minBatchTime = if (batchTimes.isEmpty) startTime else batchTimes.min
val maxBatchTime = if (batchTimes.isEmpty) startTime else batchTimes.max
val eventRateForAllStreams = new EventRateUIData(batches.map { batchInfo =>
(batchInfo.batchTime.milliseconds, batchInfo.numRecords * 1000.0 / listener.batchDuration)
})
val schedulingDelay = new MillisecondsStatUIData(batches.flatMap { batchInfo =>
batchInfo.schedulingDelay.map(batchInfo.batchTime.milliseconds -> _)
})
val processingTime = new MillisecondsStatUIData(batches.flatMap { batchInfo =>
batchInfo.processingDelay.map(batchInfo.batchTime.milliseconds -> _)
})
val totalDelay = new MillisecondsStatUIData(batches.flatMap { batchInfo =>
batchInfo.totalDelay.map(batchInfo.batchTime.milliseconds -> _)
})
// Use the max value of "schedulingDelay", "processingTime", and "totalDelay" to make the
// Y axis ranges same.
val _maxTime =
(for (m1 <- schedulingDelay.max; m2 <- processingTime.max; m3 <- totalDelay.max) yield
m1 max m2 max m3).getOrElse(0L)
// Should start at 0
val minTime = 0L
val (maxTime, normalizedUnit) = UIUtils.normalizeDuration(_maxTime)
val formattedUnit = UIUtils.shortTimeUnitString(normalizedUnit)
// Use the max input rate for all InputDStreams' graphs to make the Y axis ranges same.
// If it's not an integral number, just use its ceil integral number.
val maxEventRate = eventRateForAllStreams.max.map(_.ceil.toLong).getOrElse(0L)
val minEventRate = 0L
val batchInterval = UIUtils.convertToTimeUnit(listener.batchDuration, normalizedUnit)
val jsCollector = new JsCollector
val graphUIDataForEventRateOfAllStreams =
new GraphUIData(
"all-stream-events-timeline",
"all-stream-events-histogram",
eventRateForAllStreams.data,
minBatchTime,
maxBatchTime,
minEventRate,
maxEventRate,
"events/sec")
graphUIDataForEventRateOfAllStreams.generateDataJs(jsCollector)
val graphUIDataForSchedulingDelay =
new GraphUIData(
"scheduling-delay-timeline",
"scheduling-delay-histogram",
schedulingDelay.timelineData(normalizedUnit),
minBatchTime,
maxBatchTime,
minTime,
maxTime,
formattedUnit)
graphUIDataForSchedulingDelay.generateDataJs(jsCollector)
val graphUIDataForProcessingTime =
new GraphUIData(
"processing-time-timeline",
"processing-time-histogram",
processingTime.timelineData(normalizedUnit),
minBatchTime,
maxBatchTime,
minTime,
maxTime,
formattedUnit, Some(batchInterval))
graphUIDataForProcessingTime.generateDataJs(jsCollector)
val graphUIDataForTotalDelay =
new GraphUIData(
"total-delay-timeline",
"total-delay-histogram",
totalDelay.timelineData(normalizedUnit),
minBatchTime,
maxBatchTime,
minTime,
maxTime,
formattedUnit)
graphUIDataForTotalDelay.generateDataJs(jsCollector)
// It's false before the user registers the first InputDStream
val hasStream = listener.streamIds.nonEmpty
val numCompletedBatches = listener.retainedCompletedBatches.size
val numActiveBatches = batchTimes.length - numCompletedBatches
val table =
// scalastyle:off
<table id="stat-table" class="table table-bordered" style="width: auto">
<thead>
<tr>
<th style="width: 160px;"></th>
<th style="width: 492px;">Timelines (Last {batchTimes.length} batches, {numActiveBatches} active, {numCompletedBatches} completed)</th>
<th style="width: 350px;">Histograms</th></tr>
</thead>
<tbody>
<tr>
<td style="vertical-align: middle;">
<div style="width: 160px;">
<div>
{
if (hasStream) {
<span class="expand-input-rate">
<span class="expand-input-rate-arrow arrow-closed"></span>
<a data-toggle="tooltip" title="Show/hide details of each receiver" data-placement="right">
<strong>Input Rate</strong>
</a>
</span>
} else {
<strong>Input Rate</strong>
}
}
</div>
<div>Avg: {eventRateForAllStreams.formattedAvg} events/sec</div>
</div>
</td>
<td class="timeline">{graphUIDataForEventRateOfAllStreams.generateTimelineHtml(jsCollector)}</td>
<td class="histogram">{graphUIDataForEventRateOfAllStreams.generateHistogramHtml(jsCollector)}</td>
</tr>
{if (hasStream) {
<tr id="inputs-table" style="display: none;" >
<td colspan="3">
{generateInputDStreamsTable(jsCollector, minBatchTime, maxBatchTime, minEventRate, maxEventRate)}
</td>
</tr>
}}
<tr>
<td style="vertical-align: middle;">
<div style="width: 160px;">
<div><strong>Scheduling Delay {SparkUIUtils.tooltip("Time taken by Streaming scheduler to submit jobs of a batch", "right")}</strong></div>
<div>Avg: {schedulingDelay.formattedAvg}</div>
</div>
</td>
<td class="timeline">{graphUIDataForSchedulingDelay.generateTimelineHtml(jsCollector)}</td>
<td class="histogram">{graphUIDataForSchedulingDelay.generateHistogramHtml(jsCollector)}</td>
</tr>
<tr>
<td style="vertical-align: middle;">
<div style="width: 160px;">
<div><strong>Processing Time {SparkUIUtils.tooltip("Time taken to process all jobs of a batch", "right")}</strong></div>
<div>Avg: {processingTime.formattedAvg}</div>
</div>
</td>
<td class="timeline">{graphUIDataForProcessingTime.generateTimelineHtml(jsCollector)}</td>
<td class="histogram">{graphUIDataForProcessingTime.generateHistogramHtml(jsCollector)}</td>
</tr>
<tr>
<td style="vertical-align: middle;">
<div style="width: 160px;">
<div><strong>Total Delay {SparkUIUtils.tooltip("Total time taken to handle a batch", "right")}</strong></div>
<div>Avg: {totalDelay.formattedAvg}</div>
</div>
</td>
<td class="timeline">{graphUIDataForTotalDelay.generateTimelineHtml(jsCollector)}</td>
<td class="histogram">{graphUIDataForTotalDelay.generateHistogramHtml(jsCollector)}</td>
</tr>
</tbody>
</table>
// scalastyle:on
generateTimeMap(batchTimes) ++ table ++ jsCollector.toHtml
}
private def generateInputDStreamsTable(
jsCollector: JsCollector,
minX: Long,
maxX: Long,
minY: Double,
maxY: Double): Seq[Node] = {
val content = listener.receivedEventRateWithBatchTime.map { case (streamId, eventRates) =>
generateInputDStreamRow(jsCollector, streamId, eventRates, minX, maxX, minY, maxY)
}.foldLeft[Seq[Node]](Nil)(_ ++ _)
// scalastyle:off
<table class="table table-bordered" style="width: auto">
<thead>
<tr>
<th style="width: 151px;"></th>
<th style="width: 167px; padding: 8px 0 8px 0"><div style="margin: 0 8px 0 8px">Status</div></th>
<th style="width: 167px; padding: 8px 0 8px 0"><div style="margin: 0 8px 0 8px">Location</div></th>
<th style="width: 166px; padding: 8px 0 8px 0"><div style="margin: 0 8px 0 8px">Last Error Time</div></th>
<th>Last Error Message</th>
</tr>
</thead>
<tbody>
{content}
</tbody>
</table>
// scalastyle:on
}
private def generateInputDStreamRow(
jsCollector: JsCollector,
streamId: Int,
eventRates: Seq[(Long, Double)],
minX: Long,
maxX: Long,
minY: Double,
maxY: Double): Seq[Node] = {
    // If this is a ReceiverInputDStream, we need to show the receiver info. Otherwise we only
    // need the InputDStream name.
val receiverInfo = listener.receiverInfo(streamId)
val receiverName = receiverInfo.map(_.name).
orElse(listener.streamName(streamId)).getOrElse(s"Stream-$streamId")
val receiverActive = receiverInfo.map { info =>
if (info.active) "ACTIVE" else "INACTIVE"
}.getOrElse(emptyCell)
val receiverLocation = receiverInfo.map(_.location).getOrElse(emptyCell)
val receiverLastError = receiverInfo.map { info =>
val msg = s"${info.lastErrorMessage} - ${info.lastError}"
if (msg.size > 100) msg.take(97) + "..." else msg
}.getOrElse(emptyCell)
val receiverLastErrorTime = receiverInfo.map {
r => if (r.lastErrorTime < 0) "-" else SparkUIUtils.formatDate(r.lastErrorTime)
}.getOrElse(emptyCell)
val receivedRecords = new EventRateUIData(eventRates)
val graphUIDataForEventRate =
new GraphUIData(
s"stream-$streamId-events-timeline",
s"stream-$streamId-events-histogram",
receivedRecords.data,
minX,
maxX,
minY,
maxY,
"events/sec")
graphUIDataForEventRate.generateDataJs(jsCollector)
<tr>
<td rowspan="2" style="vertical-align: middle; width: 151px;">
<div style="width: 151px;">
<div><strong>{receiverName}</strong></div>
<div>Avg: {receivedRecords.formattedAvg} events/sec</div>
</div>
</td>
<td>{receiverActive}</td>
<td>{receiverLocation}</td>
<td>{receiverLastErrorTime}</td>
<td><div style="width: 342px;">{receiverLastError}</div></td>
</tr>
<tr>
<td colspan="3" class="timeline">
{graphUIDataForEventRate.generateTimelineHtml(jsCollector)}
</td>
<td class="histogram">{graphUIDataForEventRate.generateHistogramHtml(jsCollector)}</td>
</tr>
}
private def generateBatchListTables(): Seq[Node] = {
val runningBatches = listener.runningBatches.sortBy(_.batchTime.milliseconds).reverse
val waitingBatches = listener.waitingBatches.sortBy(_.batchTime.milliseconds).reverse
val completedBatches = listener.retainedCompletedBatches.
sortBy(_.batchTime.milliseconds).reverse
val activeBatchesContent = {
<h4 id="active">Active Batches ({runningBatches.size + waitingBatches.size})</h4> ++
new ActiveBatchTable(runningBatches, waitingBatches, listener.batchDuration).toNodeSeq
}
val completedBatchesContent = {
<h4 id="completed">
Completed Batches (last {completedBatches.size} out of {listener.numTotalCompletedBatches})
</h4> ++
new CompletedBatchTable(completedBatches, listener.batchDuration).toNodeSeq
}
activeBatchesContent ++ completedBatchesContent
}
}
private[ui] object StreamingPage {
val BLACK_RIGHT_TRIANGLE_HTML = "▶"
val BLACK_DOWN_TRIANGLE_HTML = "▼"
val emptyCell = "-"
/**
* Returns a human-readable string representing a duration such as "5 second 35 ms"
*/
def formatDurationOption(msOption: Option[Long]): String = {
msOption.map(SparkUIUtils.formatDurationVerbose).getOrElse(emptyCell)
}
}
/**
* A helper class that allows the user to add JavaScript statements which will be executed when the
* DOM has finished loading.
*/
private[ui] class JsCollector {
private var variableId = 0
/**
* Return the next unused JavaScript variable name
*/
def nextVariableName: String = {
variableId += 1
"v" + variableId
}
/**
* JavaScript statements that will execute before `statements`
*/
private val preparedStatements = ArrayBuffer[String]()
/**
* JavaScript statements that will execute after `preparedStatements`
*/
private val statements = ArrayBuffer[String]()
def addPreparedStatement(js: String): Unit = {
preparedStatements += js
}
def addStatement(js: String): Unit = {
statements += js
}
/**
* Generate a html snippet that will execute all scripts when the DOM has finished loading.
*/
def toHtml: Seq[Node] = {
val js =
s"""
|$$(document).ready(function(){
         |  ${preparedStatements.mkString("\n")}
         |  ${statements.mkString("\n")}
|});""".stripMargin
<script>{Unparsed(js)}</script>
}
}
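
/**
 * Hedged usage sketch (added for illustration, not part of the original file): the typical
 * pattern used by the page code above — prepare shared variables, add drawing statements,
 * then emit everything as a single script node. The literal data is an assumption.
 */
private[ui] object JsCollectorUsageSketch {
  def example(): Seq[Node] = {
    val jsCollector = new JsCollector
    val dataName = jsCollector.nextVariableName
    jsCollector.addPreparedStatement(s"var $dataName = [1, 2, 3];")
    jsCollector.addStatement(s"console.log($dataName);")
    jsCollector.toHtml
  }
}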
| andrewor14/iolap | streaming/src/main/scala/org/apache/spark/streaming/ui/StreamingPage.scala | Scala | apache-2.0 | 20,112 |
package ml.combust.mleap.runtime.transformer.feature
import ml.combust.mleap.core.feature.MathUnaryModel
import ml.combust.mleap.core.feature.UnaryOperation.Sin
import ml.combust.mleap.core.types._
import ml.combust.mleap.runtime.frame.{DefaultLeapFrame, Row}
import org.scalatest.FunSpec
/**
* Created by hollinwilkins on 12/27/16.
*/
class MathUnarySpec extends FunSpec {
val schema = StructType(StructField("test_a", ScalarType.Double)).get
val dataset = Seq(Row(42.0))
val frame = DefaultLeapFrame(schema, dataset)
val transformer = MathUnary(
shape = NodeShape.feature(inputCol = "test_a",
outputCol = "test_out"),
model = MathUnaryModel(Sin))
describe("#transform") {
it("transforms the leap frame using the given input and operation") {
val calc = transformer.transform(frame).get.dataset(0).getDouble(1)
assert(calc == Math.sin(42.0))
}
}
describe("input/output schema") {
it("has the correct inputs and outputs") {
assert(transformer.schema.fields ==
Seq(StructField("test_a", ScalarType.Double.nonNullable),
StructField("test_out", ScalarType.Double.nonNullable)))
}
}
} | combust/mleap | mleap-runtime/src/test/scala/ml/combust/mleap/runtime/transformer/feature/MathUnarySpec.scala | Scala | apache-2.0 | 1,171 |
package controllers.circs.report_changes
import controllers.CarersForms._
import controllers.mappings.Mappings._
import models.domain.CircumstancesStoppedCaring
import models.view.{CachedChangeOfCircs, Navigable}
import play.api.Play._
import play.api.data.Form
import play.api.data.Forms._
import play.api.mvc.Controller
import utils.helpers.CarersForm._
import play.api.i18n._
import scala.language.reflectiveCalls
object GPermanentlyStoppedCaring extends Controller with CachedChangeOfCircs with Navigable with I18nSupport {
override val messagesApi: MessagesApi = current.injector.instanceOf[MMessages]
val form = Form(mapping(
"stoppedCaringDate" -> dayMonthYear.verifying(validDate),
"moreAboutChanges" -> optional(carersText(maxLength = CircumstancesStoppedCaring.textMaxLength))
)(CircumstancesStoppedCaring.apply)(CircumstancesStoppedCaring.unapply))
def present = claiming {implicit circs => implicit request => implicit request2lang =>
track(CircumstancesStoppedCaring) {
implicit circs => Ok(views.html.circs.report_changes.permanentlyStoppedCaring(form.fill(CircumstancesStoppedCaring)))
}
}
def submit = claiming {implicit circs => implicit request => implicit request2lang =>
form.bindEncrypted.fold(
formWithErrors => BadRequest(views.html.circs.report_changes.permanentlyStoppedCaring(formWithErrors)),
f => circs.update(f) -> Redirect(controllers.circs.consent_and_declaration.routes.GCircsDeclaration.present())
)
}
}
| Department-for-Work-and-Pensions/ClaimCapture | c3/app/controllers/circs/report_changes/GPermanentlyStoppedCaring.scala | Scala | mit | 1,497 |
/*
* Copyright (c) 2014-2022 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.iglu.schemaddl.jsonschema
package properties
/**
* Marker trait for properties specific *ONLY* for objects
*/
private[iglu] sealed trait ObjectProperty
object ObjectProperty {
/**
* Type representing keyword `properties`
*
* @see http://json-schema.org/latest/json-schema-validation.html#anchor64
*/
final case class Properties(value: Map[String, Schema]) extends JsonSchemaProperty with ObjectProperty {
val keyword: Keyword = Keyword.Properties
}
/**
* ADT representing value for `additionalProperties` keyword
*
* @see http://json-schema.org/latest/json-schema-validation.html#anchor64
*/
sealed trait AdditionalProperties extends JsonSchemaProperty with ObjectProperty {
val keyword: Keyword = Keyword.AdditionalProperties
}
object AdditionalProperties {
/**
* Allowance of properties not listed in `properties` and `patternProperties`
*/
final case class AdditionalPropertiesAllowed(value: Boolean) extends AdditionalProperties
/**
      * The value **must** always be a valid Schema, but it is always equivalent to just
      * `additionalProperties: true`
*/
final case class AdditionalPropertiesSchema(value: Schema) extends AdditionalProperties
}
/**
* ADT representing holder for `required` keyword
*
* @see http://json-schema.org/latest/json-schema-validation.html#anchor61
*/
final case class Required(value: List[String]) extends JsonSchemaProperty with ObjectProperty {
val keyword: Keyword = Keyword.Required
}
/**
* ADT representing value for `patternProperties` keyword
*
* @see http://json-schema.org/latest/json-schema-validation.html#anchor64
*/
final case class PatternProperties(value: Map[String, Schema]) extends JsonSchemaProperty with ObjectProperty {
val keyword: Keyword = Keyword.PatternProperties
}
}
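
// Hedged usage sketch (illustration only, not part of the original file): constructing a
// couple of the properties above directly, roughly what a schema fragment such as
//   {"required": ["id"], "additionalProperties": false}
// would decode to. The chosen values are assumptions for illustration.
private[iglu] object ObjectPropertySketch {
  val required: ObjectProperty.Required =
    ObjectProperty.Required(List("id"))
  val closed: ObjectProperty.AdditionalProperties =
    ObjectProperty.AdditionalProperties.AdditionalPropertiesAllowed(false)
}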
| snowplow/schema-ddl | modules/core/src/main/scala/com.snowplowanalytics/iglu.schemaddl/jsonschema/properties/ObjectProperty.scala | Scala | apache-2.0 | 2,592 |
// Copyright (c) 2013, Johns Hopkins University. All rights reserved.
// This software is released under the 2-clause BSD license.
// See /LICENSE.txt
// Travis Wolfe, [email protected], 30 July 2013
package edu.jhu.hlt.parma.active
import edu.jhu.hlt.parma.types._
import edu.jhu.hlt.parma.util._
import java.io.File
import collection.mutable.ArrayBuffer
class InteractiveLearner {
// label = sure|possible|no
class Instance(val alignment: Alignment, val context: Context, val label: Option[String])
// what is a feature function? (this will change)
type Feature = (Alignment, Context) => Double
var features = new ArrayBuffer[Feature]
var labledExamples = new ArrayBuffer[Instance]
var unlabledExamples = new ArrayBuffer[Instance]
def workingSet {
println("working with %d features, %d labled examples, and %d unlabled examples"
.format(features.size, labledExamples.size, unlabledExamples.size))
println(Describe.memoryUsage())
}
def help {
println("TODO: this should print out what you can do with Machine")
}
def trainPerformance: Double = {
throw new RuntimeException("implement me")
}
def testPerformance: Double = {
throw new RuntimeException("implement me")
}
def loadExamples(discourseFile: File, communicationFile: File) {
// TODO compute features
throw new RuntimeException("implement me")
}
def addNewFeature(f: Feature) {
features += f
// TODO compute this feature for all examples
// TODO retrain the model with this feature added
throw new RuntimeException("implement me")
}
def chooseExampleHeuristically(label: Option[String] = None): Instance = {
// TODO put your heuristic to "explore the feature space" here
throw new RuntimeException("implement me")
}
def chooseExampleRandomly(label: Option[String] = None): Instance = {
// TODO scan through unannotated examples and return one at random
throw new RuntimeException("implement me")
}
def annotate(howMany: Int = 1) {
for(i <- 1 to howMany)
annotate(chooseExampleRandomly())
}
def annotate(inst: Instance) {
// display the alignment to the user
println(Describe.alignment(inst.alignment, inst.context))
println("Is this aligned? [sure|possible|no]")
// read back the users' annotation
val response = System.console.readLine
// TODO write out the annotation to a text file or database
throw new RuntimeException("implement me")
}
def findExamplesMaximizing(score: Instance => Double, howMany: Int = 1): Seq[Instance] = {
throw new RuntimeException("implement me")
}
def findExamplesWhere(predicate: Instance => Boolean): Seq[Instance] = {
throw new RuntimeException("implement me")
}
}
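
// Hedged sketch (illustration only, not part of the original file): the simplest possible
// Feature matching the (Alignment, Context) => Double shape declared above — a constant
// bias term. Real features would inspect the alignment and its context.
//
//   val biasFeature: (Alignment, Context) => Double = (alignment, context) => 1.0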
| hltcoe/parma | src/main/scala/edu/jhu/hlt/parma/active/InteractiveLearner.scala | Scala | bsd-2-clause | 2,667 |
package redis.commands
import redis._
import scala.concurrent.Future
import redis.api.strings._
import redis.api._
trait Strings extends Request {
def append[V: ByteStringSerializer](key: String, value: V): Future[Long] =
send(Append(key, value))
def bitcount(key: String): Future[Long] =
send(Bitcount(key))
def bitcount(key: String, start: Long, end: Long): Future[Long] =
send(BitcountRange(key, start, end))
def bitopAND(destkey: String, keys: String*): Future[Long] =
bitop(AND, destkey, keys: _*)
def bitopOR(destkey: String, keys: String*): Future[Long] =
bitop(OR, destkey, keys: _*)
def bitopXOR(destkey: String, keys: String*): Future[Long] =
bitop(XOR, destkey, keys: _*)
def bitopNOT(destkey: String, key: String): Future[Long] =
bitop(NOT, destkey, key)
def bitop(operation: BitOperator, destkey: String, keys: String*): Future[Long] =
send(Bitop(operation, destkey, keys))
def decr(key: String): Future[Long] =
send(Decr(key))
def decrby(key: String, decrement: Long): Future[Long] =
send(Decrby(key, decrement))
def get[R: ByteStringDeserializer](key: String): Future[Option[R]] =
send(Get(key))
def getbit(key: String, offset: Long): Future[Boolean] =
send(Getbit(key, offset))
def getrange[R: ByteStringDeserializer](key: String, start: Long, end: Long): Future[Option[R]] =
send(Getrange(key, start, end))
def getset[V: ByteStringSerializer, R: ByteStringDeserializer](key: String, value: V): Future[Option[R]] =
send(Getset(key, value))
def incr(key: String): Future[Long] =
send(Incr(key))
def incrby(key: String, increment: Long): Future[Long] =
send(Incrby(key, increment))
def incrbyfloat(key: String, increment: Double): Future[Option[Double]] =
send(Incrbyfloat(key, increment))
def mget[R: ByteStringDeserializer](keys: String*): Future[Seq[Option[R]]] =
send(Mget(keys))
def mset[V: ByteStringSerializer](keysValues: Map[String, V]): Future[Boolean] =
send(Mset(keysValues))
def msetnx[V: ByteStringSerializer](keysValues: Map[String, V]): Future[Boolean] =
send(Msetnx(keysValues))
def psetex[V: ByteStringSerializer](key: String, milliseconds: Long, value: V): Future[Boolean] =
send(Psetex(key, milliseconds, value))
def set[V: ByteStringSerializer](key: String, value: V,
exSeconds: Option[Long] = None,
pxMilliseconds: Option[Long] = None,
NX: Boolean = false,
XX: Boolean = false): Future[Boolean] = {
send(Set(key, value, exSeconds, pxMilliseconds, NX, XX))
}
def setbit(key: String, offset: Long, value: Boolean): Future[Boolean] =
send(Setbit(key, offset, value))
def setex[V: ByteStringSerializer](key: String, seconds: Long, value: V): Future[Boolean] =
send(Setex(key, seconds, value))
def setnx[V: ByteStringSerializer](key: String, value: V): Future[Boolean] =
send(Setnx(key, value))
def setrange[V: ByteStringSerializer](key: String, offset: Long, value: V): Future[Long] =
send(Setrange(key, offset, value))
def strlen(key: String): Future[Long] =
send(Strlen(key))
}
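
// Hedged usage sketch (illustration only, not part of the original file): these commands are
// normally reached through a client that mixes in this trait. The client construction and the
// implicit ActorSystem below are assumptions about the surrounding project.
//
//   implicit val system = akka.actor.ActorSystem()
//   val redis = RedisClient()
//   val wasSet: Future[Boolean] = redis.set("counter", 0)
//   val value: Future[Option[String]] = redis.get[String]("counter")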
| 272029252/rediscala | src/main/scala/redis/commands/Strings.scala | Scala | apache-2.0 | 3,249 |
/*
* Copyright 2016 Dennis Vriend
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dnvriend.adapter
import akka.persistence.journal.{ EventSeq, ReadEventAdapter }
import pprint._, Config.Colors.PPrintConfig
class WrapperEventAdapter extends ReadEventAdapter {
override def fromJournal(event: Any, manifest: String): EventSeq = event match {
case Wrapper(payload, created) ⇒
log2("Payload: " + payload)
log2("Created: " + created)
EventSeq.single(payload)
}
}
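
// Hedged companion sketch (illustration only, not part of the original file): a write-side
// adapter that wraps events on the way to the journal would be the natural counterpart of the
// ReadEventAdapter above. Wrapper's field types are not visible in this file, so the timestamp
// argument below is an assumption.
//
//   class WrapperWriteEventAdapter extends akka.persistence.journal.WriteEventAdapter {
//     override def manifest(event: Any): String = ""
//     override def toJournal(event: Any): Any = Wrapper(event, System.currentTimeMillis())
//   }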
| dnvriend/demo-akka-persistence-jdbc | src/main/scala/com/github/dnvriend/adapter/WrapperEventAdapter.scala | Scala | apache-2.0 | 1,023 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui
private[spark] object ToolTips {
val SCHEDULER_DELAY =
"""Scheduler delay includes time to ship the task from the scheduler to
the executor, and time to send the task result from the executor to the scheduler. If
scheduler delay is large, consider decreasing the size of tasks or decreasing the size
of task results."""
val TASK_DESERIALIZATION_TIME =
"""Time spent deserializing the task closure on the executor, including the time to read the
broadcasted task."""
val SHUFFLE_READ_BLOCKED_TIME =
"Time that the task spent blocked waiting for shuffle data to be read from remote machines."
val INPUT = "Bytes read from Hadoop or from Spark storage."
val OUTPUT = "Bytes written to Hadoop."
val STORAGE_MEMORY =
"Memory used / total available memory for storage of data " +
"like RDD partitions cached in memory. "
val SHUFFLE_WRITE =
"Bytes and records written to disk in order to be read by a shuffle in a future stage."
val SHUFFLE_READ =
"""Total shuffle bytes and records read (includes both data read locally and data read from
remote executors). """
val SHUFFLE_READ_REMOTE_SIZE =
"""Total shuffle bytes read from remote executors. This is a subset of the shuffle
read bytes; the remaining shuffle data is read locally. """
val GETTING_RESULT_TIME =
"""Time that the driver spends fetching task results from workers. If this is large, consider
decreasing the amount of data returned from each task."""
val RESULT_SERIALIZATION_TIME =
"""Time spent serializing the task result on the executor before sending it back to the
driver."""
val GC_TIME =
"""Time that the executor spent paused for Java garbage collection while the task was
running."""
val PEAK_EXECUTION_MEMORY =
"""Execution memory refers to the memory used by internal data structures created during
shuffles, aggregations and joins when Tungsten is enabled. The value of this accumulator
should be approximately the sum of the peak sizes across all such data structures created
in this task. For SQL jobs, this only tracks all unsafe operators, broadcast joins, and
external sort."""
val JOB_TIMELINE =
"""Shows when jobs started and ended and when executors joined or left. Drag to scroll.
Click Enable Zooming and use mouse wheel to zoom in/out."""
val STAGE_TIMELINE =
"""Shows when stages started and ended and when executors joined or left. Drag to scroll.
Click Enable Zooming and use mouse wheel to zoom in/out."""
val JOB_DAG =
"""Shows a graph of stages executed for this job, each of which can contain
multiple RDD operations (e.g. map() and filter()), and of RDDs inside each operation
(shown as dots)."""
val STAGE_DAG =
"""Shows a graph of RDD operations in this stage, and RDDs inside each one. A stage can run
multiple operations (e.g. two map() functions) if they can be pipelined. Some operations
also create multiple RDDs internally. Cached RDDs are shown in green.
"""
val TASK_TIME =
"Shaded red when garbage collection (GC) time is over 10% of task time"
val APPLICATION_EXECUTOR_LIMIT =
"""Maximum number of executors that this application will use. This limit is finite only when
dynamic allocation is enabled. The number of granted executors may exceed the limit
ephemerally when executors are being killed.
"""
val DURATION =
"Elapsed time since the stage was submitted until execution completion of all its tasks."
}
| wangmiao1981/spark | core/src/main/scala/org/apache/spark/ui/ToolTips.scala | Scala | apache-2.0 | 4,434 |
/*
* The MIT License (MIT)
* <p/>
* Copyright (c) 2016 SWEeneyThreads
* <p/>
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* <p/>
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* <p/>
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
* <p/>
*
* @author SWEeneyThreads
* @version 0.0.1
* @since 0.0.1
*/
package server.utils.fileManagerLibrary
import java.io.RandomAccessFile
/**
 * Trait that every algorithm for removing a section from a file has to implement.
*/
trait RemoveStrategy {
/**
* Remove the selected section from a file.
*
* @param file The file.
* @param init The beginning of the section.
* @param off The length of the section.
*/
def remove(file: RandomAccessFile, init: Long, off: Long): Unit
}
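
/**
 * Hedged example implementation (added for illustration, not part of the original file):
 * removes the section by copying every byte after it over the removed range and then
 * truncating the file. The class name and buffer size are assumptions.
 */
class ShiftLeftRemoveStrategy extends RemoveStrategy {
  override def remove(file: RandomAccessFile, init: Long, off: Long): Unit = {
    val buffer = new Array[Byte](8192)
    var readPos = init + off
    var writePos = init
    var done = false
    while (!done) {
      file.seek(readPos)
      val read = file.read(buffer)
      if (read <= 0) done = true
      else {
        file.seek(writePos)
        file.write(buffer, 0, read)
        readPos += read
        writePos += read
      }
    }
    // Drop the now-duplicated tail bytes.
    file.setLength(file.length() - off)
  }
}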
| SweeneyThreads/Actorbase | src/main/scala/server/utils/fileManagerLibrary/RemoveStrategy.scala | Scala | mit | 1,702 |
package models.addons
import scalaz._
import Scalaz._
import scalaz.effect.IO
import scalaz.Validation
import scalaz.Validation.FlatMap._
import scalaz.NonEmptyList._
import cache._
import db._
import models.Constants._
import models.json.tosca._
import models.tosca.{ KeyValueField, KeyValueList}
import io.megam.auth.funnel.FunnelErrors._
import com.datastax.driver.core.{ ResultSet, Row }
import com.websudos.phantom.dsl._
import scala.concurrent.{ Future => ScalaFuture }
import com.websudos.phantom.connectors.{ ContactPoint, KeySpaceDef }
import scala.concurrent.Await
import scala.concurrent.duration._
import utils.DateHelper
import io.megam.util.Time
import org.joda.time.{DateTime, DateTimeZone}
import org.joda.time.format.{DateTimeFormat,ISODateTimeFormat}
import io.megam.common.uid.UID
import net.liftweb.json._
import controllers.stack.ImplicitJsonFormats
import net.liftweb.json.scalaz.JsonScalaz._
import java.nio.charset.Charset
/**
* @author ranjitha
*
*/
case class AddonsInput( provider_id: String, provider_name: String, options: models.tosca.KeyValueList)
case class AddonsResult(
id: String,
provider_id: String,
account_id: String,
provider_name: String,
options: models.tosca.KeyValueList,
json_claz: String,
created_at: DateTime)
sealed class AddonsSacks extends CassandraTable[AddonsSacks, AddonsResult] with ImplicitJsonFormats {
object id extends StringColumn(this)
object provider_id extends StringColumn(this)
object account_id extends StringColumn(this) with PrimaryKey[String]
object provider_name extends StringColumn(this) with PartitionKey[String]
object options extends JsonListColumn[AddonsSacks, AddonsResult, KeyValueField](this) {
override def fromJson(obj: String): KeyValueField = {
JsonParser.parse(obj).extract[KeyValueField]
}
override def toJson(obj: KeyValueField): String = {
compactRender(Extraction.decompose(obj))
}
}
object json_claz extends StringColumn(this)
object created_at extends DateTimeColumn(this)
def fromRow(row: Row): AddonsResult = {
AddonsResult(
id(row),
provider_id(row),
account_id(row),
provider_name(row),
options(row),
json_claz(row),
created_at(row))
}
}
abstract class ConcreteAddons extends AddonsSacks with RootConnector {
override lazy val tableName = "addons"
override implicit def space: KeySpace = scyllaConnection.space
override implicit def session: Session = scyllaConnection.session
def insertNewRecord(adn: AddonsResult): ValidationNel[Throwable, ResultSet] = {
val res = insert.value(_.id, adn.id)
.value(_.provider_id, adn.provider_id)
.value(_.account_id, adn.account_id)
.value(_.provider_name, adn.provider_name)
.value(_.options, adn.options)
.value(_.json_claz, adn.json_claz)
.value(_.created_at, adn.created_at)
.future()
Await.result(res, 5.seconds).successNel
}
def getRecords(email: String, name: String): ValidationNel[Throwable, Seq[AddonsResult]] = {
val res = select.allowFiltering().where(_.account_id eqs email).and(_.provider_name eqs name).fetch()
Await.result(res, 5.seconds).successNel
}
def deleteRecords(email: String): ValidationNel[Throwable, ResultSet] = {
val res = delete.where(_.account_id eqs email).future()
Await.result(res, 5.seconds).successNel
}
}
object Addons extends ConcreteAddons {
private def mkAddonsSack(email: String, input: String): ValidationNel[Throwable, AddonsResult] = {
val adnInput: ValidationNel[Throwable, AddonsInput] = (Validation.fromTryCatchThrowable[AddonsInput, Throwable] {
parse(input).extract[AddonsInput]
} leftMap { t: Throwable => new MalformedBodyError(input, t.getMessage) }).toValidationNel //capture failure
for {
ads <- adnInput
uir <- (UID("adn").get leftMap { ut: NonEmptyList[Throwable] => ut })
} yield {
val bvalue = Set(email)
val json = new AddonsResult(uir.get._1 + uir.get._2, ads.provider_id, email, ads.provider_name, ads.options, "Megam::Addons", DateHelper.now())
json
}
}
def create(email: String, input: String): ValidationNel[Throwable, Option[AddonsResult]] = {
for {
wa <- (mkAddonsSack(email, input) leftMap { err: NonEmptyList[Throwable] => err })
set <- (insertNewRecord(wa) leftMap { t: NonEmptyList[Throwable] => t })
} yield {
play.api.Logger.warn(("%s%s%-20s%s%s").format(Console.GREEN, Console.BOLD, "Addons","|+| ✔", Console.RESET))
wa.some
}
}
def findById(email: String, name: String): ValidationNel[Throwable, Seq[AddonsResult]] = {
(getRecords(email, name) leftMap { t: NonEmptyList[Throwable] =>
new ResourceItemNotFound(name, "Addons = nothing found.")
}).toValidationNel.flatMap { nm: Seq[AddonsResult] =>
if (!nm.isEmpty)
Validation.success[Throwable, Seq[AddonsResult]](nm).toValidationNel
else
Validation.failure[Throwable, Seq[AddonsResult]]((new ResourceItemNotFound(name, "Addons = nothing found."))).toValidationNel
}
}
def delete(email: String): ValidationNel[Throwable, Option[AddonsResult]] = {
deleteRecords(email) match {
case Success(value) => Validation.success[Throwable, Option[AddonsResult]](none).toValidationNel
case Failure(err) => Validation.success[Throwable, Option[AddonsResult]](none).toValidationNel
}
}
}
| indykish/verticegateway | app/models/addons/Addons.scala | Scala | mit | 5,440 |
/**
* Copyright (C) 2013 Carnegie Mellon University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tdb.list
import java.io.Serializable
import scala.collection.mutable.{Buffer, Map}
import tdb._
import tdb.Constants._
import tdb.TDB._
class DoubleChunkList[T, U]
(val head: Mod[DoubleChunkListNode[T, U]],
conf: ListConf,
val sorted: Boolean = false,
val datastoreId: TaskId = -1)
extends AdjustableList[T, U] with Serializable {
override def chunkMap[V, W](f: Iterable[(T, U)] => (V, W))
(implicit c: Context): DoubleList[V, W] = {
val memo = new Memoizer[Mod[DoubleListNode[V, W]]]()
new DoubleList(
mod {
read(head) {
case null => write[DoubleListNode[V, W]](null)
case node => node.chunkMap(f, memo)
}
}, false, datastoreId
)
}
def filter(pred: ((T, U)) => Boolean)
(implicit c: Context): DoubleChunkList[T, U] = ???
def flatMap[V, W](f: ((T, U)) => Iterable[(V, W)])
(implicit c: Context): DoubleChunkList[V, W] = {
val memo = new Memoizer[Mod[DoubleChunkListNode[V, W]]]()
new DoubleChunkList(
mod {
read(head) {
case null => write[DoubleChunkListNode[V, W]](null)
case node => node.flatMap(f, memo)
}
}, conf, false, datastoreId
)
}
override def foreach(f: ((T, U), Context) => Unit)
(implicit c: Context): Unit = {
val memo = new Memoizer[Unit]()
readAny(head) {
case null =>
case node =>
memo(node) {
node.foreach(f, memo)
}
}
}
override def foreachChunk(f: (Iterable[(T, U)], Context) => Unit)
(implicit c: Context): Unit = {
val memo = new Memoizer[Unit]()
readAny(head) {
case null =>
case node =>
memo(node) {
node.foreachChunk(f, memo)
}
}
}
def join[V](_that: AdjustableList[T, V], condition: ((T, V), (T, U)) => Boolean)
(implicit c: Context): DoubleChunkList[T, (U, V)] = ???
def map[V, W](f: ((T, U)) => (V, W))
(implicit c: Context): DoubleChunkList[V, W] = {
val memo = new Memoizer[Mod[DoubleChunkListNode[V, W]]]()
new DoubleChunkList(
mod {
read(head) {
case null => write[DoubleChunkListNode[V, W]](null)
case node => node.map(f, memo)
}
}, conf, false, datastoreId
)
}
override def mapValues[V](f: U => V)
(implicit c: Context): DoubleChunkList[T, V] = {
val memo = new Memoizer[Changeable[DoubleChunkListNode[T, V]]]()
new DoubleChunkList(
mod {
read(head) {
case null => write[DoubleChunkListNode[T, V]](null)
case node => node.mapValues(f, memo)
}
}, conf, sorted, datastoreId
)
}
def merge(that: DoubleChunkList[T, U])
(implicit c: Context,
ordering: Ordering[T]): DoubleChunkList[T, U] = ???
def reduce(f: ((T, U), (T, U)) => (T, U))
(implicit c: Context): Mod[(T, U)] = {
chunkMap(_.reduce(f)).reduce(f)
}
def toBuffer(mutator: Mutator): Buffer[(T, U)] = {
val buf = Buffer[(T, U)]()
var node = mutator.read(head)
while (node != null) {
buf ++= mutator.read(node.chunkMod)
node = mutator.read(node.nextMod)
}
buf
}
override def equals(that: Any): Boolean = {
that match {
case thatList: DoubleChunkList[T, U] => head == thatList.head
case _ => false
}
}
override def hashCode() = head.hashCode()
override def toString: String = {
head.toString
}
}
| twmarshall/tdb | core/src/main/scala/tdb/list/DoubleChunkList.scala | Scala | apache-2.0 | 4,067 |
package genetic.main
import java.io.IOException
import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Paths}
import java.util.{ArrayList, Random, Scanner}
import genetic.analysys.{Analysis, Benchmark}
import genetic.baldwin.BaldwinMain
import genetic.func.{Func, GeneticFuncMain, HoldersTableFunction, LabTestFunction}
import genetic.genetic.{Genetic, GeneticAlg, GeneticEngine}
import genetic.genetic.fitnessMapping.FitnessMapping
import genetic.genetic.generation.{Crossover, Generation}
import genetic.genetic.localOptima.LocalOptimaSignal
import genetic.genetic.selection.ParentSelection
import genetic.genetic.survivors.SurvivorSelectionStrategy
import genetic.genetic.survivors.construction.{DeduplicatedConstruction, NormalConstruction, PopulationConstruction}
import genetic.genetic.types.{Gene, Population}
import genetic.genetic.GeneticMetadata
import genetic.knapsack.{GeneticKnapsackMain, Item}
import genetic.mdKnapsack.{MDKnapsackMain, MDKnapsackParser}
import genetic.parametric.{Instances, Parametric}
import genetic.params.{GeneticParamsMain, Params}
import genetic.queens.{GeneticQueenMain, QueenMating, QueenMutation}
import genetic.string.{GeneticStringMain, HillClimbing, StringHeuristics}
import scala.annotation.tailrec
import scala.util.Try
import scalaz.std.list.listInstance
object UserMain {
var in: Scanner = new Scanner(System.in)
def main(args: Array[String]) {
println()
println("Genetic Algorithms Lab 3 by Ilan Godik & Yuval Alfassi")
mainMenu()
println("Press any key to exit")
in.nextLine()
}
def mainMenu(): Unit = {
println()
println(
"""1. Genetic Algorithm
|2. Hill Climbing - String matching
|3. Baldwin's Effect""".stripMargin)
// |4. Minimal Conflicts - N-Queens
val whatToRun = readIntLoop("Please choose what you want to run: ")
whatToRun match {
case 1 =>
val alg = chooseGeneticMetadata()
menu(alg, alg.defaultGeneticAlgParametric)
case 2 => runHillClimbing()
case 3 =>
val baldwin = chooseBaldwinAlg()
val generation =
for {
selection <- Instances.rws
mutation <- Instances.mutation.updateDefaults(Map.empty, Map.empty, Map("Mutation Rate" -> 0.0))
survivorSelection <- Instances.elitism.updateDefaults(Map.empty, Map.empty, Map("Elitism Rate" -> 0.0))
} yield new Generation(selection, mutation, Array(survivorSelection), new NormalConstruction, Array.empty)
val engine = Instances.geneticEngine(Instances.ignoreLocalOptima, generation, generation)
val geneticAlg = baldwin.alg(engine)
println("## Genetic Engine: Using RWS ##")
menu(baldwin, geneticAlg)
// case 4 => runMinimalConflicts()
// None or invalid int
case _ => mainMenu()
}
}
// def runMinimalConflicts(): Unit = println("Not yet supported")
def runHillClimbing(): Unit = {
println("Choose your secret: ")
val secret = in.nextLine()
println()
val heuristic = chooseStringHeuristic()
println()
val printInt = readIntWithDefault("Print intermediate states? (0 / 1): ", 1)
val print =
if (printInt == 1) true
else if (printInt == 0) false
else true
HillClimbing.run(secret, heuristic, print)
mainMenu()
}
def chooseGeneticMetadata(): GeneticMetadata[_] = {
// Choose genetic Alg.
// Choose variants
println("\\n" +
"""1. String searching
|2. Function optimization
|3. N-Queens
|4. Knapsack
        |5. Multi-Dimensional Knapsack""".stripMargin)
val algNum = readIntLoop("What problem do you want to solve? ")
algNum match {
case 1 => chooseStringAlg()
case 2 => chooseFunctionAlg()
case 3 => chooseNQueensAlg()
case 4 => chooseKnapsackAlg()
case 5 => chooseMDKnapsackAlg()
case _ => chooseGeneticMetadata()
}
}
def chooseMDKnapsackAlg(): GeneticMetadata[_] = {
print("\\nChoose an instance (file name, e.g. PET7.dat): ")
val fileName = in.nextLine()
try {
var filePath = Paths.get("samples/").resolve(fileName)
if(!Files.exists(filePath))
filePath = Paths.get(fileName)
val data = new String(Files.readAllBytes(filePath), StandardCharsets.UTF_8)
val instance = MDKnapsackParser.parse(data)
new MDKnapsackMain(instance)
} catch {
case e: IOException =>
println("Invalid file.")
chooseMDKnapsackAlg()
}
}
@tailrec
def chooseStringHeuristic(): (Array[Char], Array[Char]) => Double = {
println(
"""1. Individual distance heuristic
|2. Exact matches heuristic
|3. Exact matches and char-contained matches""".stripMargin)
val heuristicNum = readIntWithDefault("Choose a string heuristic (default 2): ", 2)
heuristicNum match {
case 1 => StringHeuristics.heuristic1
case 2 => StringHeuristics.heuristic2
case 3 =>
val exactsWeight = readIntLoop("Choose a relative weight for exact matches (int): ")
val containsWeight = readIntLoop("Choose a relative weight for contained matches (int): ")
StringHeuristics.heuristic3(_, _, containsWeight, exactsWeight)
case _ => chooseStringHeuristic()
}
}
def chooseBaldwinAlg(): GeneticMetadata[_] = {
@tailrec
def chooseBinaryString(): Array[Byte] = {
println("\\nChoose a binary string to search (blank for 20 x 0's): ")
val secret = in.nextLine()
if (secret.isEmpty) Array.fill(20)(0)
else if (secret.forall(c => c == '0' || c == '1')) {
secret.iterator.map[Byte] {
case '0' => 0
case '1' => 1
}.toArray
} else {
chooseBinaryString()
}
}
val secret = chooseBinaryString()
new BaldwinMain(secret)
}
def chooseStringAlg(): GeneticMetadata[_] = {
// Choose String
println("\\nChoose your secret: ")
val secret = in.nextLine()
println()
// Choose crossover
def chooseCrossover(): (Array[Char], Array[Char], Random) => Array[Char] = {
println(
"""1. One Point Crossover
|2. Two Point Crossover
|3. Uniform Crossover""".stripMargin)
val crossoverNum = readIntWithDefault("Choose a crossover strategy (default 1): ", 1)
crossoverNum match {
case 1 => Crossover.onePointCrossoverString
case 2 => Crossover.twoPointCrossoverString
case 3 => Crossover.uniformCrossoverString
}
}
val crossover = chooseCrossover()
println()
val heuristic = chooseStringHeuristic()
new GeneticStringMain(secret, crossover, heuristic)
}
def chooseFunctionAlg(): GeneticMetadata[_] = {
def chooseFunc(): Func = {
println(
"""1. Lab test Function
|2. Holder's table Function
""".stripMargin)
val funcNum = readIntWithDefault("Choose a function (default 2): ", 2)
funcNum match {
case 1 => LabTestFunction
case 2 => HoldersTableFunction
case _ => chooseFunc()
}
}
val func = chooseFunc()
new GeneticFuncMain(func)
}
def chooseNQueensAlg(): GeneticMetadata[_] = {
val boardSize = readIntWithDefault("Choose board size (default 10): ", 10)
def chooseMating(): (Array[Int], Array[Int], Random) => Array[Int] = {
println()
println(
"""1. PMX - Partially Matched Crossover (*)
|2. OX - Ordered Crossover
|3. CX - Cycle Crossover
""".stripMargin)
val matingNum = readIntWithDefault("Choose a mating algorithm (default 1): ", 1)
matingNum match {
case 1 => QueenMating.pmx
case 2 => QueenMating.ox
case 3 => QueenMating.cx
case _ => chooseMating()
}
}
val mating = chooseMating()
def chooseMutation(): (Array[Int], Random) => Unit = {
println()
println(
"""1. Displacement
|2. Exchange (*)
|3. Insertion
|4. Simple Inversion
|5. Complex Inversion
|6. Scramble
""".stripMargin)
val mutationNum = readIntWithDefault("Choose a mutation algorithm (default 2): ", 2)
mutationNum match {
case 1 => QueenMutation.displacement
case 2 => QueenMutation.exchange
case 3 => QueenMutation.insertion
case 4 => QueenMutation.simpleInversion
case 5 => QueenMutation.complexInversion
case 6 => QueenMutation.scramble
case _ => chooseMutation()
}
}
val mutation = chooseMutation()
new GeneticQueenMain(boardSize, mating, mutation)
}
def chooseKnapsackAlg(): GeneticMetadata[_] = {
println()
@tailrec
def chooseItems(i: Int, items: ArrayList[Item]): Array[Item] = {
val weight = readDoubleLoop(s"Enter the $i${numSuffix(i)} item's weight (0 to stop): ")
if (weight == 0) return items.toArray(Array.empty[Item])
val value = readDoubleLoop(s"Enter the $i${numSuffix(i)} item's value (0 to stop): ")
if (value == 0) return items.toArray(Array.empty[Item])
items.add(Item(weight, value))
chooseItems(i + 1, items)
}
val items = chooseItems(1, new ArrayList[Item]())
val maxWeight = readDoubleLoop("Enter the maximum weight: ")
print("Enter the solution if you know it (nothing if not): ")
val solution = tryReadDouble()
new GeneticKnapsackMain(items, maxWeight, solution)
}
@tailrec
def chooseParentSelection(): Parametric[ParentSelection] = {
println("\\n# Choose Parent Selection Algorithm:")
println(
"""1. Top Selection (*)
|2. Roulette Wheel Selection - RWS
|3. Stochastic Universal Sampling - SUS
|4. Ranking
|5. Tournament
""".stripMargin)
readIntWithDefault("Choose a parent selection strategy (default 1): ", 1) match {
case 1 => Instances.topSelection
case 2 => Instances.rws
case 3 => Instances.sus
case 4 => Instances.ranking
case 5 => Instances.tournament
case _ => chooseParentSelection()
}
}
def chooseSurvivorSelection(localOptimum: Boolean): Parametric[Array[SurvivorSelectionStrategy]] = {
println("\\n# Choose Survivor Selection Algorithm:")
val default = if (!localOptimum) 1 else 2
println("1. Elitism " + (if (!localOptimum) "(*)" else ""))
println("2. Elitism with Random Immigrants " + (if (localOptimum) "(*)" else ""))
readIntWithDefault(s"Choose a survivor selection strategy (default $default): ", default) match {
case 1 => Instances.elitism.map(Array(_))
case 2 => Parametric.map2(Instances.elitism, Instances.randomImmigrantsElitism)(Array(_, _))
case _ => chooseSurvivorSelection(localOptimum)
}
}
def chooseFitnessMappings(): Parametric[List[FitnessMapping]] = {
def optional[A](select: Boolean, element: A): List[A] = if (select) List(element) else Nil
@tailrec
def go(windowing: Boolean, exponential: Boolean, sigma: Boolean, aging: Boolean, niching: Boolean): List[Parametric[FitnessMapping]] = {
println("\\n# Choose Fitness Mappings:")
println("1. Windowing " + (if (windowing) "(*)" else ""))
println("2. Exponential Scaling " + (if (exponential) "(*)" else ""))
println("3. Sigma Scaling " + (if (sigma) "(*)" else ""))
println("4. Aging " + (if (aging) "(*)" else ""))
println("5. Niching " + (if (niching) "(*)" else ""))
print("Enter what you want to choose (multiple selection, blank to continue): ")
tryReadInt() match {
case Some(1) => go(!windowing, exponential, sigma, aging, niching)
case Some(2) => go(windowing, !exponential, sigma, aging, niching)
case Some(3) => go(windowing, exponential, !sigma, aging, niching)
case Some(4) => go(windowing, exponential, sigma, !aging, niching)
case Some(5) => go(windowing, exponential, sigma, aging, !niching)
case Some(_) => go(windowing, exponential, sigma, aging, niching)
case None =>
optional(windowing, Instances.windowing) ++
optional(exponential, Instances.exponentialScaling) ++
optional(sigma, Instances.sigmaScaling) ++
optional(aging, Instances.aging) ++
optional(niching, Instances.niching)
}
}
val mappings: List[Parametric[FitnessMapping]] = go(windowing = false, exponential = false, sigma = false, aging = false, niching = false)
Parametric.parametricMonad.sequence(mappings)
}
def choosePopulationConstruction(): PopulationConstruction = {
print("\\nDo you want to deduplicate genes? (y/n, default n): ")
val answer = in.nextLine().trim
answer.toLowerCase match {
case "y" | "yes" | "1" | "true" => new DeduplicatedConstruction
case "n" | "no" | "0" | "false" | "" => new NormalConstruction
case _ => choosePopulationConstruction()
}
}
def chooseGeneration(localOptimum: Boolean): Parametric[Generation] = {
if (!localOptimum) println("\\n### Choosing Normal Generation ###") else println("\\n### Choosing Local Optimum Generation ###")
val parentSelectionParam: Parametric[ParentSelection] = chooseParentSelection()
val survivorSelectionParam: Parametric[Array[SurvivorSelectionStrategy]] = chooseSurvivorSelection(localOptimum)
val populationConstruction: PopulationConstruction = choosePopulationConstruction()
val fitnessMappingsParam: Parametric[List[FitnessMapping]] = chooseFitnessMappings()
val mutationStrategy = if (!localOptimum) Instances.mutation else Instances.hyperMutation
for {
parentSelection <- parentSelectionParam
mutationStrategy <- mutationStrategy
survivorSelection <- survivorSelectionParam
fitnessMappings <- fitnessMappingsParam
} yield new Generation(parentSelection, mutationStrategy, survivorSelection, populationConstruction, fitnessMappings.toArray)
}
def chooseLocalOptimumSignal(): (Boolean, Parametric[LocalOptimaSignal]) = {
println("\\n# Choose Local Optima Signal:")
println(
"""1. Ignore Local Optima
|2. Gene Similarity Detection (*)
|3. Fitness Similarity Detection (std. dev)
""".stripMargin)
readIntWithDefault("Choose a local optima signal (default 2): ", 2) match {
case 1 => (false, Instances.ignoreLocalOptima)
case 2 => (true, Instances.geneSimilarity)
case 3 => (true, Instances.stdDevSimilarity)
case _ => chooseLocalOptimumSignal()
}
}
def chooseEngine(): Parametric[GeneticEngine] = {
println("\\n########## Choosing Genetic Engine ##########\\n")
val normalGeneration: Parametric[Generation] = chooseGeneration(localOptimum = false)
val (detectLocalOptimum, localOptimumSignal) = chooseLocalOptimumSignal()
val localOptimumGeneration =
if (detectLocalOptimum)
chooseGeneration(localOptimum = true)
else
normalGeneration
Instances.geneticEngine(localOptimumSignal, normalGeneration, localOptimumGeneration)
}
def menu(geneticMeta: GeneticMetadata[_], algParams: Parametric[GeneticAlg[_]]): Params = {
println("\\n" +
s"""|1. run - Run ${geneticMeta.name}
|2. params - Change Parameters of the Genetic Algorithm
|3. engine - Choose the Genetic Engine Algorithms
|4. opt - Optimize Parameters of the Genetic Algorithm
|5. analyse - Create a statistical report of the Genetic Algorithm
|6. bench - Benchmark the Genetic Algorithm
|7. main - Return to the main menu
""".stripMargin)
print("Enter your selection: ")
val input = in.nextLine()
input match {
case "run" | "1" =>
val defaultMaxTime = geneticMeta.defaultMaxTime
val maxTime = readDoubleWithDefault(s"Enter the maximum runtime in seconds (default $defaultMaxTime): ", defaultMaxTime) max 0
val defaultPrintEvery = geneticMeta.defaultPrintEvery
val printEvery = readIntWithDefault(s"Print best every how many iterations? (default $defaultPrintEvery, 0 for never) ", defaultPrintEvery) max 0
val newParams = runGenetic(geneticMeta, algParams, maxTime, printEvery)
if (!geneticMeta.isOpt) menu(geneticMeta, algParams)
else newParams // return, not recurse
case "genetic/params" | "2" =>
val newParams = modifyParams(algParams)
menu(geneticMeta, newParams)
case "engine" | "3" =>
val newEngine = chooseEngine()
menu(geneticMeta, geneticMeta.alg(newEngine))
case "opt" | "4" =>
val geneticParams = new GeneticParamsMain(geneticMeta, algParams, defaultMaxTime = 100.0)
val optimizedAlgParams = menu(geneticParams, geneticParams.defaultGeneticAlgParametric).prettify
menu(geneticMeta, algParams.updateArrayParams(optimizedAlgParams))
case "analyse" | "5" =>
println("Enter analysis name: ")
val name = in.nextLine()
analysis(name, Analysis.analysis(name, algParams))
menu(geneticMeta, algParams)
case "bench" | "6" =>
bench(algParams)
menu(geneticMeta, algParams)
case "genetic/main" | "7" =>
mainMenu()
System.exit(0)
throw new IllegalStateException()
case _ => menu(geneticMeta, algParams)
}
}
def analysis(name: String, analysisParams: Parametric[Analysis]): Unit = {
println()
println(
"""run - Run the analysis
|params - Change analysis parameters
""".stripMargin)
print("Enter your selection: ")
val input = in.nextLine()
input match {
case "run" =>
analysisParams.applyDefaults().runAnalysis()
case "genetic/params" =>
val newAnalysisParams = modifyParams(analysisParams)
analysis(name, newAnalysisParams)
case _ =>
analysis(name, analysisParams)
}
}
def bench(alg: Parametric[GeneticAlg[_]]): Unit = {
val rounds = readIntWithDefault("Enter the number of rounds (1000 default): ", 1000)
val maxTime = readDoubleWithDefault("Enter the time limit per run (0.3 default): ", 0.3)
val benchmark = Benchmark.benchmark(alg.asInstanceOf[Parametric[GeneticAlg[Object]]], rounds, maxTime)
println()
println(benchmark.prettyFormat)
}
def modifyParams[A](params: Parametric[A]): Parametric[A] = {
val ints = params.intNamesDefaults.toIndexedSeq
val doubles = params.doubleNamesDefaults.toIndexedSeq
println {
(ints ++ doubles).iterator.zipWithIndex.map {
case ((name, value), index) => s"${index + 1}. $name = $value"
}.mkString("\\n")
}
val paramNum = readIntLoop("Which parameter to change? (0 to skip) ")
if (paramNum == 0)
params
else if (paramNum >= 1 && paramNum < ints.length + 1) {
val index = paramNum - 1
val paramName = ints(index)._1
val newValue = readIntLoop(s"Set $paramName = ")
Parametric(params.applyParams, params.intNamesDefaults.updated(paramName, newValue), params.intsMin, params.intsMax, params.doubleNamesDefaults)
} else if (paramNum < ints.length + doubles.length + 1) {
val index = paramNum - ints.length - 1
val paramName = doubles(index)._1
val newValue = readDoubleLoop(s"Set $paramName = ")
Parametric(params.applyParams, params.intNamesDefaults, params.intsMin, params.intsMax, params.doubleNamesDefaults.updated(paramName, newValue))
} else {
params
}
}
def runGenetic(main: GeneticMetadata[_], algParams: Parametric[GeneticAlg[_]], maxTime: Double, printEvery: Int): Params = {
val alg = algParams.applyDefaults()
val genetic: Genetic[Object] = alg.genetic.asInstanceOf[Genetic[Object]]
val start = System.currentTimeMillis
val (population: Population[_], iterations) = alg.run(printEvery, maxTime)
val end = System.currentTimeMillis
val time = end - start
val popSize = population.population.length
println(s"Best ${5 min popSize}:")
println(population.population.sortBy(_.fitness).take(5).map { gene =>
val geneObj = gene.gene.asInstanceOf[Object]
genetic.show(geneObj) + ", fitness = " + genetic.fitness(geneObj) + ", meaningful fitness = " + genetic.score(gene.asInstanceOf[Gene[Object]])
}.mkString("\\n"))
println(time + "ms, " + iterations + " iterations\\t\\t\\t\\tseed: " + main.seed)
if (main.isOpt) {
population.asInstanceOf[Population[Params]].population(0).gene
} else
algParams.defaultNamedParams.toParams
}
// ---------------------------------------------------------------------------------------------------------
// Input Helpers
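  // Maps a number to its English ordinal suffix by looking only at the last digit:
  // numSuffix(1) == "st", numSuffix(2) == "nd", numSuffix(23) == "rd", numSuffix(7) == "th".
  // Note that with this rule 11, 12 and 13 also get "st"/"nd"/"rd".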
def numSuffix(n: Int): String = {
n.toString.last match {
case '1' => "st"
case '2' => "nd"
case '3' => "rd"
case _ => "th"
}
}
def readDoubleLoop(prompt: String): Double = {
print(prompt)
tryReadDouble() match {
case Some(x) => x
case None => readDoubleLoop(prompt)
}
}
def tryReadDouble(): Option[Double] = {
val line = in.nextLine()
if (line.isEmpty) None
else Try(line.toDouble).toOption
}
def readDoubleWithDefault(prompt: String, default: Double): Double = {
print(prompt)
tryReadDouble().getOrElse(default)
}
@tailrec
def readIntLoop(prompt: String): Int = {
print(prompt)
tryReadInt() match {
case Some(n) => n
case None => readIntLoop(prompt)
}
}
def tryReadInt(): Option[Int] = {
val line = in.nextLine()
if (line.isEmpty) None
else Try(line.toInt).toOption
}
def readIntWithDefault(prompt: String, default: Int): Int = {
print(prompt)
tryReadInt().getOrElse(default)
}
}
| NightRa/AILab | Genetic/src/main/scala/genetic/main/UserMain.scala | Scala | apache-2.0 | 21,659 |
package pl.touk.nussknacker.engine.lite.kafka
import com.dimafeng.testcontainers._
import com.typesafe.scalalogging.LazyLogging
import org.scalatest.{FunSuite, Matchers}
import pl.touk.nussknacker.test.{EitherValuesDetailedMessage, ExtremelyPatientScalaFutures}
import sttp.client.asynchttpclient.future.AsyncHttpClientFutureBackend
import sttp.client.{NothingT, SttpBackend, UriContext, asString, basicRequest}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
class NuKafkaRuntimeDockerProbesTest extends FunSuite with BaseNuKafkaRuntimeDockerTest with Matchers with ExtremelyPatientScalaFutures with LazyLogging with EitherValuesDetailedMessage {
override val container: Container = {
kafkaContainer.start() // must be started before prepareTestCaseFixture because it creates topic via api
fixture = prepareTestCaseFixture("probes", NuKafkaRuntimeTestSamples.jsonPingPongScenario)
startRuntimeContainer(fixture.scenarioFile, checkReady = false)
MultipleContainers(kafkaContainer, runtimeContainer)
}
private implicit val backend: SttpBackend[Future, Nothing, NothingT] = AsyncHttpClientFutureBackend()
private val baseManagementUrl = uri"http://localhost:$runtimeManagementMappedPort"
test("readiness probe") {
eventually {
val readyResult = basicRequest
.get(baseManagementUrl.path("ready"))
.response(asString)
.send().futureValue.body.rightValue
readyResult shouldBe "OK"
}
}
test("liveness probe") {
eventually {
val livenessResult = basicRequest
.get(baseManagementUrl.path("alive"))
.response(asString)
.send().futureValue.body.rightValue
livenessResult shouldBe "OK"
}
}
} | TouK/nussknacker | engine/lite/kafka/integration-test/src/it/scala/pl/touk/nussknacker/engine/lite/kafka/NuKafkaRuntimeDockerProbesTest.scala | Scala | apache-2.0 | 1,749 |
package uk.gov.dvla.vehicles.presentation.common.controllers
// Used to test the version controller. Used in the routes file.
class VersionControllerTest extends Version
| dvla/vehicles-presentation-common | common-test/app/uk/gov/dvla/vehicles/presentation/common/controllers/VersionControllerTest.scala | Scala | mit | 171 |
/*
* Copyright 2015 - 2017 Pablo Alcaraz
* Mysql to Postgres converter
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.mysql2postgresql.converter.conversion
import com.github.mysql2postgresql.converter.GeneratedStatements
import com.github.mysql2postgresql.converter.conversion.data.DataConverter
import net.sf.jsqlparser.expression.{StringValue, LongValue, ExpressionVisitor, Expression}
import net.sf.jsqlparser.expression.operators.relational.{ExpressionList, ItemsListVisitor, MultiExpressionList}
import net.sf.jsqlparser.statement.insert.Insert
import net.sf.jsqlparser.statement.select.SubSelect
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.stereotype.Component
import scala.collection.JavaConversions._
import scala.collection.mutable
/**
* Process conversion for "INSERT" statements.
*/
@Component
class InsertConverter extends ConversionProcessor {
var tableDictionary: TableDictionary = _
@Autowired
def this(tableDictionary: TableDictionary) = {
this()
this.tableDictionary = tableDictionary
}
/**
* Process current statement and convert it to postgres.
*
* @param generatedStatements statement to convert.
*/
override def process(generatedStatements: GeneratedStatements): Unit = {
val insertStatement: Insert = generatedStatements.statement.asInstanceOf[Insert]
if (insertStatement.getColumns != null) {
throw new RuntimeException("Insert with declared columns are not supported.")
}
val columns = tableDictionary.get(insertStatement.getTable.getName)
insertStatement.getItemsList.accept(new ItemsListVisitor {
override def visit(subSelect: SubSelect): Unit = {
        // Sub-selects are not converted; they are passed through as-is. Let's hope for the best. :)
println(insertStatement)
}
override def visit(expressionList: ExpressionList): Unit = {
formatExpressionList(expressionList, columns)
}
override def visit(multiExprList: MultiExpressionList): Unit = {
for (expressionList: ExpressionList <- multiExprList.getExprList) {
formatExpressionList(expressionList, columns)
}
}
})
}
def formatExpressionList(expressionList: ExpressionList, columns: mutable.Buffer[DataConverter]): Unit = {
var index = 0
for(expression: Expression <- expressionList.getExpressions) {
val newExpression = columns(index).convert(expression)
if(!newExpression.equals(expression)) {
        // The converter returned a different object: replace the old expression with the new one.
expressionList.getExpressions.set(index, newExpression)
}
index+=1
}
}
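
  // Illustration (hypothetical statement and conversions): for
  //   INSERT INTO users VALUES (1, '0000-00-00');
  // each value expression is passed, by position, to the DataConverter registered for the
  // matching column of `users` in the TableDictionary, and it is replaced in the expression
  // list whenever the converter returns a different expression (for example a NULL literal
  // for a MySQL zero date). Which rewrites actually happen depends entirely on the
  // configured DataConverter implementations.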
}
| pabloa/mysql2postgresql | src/main/scala/com/github/mysql2postgresql/converter/conversion/InsertConverter.scala | Scala | apache-2.0 | 3,175 |
package slick.compiler
import slick.SlickException
import slick.ast._
import Util._
import TypeUtil._
import slick.util.ConstArray
/** Create a ResultSetMapping root node, ensure that the top-level server-side node returns a
* collection, and hoist client-side type conversions into the ResultSetMapping. The original
* result type (which was removed by `removeMappedTypes`) is assigned back to the top level. */
class CreateResultSetMapping extends Phase {
val name = "createResultSetMapping"
def apply(state: CompilerState) = state.map { n =>
val tpe = state.get(Phase.removeMappedTypes).get
ClientSideOp.mapServerSide(n, keepType = false) { ch =>
val syms = ch.nodeType.structural match {
case StructType(defs) => defs.map(_._1)
case CollectionType(_, Type.Structural(StructType(defs))) => defs.map(_._1)
case t => throw new SlickException("No StructType found at top level: "+t)
}
val gen = new AnonSymbol
(tpe match {
case CollectionType(cons, el) =>
ResultSetMapping(gen, collectionCast(ch, cons).infer(), createResult(Ref(gen) :@ ch.nodeType.asCollectionType.elementType, el, syms))
case t =>
ResultSetMapping(gen, ch, createResult(Ref(gen) :@ ch.nodeType.asCollectionType.elementType, t, syms))
})
}.infer()
}
def collectionCast(ch: Node, cons: CollectionTypeConstructor): Node = ch.nodeType match {
case CollectionType(c, _) if c == cons => ch
case _ => CollectionCast(ch, cons).infer()
}
/** Create a structured return value for the client side, based on the
* result type (which may contain MappedTypes). */
def createResult(ref: Ref, tpe: Type, syms: ConstArray[TermSymbol]): Node = {
var curIdx = 0
def f(tpe: Type): Node = {
logger.debug("Creating mapping from "+tpe)
tpe.structural match {
case ProductType(ch) =>
ProductNode(ch.map(f))
case StructType(ch) =>
ProductNode(ch.map { case (_, t) => f(t) })
case t: MappedScalaType =>
TypeMapping(f(t.baseType), t.mapper, t.classTag)
case o @ OptionType(Type.Structural(el)) if !el.isInstanceOf[AtomicType] =>
val discriminator = Select(ref, syms(curIdx)).infer()
curIdx += 1
val data = f(o.elementType)
RebuildOption(discriminator, data)
case t =>
curIdx += 1
// Assign the original type. Inside a RebuildOption the actual column type will always be
// Option-lifted but we can still treat it as the base type when the discriminator matches.
val sel = Select(ref, syms(curIdx-1)).infer()
val tSel = t.structuralRec
if(sel.nodeType.structuralRec == tSel) sel else Library.SilentCast.typed(tSel, sel)
}
}
f(tpe)
}
}
/** Remove all mapped types from the tree and store the original top-level type as the phase state
* to be used later for building the ResultSetMapping. */
class RemoveMappedTypes extends Phase {
val name = "removeMappedTypes"
type State = Type
def apply(state: CompilerState) =
if(state.get(Phase.assignUniqueSymbols).map(_.typeMapping).getOrElse(true))
state.withNode(removeTypeMapping(state.tree)) + (this -> state.tree.nodeType)
else state + (this -> state.tree.nodeType)
/** Remove TypeMapping nodes and MappedTypes */
def removeTypeMapping(n: Node): Node = n match {
case t: TypeMapping => removeTypeMapping(t.child)
case n =>
val n2 = n.mapChildren(removeTypeMapping, keepType = true)
n2 :@ removeMappedType(n2.nodeType)
}
/** Remove MappedTypes from a Type */
def removeMappedType(tpe: Type): Type = tpe match {
case m: MappedScalaType => removeMappedType(m.baseType)
case t => t.mapChildren(removeMappedType)
}
}
| AtkinsChang/slick | slick/src/main/scala/slick/compiler/CreateResultSetMapping.scala | Scala | bsd-2-clause | 3,807 |
/**
* Copyright (C) 2013 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.fr.relational
import java.sql.{ResultSet, Connection, PreparedStatement}
import org.orbeon.oxf.properties.Properties
import org.orbeon.oxf.externalcontext.ExternalContextOps._
import javax.naming.{Context, InitialContext}
import javax.sql.DataSource
import org.orbeon.oxf.util.ScalaUtils._
import org.orbeon.oxf.util.NetUtils
object RelationalUtils {
def withConnection[T](block: Connection ⇒ T): T = {
// Get connection to the database
val dataSource = {
val datasource = NetUtils.getExternalContext.getRequest.getFirstHeader("orbeon-datasource").get
val jndiContext = new InitialContext().lookup("java:comp/env/jdbc").asInstanceOf[Context]
jndiContext.lookup(datasource).asInstanceOf[DataSource]
}
useAndClose(dataSource.getConnection)(block)
}
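
  // Usage sketch (hypothetical caller): the data source is resolved through JNDI from the
  // "orbeon-datasource" request header, and the connection is closed when the block returns.
  //
  //   RelationalUtils.withConnection { connection ⇒
  //     useAndClose(connection.prepareStatement("select 1")) { ps ⇒
  //       useAndClose(ps.executeQuery()) { rs ⇒ rs.next() }
  //     }
  //   }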
/**
* For cases where we can't use `setString` on a prepared statement
* - Apache Commons Lang had a `StringEscapeUtils.escapeSql` [1] but it has been deprecated in Commons Lang 3 [2]
* - just escaping the quote is enough for safety but might return the wrong result if the string is in a
* LIKE; in that case more characters should be escaped [3]
*
* [1]: http://javasourcecode.org/html/open-source/commons-lang/commons-lang-2.6/org/apache/commons/lang/StringEscapeUtils.java.html
* [2]: http://commons.apache.org/proper/commons-lang/article3_0.html
* [3]: http://www.jguru.com/faq/view.jsp?EID=8881
*/
def sqlString(text: String) = "'" + text.replaceAllLiterally("'", "''") + "'"
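
  // Example: sqlString("O'Brien") returns "'O''Brien'". Doubling the single quote is enough for
  // plain literals; as note [3] above explains, strings used inside LIKE need further escaping.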
}
| tanbo800/orbeon-forms | src/main/scala/org/orbeon/oxf/fr/relational/RelationalUtils.scala | Scala | lgpl-2.1 | 2,245 |
///////////////////////////////////////////////////////////////////////////////
// Poligrounder.scala
//
// Copyright (C) 2010-2014 Ben Wing, The University of Texas at Austin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////////////
/*
Basic idea:
1. We specify a corpus and times to compare, e.g.
poligrounder -i twitter-spritzer --from 201203051627/-1h --to 201203051801/3h
will operate on the twitter-spritzer corpus and compare the hour
directly preceding March 5, 2012, 4:27pm with the three hours directly
following March 5, 2012, 6:01pm.
Time can be specified either as simple absolute times (e.g. 201203051627)
or as a combination of a time and an offset, e.g. 201203051800-10h3m5s means
10 hours 3 minutes 5 seconds prior to 201203051800 (March 5, 2012, 6:00pm).
Absolute times are specified as YYYYMMDD[hh[mm[ss]]], i.e. a specific day
must be given, with optional hours, minutes or seconds, defaulting to the
earliest possible time when a portion is left out. Offsets and lengths
are specified using one or more combinations of number (possibly floating
point) and designator:
s = second
m or mi = minute
h = hour
d = day
mo = month
y = year
2. There may be different comparison methods, triggered by different command
line arguments.
3. Currently we have code in `gridlocate` that reads documents in from a
corpus and amalgamates them using a grid of some sort. We can reuse this
   to amalgamate documents by time. E.g. if we want to compare two specific time
periods, we will have two corresponding cells, one for each period, and
throw away the remaining documents. In other cases where we might want to
look at language models over a period of time, we will have more cells, at
(possibly more or less) regular intervals.
*/
package opennlp.textgrounder
package poligrounder
import scala.util.matching.Regex
import scala.util.Random
import math._
import collection.mutable
import util.argparser._
import util.collection._
import util.io
import util.textdb._
import util.experiment._
import util.print.errprint
import util.error.internal_error
import util.time._
import gridlocate._
import util.debug._
/*
This module is the main driver module for the Poligrounder subproject.
See GridLocate.scala.
*/
/////////////////////////////////////////////////////////////////////////////
// Main code //
/////////////////////////////////////////////////////////////////////////////
/**
* Class retrieving command-line arguments or storing programmatic
* configuration parameters.
*
* @param parser If specified, should be a parser for retrieving the
* value of command-line arguments from the command line. Provided
* that the parser has been created and initialized by creating a
* previous instance of this same class with the same parser (a
* "shadow field" class), the variables below will be initialized with
* the values given by the user on the command line. Otherwise, they
* will be initialized with the default values for the parameters.
* Because they are vars, they can be freely set to other values.
*
*/
class PoligrounderParameters(val parser: ArgParser = null) extends
GridLocateParameters {
var from = ap.option[String]("f", "from",
must = be_specified,
help = """Chunk of start time to compare.""")
var to = ap.option[String]("t", "to",
must = be_specified,
help = """Chunk of end time to compare.""")
var min_prob = ap.option[Double]("min-prob", "mp", default = 0.0,
must = be_within(0.0, 1.0),
help = """Mininum probability when comparing language models.
Default is 0.0, which means no restrictions.""")
var max_grams = ap.option[Int]("max-grams", "mg", default = 200,
must = be_>(0),
help = """Maximum number of grams (words or n-grams) to output when
comparing language models. Default is %default. This applies separately
to those grams that have increased and decreased, meaning the total
number counting both kinds may be as much as twice the maximum.""")
var ideological_user_corpus = ap.option[String](
"ideological-user-corpus", "iuc",
help="""Textdb containing corpus output from FindPolitical,
listing users and associated ideologies. The value can be any of
the following: Either the data or schema file of the database;
the common prefix of the two; or the directory containing them, provided
there is only one textdb in the directory.""")
var ideological_users: Map[String, Double] = _
var ideological_users_liberal: Map[String, Double] = _
var ideological_users_conservative: Map[String, Double] = _
var ideological_categories: Seq[String] = _
if (ap.parsedValues) {
if (ideological_user_corpus != null) {
val rows = TextDB.read_textdb(io.localfh,
ideological_user_corpus, suffix_re = "ideo-users")
val users =
(for (row <- rows) yield {
val user = row.gets("user")
val ideology = row.get[Double]("ideology")
(user, ideology)
}).toMap
ideological_users = users
ideological_users_liberal =
users filter { case (u, ideo) => ideo < 0.33 }
ideological_users_conservative =
users filter { case (u, ideo) => ideo > 0.66 }
ideological_categories = Seq("liberal", "conservative")
} else
ideological_categories = Seq("all")
}
// Unused, determined by --ideological-user-corpus.
// var mode = ap.option[String]("m", "mode",
// default = "combined",
// choices = Seq("combined", "ideo-users"),
// help = """How to compare language models. Possible values are
//
// 'combined': For a given time period, combine all users into a single
// language model.
//
// 'ideo-users': Retrieve the ideology of the users and use that to
// separate the users into liberal and conservative, and compare those
// separately.""")
}
class PoligrounderDriver extends
GridLocateDriver[TimeCoord] with StandaloneExperimentDriverStats {
type TParam = PoligrounderParameters
type TRunRes = Unit
var from_chunk: (Long, Long) = _
var to_chunk: (Long, Long) = _
def deserialize_coord(coord: String) = TimeCoord.deserialize(coord)
protected def create_document_factory(lang_model_factory: DocLangModelFactory) =
new TimeDocFactory(this, lang_model_factory)
protected def create_empty_grid(
create_docfact: => GridDocFactory[TimeCoord],
id: String
) = {
val time_docfact = create_docfact.asInstanceOf[TimeDocFactory]
if (params.ideological_user_corpus == null)
new TimeGrid(from_chunk, to_chunk, IndexedSeq("all"), x => "all", time_docfact, id)
else
new TimeGrid(from_chunk, to_chunk, IndexedSeq("liberal", "conservative"),
x => {
if (params.ideological_users_liberal contains x.user)
"liberal"
else if (params.ideological_users_conservative contains x.user)
"conservative"
else
null
}, time_docfact, id)
}
// FIXME!
def create_rough_ranker(args: Array[String]) = ???
def run() {
def parse_interval(param: String) = {
parse_date_interval(param) match {
case (Some((start, end)), "") => (start, end)
case (None, errmess) => param_error(errmess)
case _ => ???
}
}
from_chunk = parse_interval(params.from)
to_chunk = parse_interval(params.to)
val grid = initialize_grid
if (params.ideological_user_corpus == null)
LangModelComparer.compare_cells_2way(
grid.asInstanceOf[TimeGrid], "all",
params.min_prob, params.max_grams)
else
LangModelComparer.compare_cells_4way(
grid.asInstanceOf[TimeGrid], "liberal", "conservative",
params.min_prob, params.max_grams)
}
}
object Poligrounder extends GridLocateApp("Poligrounder") {
type TDriver = PoligrounderDriver
// FUCKING TYPE ERASURE
def create_param_object(ap: ArgParser) = new TParam(ap)
def create_driver = new TDriver
}
| utcompling/textgrounder | src/main/scala/opennlp/textgrounder/poligrounder/Poligrounder.scala | Scala | apache-2.0 | 8,597 |
package com.scala.bala
import com.scala.bala.util.CurrencyCodes
import akka.routing.RoundRobinRouter
import akka.actor.{Props, ActorSystem}
object MainApp {
def main(args: Array[String]): Unit = {
val system = ActorSystem()
val server = system.actorOf(Props(new QuotesRequestor).withRouter(RoundRobinRouter(nrOfInstances = 10)), "QuotesRequestor")
server ! QuotesRequester.request(CurrencyCodes.currencyPairs)
}
} | bbalajisg/scala-projects | currency-retriever/src/main/scala/com/scala/bala/MainApp.scala | Scala | gpl-2.0 | 435 |
package org.apache.spot.utilities.data.validation
import org.apache.spark.sql.types._
import org.scalatest.{FlatSpec, Matchers}
class InputSchemaTest extends FlatSpec with Matchers {
"validate" should "return true when incoming schema is valid" in {
val incomingSchema = StructType(List(StructField("ip", StringType),
StructField("ibyt", LongType),
StructField("host", StringType),
StructField("score", FloatType)))
val modelSchema = StructType(List(StructField("ip", StringType),
StructField("ibyt", LongType),
StructField("host", StringType)))
val results = InputSchema.validate(incomingSchema, modelSchema)
results.isValid shouldBe true
}
it should "return false when incoming schema is not valid due to type mismatch" in {
val incomingSchema = StructType(List(StructField("ip", StringType),
StructField("ibyt", StringType),
StructField("host", IntegerType),
StructField("score", FloatType)))
val modelSchema = StructType(List(StructField("ip", StringType),
StructField("ibyt", LongType),
StructField("host", StringType)))
val results = InputSchema.validate(incomingSchema, modelSchema)
results.isValid shouldBe false
}
it should "return false when incoming schema is not valid due to required field is missing" in {
val incomingSchema = StructType(List(StructField("ip", StringType),
StructField("ibyt", LongType),
StructField("score", FloatType)))
val modelSchema = StructType(List(StructField("ip", StringType),
StructField("ibyt", LongType),
StructField("host", StringType)))
val results = InputSchema.validate(incomingSchema, modelSchema)
results.isValid shouldBe false
}
}
| brandon-edwards/incubator-spot | spot-ml/src/test/scala/org/apache/spot/utilities/data/validation/InputSchemaTest.scala | Scala | apache-2.0 | 1,742 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.sql.catalyst.expressions.{CaseWhen, If, Literal}
import org.apache.spark.sql.execution.LocalTableScanExec
import org.apache.spark.sql.functions.{lit, when}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.BooleanType
class ReplaceNullWithFalseInPredicateEndToEndSuite extends QueryTest with SharedSparkSession {
import testImplicits._
test("SPARK-25860: Replace Literal(null, _) with FalseLiteral whenever possible") {
withTable("t1", "t2") {
Seq((1, true), (2, false)).toDF("l", "b").write.saveAsTable("t1")
Seq(2, 3).toDF("l").write.saveAsTable("t2")
val df1 = spark.table("t1")
val df2 = spark.table("t2")
val q1 = df1.where("IF(l > 10, false, b AND null)")
checkAnswer(q1, Seq.empty)
checkPlanIsEmptyLocalScan(q1)
val q2 = df1.where("CASE WHEN l < 10 THEN null WHEN l > 40 THEN false ELSE null END")
checkAnswer(q2, Seq.empty)
checkPlanIsEmptyLocalScan(q2)
val q3 = df1.join(df2, when(df1("l") > df2("l"), lit(null)).otherwise(df1("b") && lit(null)))
checkAnswer(q3, Seq.empty)
checkPlanIsEmptyLocalScan(q3)
val q4 = df1.where("IF(IF(b, null, false), true, null)")
checkAnswer(q4, Seq.empty)
checkPlanIsEmptyLocalScan(q4)
val q5 = df1.selectExpr("IF(l > 1 AND null, 5, 1) AS out")
checkAnswer(q5, Row(1) :: Row(1) :: Nil)
q5.queryExecution.executedPlan.foreach { p =>
assert(p.expressions.forall(e => e.find(_.isInstanceOf[If]).isEmpty))
}
val q6 = df1.selectExpr("CASE WHEN (l > 2 AND null) THEN 3 ELSE 2 END")
checkAnswer(q6, Row(2) :: Row(2) :: Nil)
q6.queryExecution.executedPlan.foreach { p =>
assert(p.expressions.forall(e => e.find(_.isInstanceOf[CaseWhen]).isEmpty))
}
checkAnswer(df1.where("IF(l > 10, false, b OR null)"), Row(1, true))
}
def checkPlanIsEmptyLocalScan(df: DataFrame): Unit = df.queryExecution.executedPlan match {
case s: LocalTableScanExec => assert(s.rows.isEmpty)
case p => fail(s"$p is not LocalTableScanExec")
}
}
test("SPARK-26107: Replace Literal(null, _) with FalseLiteral in higher-order functions") {
def assertNoLiteralNullInPlan(df: DataFrame): Unit = {
df.queryExecution.executedPlan.foreach { p =>
assert(p.expressions.forall(_.find {
case Literal(null, BooleanType) => true
case _ => false
}.isEmpty))
}
}
withTable("t1", "t2") {
// to test ArrayFilter and ArrayExists
spark.sql("select array(null, 1, null, 3) as a")
.write.saveAsTable("t1")
// to test MapFilter
spark.sql("""
select map_from_entries(arrays_zip(a, transform(a, e -> if(mod(e, 2) = 0, null, e)))) as m
from (select array(0, 1, 2, 3) as a)
""").write.saveAsTable("t2")
val df1 = spark.table("t1")
val df2 = spark.table("t2")
// ArrayExists
withSQLConf(SQLConf.LEGACY_ARRAY_EXISTS_FOLLOWS_THREE_VALUED_LOGIC.key -> "false") {
val q1 = df1.selectExpr("EXISTS(a, e -> IF(e is null, null, true))")
checkAnswer(q1, Row(true) :: Nil)
assertNoLiteralNullInPlan(q1)
}
// ArrayFilter
val q2 = df1.selectExpr("FILTER(a, e -> IF(e is null, null, true))")
checkAnswer(q2, Row(Seq[Any](1, 3)) :: Nil)
assertNoLiteralNullInPlan(q2)
// MapFilter
val q3 = df2.selectExpr("MAP_FILTER(m, (k, v) -> IF(v is null, null, true))")
checkAnswer(q3, Row(Map[Any, Any](1 -> 1, 3 -> 3)))
assertNoLiteralNullInPlan(q3)
}
}
}
| jkbradley/spark | sql/core/src/test/scala/org/apache/spark/sql/ReplaceNullWithFalseInPredicateEndToEndSuite.scala | Scala | apache-2.0 | 4,486 |
/*
* Copyright (C) 2012 Mikołaj Sochacki mikolajsochacki AT gmail.com
* This file is part of VRegister (Virtual Register)
* Apache License Version 2.0, January 2004 http://www.apache.org/licenses/
*/
package eu.brosbit.opos.lib
import java.util.{Date, Locale, GregorianCalendar}
import java.text.{SimpleDateFormat}
/** Date formatting helpers. */
object Formater {
def formatTime(t: Date): String = {
val l = new Locale("pl")
val sfd = new SimpleDateFormat("EEE, dd MMM yyyy HH:mm", l)
sfd.format(t)
}
def strForDateTimePicker(t: Date): String = {
val l = new Locale("pl")
val sfd = new SimpleDateFormat("dd.MM.yyyy, HH:mm", l)
sfd.format(t)
}
def prettyDate(t: Date): String = {
val l = new Locale("pl")
val sfd = new SimpleDateFormat("EEE, dd MMM yyyy", l)
sfd.format(t)
}
def formatDate(t: Date): String = {
val l = new Locale("pl")
val sfd = new SimpleDateFormat("yyyy-MM-dd", l)
sfd.format(t)
}
  /* @param strDate date string in "yyyy-MM-dd" form, e.g. "2015-08-12"
   */
def fromStringToDate(strDate: String): Date = {
val listDate = strDate.split("-")
if (listDate.length == 3) {
val year :: month :: day :: rest = listDate.map(x => x.toInt).toList
val gregorianCal = new GregorianCalendar(year, month - 1, day)
gregorianCal.getTime
} else
{println("========= nieprawwidłowa data"); new Date() }
}
  /* @param strDate date-time string in "dd.MM.yyyy HH:mm" form, e.g. "12.08.2015 10:33"
   */
def fromStringDataTimeToDate(strDate: String): Date = {
val listDateTime = strDate.split(" ").map(_.trim).filter(s => (s.length > 1))
if(listDateTime.length == 2 ) {
val day :: month :: year :: rest1 = listDateTime.head.split('.').map(x => x.toInt).toList
val hour :: minute :: rest2 = listDateTime.last.split(':').map(x => x.toInt).toList
val gregorianCal = new GregorianCalendar(year, month - 1, day, hour, minute)
gregorianCal.getTime
    } else {println("========= invalid date"); new Date() }
}
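
  // Illustrative round trips (hypothetical values, assuming the default JVM time zone):
  //   formatDate(new Date())                        // e.g. "2015-08-12"
  //   fromStringToDate("2015-08-12")                // midnight on 12 August 2015
  //   fromStringDataTimeToDate("12.08.2015 10:33")  // 12 August 2015, 10:33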
}
| mikolajs/osp | src/main/scala/eu/brosbit/opos/lib/Formater.scala | Scala | agpl-3.0 | 1,998 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.io
import java.io.InputStream
import java.nio.ByteBuffer
import java.nio.channels.WritableByteChannel
import com.google.common.primitives.UnsignedBytes
import io.netty.buffer.{ByteBuf, Unpooled}
import org.apache.spark.SparkEnv
import org.apache.spark.internal.config
import org.apache.spark.network.util.ByteArrayWritableChannel
import org.apache.spark.storage.StorageUtils
/**
* Read-only byte buffer which is physically stored as multiple chunks rather than a single
* contiguous array.
*
* @param chunks an array of [[ByteBuffer]]s. Each buffer in this array must have position == 0.
* Ownership of these buffers is transferred to the ChunkedByteBuffer, so if these
* buffers may also be used elsewhere then the caller is responsible for copying
* them as needed.
*/
private[spark] class ChunkedByteBuffer(var chunks: Array[ByteBuffer]) {
require(chunks != null, "chunks must not be null")
require(chunks.forall(_.position() == 0), "chunks' positions must be 0")
// Chunk size in bytes
private val bufferWriteChunkSize =
Option(SparkEnv.get).map(_.conf.get(config.BUFFER_WRITE_CHUNK_SIZE))
.getOrElse(config.BUFFER_WRITE_CHUNK_SIZE.defaultValue.get).toInt
private[this] var disposed: Boolean = false
/**
* This size of this buffer, in bytes.
*/
val size: Long = chunks.map(_.limit().asInstanceOf[Long]).sum
def this(byteBuffer: ByteBuffer) = {
this(Array(byteBuffer))
}
/**
* Write this buffer to a channel.
*/
def writeFully(channel: WritableByteChannel): Unit = {
for (bytes <- getChunks()) {
val originalLimit = bytes.limit()
while (bytes.hasRemaining) {
// If `bytes` is an on-heap ByteBuffer, the Java NIO API will copy it to a temporary direct
// ByteBuffer when writing it out. This temporary direct ByteBuffer is cached per thread.
// Its size has no limit and can keep growing if it sees a larger input ByteBuffer. This may
// cause significant native memory leak, if a large direct ByteBuffer is allocated and
// cached, as it's never released until thread exits. Here we write the `bytes` with
// fixed-size slices to limit the size of the cached direct ByteBuffer.
// Please refer to http://www.evanjones.ca/java-bytebuffer-leak.html for more details.
val ioSize = Math.min(bytes.remaining(), bufferWriteChunkSize)
bytes.limit(bytes.position() + ioSize)
channel.write(bytes)
bytes.limit(originalLimit)
}
}
}
/**
* Wrap this buffer to view it as a Netty ByteBuf.
*/
def toNetty: ByteBuf = {
Unpooled.wrappedBuffer(chunks.length, getChunks(): _*)
}
/**
* Copy this buffer into a new byte array.
*
* @throws UnsupportedOperationException if this buffer's size exceeds the maximum array size.
*/
def toArray: Array[Byte] = {
if (size >= Integer.MAX_VALUE) {
throw new UnsupportedOperationException(
s"cannot call toArray because buffer size ($size bytes) exceeds maximum array size")
}
val byteChannel = new ByteArrayWritableChannel(size.toInt)
writeFully(byteChannel)
byteChannel.close()
byteChannel.getData
}
/**
* Convert this buffer to a ByteBuffer. If this buffer is backed by a single chunk, its underlying
* data will not be copied. Instead, it will be duplicated. If this buffer is backed by multiple
   * chunks, the data underlying this buffer will be copied into a new byte buffer. As a result,
   * this method should only be used if the caller does not need to manage the memory underlying
   * this buffer.
*
* @throws UnsupportedOperationException if this buffer's size exceeds the max ByteBuffer size.
*/
def toByteBuffer: ByteBuffer = {
if (chunks.length == 1) {
chunks.head.duplicate()
} else {
ByteBuffer.wrap(toArray)
}
}
/**
* Creates an input stream to read data from this ChunkedByteBuffer.
*
* @param dispose if true, [[dispose()]] will be called at the end of the stream
* in order to close any memory-mapped files which back this buffer.
*/
def toInputStream(dispose: Boolean = false): InputStream = {
new ChunkedByteBufferInputStream(this, dispose)
}
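  // Minimal usage sketch (buffer contents are illustrative):
  //   val buf = new ChunkedByteBuffer(ByteBuffer.wrap(Array[Byte](1, 2, 3)))
  //   val in = buf.toInputStream()
  //   in.read()  // 1
  //   in.close()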
/**
* Get duplicates of the ByteBuffers backing this ChunkedByteBuffer.
*/
def getChunks(): Array[ByteBuffer] = {
chunks.map(_.duplicate())
}
/**
* Make a copy of this ChunkedByteBuffer, copying all of the backing data into new buffers.
* The new buffer will share no resources with the original buffer.
*
* @param allocator a method for allocating byte buffers
*/
def copy(allocator: Int => ByteBuffer): ChunkedByteBuffer = {
val copiedChunks = getChunks().map { chunk =>
val newChunk = allocator(chunk.limit())
newChunk.put(chunk)
newChunk.flip()
newChunk
}
new ChunkedByteBuffer(copiedChunks)
}
/**
* Attempt to clean up any ByteBuffer in this ChunkedByteBuffer which is direct or memory-mapped.
* See [[StorageUtils.dispose]] for more information.
*/
def dispose(): Unit = {
if (!disposed) {
chunks.foreach(StorageUtils.dispose)
disposed = true
}
}
}
/**
* Reads data from a ChunkedByteBuffer.
*
* @param dispose if true, `ChunkedByteBuffer.dispose()` will be called at the end of the stream
* in order to close any memory-mapped files which back the buffer.
*/
private[spark] class ChunkedByteBufferInputStream(
var chunkedByteBuffer: ChunkedByteBuffer,
dispose: Boolean)
extends InputStream {
private[this] var chunks = chunkedByteBuffer.getChunks().iterator
private[this] var currentChunk: ByteBuffer = {
if (chunks.hasNext) {
chunks.next()
} else {
null
}
}
override def read(): Int = {
if (currentChunk != null && !currentChunk.hasRemaining && chunks.hasNext) {
currentChunk = chunks.next()
}
if (currentChunk != null && currentChunk.hasRemaining) {
UnsignedBytes.toInt(currentChunk.get())
} else {
close()
-1
}
}
override def read(dest: Array[Byte], offset: Int, length: Int): Int = {
if (currentChunk != null && !currentChunk.hasRemaining && chunks.hasNext) {
currentChunk = chunks.next()
}
if (currentChunk != null && currentChunk.hasRemaining) {
val amountToGet = math.min(currentChunk.remaining(), length)
currentChunk.get(dest, offset, amountToGet)
amountToGet
} else {
close()
-1
}
}
override def skip(bytes: Long): Long = {
if (currentChunk != null) {
val amountToSkip = math.min(bytes, currentChunk.remaining).toInt
currentChunk.position(currentChunk.position() + amountToSkip)
if (currentChunk.remaining() == 0) {
if (chunks.hasNext) {
currentChunk = chunks.next()
} else {
close()
}
}
amountToSkip
} else {
0L
}
}
override def close(): Unit = {
if (chunkedByteBuffer != null && dispose) {
chunkedByteBuffer.dispose()
}
chunkedByteBuffer = null
chunks = null
currentChunk = null
}
}
| lxsmnv/spark | core/src/main/scala/org/apache/spark/util/io/ChunkedByteBuffer.scala | Scala | apache-2.0 | 8,028 |
package com.twitter.util
import org.junit.runner.RunWith
import org.scalatest.{Matchers, WordSpec}
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
@RunWith(classOf[JUnitRunner])
class ConfigTest extends WordSpec with MockitoSugar with Matchers {
import Config._
"Config" should {
"computed should delay evaluation" in {
class Foo extends Config.Nothing {
var didIt = false
var x = 10
var y = computed {
didIt = true
x * 2 + 5
}
}
val foo = new Foo
assert(foo.didIt === false)
assert((foo.y: Int) === 25) // use type annotation to force implicit conversion
assert(foo.didIt === true)
}
"subclass can override indepedent var for use in dependent var" in {
class Foo extends Config.Nothing {
var x = 10
var y = computed(x * 2 + 5)
}
val bar = new Foo {
x = 20
}
assert((bar.y: Int) === 45) // use type annotation to force implicit conversion
}
"missingValues" should {
class Bar extends Config.Nothing {
var z = required[Int]
}
class Baz extends Config.Nothing {
var w = required[Int]
}
class Foo extends Config.Nothing {
var x = required[Int]
var y = 3
var bar = required[Bar]
var baz = optional[Baz]
}
"must return empty Seq when no values are missing" in {
val foo = new Foo {
x = 42
bar = new Bar {
z = 10
}
}
assert(foo.missingValues === List())
}
"must find top-level missing values" in {
val foo = new Foo
assert(foo.missingValues.sorted === Seq("x", "bar").sorted)
}
"must find top-level and nested missing values" in {
val foo = new Foo {
bar = new Bar
}
assert(foo.missingValues.sorted === Seq("x", "bar.z").sorted)
}
"must find nested missing values in optional sub-configs" in {
val foo = new Foo {
x = 3
bar = new Bar {
z = 1
}
baz = new Baz
}
assert(foo.missingValues.sorted === Seq("baz.w").sorted)
}
}
}
}
| travisbrown/util | util-core/src/test/scala/com/twitter/util/ConfigTest.scala | Scala | apache-2.0 | 2,256 |
package is.hail.annotations
import is.hail.asm4s._
import is.hail.expr.ir.{Ascending, Descending, EmitMethodBuilder, SortField, SortOrder}
import is.hail.types._
import is.hail.asm4s.coerce
import is.hail.types.physical._
import is.hail.utils._
object CodeOrdering {
sealed trait Op {
type ReturnType
val rtti: TypeInfo[ReturnType]
val missingEqual: Boolean
}
final case class Compare(missingEqual: Boolean = true) extends Op {
type ReturnType = Int
val rtti = typeInfo[Int]
}
final case class CompareStructs(sf: IndexedSeq[SortField], missingEqual: Boolean = true) extends Op {
type ReturnType = Int
val rtti = typeInfo[Int]
}
sealed trait BooleanOp extends Op {
type ReturnType = Boolean
val rtti = typeInfo[Boolean]
}
final case class Equiv(missingEqual: Boolean = true) extends BooleanOp
final case class Lt(missingEqual: Boolean = true) extends BooleanOp
final case class Lteq(missingEqual: Boolean = true) extends BooleanOp
final case class Gt(missingEqual: Boolean = true) extends BooleanOp
final case class Gteq(missingEqual: Boolean = true) extends BooleanOp
final case class Neq(missingEqual: Boolean = true) extends BooleanOp
type F[R] = ((Code[Boolean], Code[_]), (Code[Boolean], Code[_])) => Code[R]
def rowOrdering(
t1: PBaseStruct,
t2: PBaseStruct,
mb: EmitMethodBuilder[_],
sortOrders: Array[SortOrder] = null,
missingFieldsEqual: Boolean = true
): CodeOrdering = new CodeOrdering {
require(sortOrders == null || sortOrders.size == t1.size)
type T = Long
val m1: LocalRef[Boolean] = mb.newLocal[Boolean]()
val m2: LocalRef[Boolean] = mb.newLocal[Boolean]()
val v1s: Array[LocalRef[_]] = t1.types.map(tf => mb.newLocal()(typeToTypeInfo(tf)))
val v2s: Array[LocalRef[_]] = t2.types.map(tf => mb.newLocal()(typeToTypeInfo(tf)))
def setup(i: Int)(x: Value[Long], y: Value[Long]): Code[Unit] = {
val tf1 = t1.types(i)
val tf2 = t2.types(i)
Code(
m1 := t1.isFieldMissing(x, i),
m2 := t2.isFieldMissing(y, i),
v1s(i).storeAny(m1.mux(defaultValue(tf1), Region.loadIRIntermediate(tf1)(t1.fieldOffset(x, i)))),
v2s(i).storeAny(m2.mux(defaultValue(tf2), Region.loadIRIntermediate(tf2)(t2.fieldOffset(y, i)))))
}
private[this] def fieldOrdering(i: Int, op: CodeOrdering.Op): CodeOrdering.F[op.ReturnType] =
mb.getCodeOrdering(
t1.types(i),
t2.types(i),
if (sortOrders == null) Ascending else sortOrders(i),
op)
override def compareNonnull(x: Code[Long], y: Code[Long]): Code[Int] = {
val cmp = mb.newLocal[Int]()
Code.memoize(x, "cord_row_comp_x", y, "cord_row_comp_y") { (x, y) =>
val c = Array.tabulate(t1.size) { i =>
val mbcmp = fieldOrdering(i, CodeOrdering.Compare(missingFieldsEqual))
Code(setup(i)(x, y),
mbcmp((m1, v1s(i)), (m2, v2s(i))))
}.foldRight(cmp.get) { (ci, cont) => cmp.ceq(0).mux(Code(cmp := ci, cont), cmp) }
Code(cmp := 0, c)
}
}
private[this] def dictionaryOrderingFromFields(
op: CodeOrdering.BooleanOp,
zero: Code[Boolean],
combine: (Code[Boolean], Code[Boolean], Code[Boolean]) => Code[Boolean]
)(x: Code[Long],
y: Code[Long]
): Code[Boolean] =
Code.memoize(x, "cord_row_comp_x", y, "cord_row_comp_y") { (x, y) =>
Array.tabulate(t1.size) { i =>
val mbop = fieldOrdering(i, op)
val mbequiv = fieldOrdering(i, CodeOrdering.Equiv(op.missingEqual))
(Code(setup(i)(x, y), mbop((m1, v1s(i)), (m2, v2s(i)))),
mbequiv((m1, v1s(i)), (m2, v2s(i))))
}.foldRight(zero) { case ((cop, ceq), cont) => combine(cop, ceq, cont) }
}
val _ltNonnull = dictionaryOrderingFromFields(
CodeOrdering.Lt(missingFieldsEqual),
false,
{ (isLessThan, isEqual, subsequentLt) =>
isLessThan || (isEqual && subsequentLt) }) _
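    // For a two-field struct the fold above expands, roughly, to
    //   lt(field0) || (equiv(field0) && (lt(field1) || (equiv(field1) && false)))
    // i.e. plain dictionary (lexicographic) order over the fields.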
override def ltNonnull(x: Code[Long], y: Code[Long]): Code[Boolean] = _ltNonnull(x, y)
val _lteqNonnull = dictionaryOrderingFromFields(
CodeOrdering.Lteq(missingFieldsEqual),
true,
{ (isLessThanEq, isEqual, subsequentLtEq) =>
isLessThanEq && (!isEqual || subsequentLtEq) }) _
override def lteqNonnull(x: Code[Long], y: Code[Long]): Code[Boolean] = _lteqNonnull(x, y)
val _gtNonnull = dictionaryOrderingFromFields(
CodeOrdering.Gt(missingFieldsEqual),
false,
{ (isGreaterThan, isEqual, subsequentGt) =>
isGreaterThan || (isEqual && subsequentGt) }) _
override def gtNonnull(x: Code[Long], y: Code[Long]): Code[Boolean] = _gtNonnull(x, y)
val _gteqNonnull = dictionaryOrderingFromFields(
CodeOrdering.Gteq(missingFieldsEqual),
true,
{ (isGreaterThanEq, isEqual, subsequentGteq) =>
isGreaterThanEq && (!isEqual || subsequentGteq) }) _
override def gteqNonnull(x: Code[Long], y: Code[Long]): Code[Boolean] = _gteqNonnull(x, y)
override def equivNonnull(x: Code[Long], y: Code[Long]): Code[Boolean] =
Code.memoize(x, "cord_row_equiv_x", y, "cord_row_equiv_y") { (x, y) =>
Array.tabulate(t1.size) { i =>
val mbequiv = fieldOrdering(i, CodeOrdering.Equiv(missingFieldsEqual))
Code(setup(i)(x, y),
mbequiv((m1, v1s(i)), (m2, v2s(i))))
}.foldRight[Code[Boolean]](const(true))(_ && _)
}
}
def iterableOrdering(t1: PArray, t2: PArray, mb: EmitMethodBuilder[_]): CodeOrdering = new CodeOrdering {
type T = Long
val lord: CodeOrdering = PInt32().codeOrdering(mb)
val ord: CodeOrdering = t1.elementType.codeOrdering(mb, t2.elementType)
val len1: LocalRef[Int] = mb.newLocal[Int]()
val len2: LocalRef[Int] = mb.newLocal[Int]()
val lim: LocalRef[Int] = mb.newLocal[Int]()
val i: LocalRef[Int] = mb.newLocal[Int]()
val m1: LocalRef[Boolean] = mb.newLocal[Boolean]()
val v1: LocalRef[ord.T] = mb.newLocal()(typeToTypeInfo(t1.elementType)).asInstanceOf[LocalRef[ord.T]]
val m2: LocalRef[Boolean] = mb.newLocal[Boolean]()
val v2: LocalRef[ord.T] = mb.newLocal()(typeToTypeInfo(t2.elementType)).asInstanceOf[LocalRef[ord.T]]
val eq: LocalRef[Boolean] = mb.newLocal[Boolean]()
def loop(cmp: Code[Unit], loopCond: Code[Boolean])
(x: Code[Long], y: Code[Long]): Code[Unit] = {
Code.memoize(x, "cord_iter_ord_x", y, "cord_iter_ord_y") { (x, y) =>
Code(
i := 0,
len1 := t1.loadLength(x),
len2 := t2.loadLength(y),
lim := (len1 < len2).mux(len1, len2),
Code.whileLoop(loopCond && i < lim,
m1 := t1.isElementMissing(x, i),
v1.storeAny(Region.loadIRIntermediate(t1.elementType)(t1.elementOffset(x, len1, i))),
m2 := t2.isElementMissing(y, i),
v2.storeAny(Region.loadIRIntermediate(t2.elementType)(t2.elementOffset(y, len2, i))),
cmp, i += 1))
}
}
override def compareNonnull(x: Code[Long], y: Code[Long]): Code[Int] = {
val mbcmp = mb.getCodeOrdering(t1.elementType, t2.elementType, CodeOrdering.Compare())
val cmp = mb.newLocal[Int]()
Code(cmp := 0,
loop(cmp := mbcmp((m1, v1), (m2, v2)), cmp.ceq(0))(x, y),
cmp.ceq(0).mux(
lord.compareNonnull(coerce[lord.T](len1.load()), coerce[lord.T](len2.load())),
cmp))
}
override def ltNonnull(x: Code[Long], y: Code[Long]): Code[Boolean] = {
val mblt = mb.getCodeOrdering(t1.elementType, t2.elementType, CodeOrdering.Lt())
val mbequiv = mb.getCodeOrdering(t1.elementType, t2.elementType, CodeOrdering.Equiv())
val lt = mb.newLocal[Boolean]()
val lcmp = Code(
lt := mblt((m1, v1), (m2, v2)),
eq := mbequiv((m1, v1), (m2, v2)))
Code(lt := false, eq := true,
loop(lcmp, !lt && eq)(x, y),
lt || eq && lord.ltNonnull(coerce[lord.T](len1.load()), coerce[lord.T](len2.load())))
}
override def lteqNonnull(x: Code[Long], y: Code[Long]): Code[Boolean] = {
val mblteq = mb.getCodeOrdering(t1.elementType, t2.elementType, CodeOrdering.Lteq())
val mbequiv = mb.getCodeOrdering(t1.elementType, t2.elementType, CodeOrdering.Equiv())
val lteq = mb.newLocal[Boolean]()
val lcmp = Code(
lteq := mblteq((m1, v1), (m2, v2)),
eq := mbequiv((m1, v1), (m2, v2)))
Code(lteq := true, eq := true,
loop(lcmp, eq)(x, y),
lteq && (!eq || lord.lteqNonnull(coerce[lord.T](len1.load()), coerce[lord.T](len2.load()))))
}
override def gtNonnull(x: Code[Long], y: Code[Long]): Code[Boolean] = {
val mbgt = mb.getCodeOrdering(t1.elementType, t2.elementType, CodeOrdering.Gt())
val mbequiv = mb.getCodeOrdering(t1.elementType, t2.elementType, CodeOrdering.Equiv())
val gt = mb.newLocal[Boolean]()
val lcmp = Code(
gt := mbgt((m1, v1), (m2, v2)),
eq := !gt && mbequiv((m1, v1), (m2, v2)))
Code(gt := false,
eq := true,
loop(lcmp, eq)(x, y),
gt || (eq &&
lord.gtNonnull(coerce[lord.T](len1.load()), coerce[lord.T](len2.load()))))
}
override def gteqNonnull(x: Code[Long], y: Code[Long]): Code[Boolean] = {
val mbgteq = mb.getCodeOrdering(t1.elementType, t2.elementType, CodeOrdering.Gteq())
val mbequiv = mb.getCodeOrdering(t1.elementType, t2.elementType, CodeOrdering.Equiv())
val gteq = mb.newLocal[Boolean]()
val lcmp = Code(
gteq := mbgteq((m1, v1), (m2, v2)),
eq := mbequiv((m1, v1), (m2, v2)))
Code(gteq := true,
eq := true,
loop(lcmp, eq)(x, y),
gteq && (!eq || lord.gteqNonnull(coerce[lord.T](len1.load()), coerce[lord.T](len2.load()))))
}
override def equivNonnull(x: Code[Long], y: Code[Long]): Code[Boolean] = {
val mbequiv = mb.getCodeOrdering(t1.elementType, t2.elementType, CodeOrdering.Equiv())
val lcmp = eq := mbequiv((m1, v1), (m2, v2))
Code(eq := true,
loop(lcmp, eq)(x, y),
eq && lord.equivNonnull(coerce[lord.T](len1.load()), coerce[lord.T](len2.load())))
}
}
def intervalOrdering(t1: PInterval, t2: PInterval, mb: EmitMethodBuilder[_]): CodeOrdering = new CodeOrdering {
type T = Long
val mp1: LocalRef[Boolean] = mb.newLocal[Boolean]()
val mp2: LocalRef[Boolean] = mb.newLocal[Boolean]()
val p1: LocalRef[_] = mb.newLocal()(typeToTypeInfo(t1.pointType))
val p2: LocalRef[_] = mb.newLocal()(typeToTypeInfo(t2.pointType))
def loadStart(x: Value[T], y: Value[T]): Code[Unit] = {
Code(
mp1 := !t1.startDefined(x),
mp2 := !t2.startDefined(y),
p1.storeAny(mp1.mux(defaultValue(t1.pointType), Region.loadIRIntermediate(t1.pointType)(t1.startOffset(x)))),
p2.storeAny(mp2.mux(defaultValue(t2.pointType), Region.loadIRIntermediate(t2.pointType)(t2.startOffset(y)))))
}
def loadEnd(x: Value[T], y: Value[T]): Code[Unit] = {
Code(
mp1 := !t1.endDefined(x),
mp2 := !t2.endDefined(y),
p1.storeAny(mp1.mux(defaultValue(t1.pointType), Region.loadIRIntermediate(t1.pointType)(t1.endOffset(x)))),
p2.storeAny(mp2.mux(defaultValue(t2.pointType), Region.loadIRIntermediate(t2.pointType)(t2.endOffset(y)))))
}
override def compareNonnull(x: Code[T], y: Code[T]): Code[Int] = {
val mbcmp = mb.getCodeOrdering(t1.pointType, t2.pointType, CodeOrdering.Compare())
val cmp = mb.newLocal[Int]()
Code.memoize(x, "cord_int_comp_x", y, "cord_int_comp_y") { (x, y) =>
Code(loadStart(x, y),
cmp := mbcmp((mp1, p1), (mp2, p2)),
cmp.ceq(0).mux(
Code(mp1 := t1.includesStart(x),
mp1.cne(t2.includesStart(y)).mux(
mp1.mux(-1, 1),
Code(
loadEnd(x, y),
cmp := mbcmp((mp1, p1), (mp2, p2)),
cmp.ceq(0).mux(
Code(mp1 := t1.includesEnd(x),
mp1.cne(t2.includesEnd(y)).mux(mp1.mux(1, -1), 0)),
cmp)))),
cmp))
}
}
override def equivNonnull(x: Code[T], y: Code[T]): Code[Boolean] = {
val mbeq = mb.getCodeOrdering(t1.pointType, t2.pointType, CodeOrdering.Equiv())
Code.memoize(x, "cord_int_equiv_x", y, "cord_int_equiv_y") { (x, y) =>
Code(loadStart(x, y), mbeq((mp1, p1), (mp2, p2))) &&
t1.includesStart(x).ceq(t2.includesStart(y)) &&
Code(loadEnd(x, y), mbeq((mp1, p1), (mp2, p2))) &&
t1.includesEnd(x).ceq(t2.includesEnd(y))
}
}
override def ltNonnull(x: Code[T], y: Code[T]): Code[Boolean] = {
val mblt = mb.getCodeOrdering(t1.pointType, t2.pointType, CodeOrdering.Lt())
val mbeq = mb.getCodeOrdering(t1.pointType, t2.pointType, CodeOrdering.Equiv())
Code.memoize(x, "cord_int_lt_x", y, "cord_int_lt_y") { (x, y) =>
Code(loadStart(x, y), mblt((mp1, p1), (mp2, p2))) || (
mbeq((mp1, p1), (mp2, p2)) && (
Code(mp1 := t1.includesStart(x), mp2 := t2.includesStart(y), mp1 && !mp2) || (mp1.ceq(mp2) && (
Code(loadEnd(x, y), mblt((mp1, p1), (mp2, p2))) || (
mbeq((mp1, p1), (mp2, p2)) &&
!t1.includesEnd(x) && t2.includesEnd(y))))))
}
}
override def lteqNonnull(x: Code[T], y: Code[T]): Code[Boolean] = {
val mblteq = mb.getCodeOrdering(t1.pointType, t2.pointType, CodeOrdering.Lteq())
val mbeq = mb.getCodeOrdering(t1.pointType, t2.pointType, CodeOrdering.Equiv())
Code.memoize(x, "cord_int_lteq_x", y, "cord_int_lteq_y") { (x, y) =>
Code(loadStart(x, y), mblteq((mp1, p1), (mp2, p2))) && (
!mbeq((mp1, p1), (mp2, p2)) || (// if not equal, then lt
Code(mp1 := t1.includesStart(x), mp2 := t2.includesStart(y), mp1 && !mp2) || (mp1.ceq(mp2) && (
Code(loadEnd(x, y), mblteq((mp1, p1), (mp2, p2))) && (
!mbeq((mp1, p1), (mp2, p2)) ||
!t1.includesEnd(x) || t2.includesEnd(y))))))
}
}
override def gtNonnull(x: Code[T], y: Code[T]): Code[Boolean] = {
val mbgt = mb.getCodeOrdering(t1.pointType, t2.pointType, CodeOrdering.Gt())
val mbeq = mb.getCodeOrdering(t1.pointType, t2.pointType, CodeOrdering.Equiv())
Code.memoize(x, "cord_int_gt_x", y, "cord_int_gt_y") { (x, y) =>
Code(loadStart(x, y), mbgt((mp1, p1), (mp2, p2))) || (
mbeq((mp1, p1), (mp2, p2)) && (
Code(mp1 := t1.includesStart(x), mp2 := t2.includesStart(y), !mp1 && mp2) || (mp1.ceq(mp2) && (
Code(loadEnd(x, y), mbgt((mp1, p1), (mp2, p2))) || (
mbeq((mp1, p1), (mp2, p2)) &&
t1.includesEnd(x) && !t2.includesEnd(y))))))
}
}
override def gteqNonnull(x: Code[T], y: Code[T]): Code[Boolean] = {
val mbgteq = mb.getCodeOrdering(t1.pointType, t2.pointType, CodeOrdering.Gteq())
val mbeq = mb.getCodeOrdering(t1.pointType, t2.pointType, CodeOrdering.Equiv())
Code.memoize(x, "cord_int_gteq_x", y, "cord_int_gteq_y") { (x, y) =>
Code(loadStart(x, y), mbgteq((mp1, p1), (mp2, p2))) && (
          !mbeq((mp1, p1), (mp2, p2)) || (// if not equal, then gt
Code(mp1 := t1.includesStart(x), mp2 := t2.includesStart(y), !mp1 && mp2) || (mp1.ceq(mp2) && (
Code(loadEnd(x, y), mbgteq((mp1, p1), (mp2, p2))) && (
!mbeq((mp1, p1), (mp2, p2)) ||
t1.includesEnd(x) || !t2.includesEnd(y))))))
}
}
}
def mapOrdering(t1: PDict, t2: PDict, mb: EmitMethodBuilder[_]): CodeOrdering =
iterableOrdering(PCanonicalArray(t1.elementType, t1.required), PCanonicalArray(t2.elementType, t2.required), mb)
def setOrdering(t1: PSet, t2: PSet, mb: EmitMethodBuilder[_]): CodeOrdering =
iterableOrdering(PCanonicalArray(t1.elementType, t1.required), PCanonicalArray(t2.elementType, t2.required), mb)
}
abstract class CodeOrdering {
outer =>
type T
type P = (Code[Boolean], Code[T])
def compareNonnull(x: Code[T], y: Code[T]): Code[Int]
def ltNonnull(x: Code[T], y: Code[T]): Code[Boolean]
def lteqNonnull(x: Code[T], y: Code[T]): Code[Boolean]
def gtNonnull(x: Code[T], y: Code[T]): Code[Boolean]
def gteqNonnull(x: Code[T], y: Code[T]): Code[Boolean]
def equivNonnull(x: Code[T], y: Code[T]): Code[Boolean]
def compare(x: P, y: P, missingEqual: Boolean = true): Code[Int] = (x, y) match { case ((xm, xv), (ym, yv)) =>
Code.memoize(xm, "cord_compare_xm", ym, "cord_compare_ym") { (xm, ym) =>
xm.mux(ym.mux(if (missingEqual) 0 else -1, 1),
ym.mux(-1, compareNonnull(xv, yv)))
}
}
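  // Missing-value lifting sketch for compare above: two missing operands compare as 0 when
  // missingEqual is true (-1 otherwise); a missing x against a present y yields 1, and a
  // present x against a missing y yields -1, so missing values sort after present ones.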
def lt(x: P, y: P, missingEqual: Boolean): Code[Boolean] = (x, y) match { case ((xm, xv), (ym, yv)) =>
val nonnull = ltNonnull(xv, yv)
if (missingEqual)
!xm && (ym || nonnull)
else
ym || (!xm && nonnull)
}
def lteq(x: P, y: P, missingEqual: Boolean): Code[Boolean] = (x, y) match { case ((xm, xv), (ym, yv)) =>
ym || (!xm && lteqNonnull(xv, yv))
}
def gt(x: P, y: P, missingEqual: Boolean): Code[Boolean] = (x, y) match { case ((xm, xv), (ym, yv)) =>
!ym && (xm || gtNonnull(xv, yv))
}
def gteq(x: P, y: P, missingEqual: Boolean): Code[Boolean] = (x, y) match { case ((xm, xv), (ym, yv)) =>
val nonnull = gteqNonnull(xv, yv)
if (missingEqual)
xm || (!ym && nonnull)
else
!ym && (xm || nonnull)
}
def equiv(x: P, y: P, missingEqual: Boolean): Code[Boolean] = (x, y) match { case ((xm, xv), (ym, yv)) =>
val nonnull = equivNonnull(xv, yv)
if (missingEqual)
Code.memoize(xm, "cord_lift_missing_xm", ym, "cord_lift_missing_ym") { (xm, ym) =>
(xm && ym) || (!xm && !ym && nonnull)
}
else
!xm && !ym && nonnull
}
// reverses the sense of the non-null comparison only
def reverse: CodeOrdering = new CodeOrdering () {
override def reverse: CodeOrdering = CodeOrdering.this
override type T = CodeOrdering.this.T
override type P = CodeOrdering.this.P
override def compareNonnull(x: Code[T], y: Code[T]) = CodeOrdering.this.compareNonnull(y, x)
override def ltNonnull(x: Code[T], y: Code[T]) = CodeOrdering.this.ltNonnull(y, x)
override def lteqNonnull(x: Code[T], y: Code[T]) = CodeOrdering.this.lteqNonnull(y, x)
override def gtNonnull(x: Code[T], y: Code[T]) = CodeOrdering.this.gtNonnull(y, x)
override def gteqNonnull(x: Code[T], y: Code[T]) = CodeOrdering.this.gteqNonnull(y, x)
override def equivNonnull(x: Code[T], y: Code[T]) = CodeOrdering.this.equivNonnull(y, x)
}
}
abstract class CodeOrderingCompareConsistentWithOthers extends CodeOrdering {
def ltNonnull(x: Code[T], y: Code[T]): Code[Boolean] = compareNonnull(x, y) < 0
def lteqNonnull(x: Code[T], y: Code[T]): Code[Boolean] = compareNonnull(x, y) <= 0
def gtNonnull(x: Code[T], y: Code[T]): Code[Boolean] = compareNonnull(x, y) > 0
def gteqNonnull(x: Code[T], y: Code[T]): Code[Boolean] = compareNonnull(x, y) >= 0
def equivNonnull(x: Code[T], y: Code[T]): Code[Boolean] = compareNonnull(x, y).ceq(0)
}
| cseed/hail | hail/src/main/scala/is/hail/annotations/CodeOrdering.scala | Scala | mit | 19,096 |
import sbt._
import Keys._
import org.ensime.EnsimePlugin
object FPInScalaBuild extends Build {
val opts = Project.defaultSettings ++ Seq(
scalaVersion := "2.10.4",
resolvers += "Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/",
libraryDependencies += "org.scalatest" % "scalatest_2.10" % "2.2.0" % "test",
libraryDependencies += "junit" % "junit" % "4.10" % "test",
libraryDependencies += "org.scalacheck" %% "scalacheck" % "1.11.3" % "test"
) ++ EnsimePlugin.projectSettings
lazy val root =
Project(id = "fpinscala",
base = file("."),
settings = opts ++ Seq(
onLoadMessage ~= (_ + nio2check())
)) aggregate (chapterCode, exercises, answers)
lazy val chapterCode =
Project(id = "chapter-code",
base = file("chaptercode"),
settings = opts)
lazy val exercises =
Project(id = "exercises",
base = file("exercises"),
settings = opts)
lazy val answers =
Project(id = "answers",
base = file("answers"),
settings = opts)
def nio2check(): String = {
val cls = "java.nio.channels.AsynchronousFileChannel"
try {Class.forName(cls); ""}
catch {case _: ClassNotFoundException =>
("\\nWARNING: JSR-203 \\"NIO.2\\" (" + cls + ") not found.\\n" +
"You are probably running Java < 1.7; answers will not compile.\\n" +
"You seem to be running " + System.getProperty("java.version") + ".\\n" +
"Try `project exercises' before compile, or upgrading your JDK.")
}
}
}
| RawToast/fpinscala | project/Build.scala | Scala | mit | 1,583 |
package org.scalafmt.stats
case class OsInfo(name: String, architecture: String, version: String)
object OsInfo {
def apply(): OsInfo =
OsInfo(
sys.props("os.name"),
sys.props("os.arch"),
sys.props("os.version")
)
}
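// Minimal usage sketch (property values depend on the running JVM; the ones shown are illustrative):
//   OsInfo()  // e.g. OsInfo("Linux", "amd64", "5.15.0")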
| Daxten/scalafmt | core/src/test/scala/org/scalafmt/stats/OsInfo.scala | Scala | apache-2.0 | 248 |
package org.scalaide.core.internal.project.scopes
import scala.tools.nsc.Settings
import org.eclipse.core.resources.IContainer
import org.eclipse.core.resources.IFile
import org.eclipse.core.resources.IMarker
import org.eclipse.core.resources.IResource
import org.eclipse.core.runtime.IProgressMonitor
import org.eclipse.core.runtime.SubMonitor
import org.eclipse.jdt.core.IJavaModelMarker
import org.scalaide.core.IScalaProject
import org.scalaide.core.SdtConstants
import org.scalaide.core.internal.builder.BuildProblemMarker
import org.scalaide.core.internal.builder.EclipseBuildManager
import org.scalaide.core.internal.builder.zinc.EclipseSbtBuildManager
import org.scalaide.core.internal.project.CompileScope
import sbt.inc.Analysis
import sbt.inc.IncOptions
import java.io.File
/**
 * Manages compilation of sources for a given scope.
* @see CompileScope scopes
*/
class BuildScopeUnit(val scope: CompileScope, val owningProject: IScalaProject, settings: Settings,
val dependentUnitInstances: Seq[BuildScopeUnit] = Seq.empty)
extends EclipseBuildManager {
private val delegate =
new EclipseSbtBuildManager(owningProject, settings, Some(owningProject.underlying.getFile(".cache-" + scope.name)),
addThemToClasspath, srcOutputs)
private val scopeFilesToCompile = ScopeFilesToCompile(toCompile, owningProject)
private def managesSrcFolder(src: IContainer) = scope.isValidSourcePath(src.getProjectRelativePath)
private def addThemToClasspath = owningProject.sourceOutputFolders.collect {
case (src, out) if !managesSrcFolder(src) => out.getLocation
}
private def srcOutputs = owningProject.sourceOutputFolders.collect {
case entry @ (src, out) if managesSrcFolder(src) => entry
}
def sources: Seq[IContainer] = srcOutputs.unzip._1
override def clean(implicit monitor: IProgressMonitor): Unit = delegate.clean
override def build(addedOrUpdated: Set[IFile], removed: Set[IFile], monitor: SubMonitor): Unit = {
hasInternalErrors = if (areDependedUnitsBuilt) {
def javaHasErrors: Boolean = {
val SeverityNotSet = -1
owningProject.underlying.findMarkers(IJavaModelMarker.JAVA_MODEL_PROBLEM_MARKER, true, IResource.DEPTH_INFINITE).exists { marker =>
val severity = marker.getAttribute(IMarker.SEVERITY, SeverityNotSet)
severity == IMarker.SEVERITY_ERROR && scope.isValidSourcePath(marker.getResource.getLocation)
}
}
delegate.build(scopeFilesToCompile(addedOrUpdated), toCompile(removed), monitor)
delegate.hasErrors || javaHasErrors
} else {
true
}
}
private def areDependedUnitsBuilt = {
val wrongScopes = dependentUnitInstances filter { _.hasErrors } map { _.scope }
if (wrongScopes.nonEmpty) {
BuildProblemMarker.create(owningProject.underlying,
s"${owningProject.underlying.getName}'s ${scope.name} not built due to errors in dependent scope(s) ${wrongScopes.map(_.name).toSet.mkString(", ")}")
false
} else true
}
private def toCompile(sources: Set[IFile]) = (for {
(src, _) <- srcOutputs
source <- sources if src.getProjectRelativePath.isPrefixOf(source.getProjectRelativePath)
} yield source).toSet
override def canTrackDependencies: Boolean = delegate.canTrackDependencies
override def invalidateAfterLoad: Boolean = delegate.invalidateAfterLoad
override def latestAnalysis(incOptions: => IncOptions): Analysis =
delegate.latestAnalysis(incOptions)
override def buildManagerOf(outputFile: File): Option[EclipseBuildManager] =
owningProject.sourceOutputFolders collectFirst {
case (sourceFolder, outputFolder) if outputFolder.getLocation.toFile == outputFile &&
scope.isValidSourcePath(sourceFolder.getProjectRelativePath) => this
}
}
private case class ScopeFilesToCompile(toCompile: Set[IFile] => Set[IFile], owningProject: IScalaProject) {
private var run: Set[IFile] => Set[IFile] = once
private def once(sources: Set[IFile]): Set[IFile] = {
run = forever
toCompile(owningProject.allSourceFiles)
}
private def forever(sources: Set[IFile]): Set[IFile] = toCompile(sources) ++ resetJavaMarkers(getValidJavaSourcesOfThisScope)
def apply(sources: Set[IFile]): Set[IFile] = run(sources)
private def getValidJavaSourcesOfThisScope: Set[IFile] = {
val Dot = 1
toCompile(owningProject.allSourceFiles
.filter { _.getLocation.getFileExtension == SdtConstants.JavaFileExtn.drop(Dot) })
}
private def resetJavaMarkers(javaFiles: Set[IFile]): Set[IFile] = {
javaFiles.foreach { _.deleteMarkers(IJavaModelMarker.JAVA_MODEL_PROBLEM_MARKER, true, IResource.DEPTH_INFINITE) }
javaFiles
}
} | Kwestor/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/core/internal/project/scopes/BuildScopeUnit.scala | Scala | bsd-3-clause | 4,660 |
package com.spotify.hype
trait HFn[T] extends Serializable {
/**
* The function that will run in docker
*/
def run: T
/**
   * Override this to choose the docker image the function is about to be submitted to.
*/
def image: String = HFn.defaultImage
}
object HFn {
val defaultImage = "spotify/hype:1"
def apply[T](f: => T) = new HFn[T] {
def run: T = f
}
def withImage[T](img: String)(f: => T) = new HFn[T] {
def run: T = f
override def image: String = img
}
}
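// Minimal usage sketch (body and image tag are illustrative; container submission not shown):
//   val fn = HFn.withImage("spotify/hype:1") { 40 + 2 }
//   fn.run    // 42
//   fn.image  // "spotify/hype:1"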
| spotify/hype | hype-submitter_2.11/src/main/scala/com/spotify/hype/HFn.scala | Scala | apache-2.0 | 521 |
import Dependencies._
import FiloSettings._
import com.typesafe.sbt.SbtMultiJvm.MultiJvmKeys.MultiJvm
import pl.project13.scala.sbt.JmhPlugin
import sbt._
import sbt.Keys._
// All of the submodules are defined here.
// This works around an issue where things in multiple build.sbt files cannot reference one another.
// This way references can be shared.
object Submodules {
lazy val memory = (project in file("memory"))
.settings(
commonSettings,
assemblySettings,
name := "filodb-memory",
scalacOptions += "-language:postfixOps",
libraryDependencies ++= memoryDeps
)
lazy val core = (project in file("core"))
.dependsOn(memory % "compile->compile; test->test")
.settings(
commonSettings,
name := "filodb-core",
scalacOptions += "-language:postfixOps",
libraryDependencies ++= coreDeps
)
lazy val coordinator = (project in file("coordinator"))
.dependsOn(core % "compile->compile; test->test")
.dependsOn(query % "compile->compile; test->test")
.dependsOn(prometheus % "compile->compile; test->test")
.configs(MultiJvm)
.settings(
commonSettings,
multiJvmSettings,
testMultiJvmToo,
name := "filodb-coordinator",
libraryDependencies ++= coordDeps,
libraryDependencies +=
"com.typesafe.akka" %% "akka-contrib" % akkaVersion exclude(
"com.typesafe.akka", s"akka-persistence-experimental_${scalaBinaryVersion.value}")
)
lazy val prometheus = (project in file("prometheus"))
.dependsOn(core % "compile->compile; test->test")
.dependsOn(query % "compile->compile; test->test")
.settings(
commonSettings,
name := "filodb-prometheus",
libraryDependencies ++= promDeps
)
lazy val query = (project in file("query"))
.dependsOn(core % "compile->compile; test->test")
.settings(
libraryDependencies ++= queryDeps,
commonSettings,
scalacOptions += "-language:postfixOps",
name := "filodb-query"
)
lazy val cassandra = (project in file("cassandra"))
.dependsOn(core % "compile->compile; test->test", coordinator)
.settings(
commonSettings,
name := "filodb-cassandra",
libraryDependencies ++= cassDeps
)
lazy val cli = (project in file("cli"))
.dependsOn(prometheus % "compile->compile; test->test")
.dependsOn(core, coordinator % "test->test", cassandra)
.settings(
commonSettings,
name := "filodb-cli",
libraryDependencies ++= cliDeps,
cliAssemblySettings
)
lazy val kafka = (project in file("kafka"))
.dependsOn(
core % "compile->compile; it->test",
coordinator % "compile->compile; test->test"
)
.configs(IntegrationTest, MultiJvm)
.settings(
name := "filodb-kafka",
commonSettings,
kafkaSettings,
itSettings,
assemblySettings,
libraryDependencies ++= kafkaDeps
)
lazy val sparkJobs = (project in file("spark-jobs"))
.dependsOn(cassandra, core % "compile->compile; test->test")
.settings(
commonSettings,
name := "spark-jobs",
fork in Test := true,
baseDirectory in Test := file("."), // since we have a config using FiloDB project root as relative path
assemblySettings,
scalacOptions += "-language:postfixOps",
libraryDependencies ++= sparkJobsDeps
)
lazy val bootstrapper = (project in file("akka-bootstrapper"))
.configs(MultiJvm)
.settings(
commonSettings,
multiJvmMaybeSettings,
name := "akka-bootstrapper",
libraryDependencies ++= bootstrapperDeps
)
lazy val http = (project in file("http"))
.dependsOn(core, coordinator % "compile->compile; test->test")
.settings(
commonSettings,
name := "http",
libraryDependencies ++= httpDeps
)
lazy val standalone = (project in file("standalone"))
.dependsOn(core, prometheus % "test->test", coordinator % "compile->compile; test->test",
cassandra, kafka, http, bootstrapper, gateway % Test)
.configs(MultiJvm)
.settings(
commonSettings,
multiJvmMaybeSettings,
assemblySettings,
libraryDependencies ++= standaloneDeps
)
// standalone does not depend on spark-jobs, but the idea is to simplify packaging and versioning
// lazy val spark = (project in file("spark"))
// .dependsOn(core % "compile->compile; test->test; it->test",
// coordinator % "compile->compile; test->test",
// cassandra % "compile->compile; test->test; it->test")
// .configs( IntegrationTest )
// .settings(
// name := "filodb-spark",
// commonSettings,
// libraryDependencies ++= sparkDeps,
// itSettings,
// jvmPerTestSettings,
// assemblyExcludeScala,
// // Disable tests for now since lots of work remaining to enable Spark
// test := {}
// )
lazy val jmh = (project in file("jmh"))
.enablePlugins(JmhPlugin)
.dependsOn(core % "compile->compile; compile->test", gateway, standalone)
.settings(
commonSettings,
name := "filodb-jmh",
libraryDependencies ++= jmhDeps,
publish := {}
)
// lazy val stress = (project in file("stress"))
// .dependsOn(spark)
// .settings(
// commonSettings,
// name := "filodb-stress",
// libraryDependencies ++= stressDeps,
// assemblyExcludeScala
// )
lazy val gateway = (project in file("gateway"))
.dependsOn(coordinator % "compile->compile; test->test", prometheus, cassandra)
.settings(
commonSettings,
name := "filodb-gateway",
libraryDependencies ++= gatewayDeps,
gatewayAssemblySettings
)
}
| tuplejump/FiloDB | project/FiloBuild.scala | Scala | apache-2.0 | 5,722 |
package controllers
import models.User
import util._
import util.security._
object Permissions {
val LoggedIn = SecuritySpec.fromConfig("controllers.Api", SecuritySpec(true))
def AdminSpec = SecuritySpec(true, AuthenticationProviderConfig.adminGroup)
def please(user: User, spec: SecuritySpecification): Boolean = {
AuthenticationProvider.userIsAuthorized(user, spec)
}
class PermSpec(val klass: String) {
def spec(method: String, default: SecuritySpecification): SecuritySpecification = {
val fmt = "%s.%s".format(klass, method)
SecuritySpec.fromConfig(fmt, default)
}
def spec(default: SecuritySpecification): SecuritySpecification = {
SecuritySpec.fromConfig(klass, default)
}
}
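  // Resolution sketch: Admin.Stats below looks up the config key "controllers.Admin.stats",
  // with Admin.Spec (itself resolved from "controllers.Admin" with AdminSpec as default) as its default.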
object Feature extends PermSpec("feature") {
def CanSeePasswords = spec("canSeePasswords", AdminSpec)
def NoRateLimit = spec("noRateLimit", AdminSpec)
}
object Admin extends PermSpec("controllers.Admin") {
def Spec = spec(AdminSpec)
def Stats = spec("stats", Spec)
def ClearCache = spec("clearCache", Stats)
}
object AssetApi extends PermSpec("controllers.AssetApi") {
def Spec = spec(LoggedIn)
def CreateAsset = spec("createAsset", AdminSpec)
def DeleteAsset = spec("deleteAsset", AdminSpec)
def DeleteAssetAttribute = spec("deleteAssetAttribute", AdminSpec)
def GetAsset = spec("getAsset", Spec)
def GetAssets = spec("getAssets", Spec)
def UpdateAsset = spec("updateAsset", AdminSpec)
def UpdateAssetForMaintenance = spec("updateAssetForMaintenance", AdminSpec)
def UpdateAssetStatus = spec("updateAssetStatus", AdminSpec)
}
object AssetLogApi extends PermSpec("controllers.AssetLogApi") {
def Spec = spec(AdminSpec)
def Create = spec("submitLogData", Spec)
def Get = spec("getLogData", LoggedIn)
def GetAll = spec("getAllLogData", Spec)
}
object AssetManagementApi extends PermSpec("controllers.AssetManagementApi") {
def Spec = spec(LoggedIn)
def PowerStatus = spec("powerStatus", Spec)
def PowerManagement = spec("powerManagement", AdminSpec)
def ProvisionAsset = spec("provisionAsset", AdminSpec)
def GetProvisioningProfiles = spec("getProvisioningProfiles", AdminSpec)
}
object AssetStateApi extends PermSpec("controllers.AssetStateApi") {
def Spec = spec(AdminSpec)
def Create = spec("createState", Spec)
def Delete = spec("deleteState", Spec)
def Get = spec("getState", LoggedIn)
def Update = spec("updateState", Spec)
}
object AssetWebApi extends PermSpec("controllers.AssetWebApi") {
def Spec = spec(AdminSpec)
def CancelAsset = spec("cancelAsset", Spec)
def DeleteAsset = spec("deleteAsset", Spec)
}
object Help extends PermSpec("controllers.Help") {
def Spec = spec(LoggedIn)
def Index = spec("index", Spec)
}
object IpmiApi extends PermSpec("controllers.IpmiApi") {
def Spec = spec(AdminSpec)
def UpdateIpmi = spec("updateIpmi", Spec)
}
object IpAddressApi extends PermSpec("controllers.IpAddressApi") {
def Spec = spec(AdminSpec)
def AllocateAddress = spec("allocateAddress", Spec)
def AssetFromAddress = spec("assetFromAddress", LoggedIn)
def AssetsFromPool = spec("assetsFromPool", LoggedIn)
def GetForAsset = spec("getForAsset", LoggedIn)
def GetAddressPools = spec("getAddressPools", LoggedIn)
def UpdateAddress = spec("updateAddress", Spec)
def PurgeAddresses = spec("purgeAddresses", Spec)
}
object Resources extends PermSpec("controllers.Resources") {
def Spec = spec(LoggedIn)
def CreateAsset = spec("createAsset", AdminSpec)
def CreateForm = spec("displayCreateForm", AdminSpec)
def Find = spec("find", Spec)
def Index = spec("index", Spec)
def Intake = spec("intake", AdminSpec)
}
object TagApi extends PermSpec("controllers.TagApi") {
def Spec = spec(LoggedIn)
def GetTags = spec("getTags", Spec)
def GetTagValues = spec("getTagValues", AdminSpec)
def UpdateTag = spec("updateTag", AdminSpec)
}
object HierarchyApi extends PermSpec("controllers.HierarchyApi") {
def Spec = spec(LoggedIn)
def UpdateHierarchy = spec("updateHierarchy", AdminSpec)
}
}
| Shopify/collins | app/controllers/Permissions.scala | Scala | apache-2.0 | 4,200 |
package com.googlecode.kanbanik.commands
import com.googlecode.kanbanik.model.{User, Board, Project}
import com.googlecode.kanbanik.security._
import org.bson.types.ObjectId
import com.googlecode.kanbanik.messages.ServerMessages
import com.googlecode.kanbanik.db.HasEntityLoader
import com.googlecode.kanbanik.dtos.{PermissionType, ProjectWithBoardDto, ErrorDto, ProjectDto}
abstract class BaseProjectsOnBoardCommand
extends Command[ProjectWithBoardDto, ProjectWithBoardDto] with HasEntityLoader {
override def execute(params: ProjectWithBoardDto, user: User): Either[ProjectWithBoardDto, ErrorDto] = {
val board = loadBoard(new ObjectId(params.boardId), includeTasks = false)
if (!board.isDefined) {
return Right(ErrorDto(ServerMessages.entityDeletedMessage("board " + params.boardId)))
}
executeOne(params.project, board.get, user)
}
def executeSpecific(board: Board, project: Project, user: User): Either[ProjectWithBoardDto, ErrorDto]
def executeOne(project: ProjectDto, board: Board, user: User): Either[ProjectWithBoardDto, ErrorDto] = {
try {
executeSpecific(board, Project.byId(new ObjectId(project.id.orNull), User().withAllPermissions()).copy(version = project.version), user)
} catch {
case e: IllegalArgumentException =>
Right(ErrorDto(ServerMessages.entityDeletedMessage("project")))
}
}
override def checkPermissions(param: ProjectWithBoardDto, user: User) = checkEditBoardPermissions(user, Some(param.boardId))
override def filter(toReturn: ProjectWithBoardDto, user: User): Boolean =
canRead(user, PermissionType.ReadProject, toReturn.project.id.getOrElse(""))
}
| kanbanik/kanbanik | kanbanik-server/src/main/scala/com/googlecode/kanbanik/commands/BaseProjectsOnBoardCommand.scala | Scala | apache-2.0 | 1,664 |
package models
import org.joda.time.DateTime
/**
 * A point in time rendered in a stardate-like "year.dayOfYear" format.
 *
 * @param instant the underlying Joda-Time instant
*/
case class Stardate(instant: DateTime) {
override def toString(): String = instant.toString("yyyy.D")
def day = instant.getDayOfMonth
def month = instant.monthOfYear().getAsShortText
def year = instant.year().get()
}
object Stardate {
def now = Stardate(DateTime.now())
/**
   * Parses a date string into a Stardate.
   *
   * @param yearMonthDay a date string parseable by Joda-Time's DateTime.parse, e.g. "2016-03-15"
   * @return the Stardate for that date
*/
def at(yearMonthDay: String) = {
Stardate( DateTime.parse(yearMonthDay) )
}
}
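// Minimal usage sketch (date is illustrative):
//   Stardate.at("2016-03-15").toString  // "2016.75" (the year, then the day of the year)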
| michaelwestphal/stardatenow | app/models/Stardate.scala | Scala | mit | 496 |
package models.sunerp
import models.core.{AbstractQuery, AbstractTable, WithId}
import play.api.db.slick.Config.driver.simple._
import scala.slick.lifted.Tag
import play.api.libs.json.Json
import play.api.data.Form
import play.api.data.Forms._
import play.api.data.format.Formats._
import dtos.{ExtGirdDto, PagingDto}
/**
* The Class Company.
*
* @author Nguyen Duc Dung
* @since 3/4/14 9:09 AM
*
*/
case class Company(
id: Option[Long] = None,
name: String,
address: String,
phone: String,
email: String,
mst: String
) extends WithId[Long]
class Companies(tag: Tag) extends AbstractTable[Company](tag, "company") {
def name = column[String]("name", O.NotNull)
def address = column[String]("address", O.NotNull)
def phone = column[String]("phone", O.NotNull)
def email = column[String]("email", O.NotNull)
def mst = column[String]("mst", O.NotNull)
override def * = (id.?, name, address, phone, email, mst) <>(Company.tupled, Company.unapply)
}
object Companies extends AbstractQuery[Company, Companies](new Companies(_)) {
implicit val companyJsonFormat = Json.format[Company]
def editFrom = Form(
mapping(
"id" -> optional(of[Long]),
"name" -> text(minLength = 4),
"address" -> text(minLength = 4),
"phone" -> text(minLength = 4),
"email" -> email,
"mst" -> text(minLength = 4)
)(Company.apply)(Company.unapply)
)
def load(pagingDto: PagingDto)(implicit session: Session): ExtGirdDto[Company] = {
var query = for (row <- this) yield row
pagingDto.filters.foreach(filter => {
query = query.where(table => {
filter.property match {
case "name" => table.name.toLowerCase like filter.asLikeValue
case _ => throw new Exception("Invalid filtering key: " + filter.property)
}
})
})
pagingDto.sorts.foreach(sort => {
query = query.sortBy(table => {
sort.property match {
case "name" => orderColumn(sort.direction, table.name)
case "address" => orderColumn(sort.direction, table.address)
case "phone" => orderColumn(sort.direction, table.phone)
case "email" => orderColumn(sort.direction, table.email)
case "mst" => orderColumn(sort.direction, table.mst)
case _ => throw new Exception("Invalid sorting key: " + sort.property)
}
})
})
val totalRow = Query(query.length).first()
val rows = query
.drop(pagingDto.start)
.take(pagingDto.limit)
.list
ExtGirdDto[Company](
total = totalRow,
data = rows
)
}
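  // Query-building sketch for load above (the PagingDto shape is inferred from its usage here):
  //   a PagingDto with a single "name" filter, a "name" sort, start = 0 and limit = 25 yields,
  //   conceptually, SELECT ... FROM company WHERE lower(name) LIKE <filter> ORDER BY name
  //   LIMIT 25 OFFSET 0, wrapped in an ExtGirdDto together with the unpaged row count.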
} | SunriseSoftVN/sunerp | app/models/sunerp/Company.scala | Scala | apache-2.0 | 2,727 |
// Copyright (c) 2016 PSForever.net to present
package net.psforever.packet.game.objectcreate
import scodec.{Attempt, Codec, Err}
import scodec.codecs._
import scala.annotation.switch
/**
  * A mapping between object class codes and the names of the objects they represent.<br>
  * <br>
  * Object class codes are numbers between 0 and (probably) 2047, always translating into an 11-bit value.
* They are recorded as little-endian hexadecimal values here.
* In `scodec` terms, that's a `uintL(11)` or `uintL(0xB)`.
*/
object ObjectClass {
//character
final val avatar = 0x79 // 121
//ammunition
final val bullet_105mm = 0
final val bullet_12mm = 3
final val bullet_150mm = 6
final val bullet_15mm = 9
final val bullet_20mm = 16
final val bullet_25mm = 19
final val bullet_35mm = 21
final val bullet_75mm = 25
final val bullet_9mm = 28
final val bullet_9mm_AP = 29
final val ancient_ammo_combo = 50
final val ancient_ammo_vehicle = 51
final val anniversary_ammo = 54
final val aphelion_immolation_cannon_ammo = 86
final val aphelion_laser_ammo = 89
final val aphelion_plasma_rocket_ammo = 97
final val aphelion_ppa_ammo = 101
final val aphelion_starfire_ammo = 106
final val armor_canister = 111
final val armor_siphon_ammo = 112
final val bolt = 145
final val burster_ammo = 154
final val colossus_100mm_cannon_ammo = 180
final val colossus_burster_ammo = 186
final val colossus_chaingun_ammo = 191
final val colossus_cluster_bomb_ammo = 195
final val colossus_tank_cannon_ammo = 205
final val comet_ammo = 209
final val dualcycler_ammo = 265
final val energy_cell = 272
final val energy_gun_ammo = 275
final val falcon_ammo = 285
final val firebird_missile = 287
final val flamethrower_ammo = 300
final val flux_cannon_thresher_battery = 307
final val fluxpod_ammo = 310
final val frag_cartridge = 327
final val frag_grenade_ammo = 331
final val gauss_cannon_ammo = 347
final val grenade = 370
final val health_canister = 389
final val heavy_grenade_mortar = 391
final val heavy_rail_beam_battery = 393
final val hellfire_ammo = 399
final val hunter_seeker_missile = 403
final val jammer_cartridge = 413
final val jammer_grenade_ammo = 417
final val lancer_cartridge = 426
final val liberator_bomb = 434
final val maelstrom_ammo = 463
final val melee_ammo = 540
final val mine = 550
final val mine_sweeper_ammo = 553
final val ntu_siphon_ammo = 595
final val oicw_ammo = 600
final val pellet_gun_ammo = 630
final val peregrine_dual_machine_gun_ammo = 637
final val peregrine_mechhammer_ammo = 645
final val peregrine_particle_cannon_ammo = 653
final val peregrine_rocket_pod_ammo = 656
final val peregrine_sparrow_ammo = 659
final val phalanx_ammo = 664
final val phoenix_missile = 674
final val plasma_cartridge = 677
final val plasma_grenade_ammo = 681
final val pounder_ammo = 693
final val pulse_battery = 704
final val quasar_ammo = 712
final val reaver_rocket = 722
final val rocket = 734
final val scattercannon_ammo = 745
final val shotgun_shell = 755
final val shotgun_shell_AP = 756
final val six_shooter_ammo = 762
final val skyguard_flak_cannon_ammo = 786
final val sparrow_ammo = 791
final val spitfire_aa_ammo = 820
final val spitfire_ammo = 823
final val starfire_ammo = 830
final val striker_missile_ammo = 839
final val trek_ammo = 877
final val upgrade_canister = 922
final val wasp_gun_ammo = 998
final val wasp_rocket_ammo = 1000
final val winchester_ammo = 1004
//weapons
final val chaingun_12mm = 2
final val chaingun_15mm = 8
final val cannon_20mm = 12
final val cannon_deliverer_20mm = 13
final val cannon_dropship_20mm = 14
final val cannon_dropship_l_20mm = 15
final val cannon_75mm = 23
final val lightning_75mm = 24
final val ace = 32
final val ace_deployable = 33
final val advanced_ace = 39
final val advanced_missile_launcher_t = 40
final val anniversary_gun = 55
final val anniversary_guna = 56
final val anniversary_gunb = 57
final val apc_ballgun_l = 63
final val apc_ballgun_r = 64
final val apc_weapon_systema = 69
final val apc_weapon_systemb = 70
final val apc_weapon_systemc = 71
final val apc_weapon_systemc_nc = 72
final val apc_weapon_systemc_tr = 73
final val apc_weapon_systemc_vs = 74
final val apc_weapon_systemd = 75
final val apc_weapon_systemd_nc = 76
final val apc_weapon_systemd_tr = 77
final val apc_weapon_systemd_vs = 78
final val aphelion_immolation_cannon = 85
final val aphelion_laser = 88
final val aphelion_laser_left = 90
final val aphelion_laser_right = 92
final val aphelion_plasma_rocket_pod = 98
final val aphelion_ppa = 100
final val aphelion_ppa_left = 102
final val aphelion_ppa_right = 104
final val aphelion_starfire = 105
final val aphelion_starfire_left = 107
final val aphelion_starfire_right = 109
final val aurora_weapon_systema = 119
final val aurora_weapon_systemb = 120
final val battlewagon_weapon_systema = 136
final val battlewagon_weapon_systemb = 137
final val battlewagon_weapon_systemc = 138
final val battlewagon_weapon_systemd = 139
final val beamer = 140
final val bolt_driver = 146
final val chainblade = 175
final val chaingun_p = 177
final val colossus_burster = 185
final val colossus_burster_left = 187
final val colossus_burster_right = 189
final val colossus_chaingun = 190
final val colossus_chaingun_left = 192
final val colossus_chaingun_right = 194
final val colossus_cluster_bomb_pod = 196
final val colossus_dual_100mm_cannons = 198
final val colossus_tank_cannon = 204
final val colossus_tank_cannon_left = 206
final val colossus_tank_cannon_right = 208
final val cycler = 233
final val cycler_v2 = 234
final val cycler_v3 = 235
final val cycler_v4 = 236
final val dropship_rear_turret = 262
final val energy_gun = 274
final val energy_gun_nc = 276
final val energy_gun_tr = 278
final val energy_gun_vs = 280
final val flail_weapon = 298
final val flamethrower = 299
final val flechette = 304
final val flux_cannon_thresher = 306
final val fluxpod = 309
final val forceblade = 324
final val fragmentation_grenade = 334
final val fury_weapon_systema = 336
final val galaxy_gunship_cannon = 339
final val galaxy_gunship_gun = 340
final val galaxy_gunship_tailgun = 342
final val gauss = 345
final val gauss_cannon = 346
final val grenade_launcher_marauder = 371
final val heavy_rail_beam_magrider = 394
final val heavy_sniper = 396
final val hellfire = 398
final val hunterseeker = 406
final val ilc9 = 407
final val isp = 411
final val katana = 421
final val lancer = 425
final val lasher = 429
final val liberator_25mm_cannon = 433
final val liberator_bomb_bay = 435
final val liberator_weapon_system = 440
final val lightgunship_weapon_system = 445
final val lightning_weapon_system = 448
final val maelstrom = 462
final val magcutter = 468
final val mediumtransport_weapon_systemA = 534
final val mediumtransport_weapon_systemB = 535
final val mini_chaingun = 556
final val oicw = 599
final val particle_beam_magrider = 628
final val pellet_gun = 629
final val peregrine_dual_machine_gun = 636
final val peregrine_dual_machine_gun_left = 638
final val peregrine_dual_machine_gun_right = 640
final val peregrine_dual_rocket_pods = 641
final val peregrine_mechhammer = 644
final val peregrine_mechhammer_left = 646
final val peregrine_mechhammer_right = 648
final val peregrine_particle_cannon = 652
final val peregrine_sparrow = 658
final val peregrine_sparrow_left = 660
final val peregrine_sparrow_right = 662
final val phalanx_avcombo = 666
final val phalanx_flakcombo = 668
final val phalanx_sgl_hevgatcan = 670
final val phantasm_12mm_machinegun = 672
final val phoenix = 673
final val prowler_weapon_systemA = 699
final val prowler_weapon_systemB = 700
final val pulsar = 701
final val pulsed_particle_accelerator = 705
final val punisher = 706
final val quadassault_weapon_system = 709
final val r_shotgun = 714
final val radiator = 716
final val repeater = 730
final val rocklet = 737
final val rotarychaingun_mosquito = 740
final val router_telepad = 743
final val scythe = 747
final val six_shooter = 761
final val skyguard_weapon_system = 788
final val spiker = 817
final val spitfire_aa_weapon = 822
final val spitfire_weapon = 827
final val striker = 838
final val suppressor = 845
final val thumper = 864
final val thunderer_weapon_systema = 866
final val thunderer_weapon_systemb = 867
final val vanguard_weapon_system = 927
final val vanu_sentry_turret_weapon = 945
final val vulture_bomb_bay = 987
final val vulture_nose_weapon_system = 990
final val vulture_tail_cannon = 992
final val wasp_weapon_system = 1002
final val winchester = 1003
final val dynomite = 267
final val frag_grenade = 330
final val generic_grenade = 354
final val jammer_grenade = 416
final val mine_sweeper = 552
final val plasma_grenade = 680
//tools - medkits
final val medkit = 536
final val super_armorkit = 842
final val super_medkit = 843
final val super_staminakit = 844
final val remote_electronics_kit = 728
final val trek = 876
final val applicator = 110
final val medicalapplicator = 531
final val bank = 132
final val nano_dispenser = 577
final val command_detonater = 213
//unknown
final val locker_container = 456 //strange item found in inventory slot #5, between holsters and grid
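  // Encoding sketch for the 11-bit class codes (uintL comes from the scodec.codecs._ import above;
  // values are illustrative and `bits` is a placeholder for previously encoded data):
  //   uintL(11).encode(ObjectClass.avatar)        // an Attempt carrying an 11-bit BitVector
  //   uintL(11).decode(bits).map(_.value)         // recovers 121 for the avatar code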
//TODO refactor this function into another object later
/**
* Given an object class, retrieve the `Codec` used to parse and translate the constructor data for that type.<br>
* <br>
* This function serves as a giant `switch` statement that loosely connects object data to object class.
* All entries, save the default, merely point to the `Codec` of pattern `ConstructorData.genericPattern`.
* This pattern connects all `Codec`s back to the superclass `ConstructorData`.
* The default case is a failure case for trying to either decode or encode an unknown class of object.
* @param objClass the code for the type of object being constructed
* @return the `Codec` that handles the format of data for that particular item class, or a failing `Codec`
*/
def selectDataCodec(objClass : Int) : Codec[ConstructorData.genericPattern] = {
(objClass : @switch) match {
//ammunition
case ObjectClass.bullet_105mm => AmmoBoxData.genericCodec
case ObjectClass.bullet_12mm => AmmoBoxData.genericCodec
case ObjectClass.bullet_150mm => AmmoBoxData.genericCodec
case ObjectClass.bullet_15mm => AmmoBoxData.genericCodec
case ObjectClass.bullet_20mm => AmmoBoxData.genericCodec
case ObjectClass.bullet_25mm => AmmoBoxData.genericCodec
case ObjectClass.bullet_35mm => AmmoBoxData.genericCodec
case ObjectClass.bullet_75mm => AmmoBoxData.genericCodec
case ObjectClass.bullet_9mm => AmmoBoxData.genericCodec
case ObjectClass.bullet_9mm_AP => AmmoBoxData.genericCodec
case ObjectClass.ancient_ammo_combo => AmmoBoxData.genericCodec
case ObjectClass.ancient_ammo_vehicle => AmmoBoxData.genericCodec
case ObjectClass.anniversary_ammo => AmmoBoxData.genericCodec
case ObjectClass.aphelion_immolation_cannon_ammo => AmmoBoxData.genericCodec
case ObjectClass.aphelion_laser_ammo => AmmoBoxData.genericCodec
case ObjectClass.aphelion_plasma_rocket_ammo => AmmoBoxData.genericCodec
case ObjectClass.aphelion_ppa_ammo => AmmoBoxData.genericCodec
case ObjectClass.aphelion_starfire_ammo => AmmoBoxData.genericCodec
case ObjectClass.armor_canister => AmmoBoxData.genericCodec
case ObjectClass.armor_siphon_ammo => AmmoBoxData.genericCodec
case ObjectClass.bolt => AmmoBoxData.genericCodec
case ObjectClass.burster_ammo => AmmoBoxData.genericCodec
case ObjectClass.colossus_100mm_cannon_ammo => AmmoBoxData.genericCodec
case ObjectClass.colossus_burster_ammo => AmmoBoxData.genericCodec
case ObjectClass.colossus_chaingun_ammo => AmmoBoxData.genericCodec
case ObjectClass.colossus_cluster_bomb_ammo => AmmoBoxData.genericCodec
case ObjectClass.colossus_tank_cannon_ammo => AmmoBoxData.genericCodec
case ObjectClass.comet_ammo => AmmoBoxData.genericCodec
case ObjectClass.dualcycler_ammo => AmmoBoxData.genericCodec
case ObjectClass.energy_cell => AmmoBoxData.genericCodec
case ObjectClass.energy_gun_ammo => AmmoBoxData.genericCodec
case ObjectClass.falcon_ammo => AmmoBoxData.genericCodec
case ObjectClass.firebird_missile => AmmoBoxData.genericCodec
case ObjectClass.flamethrower_ammo => AmmoBoxData.genericCodec
case ObjectClass.flux_cannon_thresher_battery => AmmoBoxData.genericCodec
case ObjectClass.fluxpod_ammo => AmmoBoxData.genericCodec
case ObjectClass.frag_cartridge => AmmoBoxData.genericCodec
case ObjectClass.frag_grenade_ammo => AmmoBoxData.genericCodec
case ObjectClass.gauss_cannon_ammo => AmmoBoxData.genericCodec
case ObjectClass.grenade => AmmoBoxData.genericCodec
case ObjectClass.health_canister => AmmoBoxData.genericCodec
case ObjectClass.heavy_grenade_mortar => AmmoBoxData.genericCodec
case ObjectClass.heavy_rail_beam_battery => AmmoBoxData.genericCodec
case ObjectClass.hellfire_ammo => AmmoBoxData.genericCodec
case ObjectClass.hunter_seeker_missile => AmmoBoxData.genericCodec
case ObjectClass.jammer_cartridge => AmmoBoxData.genericCodec
case ObjectClass.jammer_grenade_ammo => AmmoBoxData.genericCodec
case ObjectClass.lancer_cartridge => AmmoBoxData.genericCodec
case ObjectClass.liberator_bomb => AmmoBoxData.genericCodec
case ObjectClass.maelstrom_ammo => AmmoBoxData.genericCodec
case ObjectClass.melee_ammo => AmmoBoxData.genericCodec
case ObjectClass.mine => AmmoBoxData.genericCodec
case ObjectClass.mine_sweeper_ammo => AmmoBoxData.genericCodec
case ObjectClass.ntu_siphon_ammo => AmmoBoxData.genericCodec
case ObjectClass.oicw_ammo => AmmoBoxData.genericCodec
case ObjectClass.pellet_gun_ammo => AmmoBoxData.genericCodec
case ObjectClass.peregrine_dual_machine_gun_ammo => AmmoBoxData.genericCodec
case ObjectClass.peregrine_mechhammer_ammo => AmmoBoxData.genericCodec
case ObjectClass.peregrine_particle_cannon_ammo => AmmoBoxData.genericCodec
case ObjectClass.peregrine_rocket_pod_ammo => AmmoBoxData.genericCodec
case ObjectClass.peregrine_sparrow_ammo => AmmoBoxData.genericCodec
case ObjectClass.phalanx_ammo => AmmoBoxData.genericCodec
case ObjectClass.phoenix_missile => AmmoBoxData.genericCodec
case ObjectClass.plasma_cartridge => AmmoBoxData.genericCodec
case ObjectClass.plasma_grenade_ammo => AmmoBoxData.genericCodec
case ObjectClass.pounder_ammo => AmmoBoxData.genericCodec
case ObjectClass.pulse_battery => AmmoBoxData.genericCodec
case ObjectClass.quasar_ammo => AmmoBoxData.genericCodec
case ObjectClass.reaver_rocket => AmmoBoxData.genericCodec
case ObjectClass.rocket => AmmoBoxData.genericCodec
case ObjectClass.scattercannon_ammo => AmmoBoxData.genericCodec
case ObjectClass.shotgun_shell => AmmoBoxData.genericCodec
case ObjectClass.shotgun_shell_AP => AmmoBoxData.genericCodec
case ObjectClass.six_shooter_ammo => AmmoBoxData.genericCodec
case ObjectClass.skyguard_flak_cannon_ammo => AmmoBoxData.genericCodec
case ObjectClass.sparrow_ammo => AmmoBoxData.genericCodec
case ObjectClass.spitfire_aa_ammo => AmmoBoxData.genericCodec
case ObjectClass.spitfire_ammo => AmmoBoxData.genericCodec
case ObjectClass.starfire_ammo => AmmoBoxData.genericCodec
case ObjectClass.striker_missile_ammo => AmmoBoxData.genericCodec
case ObjectClass.trek_ammo => AmmoBoxData.genericCodec
case ObjectClass.upgrade_canister => AmmoBoxData.genericCodec
case ObjectClass.wasp_gun_ammo => AmmoBoxData.genericCodec
case ObjectClass.wasp_rocket_ammo => AmmoBoxData.genericCodec
case ObjectClass.winchester_ammo => AmmoBoxData.genericCodec
      //weapons (note the punisher case below, which uses ConcurrentFeedWeaponData rather than the generic weapon codec)
case ObjectClass.beamer => WeaponData.genericCodec
case ObjectClass.chaingun_12mm => WeaponData.genericCodec
case ObjectClass.chaingun_15mm => WeaponData.genericCodec
case ObjectClass.cannon_20mm => WeaponData.genericCodec
case ObjectClass.cannon_deliverer_20mm => WeaponData.genericCodec
case ObjectClass.cannon_dropship_20mm => WeaponData.genericCodec
case ObjectClass.cannon_dropship_l_20mm => WeaponData.genericCodec
case ObjectClass.cannon_75mm => WeaponData.genericCodec
case ObjectClass.lightning_75mm => WeaponData.genericCodec
case ObjectClass.ace => WeaponData.genericCodec
case ObjectClass.ace_deployable => WeaponData.genericCodec
case ObjectClass.advanced_ace => WeaponData.genericCodec
case ObjectClass.advanced_missile_launcher_t => WeaponData.genericCodec
case ObjectClass.anniversary_gun => WeaponData.genericCodec
case ObjectClass.anniversary_guna => WeaponData.genericCodec
case ObjectClass.anniversary_gunb => WeaponData.genericCodec
case ObjectClass.apc_ballgun_l => WeaponData.genericCodec
case ObjectClass.apc_ballgun_r => WeaponData.genericCodec
case ObjectClass.apc_weapon_systema => WeaponData.genericCodec
case ObjectClass.apc_weapon_systemb => WeaponData.genericCodec
case ObjectClass.apc_weapon_systemc => WeaponData.genericCodec
case ObjectClass.apc_weapon_systemc_nc => WeaponData.genericCodec
case ObjectClass.apc_weapon_systemc_tr => WeaponData.genericCodec
case ObjectClass.apc_weapon_systemc_vs => WeaponData.genericCodec
case ObjectClass.apc_weapon_systemd => WeaponData.genericCodec
case ObjectClass.apc_weapon_systemd_nc => WeaponData.genericCodec
case ObjectClass.apc_weapon_systemd_tr => WeaponData.genericCodec
case ObjectClass.apc_weapon_systemd_vs => WeaponData.genericCodec
case ObjectClass.aphelion_immolation_cannon => WeaponData.genericCodec
case ObjectClass.aphelion_laser => WeaponData.genericCodec
case ObjectClass.aphelion_laser_left => WeaponData.genericCodec
case ObjectClass.aphelion_laser_right => WeaponData.genericCodec
case ObjectClass.aphelion_plasma_rocket_pod => WeaponData.genericCodec
case ObjectClass.aphelion_ppa => WeaponData.genericCodec
case ObjectClass.aphelion_ppa_left => WeaponData.genericCodec
case ObjectClass.aphelion_ppa_right => WeaponData.genericCodec
case ObjectClass.aphelion_starfire => WeaponData.genericCodec
case ObjectClass.aphelion_starfire_left => WeaponData.genericCodec
case ObjectClass.aphelion_starfire_right => WeaponData.genericCodec
case ObjectClass.aurora_weapon_systema => WeaponData.genericCodec
case ObjectClass.aurora_weapon_systemb => WeaponData.genericCodec
case ObjectClass.battlewagon_weapon_systema => WeaponData.genericCodec
case ObjectClass.battlewagon_weapon_systemb => WeaponData.genericCodec
case ObjectClass.battlewagon_weapon_systemc => WeaponData.genericCodec
case ObjectClass.battlewagon_weapon_systemd => WeaponData.genericCodec
case ObjectClass.bolt_driver => WeaponData.genericCodec
case ObjectClass.chainblade => WeaponData.genericCodec
case ObjectClass.chaingun_p => WeaponData.genericCodec
case ObjectClass.colossus_burster => WeaponData.genericCodec
case ObjectClass.colossus_burster_left => WeaponData.genericCodec
case ObjectClass.colossus_burster_right => WeaponData.genericCodec
case ObjectClass.colossus_chaingun => WeaponData.genericCodec
case ObjectClass.colossus_chaingun_left => WeaponData.genericCodec
case ObjectClass.colossus_chaingun_right => WeaponData.genericCodec
case ObjectClass.colossus_cluster_bomb_pod => WeaponData.genericCodec
case ObjectClass.colossus_dual_100mm_cannons => WeaponData.genericCodec
case ObjectClass.colossus_tank_cannon => WeaponData.genericCodec
case ObjectClass.colossus_tank_cannon_left => WeaponData.genericCodec
case ObjectClass.colossus_tank_cannon_right => WeaponData.genericCodec
case ObjectClass.cycler => WeaponData.genericCodec
case ObjectClass.cycler_v2 => WeaponData.genericCodec
case ObjectClass.cycler_v3 => WeaponData.genericCodec
case ObjectClass.cycler_v4 => WeaponData.genericCodec
case ObjectClass.dropship_rear_turret => WeaponData.genericCodec
case ObjectClass.energy_gun => WeaponData.genericCodec
case ObjectClass.energy_gun_nc => WeaponData.genericCodec
case ObjectClass.energy_gun_tr => WeaponData.genericCodec
case ObjectClass.energy_gun_vs => WeaponData.genericCodec
case ObjectClass.flail_weapon => WeaponData.genericCodec
case ObjectClass.flamethrower => WeaponData.genericCodec
case ObjectClass.flechette => WeaponData.genericCodec
case ObjectClass.flux_cannon_thresher => WeaponData.genericCodec
case ObjectClass.fluxpod => WeaponData.genericCodec
case ObjectClass.forceblade => WeaponData.genericCodec
case ObjectClass.fragmentation_grenade => WeaponData.genericCodec
case ObjectClass.fury_weapon_systema => WeaponData.genericCodec
case ObjectClass.galaxy_gunship_cannon => WeaponData.genericCodec
case ObjectClass.galaxy_gunship_gun => WeaponData.genericCodec
case ObjectClass.galaxy_gunship_tailgun => WeaponData.genericCodec
case ObjectClass.gauss => WeaponData.genericCodec
case ObjectClass.gauss_cannon => WeaponData.genericCodec
case ObjectClass.grenade_launcher_marauder => WeaponData.genericCodec
case ObjectClass.heavy_rail_beam_magrider => WeaponData.genericCodec
case ObjectClass.heavy_sniper => WeaponData.genericCodec
case ObjectClass.hellfire => WeaponData.genericCodec
case ObjectClass.hunterseeker => WeaponData.genericCodec
case ObjectClass.ilc9 => WeaponData.genericCodec
case ObjectClass.isp => WeaponData.genericCodec
case ObjectClass.katana => WeaponData.genericCodec
case ObjectClass.lancer => WeaponData.genericCodec
case ObjectClass.lasher => WeaponData.genericCodec
case ObjectClass.liberator_25mm_cannon => WeaponData.genericCodec
case ObjectClass.liberator_bomb_bay => WeaponData.genericCodec
case ObjectClass.liberator_weapon_system => WeaponData.genericCodec
case ObjectClass.lightgunship_weapon_system => WeaponData.genericCodec
case ObjectClass.lightning_weapon_system => WeaponData.genericCodec
case ObjectClass.maelstrom => WeaponData.genericCodec
case ObjectClass.magcutter => WeaponData.genericCodec
case ObjectClass.mediumtransport_weapon_systemA => WeaponData.genericCodec
case ObjectClass.mediumtransport_weapon_systemB => WeaponData.genericCodec
case ObjectClass.mini_chaingun => WeaponData.genericCodec
case ObjectClass.oicw => WeaponData.genericCodec
case ObjectClass.particle_beam_magrider => WeaponData.genericCodec
case ObjectClass.pellet_gun => WeaponData.genericCodec
case ObjectClass.peregrine_dual_machine_gun => WeaponData.genericCodec
case ObjectClass.peregrine_dual_machine_gun_left => WeaponData.genericCodec
case ObjectClass.peregrine_dual_machine_gun_right => WeaponData.genericCodec
case ObjectClass.peregrine_dual_rocket_pods => WeaponData.genericCodec
case ObjectClass.peregrine_mechhammer => WeaponData.genericCodec
case ObjectClass.peregrine_mechhammer_left => WeaponData.genericCodec
case ObjectClass.peregrine_mechhammer_right => WeaponData.genericCodec
case ObjectClass.peregrine_particle_cannon => WeaponData.genericCodec
case ObjectClass.peregrine_sparrow => WeaponData.genericCodec
case ObjectClass.peregrine_sparrow_left => WeaponData.genericCodec
case ObjectClass.peregrine_sparrow_right => WeaponData.genericCodec
case ObjectClass.phalanx_avcombo => WeaponData.genericCodec
case ObjectClass.phalanx_flakcombo => WeaponData.genericCodec
case ObjectClass.phalanx_sgl_hevgatcan => WeaponData.genericCodec
case ObjectClass.phantasm_12mm_machinegun => WeaponData.genericCodec
case ObjectClass.phoenix => WeaponData.genericCodec
case ObjectClass.prowler_weapon_systemA => WeaponData.genericCodec
case ObjectClass.prowler_weapon_systemB => WeaponData.genericCodec
case ObjectClass.pulsar => WeaponData.genericCodec
case ObjectClass.pulsed_particle_accelerator => WeaponData.genericCodec
case ObjectClass.punisher => ConcurrentFeedWeaponData.genericCodec
case ObjectClass.quadassault_weapon_system => WeaponData.genericCodec
case ObjectClass.r_shotgun => WeaponData.genericCodec
case ObjectClass.radiator => WeaponData.genericCodec
case ObjectClass.repeater => WeaponData.genericCodec
case ObjectClass.rocklet => WeaponData.genericCodec
case ObjectClass.rotarychaingun_mosquito => WeaponData.genericCodec
case ObjectClass.router_telepad => WeaponData.genericCodec
case ObjectClass.scythe => WeaponData.genericCodec
case ObjectClass.six_shooter => WeaponData.genericCodec
case ObjectClass.skyguard_weapon_system => WeaponData.genericCodec
case ObjectClass.spiker => WeaponData.genericCodec
case ObjectClass.spitfire_aa_weapon => WeaponData.genericCodec
case ObjectClass.spitfire_weapon => WeaponData.genericCodec
case ObjectClass.striker => WeaponData.genericCodec
case ObjectClass.suppressor => WeaponData.genericCodec
case ObjectClass.thumper => WeaponData.genericCodec
case ObjectClass.thunderer_weapon_systema => WeaponData.genericCodec
case ObjectClass.thunderer_weapon_systemb => WeaponData.genericCodec
case ObjectClass.vanguard_weapon_system => WeaponData.genericCodec
case ObjectClass.vanu_sentry_turret_weapon => WeaponData.genericCodec
case ObjectClass.vulture_bomb_bay => WeaponData.genericCodec
case ObjectClass.vulture_nose_weapon_system => WeaponData.genericCodec
case ObjectClass.vulture_tail_cannon => WeaponData.genericCodec
case ObjectClass.wasp_weapon_system => WeaponData.genericCodec
case ObjectClass.winchester => WeaponData.genericCodec
case ObjectClass.dynomite => WeaponData.genericCodec
case ObjectClass.frag_grenade => WeaponData.genericCodec
case ObjectClass.generic_grenade => WeaponData.genericCodec
case ObjectClass.jammer_grenade => WeaponData.genericCodec
case ObjectClass.mine_sweeper => WeaponData.genericCodec
case ObjectClass.plasma_grenade => WeaponData.genericCodec
      //tools, medkits, and other miscellaneous equipment
case ObjectClass.avatar => CharacterData.genericCodec
case ObjectClass.locker_container => AmmoBoxData.genericCodec
case ObjectClass.remote_electronics_kit => REKData.genericCodec
case ObjectClass.trek => WeaponData.genericCodec
case ObjectClass.medkit => AmmoBoxData.genericCodec
case ObjectClass.super_armorkit => AmmoBoxData.genericCodec
case ObjectClass.super_medkit => AmmoBoxData.genericCodec
case ObjectClass.super_staminakit => AmmoBoxData.genericCodec
case ObjectClass.applicator => WeaponData.genericCodec
case ObjectClass.medicalapplicator => WeaponData.genericCodec
case ObjectClass.bank => WeaponData.genericCodec
case ObjectClass.nano_dispenser => WeaponData.genericCodec
case ObjectClass.command_detonater => WeaponData.genericCodec
//failure case
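      // The wildcard below presumably builds a codec that always fails in both directions:
      // conditional(false, bool) reads/writes no bits and yields None, and exmap maps that
      // value to Attempt.failure for decode and encode alike, so an unknown object class is
      // reported as an error rather than silently mis-parsed.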
case _ => conditional(false, bool).exmap[ConstructorData.genericPattern] (
{
          case _ =>
Attempt.failure(Err("decoding unknown object class"))
},
{
          case _ =>
Attempt.failure(Err("encoding unknown object class"))
}
)
}
}
} | Fate-JH/PSF-Server | common/src/main/scala/net/psforever/packet/game/objectcreate/ObjectClass.scala | Scala | gpl-3.0 | 28,237 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.graphx
import org.scalatest.FunSuite
import org.apache.spark.SparkContext
import org.apache.spark.rdd._
class PregelSuite extends FunSuite with LocalSparkContext {
test("1 iteration") {
withSpark { sc =>
val n = 5
val starEdges = (1 to n).map(x => (0: VertexId, x: VertexId))
val star = Graph.fromEdgeTuples(sc.parallelize(starEdges, 3), "v").cache()
val result = Pregel(star, 0)(
(vid, attr, msg) => attr,
et => Iterator.empty,
(a: Int, b: Int) => throw new Exception("mergeMsg run unexpectedly"))
assert(result.vertices.collect.toSet === star.vertices.collect.toSet)
}
}
test("chain propagation") {
withSpark { sc =>
val n = 5
val chain = Graph.fromEdgeTuples(
sc.parallelize((1 until n).map(x => (x: VertexId, x + 1: VertexId)), 3),
0).cache()
assert(chain.vertices.collect.toSet === (1 to n).map(x => (x: VertexId, 0)).toSet)
val chainWithSeed = chain.mapVertices { (vid, attr) => if (vid == 1) 1 else 0 }.cache()
assert(chainWithSeed.vertices.collect.toSet ===
Set((1: VertexId, 1)) ++ (2 to n).map(x => (x: VertexId, 0)).toSet)
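      // Only vertex 1 is seeded with 1; Pregel should propagate the maximum down the
      // chain until every vertex holds 1, i.e. the original chain attribute 0 plus 1.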
val result = Pregel(chainWithSeed, 0)(
(vid, attr, msg) => math.max(msg, attr),
et => if (et.dstAttr != et.srcAttr) Iterator((et.dstId, et.srcAttr)) else Iterator.empty,
(a: Int, b: Int) => math.max(a, b))
assert(result.vertices.collect.toSet ===
chain.vertices.mapValues { (vid, attr) => attr + 1 }.collect.toSet)
}
}
}
| dotunolafunmiloye/spark | graphx/src/test/scala/org/apache/spark/graphx/PregelSuite.scala | Scala | apache-2.0 | 2,363 |
package models.sitedata.joined
import models.sitedata.BaseModel
object ZoneInfoDef{
def toTable: String = "ZoneInfo"
}
case class ZoneInfo (
subzoneid: Long,
subzonename: String,
zonename: String
) extends BaseModel {
override def toString: String = {
"ZoneInfo {id: " + subzoneid +
", SubZoneName: " + subzonename +
", ZoneName: " + zonename +
"}"
}
override def getId: Long = subzoneid
override def setId(id: Long): Unit = { /* this.id = id */ }
}
| tnddn/iv-web | portal/rest-portal/app/models/sitedata/joined/ZoneInfo.scala | Scala | apache-2.0 | 548 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.common
import org.junit.Assert._
import collection.mutable.ArrayBuffer
import org.junit.Test
class TopicTest {
@Test
def testInvalidTopicNames() {
val invalidTopicNames = new ArrayBuffer[String]()
invalidTopicNames += ("", ".", "..")
var longName = "ATCG"
for (i <- 1 to 6)
longName += longName
invalidTopicNames += longName
val badChars = Array('/', '\\\\', ',', '\\u0000', ':', "\\"", '\\'', ';', '*', '?', ' ', '\\t', '\\r', '\\n', '=')
for (weirdChar <- badChars) {
invalidTopicNames += "Is" + weirdChar + "illegal"
}
for (i <- 0 until invalidTopicNames.size) {
try {
Topic.validate(invalidTopicNames(i))
fail("Should throw InvalidTopicException.")
}
catch {
case e: InvalidTopicException => "This is good."
}
}
val validTopicNames = new ArrayBuffer[String]()
validTopicNames += ("valid", "TOPIC", "nAmEs", "ar6", "VaL1d", "_0-9_.")
for (i <- 0 until validTopicNames.size) {
try {
Topic.validate(validTopicNames(i))
}
catch {
case e: Exception => fail("Should not throw exception.")
}
}
}
@Test
def testTopicHasCollisionChars() = {
val falseTopics = List("start", "end", "middle", "many")
val trueTopics = List(
".start", "end.", "mid.dle", ".ma.ny.",
"_start", "end_", "mid_dle", "_ma_ny."
)
falseTopics.foreach( t =>
assertFalse(Topic.hasCollisionChars(t))
)
trueTopics.foreach( t =>
assertTrue(Topic.hasCollisionChars(t))
)
}
@Test
def testTopicHasCollision() = {
val periodFirstMiddleLastNone = List(".topic", "to.pic", "topic.", "topic")
val underscoreFirstMiddleLastNone = List("_topic", "to_pic", "topic_", "topic")
// Self
periodFirstMiddleLastNone.foreach { t =>
assertTrue(Topic.hasCollision(t, t))
}
underscoreFirstMiddleLastNone.foreach { t =>
assertTrue(Topic.hasCollision(t, t))
}
// Same Position
periodFirstMiddleLastNone.zip(underscoreFirstMiddleLastNone).foreach { case (t1, t2) =>
assertTrue(Topic.hasCollision(t1, t2))
}
// Different Position
periodFirstMiddleLastNone.zip(underscoreFirstMiddleLastNone.reverse).foreach { case (t1, t2) =>
assertFalse(Topic.hasCollision(t1, t2))
}
}
}
| likaiwalkman/kafka | core/src/test/scala/unit/kafka/common/TopicTest.scala | Scala | apache-2.0 | 3,132 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.kernel.interpreter.pyspark
import org.apache.toree.interpreter.broker.BrokerService
import org.apache.toree.kernel.interpreter.pyspark.PySparkTypes._
import org.slf4j.LoggerFactory
import py4j.GatewayServer
import scala.concurrent.Future
import scala.tools.nsc.interpreter.OutputStream
/**
* Represents the service that provides the high-level interface between the
* JVM and Python.
*
* @param pythonProcessName name of python process
* @param gatewayServer The backend to start to communicate between the JVM and
* Python
* @param pySparkBridge The bridge to use for communication between the JVM and
* Python
* @param pySparkProcessHandler The handler used for events that occur with
* the PySpark process
*/
class PySparkService(
private val pythonProcessName: String,
private val gatewayServer: GatewayServer,
private val pySparkBridge: PySparkBridge,
private val pySparkProcessHandler: PySparkProcessHandler
) extends BrokerService {
private val logger = LoggerFactory.getLogger(this.getClass)
@volatile private var _isRunning: Boolean = false
override def isRunning: Boolean = _isRunning
/** Represents the process used to execute Python code via the bridge. */
private lazy val pySparkProcess = {
val p = new PySparkProcess(
pythonProcessName,
pySparkBridge,
pySparkProcessHandler,
gatewayServer.getListeningPort,
org.apache.spark.SPARK_VERSION
)
// Update handlers to correctly reset and restart the process
pySparkProcessHandler.setResetMethod(message => {
p.stop()
pySparkBridge.state.reset(message)
})
pySparkProcessHandler.setRestartMethod(() => p.start())
p
}
/** Starts the PySpark service. */
def start(): Unit = {
// Start without forking the gateway server (needs to have access to
// SparkContext in current JVM)
logger.debug("Starting gateway server")
gatewayServer.start()
val port = gatewayServer.getListeningPort
logger.debug(s"Gateway server running on port $port")
// Start the Python process used to execute code
logger.debug("Launching process to execute Python code")
pySparkProcess.start()
_isRunning = true
}
/**
* Submits code to the PySpark service to be executed and return a result.
*
* @param code The code to execute
*
* @return The result as a future to eventually return
*/
def submitCode(code: Code, kernelOutputStream: Option[OutputStream]): Future[CodeResults] = {
pySparkBridge.state.pushCode(code, kernelOutputStream)
}
/** Stops the running PySpark service. */
def stop(): Unit = {
// Stop the Python process used to execute code
pySparkProcess.stop()
// Stop the server used as an entrypoint for Python
gatewayServer.shutdown()
_isRunning = false
}
}
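// Hedged usage sketch (illustration only; the collaborator values below are assumptions,
// not part of this file): a PySparkService is typically constructed with a gateway server,
// a bridge and a process handler, then started, queried and stopped:
//
//   val service = new PySparkService(pythonProcessName, gatewayServer, bridge, handler)
//   service.start()
//   val result: Future[CodeResults] = service.submitCode(someCode, kernelOutputStream = None)
//   service.stop()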
| Myllyenko/incubator-toree | pyspark-interpreter/src/main/scala/org/apache/toree/kernel/interpreter/pyspark/PySparkService.scala | Scala | apache-2.0 | 3,721 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//import scala.sys.process.processInternal.File
import java.io.File
import java.nio.file.{Files, Paths}
import java.util.Date
import cmwell.ctrl.client.CtrlClient
import cmwell.ctrl.hc.{ActiveNodes, ClusterStatus}
import k.grid.{GridConnection, Grid => AkkaGrid}
import scala.collection.parallel.ParMap
import scala.collection.{GenSeq, GenSet}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.io.Source
import scala.language.postfixOps
import scala.sys.process._
import scala.util.parsing.json._
import scala.util.{Failure, Success, Try}
//todo: make sure that some applications are installed.
trait Info {
// scalastyle:off
def info(msg: String) = println(s"Info: $msg")
// scalastyle:on
}
object ResourceBuilder {
def getIndexedName(name: String, index: Int): String = {
index match {
case 1 => name
case _ => s"$name$index"
}
}
private def replaceTemplates(text: String, templates: Map[String, String]): String =
"""\\{\\{([^{}]*)\\}\\}""".r.replaceSomeIn(text, {
case scala.util.matching.Regex.Groups(name) => templates.get(name)
})
def getResource(path: String, map: Map[String, String]): String = {
val fileContent = Source.fromFile(path).mkString
replaceTemplates(fileContent, map)
}
}
abstract class ModuleLock(checkCount: Int = 50) extends Info {
val delay = 5
def name: String
def com(host: String): Try[String]
def continueCondition(v: String, waitForModuleFor: Int): Boolean
  private var prevRes = "UNLIKELY RES"
def fail = {
// scalastyle:off
println("failed to check " + name)
// scalastyle:on
throw new Exception("failed to check " + name)
}
def waitForModule(host: String, waitForModuleFor: Int, tries: Int = checkCount): Boolean = {
if (tries == 0) {
fail
false
} else {
val res = com(host)
res match {
case Success(v) =>
if (continueCondition(v.trim, waitForModuleFor)) {
Thread.sleep(delay * 1000)
val t = if (prevRes != v.trim) {
prevRes = v.trim
if (v.trim.length < 40)
info(s" $name in progress (${v.trim})")
else
info(s" $name in progress")
checkCount
} else tries - 1
waitForModule(host, waitForModuleFor, t)
}
case Failure(e) =>
Thread.sleep(delay * 1000)
waitForModule(host, waitForModuleFor, tries - 1)
}
true
}
}
def waitForModuleIndefinitely(host: String, waitForModuleFor: Int = 0): Boolean = {
val res = com(host)
res match {
case Success(v) =>
if (continueCondition(v.trim, waitForModuleFor)) {
if (v.trim.length < 40)
info(s" $name in progress (${v.trim})")
else
info(s" $name in progress")
Thread.sleep(delay * 1000)
waitForModuleIndefinitely(host, waitForModuleFor)
}
case Failure(e) =>
Thread.sleep(delay * 1000)
waitForModuleIndefinitely(host, waitForModuleFor)
}
true
}
}
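// A minimal hedged sketch (not part of the original sources) of a concrete ModuleLock:
// it polls a hypothetical readiness file over ssh and keeps waiting while the reported
// count is below the requested threshold. The remote command and file path are
// assumptions made purely for illustration.
case class ExampleReadyLock() extends ModuleLock {
  def name: String = "Example readiness check"
  def com(host: String): Try[String] =
    Try(Seq("ssh", "-o", "StrictHostKeyChecking=no", host, "cat /tmp/ready-count").!!)
  def continueCondition(v: String, waitForModuleFor: Int): Boolean =
    Try(v.trim.toInt).getOrElse(0) < waitForModuleFor
}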
case class DataDirs(casDataDirs: GenSeq[String],
casCommitLogDirs: GenSeq[String],
esDataDirs: GenSeq[String],
kafkaDataDirs: GenSeq[String],
zookeeperDataDir: String,
logsDataDir: String)
//case class DataDirs(m : Map[String,String])
case class InstDirs(intallationDir: String = "~/cm-well-new/cm-well", globalLocation: String = "/opt")
case class HaProxy(host: String, sitedown: String = "cm-well:8080")
object Host {
var connectedToAkkaGrid = false
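  // Worked example: createHostsNames("node", 9, 11) yields List("node09", "node10", "node11");
  // the zero-padding width is taken from the number of digits of toIndex.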
def createHostsNames(name: String, fromIndex: Int, toIndex: Int): List[String] = {
val digitNum = toIndex.toString.length
val range = fromIndex to toIndex
range.toList.map(index => s"${name}%0${digitNum}d".format(index))
}
def ctrl = cmwell.ctrl.client.CtrlClient
def getIndexTxt(moduleIndex: Int) = {
if (moduleIndex == 1) "" else s"$moduleIndex"
}
}
trait OsType
case object Oracle extends OsType
case object Ubuntu extends OsType
abstract class Host(user: String,
password: String,
ipMappings: IpMappings,
size: Int,
inet: String,
val cn: String,
val dc: String,
dataDirs: DataDirs,
instDirs: InstDirs,
wsPerMachine: Int,
allocationPlan: ModuleAllocations,
useAuthorization: Boolean,
deployJava: Boolean,
production: Boolean,
su: Boolean,
ctrlService: Boolean = false,
minMembers: Option[Int] = None,
haProxy: Option[HaProxy],
withElk: Boolean = false,
isDebug: Boolean = false) {
var sudoerCredentials: Option[Credentials] = None
def getUser = user
def getIpMappings = ipMappings
def getInet = inet
def getDataDirs = dataDirs
def getInstDirs = instDirs
def getAllocationPlan = allocationPlan
def getUseAuthorization = useAuthorization
def getDeployJava = deployJava
def getProduction = production
def getSu = su
def getCtrlSerice = ctrlService
def getHaProxy = haProxy
def getWithElk = withElk
/*
var useAuthorization = false
var deployJava = false
var production = false
var devMode = false
*/
def getMinMembers = minMembers.getOrElse(ips.size / 2 + 1)
val esRegPort = 9201
val esMasterPort = 9200
def currentDir = command("pwd").get
def getOs(host: String): OsType = {
val osStr = command("""cat /etc/*-release""", host, false) match {
case Success(str) => str.trim
case Failure(err) => "oracle"
}
osStr match {
case str: String if str.toLowerCase().contains("ubuntu") => Ubuntu
case str: String if str.toLowerCase().contains("oracle") => Oracle
case str: String => Oracle
}
}
def cssh = {
checkProduction
Future {
command(s"cssh --username $user ${ips.mkString(" ")}")
}
}
def jstat = {
ips.par.map { ip =>
// scalastyle:off
ip -> command(s"ps aux | grep java | egrep -v 'starter|grep' | awk '{print $$2}' | xargs -I zzz ${getInstDirs.globalLocation}/cm-well/app/java/bin/jstat -gcutil zzz", ip, false).map(_.trim)
// scalastyle:on
}.toMap
}
def jstat(comp: String) = {
ips.par.map { ip =>
// scalastyle:off
ip -> command(s"ps aux | grep java | egrep -v 'starter|grep' | grep $comp | awk '{print $$2}' | xargs -I zzz ${getInstDirs.globalLocation}/cm-well/app/java/bin/jstat -gcutil zzz", ip, false).map(_.trim)
// scalastyle:on
}.toMap
}
def jstat(comp: String, ip: String): Unit = {
// scalastyle:off
ParMap(ip -> command(s"ps aux | grep java | egrep -v 'starter|grep' | grep $comp | awk '{print $$2}' | xargs -I zzz ${getInstDirs.globalLocation}/cm-well/app/java/bin/jstat -gcutil zzz", ip, false).map(_.trim))
// scalastyle:on
}
private val componentToJmxMapping = Map(
"ws" -> PortManagers.ws.jmxPortManager.initialPort,
"bg" -> PortManagers.bg.jmxPortManager.initialPort,
"ctrl" -> PortManagers.ctrl.jmxPortManager.initialPort,
"dc" -> PortManagers.dc.jmxPortManager.initialPort
)
def jconsole(component: String, dualmonitor: Boolean, host1: String, hosts: String*): Unit =
jconsole(component, dualmonitor, Seq(host1) ++ hosts)
def jconsole(component: String, dualmonitor: Boolean = false, hosts: GenSeq[String] = ips): Unit = {
if (!dualmonitor) {
val com = hosts.map(h => s"$h:${componentToJmxMapping(component)}").mkString(" ")
info(com)
Future {
command(s"jconsole $com")
}
} else {
val (hosts1, hosts2) = hosts.splitAt(hosts.size / 2)
val com1 = hosts1.map(h => s"$h:${componentToJmxMapping(component)}").mkString(" ")
val com2 = hosts2.map(h => s"$h:${componentToJmxMapping(component)}").mkString(" ")
info(com1)
info(com2)
Future {
command(s"jconsole $com1")
}
Future {
command(s"jconsole $com2")
}
}
}
def dcSync(remoteHost: String, dc: String): Unit = {
// scalastyle:off
command(s"""curl -XPOST "http://${ips(0)}:9000/meta/sys/dc/$dc" -H "X-CM-Well-Type:Obj" -H "Content-Type:application/json" --data-binary '{"type":"remote" , "location" : "$remoteHost" , "id" : "$dc"}'""")
// scalastyle:on
}
def ips = ipMappings.getIps
def getSize = size
def createFile(path: String,
content: String,
hosts: GenSeq[String] = ips,
sudo: Boolean = false,
sudoer: Option[Credentials] = None) {
if (sudo)
command(s"""echo -e '$content' | sudo tee $path > /dev/null""", hosts, true, sudoer)
else
command(s"""echo $$'$content' > $path""", hosts, false)
}
val shipperConfLocation = s"${instDirs.globalLocation}/cm-well/conf/logstash"
val logstashJarLocation = s"${instDirs.globalLocation}/cm-well/app/logstash"
val logstashConfName = "logstash.conf"
val logstashJarName = "logstash-1.2.2-flatjar.jar"
def addLogstash(esHost: String, hosts: GenSeq[String] = ips) {
createLogstashConfFile(esHost, hosts)
deployLogstash(hosts)
startSendingLogsToLogstash(hosts)
}
def createLogstashConfFile(esHost: String, hosts: GenSeq[String] = ips) {
val str = genLogstashConfFile(
esHost,
Map("BU" -> "TMS", "serviceID" -> "cm-well", "environmentID" -> "cm-well", "appID" -> "cm-well", "cluster" -> cn)
)
command(s"mkdir -p $shipperConfLocation", hosts, false)
createFile(s"$shipperConfLocation/$logstashConfName", str, hosts)
}
def deployLogstash(hosts: GenSeq[String] = ips) {
command(s"mkdir -p ${instDirs.globalLocation}/cm-well/app/logstash", hosts, false)
rsync(s"components-extras/$logstashJarName", logstashJarLocation, hosts)
val startFile =
s"java -jar $logstashJarName agent -f $shipperConfLocation/$logstashConfName > /dev/null 2> /dev/null &"
createFile(s"$logstashJarLocation/start.sh", startFile, hosts)
command(s"cd $logstashJarLocation; chmod +x start.sh", hosts, false)
}
def startSendingLogsToLogstash(hosts: GenSeq[String] = ips) {
command(s"cd $logstashJarLocation; ./start.sh", hosts, false)
}
def stopSendingLogsToLogstash(hosts: GenSeq[String] = ips) {
killProcess("logstash", "")
}
def genLogstashConfFile(esHost: String, globalFields: Map[String, String]): String =
LogstashConf.genLogstashConfFile(cn,
esHost,
globalFields,
s"${instDirs.globalLocation}/cm-well/log",
dataDirs.esDataDirs.size)
private def resolveIndex(index: Int): String = {
index match {
case 1 => ""
case _ => s"${index}"
}
}
val deployment = new Deployment(this)
//FIXME: was object inside a class, caused this nifty hard to track exception:
/*
* java.lang.NoSuchFieldError: LogLevel$module
* at Host.LogLevel(ctl.scala:415)
* at Main$$anon$1.<init>(scalacmd4970030751979185144.scala:10)
* at Main$.main(scalacmd4970030751979185144.scala:1)
* at Main.main(scalacmd4970030751979185144.scala)
* at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
* at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
* at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
* at java.lang.reflect.Method.invoke(Method.java:497)
* at scala.reflect.internal.util.ScalaClassLoader$$anonfun$run$1.apply(ScalaClassLoader.scala:70)
* at scala.reflect.internal.util.ScalaClassLoader$class.asContext(ScalaClassLoader.scala:31)
* at scala.reflect.internal.util.ScalaClassLoader$URLClassLoader.asContext(ScalaClassLoader.scala:101)
* at scala.reflect.internal.util.ScalaClassLoader$class.run(ScalaClassLoader.scala:70)
* at scala.reflect.internal.util.ScalaClassLoader$URLClassLoader.run(ScalaClassLoader.scala:101)
* at scala.tools.nsc.CommonRunner$class.run(ObjectRunner.scala:22)
* at scala.tools.nsc.ObjectRunner$.run(ObjectRunner.scala:39)
* at scala.tools.nsc.CommonRunner$class.runAndCatch(ObjectRunner.scala:29)
* at scala.tools.nsc.ObjectRunner$.runAndCatch(ObjectRunner.scala:39)
* at scala.tools.nsc.ScriptRunner.scala$tools$nsc$ScriptRunner$$runCompiled(ScriptRunner.scala:175)
* at scala.tools.nsc.ScriptRunner$$anonfun$runCommand$1.apply(ScriptRunner.scala:222)
* at scala.tools.nsc.ScriptRunner$$anonfun$runCommand$1.apply(ScriptRunner.scala:222)
* at scala.tools.nsc.ScriptRunner$$anonfun$withCompiledScript$1$$anonfun$apply$mcZ$sp$1.apply(ScriptRunner.scala:161)
* at scala.tools.nsc.ScriptRunner$$anonfun$withCompiledScript$1.apply$mcZ$sp(ScriptRunner.scala:161)
* at scala.tools.nsc.ScriptRunner$$anonfun$withCompiledScript$1.apply(ScriptRunner.scala:129)
* at scala.tools.nsc.ScriptRunner$$anonfun$withCompiledScript$1.apply(ScriptRunner.scala:129)
* at scala.tools.nsc.util.package$.trackingThreads(package.scala:43)
* at scala.tools.nsc.util.package$.waitingForThreads(package.scala:27)
* at scala.tools.nsc.ScriptRunner.withCompiledScript(ScriptRunner.scala:128)
* at scala.tools.nsc.ScriptRunner.runCommand(ScriptRunner.scala:222)
* at scala.tools.nsc.MainGenericRunner.run$1(MainGenericRunner.scala:85)
* at scala.tools.nsc.MainGenericRunner.process(MainGenericRunner.scala:98)
* at scala.tools.nsc.MainGenericRunner$.main(MainGenericRunner.scala:103)
* at scala.tools.nsc.MainGenericRunner.main(MainGenericRunner.scala)
*
* changed it to an anon val, which is ugly, but works.
* this is a TEMPORARY PATCH!
* @michaelirzh : please refactor!!!
*/
val LogLevel = new {
def warn = deployment.componentProps.collect { case lc: LoggingComponent => lc }.foreach(lc => lc.LogLevel.warn)
def error = deployment.componentProps.collect { case lc: LoggingComponent => lc }.foreach(lc => lc.LogLevel.error)
def info = deployment.componentProps.collect { case lc: LoggingComponent => lc }.foreach(lc => lc.LogLevel.info)
def debug = deployment.componentProps.collect { case lc: LoggingComponent => lc }.foreach(lc => lc.LogLevel.debug)
}
private val jwt = sys.env.getOrElse(
"PUSER_TOKEN",
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJwVXNlciIsImV4cCI6NDYzODkwMjQwMDAwMCwicmV2IjoxfQ.j-tJCGnWHbJ-XAUJ1wyHxMlnMaLvO6IO0fKVjsXOzYM"
)
private val rootDigest =
sys.env.getOrElse("ROOT_DIGEST", "$2a$10$MKrHtymBOyfE67dZnbEdeOEB336uOXwYetVU28djINKjUTs2da6Km")
private val rootDigest2 = sys.env.getOrElse("ROOT_DIGEST2", "199245447fd82dd38f84c000da94cf1d")
var verbose = false
var deb = isDebug
def debug: Boolean = deb
def debug_=(v: Boolean) = {
deb = v
// scalastyle:off
println("The ports are:\\nws: 5010\\nbatch: 5009\\nctrl: 5011\\ncw: 5012\\ndc: 5013\\nbg: 5014")
// scalastyle:on
}
var doInfo = true
// scalastyle:off
def info(msg: String) = if (doInfo) println(s"Info: $msg")
// scalastyle:on
def warn(msg: String) = {
// scalastyle:off
println(s"Warning: $msg")
// scalastyle:on
}
def warnPrompt = {
// scalastyle:off
println("Warning: Are you sure you want to continue: (yes/no)")
// scalastyle:on
val ln = scala.io.StdIn.readLine()
if (ln != "yes") {
throw new Exception("You chose to not continue the process.")
}
}
def getMode: String
def getCassandraHostIDs(host: String): String
def getElasticsearchMasters: Int
def isSu = su
// scalastyle:off
def help = println(Source.fromFile("readme").mkString)
// scalastyle:on
//def hosts = ips.map(ip => s"${user}@${ip}")
def getSeedNodes: List[String]
def javaPath = s"${instDirs.globalLocation}/cm-well/app/java/bin"
def utilsPath = s"${instDirs.globalLocation}/cm-well/bin/utils"
def homeBinPath = "~/bin"
def path: String = s"$javaPath:$utilsPath:$homeBinPath:$$PATH"
private def ipsToSsh(u: String = user, ips: GenSeq[String]) =
ips.map(ip => if (ip.indexOf("@") == -1) s"${u}@${ip}" else ip)
private def timeStamp = System.currentTimeMillis / 1000
private var lastProdCheckTimeStamp = 0L
private def checkProduction {
val interval = 60 * 60
if (production && (timeStamp - lastProdCheckTimeStamp > interval)) {
// scalastyle:off
println("This is a production cluster. Are you sure you want to do this operation: (yes/no)")
// scalastyle:on
val ln = scala.io.StdIn.readLine()
if (ln != "yes") {
throw new Exception(
"This operation is not allowed on a production environment. Please remove: production = true from this cluster's definition file"
)
} else {
lastProdCheckTimeStamp = timeStamp
}
}
}
// var intallationDir = "~/cm-well-new/cm-well"
// var globalLocation = "/opt"
case class Credentials(name: String, pass: String)
def gainTrust: Unit = gainTrust()
def gainTrust(u: String = user, p: String = "", hosts: GenSeq[String] = ips) {
val sshLocation = s"${sys.env("HOME")}/.ssh"
val privateKey = s"$sshLocation/id_rsa"
val publicKey = s"$privateKey.pub"
val hasPrivateKey = new java.io.File(privateKey).exists
val hasPublicKey = new java.io.File(publicKey).exists
val hasKey = hasPrivateKey && hasPublicKey
if (!hasKey) {
info(" key not found, generating install key.")
//ssh-keygen asks for overwrite in case the private key exists, so deleting before.
Seq("bash", "-c", s"rm -f $privateKey; ssh-keygen -q -t rsa -b 4096 -N '' -C '' -f $privateKey").!!
}
val pass = if (p != "") p else scala.io.StdIn.readLine(s"Please enter password for $u\\n")
val sshHosts = ipsToSsh(u, hosts)
sshHosts.seq.foreach { sshHost =>
Seq("ssh-keygen", "-R", sshHost).!!
}
sshHosts.foreach { sshHost =>
val cmd = Seq("bash", "-c", s"read PASS; ${UtilCommands.sshpass} -p $$PASS ssh-copy-id -i $privateKey -o StrictHostKeyChecking=no $sshHost")
// scalastyle:off
if (verbose) println("command: " + cmd.mkString(" "))
// scalastyle:on
(s"echo -e -n $pass\\\\n" #| cmd).!!
}
}
def refreshUserState(user: String, sudoer: Option[Credentials], hosts: GenSeq[String] = ips): Unit = {
// temp disabled for OSX till new cons available...
val pubKeyOpt = sys.env.get("SSH_DEV_KEY")
if (!UtilCommands.isOSX && pubKeyOpt.isDefined) {
val pubKey = pubKeyOpt.get
val userSshDir = s"/home/$user/.ssh"
val rootSshDir = "/root/.ssh"
val fileName = "authorized_keys"
val rootVarMap = Map("STR" -> pubKey, "DIR" -> rootSshDir, "FILE" -> fileName)
// scalastyle:off
val cmdTemplate = "%smkdir -p $DIR; %ssed -i -e '\\\\$a\\\\' $DIR/$FILE 2> /dev/null; %sgrep -q '$STR' $DIR/$FILE 2> /dev/null || echo -e '$STR' | %stee -a $DIR/$FILE > /dev/null"
// scalastyle:on
val rootCmd = cmdTemplate.format(Seq.fill(4)("sudo "): _*)
val userCmd = cmdTemplate.format(Seq.fill(4)(""): _*)
sudoer.foreach(_ => command(rootCmd, hosts, sudo = true, sudoer, rootVarMap))
val userVarMap = Map("STR" -> pubKey, "DIR" -> userSshDir, "FILE" -> fileName)
command(userCmd, hosts, sudo = false, sudoer = None, variables = userVarMap)
//add the file that removes the annoying ssh log in message
command("touch ~/.hushlogin", hosts, sudo = false)
}
}
//def gainTrustNoPass(u : String = user , p : String = "", hosts : GenSeq[String] = ips.par)
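  // Worked example: for a 5-node cluster, validateNumberOfMasterNodes accepts 3 and 5
  // (the count must be odd, at least 3, and no larger than the cluster size).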
def validateNumberOfMasterNodes(num: Int, size: Int): Boolean = (num % 2) == 1 && num <= size && num >= 3
def absPath(path: String) = Seq("bash", "-c", s"cd ${path}; pwd").!!.replace("\\n", "")
val nodeToolLocation = s"${instDirs.globalLocation}/cm-well/app/cas/cur/bin/nodetool"
def nodeToolPath = nodeToolLocation
def pingAddress = ips(0)
def esHealthAddress = ":9200/_cluster/health?pretty=true"
var mappingFile = "mapping.json"
def cassandraStatus(host: String): Try[String] = {
command(
s"JAVA_HOME=${instDirs.globalLocation}/cm-well/app/java/bin $nodeToolPath status 2> /dev/null | grep UN | wc -l",
host,
false
).map(_.trim)
}
case class CassandraLock() extends ModuleLock {
def name: String = "Cassandra boot"
def com(host: String): Try[String] = cassandraStatus(host)
def continueCondition(v: String, waitFor: Int): Boolean = v.toInt < waitFor
}
case class CassandraDNLock() extends ModuleLock {
def name: String = "CassandraDownNodes counter"
def com(host: String): Try[String] =
command(
s"JAVA_HOME=${instDirs.globalLocation}/cm-well/app/java/bin $nodeToolPath status 2> /dev/null | grep DN | wc -l",
host,
false
)
def continueCondition(v: String, waitFor: Int): Boolean = v.toInt < waitFor
}
case class ElasticsearchLock(checkHost: String = ips(0)) extends ModuleLock {
def name: String = "Elasticsearch boot"
def com(host: String): Try[String] = {
val r = command("curl -sX GET http://" + checkHost + esHealthAddress, host, false)
r match {
case Success(v) =>
Try(JSON.parseFull(v.trim).get.asInstanceOf[Map[String, Any]]("number_of_nodes").toString.trim.split('.')(0))
case Failure(e) => Failure(e)
}
}
def continueCondition(v: String, waitFor: Int): Boolean = v.trim.toInt < waitFor
}
case class ElasticsearchStatusLock(colors: String*) extends ModuleLock {
override val delay = 20
def name: String = s"Waiting for Elasticsearch ${colors.mkString(", ")} status"
def com(host: String): Try[String] = elasticsearchStatus(host)
def continueCondition(v: String, waitFor: Int): Boolean = !colors.contains(v.toLowerCase)
}
case class NoKnownHostsLock() extends ModuleLock {
override def name: String = "Updating known hosts"
override def continueCondition(v: String, waitForModuleFor: Int): Boolean = v.contains("known-cmwell-hosts")
override def com(host: String): Try[String] =
command("curl -sX GET http://" + host + ":9000/meta/sys?format=ntriples")
}
def webServerStatus(host: String) = command("curl -Is http://" + host + ":9000/ | head -1")
case class WebServiceLock() extends ModuleLock {
def name: String = "Web Service boot"
def com(host: String): Try[String] = webServerStatus(host)
def continueCondition(v: String, waitFor: Int): Boolean =
!v.contains("200") && !v.contains("404") && !v.contains("503")
}
val dataInitializer = new DataInitializer(this, jwt, rootDigest, rootDigest2)
def shutDownDataInitializer() = dataInitializer.shutdownMaterializerAndActorSystem()
implicit class StringExtensions(s: String) {
def takeRightWhile(p: (Char) => Boolean): String = s.takeRight(s.reverseIterator.takeWhile(p).length)
}
def createCassandraRackProperties(hosts: GenSeq[String] = ips.par) {
hosts.zipWithIndex.foreach { ip =>
val content = s"dc=DC1\\nrack=RAC${ip._2 + 1}"
command(s"""echo "$content" > ${instDirs.globalLocation}/cm-well/conf/cas/cassandra-rackdc.properties""",
ip._1,
false)
for (i <- 2 to dataDirs.casDataDirs.size)
command(s"""echo "$content" > ${instDirs.globalLocation}/cm-well/conf/cas$i/cassandra-rackdc.properties""",
ip._1,
false)
}
}
def createUser(user: String = "u", pass: String = "said2000", hosts: GenSeq[String] = ips.par, sudoer: Credentials) {
command(s"sudo useradd $user", hosts, true, Some(sudoer))
command(s"echo '$user:$$USERPASS' | sudo chpasswd", hosts, true, Some(sudoer), Map("USERPASS" -> pass))
}
def sudoComm(com: String) = s"""sudo bash -c \\"\\"\\"${com}\\"\\"\\""""
def elasticsearchStatus(host: String) = {
val r = command("curl -sX GET http://" + ips(0) + esHealthAddress, host, false)
r match {
case Success(v) =>
Try(JSON.parseFull(v.trim).get.asInstanceOf[Map[String, Any]]("status").toString.trim.split('.')(0))
case Failure(e) => Failure(e)
}
}
def command(com: String, hosts: GenSeq[String], sudo: Boolean): GenSeq[Try[String]] = {
command(com, hosts, sudo, None)
}
def command(com: String, hosts: GenSeq[String], sudo: Boolean, sudoer: Option[Credentials]): GenSeq[Try[String]] = {
hosts.map { host =>
command(com, host, sudo, sudoer)
}
}
def command(com: String,
hosts: GenSeq[String],
sudo: Boolean,
sudoer: Option[Credentials],
variables: Map[String, String]): GenSeq[Try[String]] = {
hosts.map(host => command(com, host, sudo, sudoer, variables))
}
def command(com: String, host: String, sudo: Boolean): Try[String] = {
command(com, host, sudo, None)
}
def command(com: String, host: String, sudo: Boolean, sudoer: Option[Credentials]): Try[String] = {
command(com, host, sudo, sudoer, Map[String, String]())
}
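  // The variables map below is delivered over stdin rather than embedded in the remote
  // command string: each entry becomes a remote `read VAR;` and its value is piped in
  // through `echo -e -n`, so values such as the sudoer password (see the USERPASS usage
  // in createUser above) are read by the remote shell instead of appearing in the ssh
  // command line.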
def command(com: String,
host: String,
sudo: Boolean,
sudoer: Option[Credentials],
variables: Map[String, String]): Try[String] = {
if (sudo && isSu && sudoer.isEmpty)
throw new Exception(s"Sudoer credentials must be available in order to use sudo")
if (!ips.contains(host) && host != haProxy.map(x => x.host).getOrElse(""))
throw new Exception(s"The host $host is not part of this cluster")
    val (readVarsLine, varValues) = variables.foldLeft(("", "")) {
      case ((readVarsStr, varValuesForEcho), (varName, value)) =>
        (s"$readVarsStr read $varName;", s"$varValuesForEcho$value\\\\n")
    }
val (commandLine, process) = if (sudo && isSu) {
// scalastyle:off
//old version that get stuck sometimes - val command = s"""ssh -o StrictHostKeyChecking=no ${sudoer.get.name}@$host bash -c $$'{ export PATH=$path; read PASS; ./sshpass -p $$PASS bash -c "${escapedCommand(com)}"; }'"""
val cmd = s"""ssh -o StrictHostKeyChecking=no -o LogLevel=ERROR ${sudoer.get.name}@$host export PATH=$path;$readVarsLine read PASS; sshpass -p $$PASS bash -c "${escapedCommand(com)}""""
// scalastyle:on
(cmd, s"echo -e -n $varValues${sudoer.get.pass}\\\\n" #| cmd)
} else {
if (variables.nonEmpty) {
val cmd =
s"""ssh -o StrictHostKeyChecking=no -o LogLevel=ERROR $user@$host export PATH=$path;$readVarsLine bash -c "${escapedCommand(
com
)}""""
(cmd, s"echo -e -n $varValues" #| cmd)
} else {
val cmd =
Seq("ssh", "-o", "StrictHostKeyChecking=no", "-o", "LogLevel=ERROR", s"$user@$host", s"PATH=$path $com")
(cmd.mkString(" "), Process(cmd))
}
}
// scalastyle:off
if (verbose) println("command: " + commandLine)
// scalastyle:on
Try(process.!!)
}
private def escapedCommand(cmd: String) =
cmd.replace("\\"", "\\\\\\"") // old version for $'..' bash string: cmd.replace("\\"", "\\\\\\\\\\"").replace("'", "\\\\'")
def command(com: String, sudo: Boolean = false): Try[String] = {
if (sudo && isSu)
Try(sudoComm(com).!!)
else {
val seq = Seq("bash", "-c", com)
// scalastyle:off
if (verbose) println("command: " + seq.mkString(" "))
// scalastyle:on
Try(seq.!!)
}
}
def rsync(from: String, to: String, hosts: GenSeq[String], sudo: Boolean = false): GenSeq[Try[String]] = {
val h = hosts.map(host => if (host.indexOf("@") == -1) s"${user}@${host}" else host)
h.map { host =>
_rsync(from, to, host, sudo = sudo)
}
}
def _rsync(from: String, to: String, host: String, tries: Int = 10, sudo: Boolean): Try[String] = {
val seq = Seq("rsync", "-Paz", "--delete", from, host + ":" + to)
// scalastyle:off
if (verbose) println("command: " + seq.mkString(" "))
// scalastyle:on
val res = Try(seq.!!)
res match {
case Success(r) => res
case Failure(err) => if (tries == 0) res else _rsync(from, to, host, tries - 1, sudo)
}
}
def removeDataDirs: Unit = removeDataDirs()
def removeDataDirs(i: GenSeq[String] = ips.par) {
command(s"rm -rf ${instDirs.intallationDir}", i, false)
dataDirs.casDataDirs.foreach { cas =>
command(s"rm -rf ${cas}", i, false)
}
dataDirs.casCommitLogDirs.foreach { ccl =>
command(s"rm -rf ${ccl}", i, false)
}
dataDirs.esDataDirs.foreach { es =>
command(s"rm -rf ${es}", i, false)
}
command(s"rm -rf ${dataDirs.logsDataDir}", i, false)
}
def createDataDirs(): Unit = createDataDirs(ips.par)
def createDataDirs(hosts: GenSeq[String]) {
info("creating data directories")
info(" creating installation directory")
command(s"mkdir -p ${instDirs.intallationDir}/", hosts, false)
deployment.componentProps.collect { case cp: DataComponent => cp }.foreach {
_.createDataDirectories(hosts)
}
info(" creating log data directory")
command(s"mkdir -p ${dataDirs.logsDataDir}", hosts, false)
info("finished creating data directories")
}
def deployComponents(hosts: GenSeq[String] = ips.par) {
deployment.componentProps.foreach(_.deployComponent(hosts = hosts))
}
def genResources(hosts: GenSeq[String] = ips) {
deployment.createResources(mkScripts(hosts))
}
def genEsResources(hosts: GenSeq[String]) {
deployment.createResources(mkScripts(hosts).filter(_.isInstanceOf[ElasticsearchConf]))
}
def genCtrlResources(hosts: GenSeq[String]) {
deployment.createResources(mkScripts(hosts).filter(_.isInstanceOf[CtrlConf]))
}
def deployApplication: Unit = deployApplication()
def deployApplication(hosts: GenSeq[String] = ips.par) {
syncLib(hosts)
info("deploying application")
info(" creating application directories")
//command(s"mkdir -p ${instDirs.intallationDir}/", hosts, false)
// scalastyle:off
command(s"mkdir ${instDirs.intallationDir}/app ${instDirs.intallationDir}/conf ${instDirs.intallationDir}/data ${instDirs.intallationDir}/bin", hosts, false)
command(s"mkdir ${instDirs.intallationDir}/app/batch ${instDirs.intallationDir}/app/bg ${instDirs.intallationDir}/app/ctrl ${instDirs.intallationDir}/app/dc ${instDirs.intallationDir}/app/cas ${instDirs.intallationDir}/app/es ${instDirs.intallationDir}/app/ws ${instDirs.intallationDir}/app/scripts ${instDirs.intallationDir}/app/tools", hosts, false)
// scalastyle:on
command(s"ln -s ${dataDirs.logsDataDir} ${instDirs.intallationDir}/log", hosts, false)
info(" deploying components")
deployComponents(hosts)
//info(" extracting components")
//extractComponents(hosts)
info(" creating symbolic links")
deployment.componentProps.collect { case cp: DataComponent => cp }.foreach {
_.linkDataDirectories(hosts)
}
deployment.componentProps.collect { case cp: LoggingComponent => cp }.foreach {
_.createLoggingDirectories(hosts)
}
deployment.componentProps.collect { case cp: ConfigurableComponent => cp }.foreach {
_.createConigurationsDirectoires(hosts)
}
rsync("./scripts/", s"${instDirs.intallationDir}/app/scripts/", hosts)
info(" creating links in app directory")
createAppLinks(hosts)
rsync(s"./components/mx4j-tools-3.0.1.jar", s"${instDirs.intallationDir}/app/cas/cur/lib/", hosts)
info(" creating scripts")
genResources(hosts)
info(" deploying plugins")
rsyncPlugins(hosts)
info(" linking libs")
linkLibs(hosts)
info("finished deploying application")
}
private def createAppLinks(hosts: GenSeq[String]) = {
// scalastyle:off
command(s"test -L ${instDirs.globalLocation}/cm-well/app/batch/logs || ln -s ${instDirs.globalLocation}/cm-well/log/batch/ ${instDirs.globalLocation}/cm-well/app/batch/logs", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/bg/logs || ln -s ${instDirs.globalLocation}/cm-well/log/bg/ ${instDirs.globalLocation}/cm-well/app/bg/logs", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/ws/logs || ln -s ${instDirs.globalLocation}/cm-well/log/ws/ ${instDirs.globalLocation}/cm-well/app/ws/logs", hosts, false)
command(s"mkdir -p ${instDirs.globalLocation}/cm-well/log/cw/", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/ws/cw-logs || ln -s ${instDirs.globalLocation}/cm-well/log/cw/ ${instDirs.globalLocation}/cm-well/app/ws/cw-logs", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/ctrl/logs || ln -s ${instDirs.globalLocation}/cm-well/log/ctrl/ ${instDirs.globalLocation}/cm-well/app/ctrl/logs", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/dc/logs || ln -s ${instDirs.globalLocation}/cm-well/log/dc/ ${instDirs.globalLocation}/cm-well/app/dc/logs", hosts, false)
command(s"mkdir -p ${instDirs.globalLocation}/cm-well/conf/batch/", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/batch/conf || ln -s ${instDirs.globalLocation}/cm-well/conf/batch/ ${instDirs.globalLocation}/cm-well/app/batch/conf", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/bg/conf || ln -s ${instDirs.globalLocation}/cm-well/conf/bg/ ${instDirs.globalLocation}/cm-well/app/bg/conf", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/ws/conf || ln -s ${instDirs.globalLocation}/cm-well/conf/ws/ ${instDirs.globalLocation}/cm-well/app/ws/conf", hosts, false)
command(s"mkdir -p ${instDirs.globalLocation}/cm-well/conf/cw/", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/ws/cw-conf || ln -s ${instDirs.globalLocation}/cm-well/conf/cw/ ${instDirs.globalLocation}/cm-well/app/ws/cw-conf", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/ctrl/conf || ln -s ${instDirs.globalLocation}/cm-well/conf/ctrl/ ${instDirs.globalLocation}/cm-well/app/ctrl/conf", hosts, false)
command(s"mkdir -p ${instDirs.globalLocation}/cm-well/conf/dc/", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/dc/conf || ln -s ${instDirs.globalLocation}/cm-well/conf/dc/ ${instDirs.globalLocation}/cm-well/app/dc/conf", hosts, false)
// scalastyle:on
}
def mkScripts(ips: GenSeq[String] = ips): GenSeq[ComponentConf] = {
null
}
def redeploy: Unit = redeploy()
def redeploy(hosts: GenSeq[String] = ips.par) {
checkProduction
stop(false, hosts)
clearApp(hosts)
deployApplication(hosts)
}
def updateWebService: Unit = updateWebService()
def updateWebService(hosts: GenSeq[String] = ips) {
hosts.foreach { h =>
command(s"cd ${instDirs.globalLocation}/cm-well/app/ws; mkdir tmp", List(h), false)
rsync("./components/cmwell-ws_2.10-1.0.1-SNAPSHOT-dist.zip",
s"${instDirs.globalLocation}/cm-well/app/ws/tmp/cmwell-ws_2.10-1.0.1-SNAPSHOT-dist.zip",
List(h))
command(s"cd ${instDirs.globalLocation}/cm-well/app/ws/tmp; unzip cmwell-ws_2.10-1.0.1-SNAPSHOT-dist.zip",
hosts,
false)
stopWebservice(List(h))
command(s"rm -rf ${instDirs.intallationDir}/cm-well/app/ws/cmwell-ws-1.0.1-SNAPSHOT", List(h), false)
command(s"rm ${instDirs.globalLocation}/cm-well/app/ws/RUNNING_PID", List(h), false)
command(
s"mv ${instDirs.globalLocation}/cm-well/app/ws/tmp/cmwell-ws-1.0.1-SNAPSHOT ${instDirs.globalLocation}/cm-well/app/ws/cmwell-ws-1.0.1-SNAPSHOT",
List(h),
false
)
startWebservice(List(h))
}
}
def removeCmwellSymLink(): Unit = removeCmwellSymLink(ips.par)
def removeCmwellSymLink(hosts: GenSeq[String]) {
command(s"unlink ${instDirs.globalLocation}/cm-well 2> /dev/null", hosts, false)
}
def createCmwellSymLink(sudoer: Option[Credentials]): Unit = createCmwellSymLink(ips.par, sudoer)
def createCmwellSymLink(hosts: GenSeq[String], sudoer: Option[Credentials] = None) {
removeCmwellSymLink(hosts)
command(
s"sudo ln -s ${instDirs.intallationDir} ${instDirs.globalLocation}/cm-well; sudo chown -h $user:$user ${instDirs.globalLocation}/cm-well",
hosts,
true,
sudoer
)
}
def registerCtrlService(hosts: GenSeq[String], sudoer: Credentials) {
if (ctrlService) {
//remove the old ctrl (the link one) - if exists
command("sudo rm -f /etc/init.d/ctrl", hosts, true, Some(sudoer))
command(s"mkdir -p ${instDirs.globalLocation}/cm-well/conf/ctrl", hosts, false)
createFile(s"${instDirs.globalLocation}/cm-well/conf/ctrl/ctrl",
Source.fromFile("scripts/templates/ctrl").mkString.replace("{{user}}", user),
hosts)
command(s"chmod +x ${instDirs.globalLocation}/cm-well/conf/ctrl/ctrl", hosts, false)
      // the string passes through echo -e twice: the first pass collapses \\\\ into \\ and the second turns the remaining \\n into an actual newline
val cmwellRunner = Source.fromFile("scripts/templates/cmwell-runner").mkString.replace("\\n", "\\\\\\\\n")
createFile("/etc/init.d/cmwell-runner", cmwellRunner, hosts, true, Some(sudoer))
command("sudo chmod +x /etc/init.d/cmwell-runner", hosts, true, Some(sudoer))
hosts.foreach { host =>
getOs(host) match {
case Ubuntu =>
command("sudo update-rc.d cmwell-runner defaults", host, true, Some(sudoer))
case Oracle =>
command("sudo chkconfig --add cmwell-runner", host, true, Some(sudoer))
command("sudo chkconfig cmwell-runner on", host, true, Some(sudoer))
}
}
command("sudo service cmwell-runner start", hosts, true, Some(sudoer))
}
}
def disks: GenSet[String] = {
val DataDirs(casDataDirs, casCommitLogDirs, esDataDirs, kafkaDataDirs, zookeeperDataDir, logsDataDir) = dataDirs
val dirs = casDataDirs ++ casCommitLogDirs ++ esDataDirs ++ kafkaDataDirs ++ Seq(zookeeperDataDir,
logsDataDir,
instDirs.intallationDir)
dirs.map(dir => dir.substring(0, dir.lastIndexOf("/"))).toSet
}
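  // For every path this also yields its ancestor directories, e.g. "/mnt/d1/cas"
  // contributes "/mnt", "/mnt/d1" and "/mnt/d1/cas"; the whole chain is later
  // chowned and made executable in prepareMachines.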
def disksWithAncestors(disks: GenSet[String]): GenSet[String] = {
def addSlash(p: String) = p match {
case "" => "";
case "/" => "/";
case _ => p + "/"
}
disks.flatten { disk =>
val splitted = disk.split("/").map(p => if (p.isEmpty) "/" else p)
val ancestors = splitted.scan("")((p, dir) => addSlash(p) + dir)
ancestors.filterNot(p => p.isEmpty || p == "/")
}
}
def unprepareMachines(): Unit = unprepareMachines(ips.par)
def unprepareMachines(hosts: GenSeq[String]) {
purge(hosts)
removeDataDirs(hosts)
removeCmwellSymLink(hosts)
}
def changeOwnerAndAddExcutePermission(hosts: GenSeq[String],
dirs: GenSeq[String],
user: String,
sudoer: Credentials): Unit = {
dirs.foreach(dir => command(s"sudo chmod +x $dir; sudo chown $user:$user $dir", hosts, true, Some(sudoer)))
}
def prepareMachines(): Unit = prepareMachines(ips.par, "", "", "")
def prepareMachines(hosts: String*): Unit = prepareMachines(hosts, "", "", "")
def prepareMachines(hosts: GenSeq[String], sudoerName: String, sudoerPass: String, userPass: String) {
val sudoerNameFinal: String =
if (sudoerName != "") sudoerName else scala.io.StdIn.readLine("Please enter sudoer username\\n")
val sudoerPassword: String =
if (sudoerPass != "") sudoerPass else scala.io.StdIn.readLine(s"Please enter $sudoerNameFinal password\\n")
// scalastyle:off
println(s"Gaining trust of sudoer account: $sudoerNameFinal")
// scalastyle:on
gainTrust(sudoerNameFinal, sudoerPassword, hosts)
sudoerCredentials = Some(Credentials(sudoerNameFinal, sudoerPassword))
val sudoer = sudoerCredentials.get
copySshpass(hosts, sudoer)
// scalastyle:off
println("We will now create a local user 'u' for this cluster")
// scalastyle:on
val pass = if (userPass != "") userPass else scala.io.StdIn.readLine(s"Please enter $user password\\n")
createUser(user, pass, hosts, sudoer)
// scalastyle:off
println(s"Gaining trust of the account $user")
// scalastyle:on
gainTrust(user, pass, hosts)
refreshUserState(user, Some(sudoer), hosts)
changeOwnerAndAddExcutePermission(hosts, disksWithAncestors(disks).toSeq, user, sudoer)
createDataDirs(hosts)
createCmwellSymLink(hosts, Some(sudoer))
registerCtrlService(hosts, sudoer)
finishPrepareMachines(hosts, sudoer)
}
protected def finishPrepareMachines(hosts: GenSeq[String], sudoer: Credentials) = {
// deleteSshpass(hosts, sudoer)
info("Machine preparation was done. Please look at the console output to see if there were any errors.")
}
private def copySshpass(hosts: GenSeq[String], sudoer: Credentials): Unit = {
//only copy sshpass if it's an internal one
if (UtilCommands.linuxSshpass == "bin/utils/sshpass") {
hosts.foreach(
host =>
Seq("rsync",
"-z",
"-e",
"ssh -o StrictHostKeyChecking=no",
UtilCommands.linuxSshpass,
s"${sudoer.name}@$host:~/bin/") !!
)
}
}
private def deleteSshpass(hosts: GenSeq[String], sudoer: Credentials): Unit = {
command("sudo rm sshpass", hosts, true, Some(sudoer))
}
def prepareMachinesNonInteractive: Unit = prepareMachinesNonInteractive()
def prepareMachinesNonInteractive(sudoerName: String = "mySudoer",
sudoerPass: String = "said2000",
uPass: String = "said2000",
hosts: GenSeq[String] = ips.par) {
gainTrust(sudoerName, sudoerPass, hosts)
val sudoer = Credentials(sudoerName, sudoerPass)
sudoerCredentials = Some(sudoer)
copySshpass(hosts, sudoer)
createUser(pass = uPass, hosts = hosts, sudoer = sudoer)
gainTrust("u", uPass, hosts)
refreshUserState("u", Some(sudoer))
changeOwnerAndAddExcutePermission(hosts, disksWithAncestors(disks).toSeq, user, sudoer)
createDataDirs()
createCmwellSymLink(Some(sudoer))
registerCtrlService(hosts, sudoer)
// deleteSshpass(hosts, sudoer)
}
def deploy: Unit = deploy()
def deploy(hosts: GenSeq[String] = ips.par) {
checkProduction
deployApplication(hosts)
}
def getNewHostInstance(ipms: IpMappings): Host
def cassandraNetstats = {
// scalastyle:off
println(command(s"JAVA_HOME=${instDirs.globalLocation}/cm-well/app/java/bin $nodeToolPath netstats 2> /dev/null", ips(0), false).get)
// scalastyle:on
}
def removeNode(host: String): Host = {
checkProduction
connectToGrid
if (CtrlClient.currentHost == host) CtrlClient.init((ips.toSet - host).head)
purge(Seq(host))
Host.ctrl.waitForHealth
Thread.sleep(20000)
info("Removing node from Grid")
Host.ctrl.removeNode(host)
ipMappings.filePath match {
case Some(fp) =>
IpMappingController.writeMapping(ipMappings.remove(List(host)), fp)
IpMappingController.writeMapping(ipMappings.remove(ips.diff(List(host))), s"${fp}_$host")
case None => // Do nothing.
}
getNewHostInstance(ipMappings.remove(List(host)))
}
def addNodesSH(path: String) {
addNodes(path)
sys.exit(0)
}
def removeNodeSH(ip: String) {
removeNode(ip)
sys.exit(0)
}
def addNodes(path: String): Host = {
addNodes(IpMappingController.readMapping(path))
}
def addNodes(ipms: IpMappings, sudoerName: String = "", sudoerPass: String = "", userPass: String = ""): Host = {
connectToGrid
val activeNodes = Try(Await.result(Host.ctrl.getActiveNodes, 10 seconds)).getOrElse(ActiveNodes(Set.empty[String]))
val addedInstances = getNewHostInstance(ipms)
    //Due to Dudi's request, prepareMachines isn't run by default and must be run manually (to avoid requiring passwords here)
//addedInstances.prepareMachines(addedInstances.ips.par, sudoerName = sudoerName, sudoerPass = sudoerPass, userPass = userPass)
addedInstances.purge()
val hostsToRemove = Set.empty[String] //ipMappings.m.map(_.ip).toSet -- activeNodes.an
val withoutDownNodesMapping = ipMappings.remove(hostsToRemove.toList)
val combinedMappings = withoutDownNodesMapping.combine(ipms)
val combinedInstances = getNewHostInstance(combinedMappings)
combinedInstances.deploy(addedInstances.ips)
combinedInstances.startCtrl(addedInstances.ips)
Thread.sleep(20000)
ipms.getIps.foreach(Host.ctrl.addNode)
combinedInstances.startDcForced(addedInstances.ips)
// combinedInstances.startCassandra(addedInstances.ips)
// combinedInstances.startElasticsearch(addedInstances.ips)
//
//
// Retry{
// try{
// combinedInstances.CassandraLock().waitForModule(combinedInstances.ips(0), combinedInstances.getSize)
// } catch {
// case t : Throwable =>
// info("Trying to reinit Cassandra")
// combinedInstances.startCassandra(addedInstances.ips)
// throw t
// }
// }
//
// Retry{
// try{
// combinedInstances.ElasticsearchLock().waitForModule(combinedInstances.ips(0), combinedInstances.getSize)
// } catch {
// case t : Throwable =>
// info("Trying to reinit Elasticsearch")
// combinedInstances.startElasticsearch(addedInstances.ips)
// throw t
// }
// }
//
// combinedInstances.startCtrl(addedInstances.ips)
// combinedInstances.startBatch(addedInstances.ips)
// combinedInstances.startWebservice(addedInstances.ips)
// combinedInstances.startCW(addedInstances.ips)
// combinedInstances.startDc(addedInstances.ips)
//
// update the ip mappings file.
ipMappings.filePath match {
case Some(fp) => IpMappingController.writeMapping(combinedMappings, fp)
case None => // Do nothing.
}
//combinedInstances.dataInitializer.updateKnownHosts
combinedInstances
}
def killProcess(name: String, flag: String, hosts: GenSeq[String] = ips.par, tries: Int = 5) {
if (tries > 0) {
command(s"ps aux | grep -v grep | grep $name | awk '{print $$2}' | xargs -I zzz kill $flag zzz 2> /dev/null",
hosts,
false)
val died = command(s"ps aux | grep java | grep -v grep | grep $name | wc -l ", hosts, false)
.map(s => s.get.trim.toInt)
.filterNot(_ == 0)
.length == 0
if (!died) {
Thread.sleep(500)
killProcess(name, flag, hosts, tries - 1)
}
} else {
command(s"ps aux | grep java | grep " + name + " | awk '{print $2}' | xargs -I zzz kill -9 zzz 2> /dev/null",
hosts,
false)
}
}
// todo: kill with -9 if it didn't work.
// todo: remove es with its command.
def stop: Unit = stop(false, ips.par)
def stop(hosts: String*): Unit = stop(false, hosts.par)
def stop(force: Boolean, hosts: GenSeq[String]) {
checkProduction
val tries = if (force) 0 else 5
stopWebservice(hosts, tries)
stopBg(hosts, tries)
stopElasticsearch(hosts, tries)
stopCassandra(hosts, tries)
stopCtrl(hosts, tries)
stopCW(hosts, tries)
stopDc(hosts, tries)
stopKafka(hosts, tries)
stopZookeeper(hosts, tries)
stopLogstash(hosts, tries)
stopKibana(hosts, tries)
}
def clearData: Unit = clearData()
def clearData(hosts: GenSeq[String] = ips.par) {
checkProduction
dataDirs.casDataDirs.foreach { cas =>
command(s"rm -rf ${cas}/*", hosts, false)
}
dataDirs.casCommitLogDirs.foreach { ccl =>
command(s"rm -rf ${ccl}/*", hosts, false)
}
dataDirs.esDataDirs.foreach { es =>
command(s"rm -rf ${es}/*", hosts, false)
}
dataDirs.kafkaDataDirs.foreach { kafka =>
command(s"rm -rf $kafka/*", hosts, false)
}
command(s"rm -rf ${dataDirs.zookeeperDataDir}/*", hosts, false)
command(s"rm -rf ${dataDirs.logsDataDir}/*", hosts, false)
}
def clearApp: Unit = clearApp()
def clearApp(hosts: GenSeq[String] = ips.par) {
checkProduction
command(s"rm -rf ${instDirs.intallationDir}/*", hosts, false)
}
def purge: Unit = purge()
def purge(hosts: GenSeq[String] = ips) {
checkProduction
info("purging cm-well")
info(" stopping processes")
stop(true, hosts)
info(" clearing application data")
clearApp(hosts)
info(" clearing data")
clearData(hosts)
info("finished purging cm-well")
}
def injectMetaData: Unit = injectMetaData(ips(0))
def injectMetaData(host: String) {
dataInitializer.uploadMetaData()
dataInitializer.uploadNameSpaces()
}
def injectSampleData = {
dataInitializer.uploadSampleData()
}
def casHealth: Try[String] = casHealth()
def casHealth(hosts: GenSeq[String] = ips.par): Try[String] = {
command(s"JAVA_HOME=${instDirs.globalLocation}/cm-well/app/java/bin" + nodeToolPath + " status", hosts(0), false)
}
def esHealth: Try[String] = {
command("curl -sX GET http://" + pingAddress + esHealthAddress, ips(0), false)
}
def stopBg: Unit = stopBg(ips.par)
def stopBg(hosts: String*): Unit = stopBg(hosts.par)
def stopBg(hosts: GenSeq[String], tries: Int = 5) {
checkProduction
killProcess("cmwell.bg.Runner", "", hosts, tries)
}
def stopWebservice: Unit = stopWebservice(ips.par)
def stopWebservice(hosts: String*): Unit = stopWebservice(hosts.par)
def stopWebservice(hosts: GenSeq[String], tries: Int = 5) {
checkProduction
killProcess("psId=Webserver", "", hosts, tries)
}
def stopCW: Unit = stopCW(ips.par)
def stopCW(hosts: String*): Unit = stopCW(hosts.par)
def stopCW(hosts: GenSeq[String], tries: Int = 5) {
checkProduction
killProcess("crashableworker", "", hosts, tries)
}
def stopDc: Unit = stopDc(ips.par)
def stopDc(hosts: String*): Unit = stopDc(hosts.par)
def stopDc(hosts: GenSeq[String], tries: Int = 5) = {
checkProduction
killProcess("app/dc", "", hosts, tries)
}
def stopCassandra: Unit = stopCassandra(ips.par)
def stopCassandra(hosts: String*): Unit = stopCassandra(hosts.par)
def stopCassandra(hosts: GenSeq[String], tries: Int = 5) {
checkProduction
killProcess("CassandraDaemon", "", hosts, tries)
}
def esSyncedFlush(host: String, port: Int = 9200): Unit = {
command(s"curl -sX POST 'http://$host:$port/_all/_flush/synced'")
}
def stopElasticsearch: Unit = stopElasticsearch(ips.par)
def stopElasticsearch(hosts: String*): Unit = stopElasticsearch(hosts.par)
def stopElasticsearch(hosts: GenSeq[String], tries: Int = 5) {
checkProduction
esSyncedFlush(hosts(0))
killProcess("Elasticsearch", "", hosts, tries)
}
def startBg: Unit = startBg(ips.par)
def startBg(hosts: String*): Unit = startBg(hosts.par)
def startBg(hosts: GenSeq[String]) {
checkProduction
command(s"cd ${instDirs.globalLocation}/cm-well/app/bg; ${startScript("./start.sh")}", hosts, false)
}
def startWebservice: Unit = startWebservice(ips.par)
def startWebservice(hosts: String*): Unit = startWebservice(hosts.par)
def startWebservice(hosts: GenSeq[String]) {
checkProduction
command(s"cd ${instDirs.globalLocation}/cm-well/app/ws/; ${startScript("./start.sh")}", hosts, false)
}
def startCW: Unit = startCW(ips.par)
def startCW(hosts: String*): Unit = startCW(hosts.par)
def startCW(hosts: GenSeq[String]) {
checkProduction
command(s"cd ${instDirs.globalLocation}/cm-well/app/ws; ${startScript("./cw-start.sh")}", hosts, false)
}
def startDc: Unit = startDc(ips.par)
def startDc(hosts: String*): Unit = startDc(hosts.par)
def startDc(hosts: GenSeq[String]): Unit = {
checkProduction
command(s"cd ${instDirs.globalLocation}/cm-well/app/dc; ${startScript("./start.sh")}", hosts, false)
}
def startDcForced(hosts: GenSeq[String]): Unit = {
checkProduction
command(s"cd ${instDirs.globalLocation}/cm-well/app/dc; HAL=9000 FORCE=MAJOUR ./start.sh", hosts, false)
}
//def startScript(script : String) = s"""bash -c "starter '$script'" > /dev/null 2> /dev/null & """
//def startScript(script : String) = s"""HAL=9000 $script"""
def startScript(script: String) =
s"""HAL=9000 ${if (deb) "CMWELL_DEBUG=true" else ""} $script"""
def start: Unit = start(ips.par)
def start(hosts: String*): Unit = start(hosts.par)
def start(hosts: GenSeq[String]) {
checkProduction
startCassandra(hosts)
startElasticsearch(hosts)
Try(CassandraLock().waitForModule(hosts(0), size))
Try(ElasticsearchLock().waitForModule(hosts(0), size))
startZookeeper
startKafka(hosts)
startCtrl(hosts)
startBg(hosts)
startCW(hosts)
startWebservice(hosts)
startDc(hosts)
if (withElk) {
startLogstash(hosts)
startKibana(hosts)
}
}
def startCtrl: Unit = startCtrl(ips)
def startCtrl(hosts: String*): Unit = startCtrl(hosts.par)
def startCtrl(hosts: GenSeq[String]) = {
checkProduction
command(s"cd ${instDirs.globalLocation}/cm-well/app/ctrl; ${startScript("./start.sh")}", hosts, false)
}
def stopCtrl: Unit = stopCtrl(ips.par)
def stopCtrl(hosts: String*): Unit = stopCtrl(hosts.par)
def stopCtrl(hosts: GenSeq[String], tries: Int = 5) {
checkProduction
killProcess("CtrlServer", "", hosts, tries)
}
def createManager: Unit = createManager()
def createManager(machineName: String = ips(0), path: String = "~/cmwell/") {
rsync("./", path, List(machineName))
}
def readTime(targ: String = "meta/ns/oa") {}
def init: Unit = init()
def init(hosts: GenSeq[String] = ips.par) {
checkProduction
info("starting controller")
startCtrl(hosts)
info("initializing cm-well")
info(" initializing cassandra")
initCassandra(hosts)
info(" initializing elasticsearch")
initElasticsearch(hosts)
info(" waiting for cassandra and elasticsearch")
Retry {
try {
CassandraLock().waitForModule(hosts(0), size)
} catch {
case t: Throwable =>
info("Trying to reinit Cassandra")
initCassandra(hosts)
throw t
}
}
Retry {
try {
ElasticsearchLock().waitForModule(hosts(0), size)
} catch {
case t: Throwable =>
info("Trying to reinit Elasticsearch")
initElasticsearch(hosts)
throw t
}
}
info(" starting zookeeper")
startZookeeper
info(" starting kafka")
startKafka
info(" inserting schemas")
initSchemes(hosts)
// wait until all the schemas are written.
Thread.sleep(10000)
info(" starting bg")
startBg(hosts)
info(" starting cw")
startCW(hosts)
info(" starting web service")
startWebservice(hosts)
uploadInitialContent(hosts(0))
info(" starting dc controller")
startDc(hosts)
info("finished initializing cm-well")
if (withElk) {
startLogstash(hosts)
startKibana(hosts)
}
}
def uploadInitialContent(host: String = ips(0)): Unit = {
checkProduction
Try(WebServiceLock().waitForModule(host, 1))
info(" waiting for ws...")
dataInitializer.waitForWs()
info(" inserting meta data")
injectMetaData(host)
info(" uploading SPAs to meta/app")
dataInitializer.uploadDirectory("data", s"http://$host:9000/meta/app/")
info(" uploading docs")
dataInitializer.uploadDirectory("docs", s"http://$host:9000/meta/docs/")
info(" uploading basic userInfotons (if not exist)")
dataInitializer.uploadBasicUserInfotons(host)
info(" updating version history")
dataInitializer.logVersionUpgrade(host)
}
def initCassandra: Unit = initCassandra()
def initCassandra(hosts: GenSeq[String] = ips.par)
def initElasticsearch: Unit = initElasticsearch()
def initElasticsearch(hosts: GenSeq[String] = ips.par)
def initSchemes: Unit = initSchemes()
def initSchemes(hosts: GenSeq[String] = ips.par) {
val aliases =
"""{
"actions" : [
{ "add" : { "index" : "cmwell_current_0", "alias" : "cmwell_current" } },
{ "add" : { "index" : "cmwell_history_0", "alias" : "cmwell_history" } },
{ "add" : { "index" : "cmwell_current_0", "alias" : "cmwell_current_latest" } },
{ "add" : { "index" : "cmwell_history_0", "alias" : "cmwell_history_latest" } },
{ "add" : { "index" : "cm_well_p0_0", "alias" : "cm_well_all" } }
]
}""".replace("\\n", "")
// scalastyle:off
command(s"cd ${instDirs.globalLocation}/cm-well/app/cas/cur; sh bin/cqlsh ${pingAddress} -f ${instDirs.globalLocation}/cm-well/conf/cas/cassandra-cql-init-cluster", hosts(0), false)
command(s"cd ${instDirs.globalLocation}/cm-well/app/cas/cur; sh bin/cqlsh ${pingAddress} -f ${instDirs.globalLocation}/cm-well/conf/cas/cassandra-cql-init-cluster-new", hosts(0), false)
command(s"cd ${instDirs.globalLocation}/cm-well/app/cas/cur; sh bin/cqlsh ${pingAddress} -f ${instDirs.globalLocation}/cm-well/conf/cas/zstore-cql-init-cluster", hosts(0), false)
command(s"""curl -s -X POST http://${pingAddress}:$esRegPort/_template/cmwell_indices_template -H "Content-Type: application/json" --data-ascii @${instDirs.globalLocation}/cm-well/conf/es/mapping.json""", hosts(0), false)
command(s"""curl -s -X POST http://${pingAddress}:$esRegPort/_template/cmwell_index_template -H "Content-Type: application/json" --data-ascii @${instDirs.globalLocation}/cm-well/conf/es/indices_template_new.json""", hosts(0), false)
command(s"curl -s -X POST http://${pingAddress}:$esRegPort/cmwell_current_0/;curl -s -X POST http://${pingAddress}:$esRegPort/cmwell_history_0/", hosts(0), false)
// scalastyle:on
command(s"curl -s -X POST http://${pingAddress}:$esRegPort/cm_well_p0_0/", hosts(0), false)
// command(s"curl -s -X POST http://${pingAddress}:$esRegPort/cm_well_0/", hosts(0), false)
command(s"""curl -s -X POST http://${pingAddress}:$esRegPort/_aliases -H "Content-Type: application/json" --data-ascii '${aliases}'""", hosts(0), false)
// create kafka topics
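    // Topic creation below is retried a few times with 5-second sleeps (the loops share one small
    // retry budget), presumably to tolerate Kafka/ZooKeeper not being fully ready right after startup.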
val replicationFactor = math.min(hosts.size, 3)
val javaHomeLocation = s"${instDirs.globalLocation}/cm-well/app/java"
val exportCommand = if (Files.exists(Paths.get(javaHomeLocation)))
s"export PATH=${instDirs.globalLocation}/cm-well/app/java/bin:$$PATH ; export JAVA_HOME=${javaHomeLocation} ;"
else ""
// scalastyle:off
val createTopicCommandPrefix = s"cd ${instDirs.globalLocation}/cm-well/app/kafka/cur; $exportCommand sh bin/kafka-topics.sh --create --zookeeper ${pingAddress}:2181 --replication-factor $replicationFactor --partitions ${hosts.size} --topic"
// scalastyle:on
var tryNum: Int = 1
var ret = command(s"$createTopicCommandPrefix persist_topic", hosts(0), false)
    while ((ret.isFailure || !ret.get.contains("Created topic")) && tryNum < 6) {
tryNum += 1
Thread.sleep(5000)
ret = command(s"$createTopicCommandPrefix persist_topic", hosts(0), false)
}
ret = command(s"$createTopicCommandPrefix persist_topic.priority", hosts(0), false)
    while ((ret.isFailure || !ret.get.contains("Created topic")) && tryNum < 6) {
tryNum += 1
Thread.sleep(5000)
ret = command(s"$createTopicCommandPrefix persist_topic.priority", hosts(0), false)
}
ret = command(s"$createTopicCommandPrefix index_topic", hosts(0), false)
    while ((ret.isFailure || !ret.get.contains("Created topic")) && tryNum < 6) {
tryNum += 1
Thread.sleep(5000)
ret = command(s"$createTopicCommandPrefix index_topic", hosts(0), false)
}
ret = command(s"$createTopicCommandPrefix index_topic.priority", hosts(0), false)
    while ((ret.isFailure || !ret.get.contains("Created topic")) && tryNum < 6) {
tryNum += 1
Thread.sleep(5000)
ret = command(s"$createTopicCommandPrefix index_topic.priority", hosts(0), false)
}
}
def avaiableHosts = {
ips.filter { ip =>
command(s"ping -c 1 $ip > /dev/null 2> /dev/null").isSuccess
}
}
def brokerId(host: String) = ips.indexOf(host)
def startZookeeper: Unit = {
checkProduction
command(s"cd ${instDirs.globalLocation}/cm-well/app/zookeeper; ${startScript("./start.sh")}",
avaiableHosts.take(3),
false)
}
def startZookeeper(host: String): Unit = {
command(s"cd ${instDirs.globalLocation}/cm-well/app/zookeeper; ${startScript("./start.sh")}", host, false)
}
def startZookeeper(hosts: GenSeq[String]): Unit = {
checkProduction
command(s"cd ${instDirs.globalLocation}/cm-well/app/zookeeper; ${startScript("./start.sh")}",
hosts.intersect(avaiableHosts),
false)
}
def stopZookeeper: Unit = stopZookeeper()
def stopZookeeper(hosts: GenSeq[String] = ips.par, tries: Int = 5): Unit = {
checkProduction
//if(withZookeeper)
killProcess("zookeeper", "", hosts, tries = tries)
}
def startKafka: Unit = startKafka()
def startKafka(hosts: GenSeq[String] = ips.par): Unit = {
checkProduction
command(s"cd ${instDirs.globalLocation}/cm-well/app/kafka; ${startScript("./start.sh")}", hosts, false)
}
def startKafka(host: String): Unit = {
checkProduction
command(s"cd ${instDirs.globalLocation}/cm-well/app/kafka; ${startScript("./start.sh")}", host, false)
}
def stopKafka: Unit = stopKafka()
def stopKafka(hosts: GenSeq[String] = ips.par, tries: Int = 5): Unit = {
checkProduction
//if(withKafka)
killProcess("kafka.Kafka", "", hosts, tries = tries)
}
def startElasticsearch: Unit = startElasticsearch(ips.par)
def startElasticsearch(hosts: String*): Unit = startElasticsearch(hosts.par)
def startElasticsearch(hosts: GenSeq[String]): Unit
def startCassandra: Unit = startCassandra(ips.par)
def startCassandra(hosts: String*): Unit = startCassandra(hosts.par)
def startCassandra(hosts: GenSeq[String])
def startKibana: Unit = startKibana()
def startKibana(hosts: GenSeq[String] = ips.par): Unit = {
checkProduction
command(s"cd ${instDirs.globalLocation}/cm-well/app/kibana; ${startScript("./start.sh")}", hosts, false)
}
def stopKibana: Unit = stopKibana()
def stopKibana(hosts: GenSeq[String] = ips.par, tries: Int = 5): Unit = {
checkProduction
killProcess("kibana", "", hosts, tries = tries)
}
def startLogstash: Unit = startLogstash()
def startLogstash(hosts: GenSeq[String] = ips.par): Unit = {
checkProduction
command(s"cd ${instDirs.globalLocation}/cm-well/app/logstash; ${startScript("./start.sh")}", hosts, false)
}
def stopLogstash: Unit = stopLogstash()
def stopLogstash(hosts: GenSeq[String] = ips.par, tries: Int = 5): Unit = {
checkProduction
killProcess("logstash", "", hosts, tries = tries)
}
def quickInstall: Unit = {
checkProduction
}
def install: Unit = install(ips.par)
def install(hosts: String*): Unit = install(hosts.par)
def install(hosts: GenSeq[String]) {
checkProduction
refreshUserState(user, None, hosts)
purge(hosts)
deploy(hosts)
init(hosts)
//setElasticsearchUnassignedTimeout()
}
def disableElasticsearchUpdate: Unit = disableElasticsearchUpdate(ips(0))
def disableElasticsearchUpdate(ip: String) {
command(
s"""curl -s -X PUT http://${pingAddress}:$esRegPort/_cluster/settings -d '{"transient" : {"cluster.routing.allocation.enable" : "none"}}'""",
ip,
false
)
}
def enableElasticsearchUpdate: Unit = enableElasticsearchUpdate(ips(0))
def enableElasticsearchUpdate(ip: String) {
command(
s"""curl -s -X PUT http://${pingAddress}:$esRegPort/_cluster/settings -d '{"transient" : {"cluster.routing.allocation.enable" : "all"}}'""",
ip,
false
)
}
def findEsMasterNode(hosts: GenSeq[String] = ips): Option[String] = {
hosts.par.find(host => command(s"curl -s $host:$esMasterPort > /dev/null 2> /dev/null").isSuccess)
}
def findEsMasterNodes(hosts: GenSeq[String] = ips): GenSeq[String] = {
hosts.par.filter(host => command(s"curl -s $host:$esMasterPort > /dev/null 2> /dev/null").isSuccess)
}
def setElasticsearchUnassignedTimeout(host: String = ips.head, timeout: String = "15m"): Unit = {
info(s"setting index.unassigned.node_left.delayed_timeout to $timeout")
val com =
s"""curl -s -X PUT 'http://$host:$esRegPort/_all/_settings' -d '{
| "settings": {
| "index.unassigned.node_left.delayed_timeout": "$timeout"
| }
|}'""".stripMargin
command(com, host, false)
}
def getCassandraHostId(addr: String): String = {
command(
s"JAVA_HOME=${instDirs.globalLocation}/cm-well/app/java/bin $nodeToolPath status 2> /dev/null | grep $addr | awk '{print $$7}'",
ips(0),
false
).get.trim
}
def rebalanceCassandraDownNodes {
// grep DN | awk '{print $2 " " $7}'
Retry {
val downNodes = command(
s"""JAVA_HOME=${instDirs.globalLocation}/cm-well/app/java/bin $nodeToolPath status 2> /dev/null | grep DN | awk '{print $$2 " " $$7}'""",
ips(0),
false
).get.trim.split("\\n").toList.map { dn =>
val dnsplt = dn.split(" ")
dnsplt(0) -> dnsplt(1)
}
downNodes.par.foreach(
dn =>
command(
s"JAVA_HOME=${instDirs.globalLocation}/cm-well/app/java/bin $nodeToolPath removenode ${dn._2} 2> /dev/null",
ips(0),
false
)
)
if (command(s"""JAVA_HOME=${instDirs.globalLocation}/cm-well/app/java/bin $nodeToolPath status 2> /dev/null | grep DN | awk '{print $$2 " " $$7}'""",
                  ips(0),
                  false).get.trim.split("\\n").toList.exists(_.nonEmpty))
throw new Exception("Failed to remove down nodes")
info(s"Cassandra nodes were removed from the cluster. The cluster now will rebalance its data.")
}
}
def getCassandraAddresses(host: String): Seq[String] = Seq(host)
def decommissionCassandraNodes(hosts: GenSeq[String]) {
hosts.foreach { host =>
getCassandraAddresses(host).foreach { ip =>
command(
s"""JAVA_HOME=${instDirs.globalLocation}/cm-well/app/java/bin $nodeToolLocation -h $ip decommission 2> /dev/null""",
host,
false
)
}
}
}
def shutdown: Unit = shutdown()
def shutdown(hosts: GenSeq[String] = ips): Unit = {
disableElasticsearchUpdate
stop(false, hosts)
}
def updateCasKeyspace: Unit = {
// scalastyle:off
command(s"cd ${absPath(instDirs.globalLocation)}/cm-well/app/cas/cur; sh bin/cqlsh ${pingAddress} -f ${absPath(instDirs.globalLocation)}/cm-well/conf/cas/cassandra-cql-init-cluster-new", ips(0), false)
// scalastyle:on
}
def updateKafkaScemas: Unit = {
val replicationFactor = math.min(ips.size, 3)
val javaHomeLocation = s"${absPath(instDirs.globalLocation)}/cm-well/app/java"
val exportCommand = if (Files.exists(Paths.get(javaHomeLocation)))
s"export PATH=${absPath(instDirs.globalLocation)}/cm-well/app/java/bin:$$PATH ; export JAVA_HOME=${javaHomeLocation} ;"
else ""
// scalastyle:off
val createTopicCommandPrefix = s"cd ${absPath(instDirs.globalLocation)}/cm-well/app/kafka/cur; $exportCommand sh bin/kafka-topics.sh --create --zookeeper ${pingAddress}:2181 --replication-factor $replicationFactor --partitions ${ips.size} --topic"
// scalastyle:on
command(s"$createTopicCommandPrefix persist_topic", ips(0), false)
command(s"$createTopicCommandPrefix index_topic", ips(0), false)
}
def checkPreUpgradeStatus(host: String): Unit = {
val esStatusTry = elasticsearchStatus(host)
val casStatusTry = cassandraStatus(host).map(_.toInt)
val wsStatusTry = webServerStatus(host)
var hasProblem = false
esStatusTry match {
case Success(color) =>
if (color.toLowerCase != "green") {
hasProblem = true
warn(s"Elasticsearch status is $color.")
}
case Failure(err) =>
hasProblem = true
warn(s"Couldn't retrieve Elasticsearch status.")
}
casStatusTry match {
case Success(uns) =>
if (uns < size) {
hasProblem = true
warn(s"Number of Cassandra up nodes is $uns/$size.")
}
case Failure(err) =>
hasProblem = true
warn(s"Couldn't retrieve Cassandra status.")
}
wsStatusTry match {
case Success(v) =>
if (!v.contains("200") && !v.contains("404") && !v.contains("503")) {
hasProblem = true
warn(s"Webservice doesn't respond with a good code.")
}
case Failure(err) =>
hasProblem = true
warn(s"Webservice doesn't respond.")
}
if (hasProblem) warnPrompt
}
def upgradeDc = upgrade(List(DcProps(this)), uploadSpa = false, uploadDocs = false)
def upgradeCtrl = upgrade(List(CtrlProps(this)), uploadSpa = false, uploadDocs = false)
def upgradeBG = upgrade(List(BgProps(this)), uploadSpa = false, uploadDocs = false)
def upgradeWS = upgrade(List(WebserviceProps(this)))
def quickUpgrade: Unit = quickUpgrade()
def quickUpgrade(hosts: GenSeq[String] = ips): Unit = {
refreshUserState(user, None, hosts)
syncLib(hosts)
linkLibs(hosts)
hosts.par.foreach(host => restartApp(host))
}
def noDownTimeQuickUpgrade(hosts: GenSeq[String] = ips): Unit = {
refreshUserState(user, None, hosts)
info("syncing libs")
syncLib(hosts)
linkLibs(hosts)
info("generating resources")
genResources(hosts)
info("stopping CM-WELL components")
stopBg(hosts)
stopDc(hosts)
stopCW(hosts)
stopCtrl(hosts)
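    // Split the hosts into two batches by index parity and restart the web services one batch at a
    // time, so roughly half of the nodes keep serving requests at any point during the upgrade.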
val (h1, h2) = hosts.zipWithIndex.partition(x => x._2 % 2 == 0)
val hosts1 = h1.map(_._1)
val hosts2 = h2.map(_._1)
info(s"restarting web services on ${hosts1.mkString(",")}")
stopWebservice(hosts1)
startWebservice(hosts1)
hosts1.foreach { host =>
info(s"waiting for $host to respond"); WebServiceLock().com(host)
}
info(s"restarting web services on ${hosts2.mkString(",")}")
stopWebservice(hosts2)
startWebservice(hosts2)
hosts2.foreach { host =>
info(s"waiting for $host to respond"); WebServiceLock().com(host)
}
startBg(hosts)
startDc(hosts)
startCW(hosts)
startCtrl(hosts)
}
def upgrade: Unit = upgrade()
def upgrade(baseProps: List[ComponentProps] = List(CassandraProps(this),
ElasticsearchProps(this),
KafkaProps(this),
ZooKeeperProps(this),
BgProps(this),
WebserviceProps(this),
CtrlProps(this),
DcProps(this)),
uploadSpa: Boolean = true,
uploadDocs: Boolean = true,
uploadUserInfotons: Boolean = true,
withUpdateSchemas: Boolean = false,
hosts: GenSeq[String] = ips) {
checkProduction
refreshUserState(user, None, hosts)
//checkPreUpgradeStatus(hosts(0))
val esMasterNode = findEsMasterNode(hosts) match {
case Some(emn) =>
info(s"found Elasticsearch master node: $emn")
emn
case None => throw new Exception("Couldn't find elasticsearch master node")
}
val dateStr = deployment.getCurrentDateStr
var props = baseProps
if (deployJava) props = props ++ List(JavaProps(this))
if (withElk) props = props ++ List(LogstashProps(this), KibanaProps(this))
info("deploying components and checking what should be upgraded.")
syncLib(hosts)
linkLibs(hosts)
rsyncPlugins(hosts)
BinsProps(this).deployComponent(hosts)
// get for each component its unsynced hosts and redeploy the new version of the component.
val updatedHosts = props
.map(prop => (prop, prop.getUnsyncedHosts(hosts.par)))
.filter(t => t._2.size > 0)
.map(t => (t._1, t._2, t._1.redeployComponent(t._2)))
if (updatedHosts.size > 0) {
//todo: FIX THIS!!!
doInfo = false
deployment.createDirs(hosts, props)
doInfo = true
val updatedComponents = updatedHosts.map(_._1).toSet
val preUpgradeComponents = props
.collect { case r: RunnableComponent if r.upgradeMethod == PreUpgrade => r }
.filter(r => updatedComponents.contains(r) || !updatedComponents.intersect(r.upgradeDependency).isEmpty)
val nonRollingComponents = props
.collect { case r: RunnableComponent if r.upgradeMethod == NonRolling => r }
.filter(r => updatedComponents.contains(r) || !updatedComponents.intersect(r.upgradeDependency).isEmpty)
val rollingComponents = props
.collect { case r: RunnableComponent if r.upgradeMethod == Rolling => r }
.filter(r => updatedComponents.contains(r) || !updatedComponents.intersect(r.upgradeDependency).isEmpty)
val nonRunningComponents = props.filter(p => !p.isInstanceOf[RunnableComponent])
updatedHosts
.filter { el =>
nonRunningComponents.contains(el._1) && el._1.symLinkName.isDefined
}
.foreach { el =>
val component = el._1
val hostsToUpdate = el._2
val newName = el._3
info(s"updating ${component.getName} on all hosts")
component.relink(newName, hostsToUpdate)
}
// stopping all the components that are not upgraded in rolling style.
nonRollingComponents.foreach { nrc =>
info(s"stopping ${nrc.getName} on all hosts.")
nrc.stop(hosts)
}
hosts.foreach { h =>
        // The components that were updated on this host.
val updatedHostComponents =
updatedHosts.filter(uh => uh._2.toVector.contains(h)).map(uh => uh._1 -> (uh._2, uh._3)).toMap
val casUpdated = updatedComponents.contains(CassandraProps(this))
val esUpdated = updatedComponents.contains(ElasticsearchProps(this))
val javaUpdated = updatedComponents.contains(JavaProps(this))
//if(esUpdated || javaUpdated) {
Try(ElasticsearchLock().waitForModule(esMasterNode, size))
Try(ElasticsearchStatusLock("green", "yellow").waitForModuleIndefinitely(esMasterNode))
// if we encounter status yellow lets sleep for 10 minutes.
//if(elasticsearchStatus(ips(0)).getOrElse("N/A") == "yellow") Thread.sleep(10 * 1000 * 60)
//}
info(
s"updating ${(updatedComponents -- nonRunningComponents -- preUpgradeComponents).map(_.getName).mkString(", ")} on $h"
)
val updatedComponentsSet = updatedComponents
// stopping all the components that are upgraded in rolling style.
rollingComponents.foreach { rc =>
info(s" restarting ${rc.getName}")
rc.stop(List(h))
}
// relinking the new components.
(updatedComponentsSet -- preUpgradeComponents -- nonRunningComponents)
.foreach(cp => if (cp.symLinkName.isDefined) cp.relink(updatedHostComponents.get(cp).get._2, List(h)))
createAppLinks(List(h))
genResources(List(h))
// starting all the components that are upgraded in rolling style.
rollingComponents.foreach(_.start(List(h)))
// wait for cassandra and elasticsearch to be stable before starting cmwell components.
if (javaUpdated || casUpdated) {
Try(CassandraLock().waitForModule(ips(0), size))
}
}
hosts.par.foreach(host => Try(WebServiceLock().waitForModule(host, 1)))
preUpgradeComponents.foreach { puc =>
info(s"restarting ${puc.getName} on all hosts")
puc.stop(hosts)
}
updatedHosts
.filter { el =>
preUpgradeComponents.contains(el._1) && el._1.symLinkName.isDefined
}
.foreach { el =>
val component = el._1
val hostsToUpdate = el._2
val newName = el._3
info(s"updating ${component.getName} on all hosts.")
component.relink(newName, hostsToUpdate)
}
// todo: make more generic.
genEsResources(hosts)
preUpgradeComponents.foreach(_.start(hosts))
// starting all the components that are not upgraded in rolling style.
Try(ElasticsearchLock(esMasterNode).waitForModule(esMasterNode, size))
Try(ElasticsearchStatusLock("green", "yellow").waitForModuleIndefinitely(esMasterNode))
if (withUpdateSchemas) {
updateCasKeyspace
reloadEsMappings
updateKafkaScemas
}
nonRollingComponents.par.foreach { nrc =>
info(s"starting ${nrc.getName} on all hosts.")
nrc.start(hosts)
}
}
Try(WebServiceLock().waitForModule(ips(0), 1))
info(" waiting for ws...")
dataInitializer.waitForWs()
if (uploadSpa) {
Try(WebServiceLock().waitForModule(ips(0), 1))
info(" uploading SPAs to meta/app")
dataInitializer.uploadDirectory("data", s"http://${hosts.head}:9000/meta/app/")
}
if (uploadDocs) {
Try(WebServiceLock().waitForModule(ips(0), 1))
info(" uploading docs")
dataInitializer.uploadDirectory("docs", s"http://${hosts.head}:9000/meta/docs/")
}
if (uploadUserInfotons) {
Try(WebServiceLock().waitForModule(ips(0), 1))
info(" uploading basic userInfotons (if not exist)")
dataInitializer.uploadBasicUserInfotons(hosts(0))
}
info(" updating version history")
dataInitializer.logVersionUpgrade(hosts(0))
}
def reloadEsMappings: Unit = reloadEsMappings()
def reloadEsMappings(createNewIndices: Boolean = true) {
info("reloading Elasticsearch mappings")
command(
s"""curl -s -X POST http://${pingAddress}:$esRegPort/_template/cmwell_index_template -H "Content-Type: application/json" --data-ascii @${absPath(
instDirs.globalLocation
)}/cm-well/conf/es/indices_template_new.json""",
ips(0),
false
)
if (createNewIndices) {
Thread.sleep(5000)
createEsIndices
}
}
def createEsIndices: Unit = {
// val numberOfShards = getSize
// val numberOfReplicas = 2
//
// val settingsJson =
// s"""
// |{
// | "settings" : {
// | "index" : {
// | "number_of_shards" : $numberOfShards,
// | "number_of_replicas" : $numberOfReplicas
// | }
// | }
// |}
// """.stripMargin
//
// command(s"""curl -s -XPUT 'http://${pingAddress}:$esRegPort/cm_well_0/' -d '$settingsJson'""", ips.head, false)
// val actionsJson =
// s"""
// |{
// | "actions" : [
// | {
// | "add" : { "index" : "cm_well_0", "alias" : "cm_well_latest" }
// | },
// | {
// | "add" : { "index" : "cm_well_0", "alias" : "cm_well_all" }
// | }
// | ]
// |}
// """.stripMargin
//
//
// command(s"""curl -s -X POST 'http://${pingAddress}:$esRegPort/_aliases' -d '$actionsJson'""", ips.head, false)
}
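  // Finds the highest existing cmwell_current_N / cmwell_history_N indices via the ws /health/es
  // endpoint, creates the next pair (illustrative: cmwell_current_4 / cmwell_history_4 when 3 is the
  // latest) and repoints the cmwell_*_latest aliases to them.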
def createNewEsIndices: Unit = {
info("creating new indices")
val numberOfShards = getSize
val numberOfReplicas = 2
val settingsJson =
s"""
|{
| "settings" : {
| "index" : {
| "number_of_shards" : $numberOfShards,
| "number_of_replicas" : $numberOfReplicas
| }
| }
|}
""".stripMargin
val json = command(s""" curl -s http://${ips.head}:9000/health/es""").get
val (currents, histories) = JSON
.parseFull(json.trim)
.get
.asInstanceOf[Map[String, Any]]("indices")
.asInstanceOf[Map[String, Any]]
.keySet
.partition {
_.contains("current")
}
val currentIndex = currents.map(_.split("_")(2).toInt).max
val historyIndex = histories.map(_.split("_")(2).toInt).max
val newCurrentIndex = s"cmwell_current_${currentIndex + 1}"
val newHistoryIndex = s"cmwell_history_${historyIndex + 1}"
val oldCurrentIndex = s"cmwell_current_$currentIndex"
val oldHistoryIndex = s"cmwell_history_$historyIndex"
command(s"""curl -s -XPUT 'http://${pingAddress}:$esRegPort/$newCurrentIndex/' -d '$settingsJson'""",
ips.head,
false)
command(s"""curl -s -XPUT 'http://${pingAddress}:$esRegPort/$newHistoryIndex/' -d '$settingsJson'""",
ips.head,
false)
val actionsJson =
s"""
|{
| "actions" : [
| {
| "add" : { "index" : "$newCurrentIndex", "alias" : "cmwell_current" }
| },
| {
| "add" : { "index" : "$newCurrentIndex", "alias" : "cmwell_current_latest" }
| },
| {
| "add" : { "index" : "$newHistoryIndex", "alias" : "cmwell_history" }
| },
| {
| "add" : { "index" : "$newHistoryIndex", "alias" : "cmwell_history_latest" }
| },
| {
| "remove" : { "index" : "$oldCurrentIndex", "alias" : "cmwell_current_latest" }
| },
| {
| "remove" : { "index" : "$oldHistoryIndex", "alias" : "cmwell_history_latest" }
| }
| ]
|}
""".stripMargin
command(s"""curl -s -X POST 'http://${pingAddress}:$esRegPort/_aliases' -d '$actionsJson'""", ips.head, false)
}
def restartApp = {
stopCtrl
startCtrl
Thread.sleep(5000)
restartWebservice
restartCW
restartDc
stopBg
startBg
}
def restartApp(host: String) = {
stopCtrl(host)
startCtrl(host)
Thread.sleep(5000)
stopWebservice(host)
startWebservice(host)
stopCW(host)
startCW(host)
stopDc(host)
startDc(host)
stopBg(host)
startBg(host)
}
def restartWebservice {
ips.foreach { ip =>
info(s"Restarting Webservice on $ip")
stopWebservice(Seq(ip))
startWebservice(Seq(ip))
Try(WebServiceLock().waitForModule(ip, 1))
}
}
def restartDc {
stopDc
startDc
}
def restartCW {
stopCW
startCW
}
def restartCassandra {
ips.foreach { ip =>
info(s"Restarting Cassandra on $ip")
stopCassandra(Seq(ip))
startCassandra(Seq(ip))
Try(CassandraLock().waitForModule(ips(0), size))
}
}
def restartElasticsearch: Unit = restartElasticsearch(ips)
def restartElasticsearch(hosts: Seq[String]) {
hosts.foreach { host =>
Try(ElasticsearchStatusLock("green").waitForModule(hosts(0), 1000))
info(s"Restarting Elasticsearch on $host")
disableElasticsearchUpdate(ips((ips.indexOf(host) + 1) % ips.size))
Thread.sleep(10000)
stopElasticsearch(Seq(host))
startElasticsearch(Seq(host))
enableElasticsearchUpdate(ips((ips.indexOf(host) + 1) % ips.size))
}
}
//def createNetwork : Unit = createNetwork(ips.par,topology, persistentAliases)
def createNetwork(topology: NetTopology, persistent: Boolean, hosts: GenSeq[String], sudoer: Credentials) {
val ipMappingsOfPreparedOnly = ipMappings.remove(ipMappings.getIps.filterNot(hosts.seq.contains))
topology match {
case n: VLanTopology =>
val tag = n.tag
val m = topology.getTopologyMap(ipMappingsOfPreparedOnly)
m.foreach { tuple =>
var index = 0
command(s"echo '/sbin/modprobe 8021q' | sudo tee /etc/sysconfig/modules/vlan.modules > /dev/null",
tuple._1,
true,
Some(sudoer))
command(s"sudo chmod +x /etc/sysconfig/modules/vlan.modules", tuple._1, true, Some(sudoer))
command(s"sudo modprobe 8021q", tuple._1, true, Some(sudoer))
command(s"sudo ip link add link $inet name $inet.$tag type vlan id $tag", tuple._1, true, Some(sudoer))
command(s"sudo ifconfig $inet.$tag up", tuple._1, true, Some(sudoer))
val fileName = s"ifcfg-$inet.$tag"
val path = "/etc/sysconfig/network-scripts"
val fileContent =
s"""
|DEVICE=$inet.$tag
|BOOTPROTO=none
|ONBOOT=yes
|VLAN=yes
""".stripMargin
command(s"echo '$fileContent' | sudo tee $path/$fileName > /dev/null", tuple._1, true, Some(sudoer))
tuple._2.foreach { ip =>
val mask = topology.getNetMask
val fileName = s"ifcfg-$inet.$tag:$index"
val path = "/etc/sysconfig/network-scripts"
val fileContent =
s"""
|DEVICE=${inet}.${tag}:${index}
|IPADDR=${ip}
|NETMASK=$mask
|ONBOOT=yes
""".stripMargin
command(s"echo '$fileContent' | sudo tee $path/$fileName > /dev/null", tuple._1, true, Some(sudoer))
command(s"sudo ifconfig $inet.$tag:$index $ip netmask $mask", tuple._1, true, Some(sudoer))
index += 1
}
}
case _ =>
val m = topology.getTopologyMap(ipMappingsOfPreparedOnly)
m.foreach { tuple =>
var index = 0
tuple._2.foreach { ip =>
command(s"sudo ifconfig $inet:$index $ip/${topology.getCidr} up", tuple._1, true, Some(sudoer))
if (persistent) {
val path = "/etc/sysconfig/network-scripts"
val fileName = s"ifcfg-$inet:$index"
val fileContent =
s"""
|DEVICE=$inet:$index
|IPADDR=$ip
|NETMASK=${topology.getNetMask}
|ONBOOT=yes
""".stripMargin
command(s"echo '$fileContent' | sudo tee $path/$fileName > /dev/null", tuple._1, true, Some(sudoer))
}
index += 1
}
}
}
}
/*def findIpToConnectWithToGrid : String = {
Await.result(Future.firstCompletedOf(ips map getIpInGrid), Duration.Inf)
}
def getIpInGrid(ipToCheckAgainst : String) : Future[String] = {
import java.net.NetworkInterface
import java.util
import collection.JavaConversions._
import collection.JavaConverters._
Future {
val interfaces: Seq[java.net.NetworkInterface] = util.Collections.list(NetworkInterface.getNetworkInterfaces())
val validInterfaceOpt = interfaces.collectFirst { case i if (command(s"ping -c 1 -I ${i.getName}
$ipToCheckAgainst ; echo $$?").get.split("\\n").toList.last.trim == "0") => i}
validInterfaceOpt match {
case Some(validInterface) =>
validInterface.getInterfaceAddresses.asScala.collectFirst {
case inetAddr if (inetAddr.getAddress.getHostAddress.matches( """\\d+.\\d+.\\d+.\\d+""")) =>
inetAddr.getAddress.getHostAddress
}.get
}
}
}*/
def findIpToConnectWithToGrid: String = {
var out = Option.empty[String]
ips.find { ip =>
val res = getIpInGrid(ip)
val rv = res.isDefined
if(rv) out = Some(res.get)
rv
}
out.getOrElse(ips(0))
}
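  // Returns the IPv4 address of the first local network interface that can ping the given host, if
  // any; used above to pick the local address for joining the Akka grid.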
def getIpInGrid(ipToCheckAgainst: String): Option[String] = {
import java.net.NetworkInterface
import java.util
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
val interfaces: Seq[java.net.NetworkInterface] = util.Collections.list(NetworkInterface.getNetworkInterfaces())
val validInterfaceOpt = interfaces.collectFirst {
case i
if (command(s"ping -c 1 -I ${i.getName} $ipToCheckAgainst ; echo $$?").get
.split("\\n")
.toList
.last
.trim == "0") =>
i
}
validInterfaceOpt match {
case Some(validInterface) =>
validInterface.getInterfaceAddresses.asScala.collectFirst {
case inetAddr if (inetAddr.getAddress.getHostAddress.matches("""\\d+.\\d+.\\d+.\\d+""")) =>
inetAddr.getAddress.getHostAddress
}
case None => None
}
}
def connectToGrid: Unit = connectToGrid()
def connectToGrid(ip: String = "") {
if (!Host.connectedToAkkaGrid) {
val useIp = if (ip == "") {
val uIp = findIpToConnectWithToGrid
info(s"Connecting to grid with ip: $uIp")
uIp
} else ip
AkkaGrid.setGridConnection(
GridConnection(memberName = "CONS",
clusterName = cn,
hostName = useIp,
port = 0,
seeds = ips.take(3).map(seedIp => s"$seedIp:7777").toSet)
)
AkkaGrid.joinClient
CtrlClient.init(ips(0))
Host.connectedToAkkaGrid = true
Thread.sleep(5000)
}
}
def restartHaproxy(sudoer: Credentials) {
haProxy match {
case Some(HaProxy(host, sitedown)) =>
command("sudo service haproxy restart", Seq(host), true, Some(sudoer))
case None =>
}
}
def stopHaproxy(sudoer: Credentials) {
haProxy match {
case Some(HaProxy(host, sitedown)) =>
command("sudo service haproxy stop", Seq(host), true, Some(sudoer))
case None =>
}
}
def deployHaproxy(sudoer: Credentials) {
throw new Exception("deploy haproxy currently cancelled")
/*
haProxy match {
case Some(HaProxy(host, sitedown)) =>
command("sudo apt-get -q -y install haproxy", Seq(host), true, Some(sudoer))
val servers = ips.map(ip => s"""server $ip $ip:9000 check inter 10000 rise 5 fall 3""").mkString("\\n")
val content = ResourceBuilder.getResource("scripts/templates/haproxy.cfg", Map("cluster" -> cn, "sitedown" -> sitedown, "servers" -> servers))
createFile("/etc/haproxy/haproxy.cfg", content, Seq(host), true, Some(sudoer))
restartHaproxy(sudoer)
case None =>
}
*/
}
def getClusterStatus: ClusterStatus = {
connectToGrid
Await.result(Host.ctrl.getClusterStatus, 30 seconds)
}
private val elkImageName = "cmwell-elk"
private val elkContainerName = "cmwell-elk-container"
private val elkClusterNameSuffix = "elk"
private val elkDirName = "elk"
private val elkEsWebPort = 9220
private val elkEsTransportPort = 9320
private val elkWebPort = 8080
def deployElk: Unit = {
???
info(s"copying files to remote hosts.")
ips.par.foreach { ip =>
info(s"copying files to $ip")
command(s"rsync -Paz scripts/docker-elk $user@$ip:${instDirs.intallationDir}/app/")
}
info(s"creating docker image")
ips.par.foreach { ip =>
val res =
command(s"sudo cd ${instDirs.intallationDir}/app/docker-elk/; sudo docker build -t $elkImageName .", ip, true)
if (res.isSuccess)
info(s"image was created at $ip")
else
info(s"failed to create image at $ip")
}
info("creating elk log directory")
command(s"mkdir -p ${instDirs.intallationDir}/log/$elkDirName", ips, false)
}
def createLogstashConfig: Unit = {
info("creating logstash config file")
ips.par.foreach { ip =>
createLogstashConfFile(s"$ip:$elkEsWebPort", Seq(ip))
}
}
def startElk: Unit = {
def getSeeds: String = {
ips.take(3).map(ip => s"$ip:$elkEsTransportPort").mkString(",")
}
//docker run -e elk_cluster='docker-elk' -v /home/michael/me/projects/elk-docker/conf:/etc/logstash -v
// /home/michael/app/cm-well/log:/cm-well/log -p 8080:80 -p 9200:9220 -p 9300:9320 elk
//command(s"docker run -d --net=host --name=$elkContainerName -e elk_cluster='$cn-$elkClusterNameSuffix'
// -e elk_hosts='$getSeeds' -v ${instDirs.intallationDir}/conf/logstash/:/etc/logstash -v
// ${instDirs.intallationDir}/log:/opt/cm-well/log -v
// ${instDirs.intallationDir}/log/$elkDirName:/usr/share/elasticsearch/data
// -p $elkWebPort:80 -p $elkEsWebPort:$elkEsWebPort -p $elkEsTransportPort:$elkEsTransportPort $elkImageName", ips, true)
???
}
def stopElk: Unit = {
//command(s"docker rm -f $elkContainerName", ips, true)
???
}
def removeOldPackages(hosts: GenSeq[String] = ips): Unit = {
val packs = deployment.componentProps.filter(_.symLinkName.isDefined)
val loc = instDirs.globalLocation
for {
host <- hosts
pack <- packs
} {
val target = pack.targetLocation
val compName = pack.getName
val symLinkName = pack.symLinkName.get
val currentPack = command(s"readlink -e $loc/cm-well/$target/$symLinkName | xargs basename", host, false).get.trim
val com =
s"ls -1 $loc/cm-well/$target | grep $compName | grep -v $currentPack | xargs -I zzz rm -rf $loc/cm-well/$target/zzz"
command(com, host, false)
}
}
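  // Snapshots the currently linked lib directory into lib-<timestamp> using hard links (cp -al),
  // repoints the cm-well/lib symlink to the new directory and rsyncs the new jars into it, leaving
  // the previous lib-* directories untouched.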
def syncLib(hosts: GenSeq[String] = ips) = {
def getCurrentDateStr = {
val format = new java.text.SimpleDateFormat("yyyyMMdd_hhmmss")
val date = new Date()
format.format(date)
}
val currentDate = getCurrentDateStr
hosts.foreach { host =>
val comStr =
s"""test -L ${instDirs.globalLocation}/cm-well/lib &&
|cp -al `readlink ${instDirs.globalLocation}/cm-well/lib`/ ${instDirs.globalLocation}/cm-well/lib-$currentDate/ ||
|mkdir -p ${instDirs.globalLocation}/cm-well/lib-$currentDate""".stripMargin
command(comStr, host, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/lib && rm ${instDirs.globalLocation}/cm-well/lib",
host,
false)
command(s"ln -s ${instDirs.globalLocation}/cm-well/lib-$currentDate ${instDirs.globalLocation}/cm-well/lib",
host,
false)
rsync("lib/", s"${instDirs.globalLocation}/cm-well/lib/", Seq(host))
}
}
def linkLibs(hosts: GenSeq[String] = ips.par) = {
val dir = new File("dependencies")
command(s"mkdir -p ${instDirs.globalLocation}/cm-well/dependencies", hosts, false)
rsync(s"dependencies/", s"${instDirs.globalLocation}/cm-well/dependencies/", hosts)
dir.listFiles().toVector.par.foreach { file =>
linkLib(file.getName, hosts)
}
}
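  // Recreates app/<component>/lib by symlinking every jar listed in dependencies/<component> from
  // the shared cm-well/lib directory.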
def linkLib(component: String, hosts: GenSeq[String] = ips) = {
val target = component //if(component == "batch") "bg" else component
//val content = Source.fromFile(s"dependencies/$component").getLines().toVector
command(s"rm ${instDirs.globalLocation}/cm-well/app/$target/lib/* > /dev/null 2> /dev/null", hosts, false)
command(s"mkdir -p ${instDirs.globalLocation}/cm-well/app/$target/lib", hosts, false)
hosts.foreach { host =>
// scalastyle:off
command(s"cat ${instDirs.globalLocation}/cm-well/dependencies/$component | xargs -I zzz ln -s ${instDirs.globalLocation}/cm-well/lib/zzz ${instDirs.globalLocation}/cm-well/app/$target/lib/zzz", host, false)
// scalastyle:on
}
}
sys.addShutdownHook {
Try(k.grid.Grid.shutdown)
}
def rsyncPlugins(hosts: GenSeq[String] = ips) = {
command(s"mkdir -p ${instDirs.globalLocation}/cm-well/app/ws/plugins/sg-engines/", hosts, false)
rsync(s"plugins/", s"${instDirs.globalLocation}/cm-well/app/ws/plugins/sg-engines/", hosts)
}
}
| hochgi/CM-Well | server/cmwell-cons/src/main/scala/ctl.scala | Scala | apache-2.0 | 96,840 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import scala.collection.immutable.HashSet
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.expressions.Literal.{FalseLiteral, TrueLiteral}
import org.apache.spark.sql.catalyst.expressions.objects.AssertNotNull
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
/*
* Optimization rules defined in this file should not affect the structure of the logical plan.
*/
/**
* Replaces [[Expression Expressions]] that can be statically evaluated with
* equivalent [[Literal]] values.
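 * For example (illustrative), in `(1 + 2) + a` the foldable subexpression `1 + 2` is replaced by the
 * literal `3`, yielding `3 + a`.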
*/
object ConstantFolding extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsDown {
// Skip redundant folding of literals. This rule is technically not necessary. Placing this
// here avoids running the next rule for Literal values, which would create a new Literal
      // object and run eval unnecessarily.
case l: Literal => l
// Fold expressions that are foldable.
case e if e.foldable => Literal.create(e.eval(EmptyRow), e.dataType)
}
}
}
/**
* Reorder associative integral-type operators and fold all constants into one.
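 * For example (illustrative), `a + 1 + b + 2` over an integral type is rewritten to `(a + b) + 3`.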
*/
object ReorderAssociativeOperator extends Rule[LogicalPlan] {
private def flattenAdd(
expression: Expression,
groupSet: ExpressionSet): Seq[Expression] = expression match {
case expr @ Add(l, r) if !groupSet.contains(expr) =>
flattenAdd(l, groupSet) ++ flattenAdd(r, groupSet)
case other => other :: Nil
}
private def flattenMultiply(
expression: Expression,
groupSet: ExpressionSet): Seq[Expression] = expression match {
case expr @ Multiply(l, r) if !groupSet.contains(expr) =>
flattenMultiply(l, groupSet) ++ flattenMultiply(r, groupSet)
case other => other :: Nil
}
private def collectGroupingExpressions(plan: LogicalPlan): ExpressionSet = plan match {
case Aggregate(groupingExpressions, aggregateExpressions, child) =>
      ExpressionSet(groupingExpressions)
case _ => ExpressionSet(Seq())
}
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan =>
      // We have to respect aggregate expressions which exist in grouping expressions when plan
      // is an Aggregate operator, otherwise the optimized expression could not be derived from
      // the grouping expressions.
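      // For example (illustrative), with `GROUP BY a + 1` the expression `a + 1 + 2` is kept as
      // `(a + 1) + 2` rather than being reordered into `a + 3`.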
val groupingExpressionSet = collectGroupingExpressions(q)
q transformExpressionsDown {
case a: Add if a.deterministic && a.dataType.isInstanceOf[IntegralType] =>
val (foldables, others) = flattenAdd(a, groupingExpressionSet).partition(_.foldable)
if (foldables.size > 1) {
val foldableExpr = foldables.reduce((x, y) => Add(x, y))
val c = Literal.create(foldableExpr.eval(EmptyRow), a.dataType)
if (others.isEmpty) c else Add(others.reduce((x, y) => Add(x, y)), c)
} else {
a
}
case m: Multiply if m.deterministic && m.dataType.isInstanceOf[IntegralType] =>
val (foldables, others) = flattenMultiply(m, groupingExpressionSet).partition(_.foldable)
if (foldables.size > 1) {
val foldableExpr = foldables.reduce((x, y) => Multiply(x, y))
val c = Literal.create(foldableExpr.eval(EmptyRow), m.dataType)
if (others.isEmpty) c else Multiply(others.reduce((x, y) => Multiply(x, y)), c)
} else {
m
}
}
}
}
/**
* Optimize IN predicates:
* 1. Removes literal repetitions.
 * 2. Replaces [[In (value, seq[Literal])]] with an optimized version
* [[InSet (value, HashSet[Literal])]] which is much faster.
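 * For example (illustrative), `a IN (1, 1, 2)` becomes `a IN (1, 2)`, and once the number of distinct
 * literals exceeds the configured conversion threshold the predicate is rewritten to an [[InSet]].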
*/
case class OptimizeIn(conf: SQLConf) extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsDown {
case expr @ In(v, list) if expr.inSetConvertible =>
val newList = ExpressionSet(list).toSeq
if (newList.size > conf.optimizerInSetConversionThreshold) {
val hSet = newList.map(e => e.eval(EmptyRow))
InSet(v, HashSet() ++ hSet)
} else if (newList.size < list.size) {
expr.copy(list = newList)
} else { // newList.length == list.length
expr
}
}
}
}
/**
* Simplifies boolean expressions:
* 1. Simplifies expressions whose answer can be determined without evaluating both sides.
* 2. Eliminates / extracts common factors.
 * 3. Merges identical expressions.
 * 4. Removes the `Not` operator.
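 * For example (illustrative), `(a && b) || (a && c)` is rewritten to `a && (b || c)`, and
 * `NOT(a > b)` to `a <= b`.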
*/
object BooleanSimplification extends Rule[LogicalPlan] with PredicateHelper {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsUp {
case TrueLiteral And e => e
case e And TrueLiteral => e
case FalseLiteral Or e => e
case e Or FalseLiteral => e
case FalseLiteral And _ => FalseLiteral
case _ And FalseLiteral => FalseLiteral
case TrueLiteral Or _ => TrueLiteral
case _ Or TrueLiteral => TrueLiteral
case a And b if Not(a).semanticEquals(b) => FalseLiteral
case a Or b if Not(a).semanticEquals(b) => TrueLiteral
case a And b if a.semanticEquals(Not(b)) => FalseLiteral
case a Or b if a.semanticEquals(Not(b)) => TrueLiteral
case a And b if a.semanticEquals(b) => a
case a Or b if a.semanticEquals(b) => a
case a And (b Or c) if Not(a).semanticEquals(b) => And(a, c)
case a And (b Or c) if Not(a).semanticEquals(c) => And(a, b)
case (a Or b) And c if a.semanticEquals(Not(c)) => And(b, c)
case (a Or b) And c if b.semanticEquals(Not(c)) => And(a, c)
case a Or (b And c) if Not(a).semanticEquals(b) => Or(a, c)
case a Or (b And c) if Not(a).semanticEquals(c) => Or(a, b)
case (a And b) Or c if a.semanticEquals(Not(c)) => Or(b, c)
case (a And b) Or c if b.semanticEquals(Not(c)) => Or(a, c)
// Common factor elimination for conjunction
case and @ (left And right) =>
// 1. Split left and right to get the disjunctive predicates,
// i.e. lhs = (a, b), rhs = (a, c)
        // 2. Find the common predicate between lhsSet and rhsSet, i.e. common = (a)
        // 3. Remove the common predicate from lhsSet and rhsSet, i.e. ldiff = (b), rdiff = (c)
// 4. Apply the formula, get the optimized predicate: common || (ldiff && rdiff)
val lhs = splitDisjunctivePredicates(left)
val rhs = splitDisjunctivePredicates(right)
val common = lhs.filter(e => rhs.exists(e.semanticEquals))
if (common.isEmpty) {
// No common factors, return the original predicate
and
} else {
val ldiff = lhs.filterNot(e => common.exists(e.semanticEquals))
val rdiff = rhs.filterNot(e => common.exists(e.semanticEquals))
if (ldiff.isEmpty || rdiff.isEmpty) {
// (a || b || c || ...) && (a || b) => (a || b)
common.reduce(Or)
} else {
// (a || b || c || ...) && (a || b || d || ...) =>
// ((c || ...) && (d || ...)) || a || b
(common :+ And(ldiff.reduce(Or), rdiff.reduce(Or))).reduce(Or)
}
}
// Common factor elimination for disjunction
case or @ (left Or right) =>
// 1. Split left and right to get the conjunctive predicates,
// i.e. lhs = (a, b), rhs = (a, c)
        // 2. Find the common predicate between lhsSet and rhsSet, i.e. common = (a)
        // 3. Remove the common predicate from lhsSet and rhsSet, i.e. ldiff = (b), rdiff = (c)
// 4. Apply the formula, get the optimized predicate: common && (ldiff || rdiff)
val lhs = splitConjunctivePredicates(left)
val rhs = splitConjunctivePredicates(right)
val common = lhs.filter(e => rhs.exists(e.semanticEquals))
if (common.isEmpty) {
// No common factors, return the original predicate
or
} else {
val ldiff = lhs.filterNot(e => common.exists(e.semanticEquals))
val rdiff = rhs.filterNot(e => common.exists(e.semanticEquals))
if (ldiff.isEmpty || rdiff.isEmpty) {
// (a && b) || (a && b && c && ...) => a && b
common.reduce(And)
} else {
// (a && b && c && ...) || (a && b && d && ...) =>
// ((c && ...) || (d && ...)) && a && b
(common :+ Or(ldiff.reduce(And), rdiff.reduce(And))).reduce(And)
}
}
case Not(TrueLiteral) => FalseLiteral
case Not(FalseLiteral) => TrueLiteral
case Not(a GreaterThan b) => LessThanOrEqual(a, b)
case Not(a GreaterThanOrEqual b) => LessThan(a, b)
case Not(a LessThan b) => GreaterThanOrEqual(a, b)
case Not(a LessThanOrEqual b) => GreaterThan(a, b)
case Not(a Or b) => And(Not(a), Not(b))
case Not(a And b) => Or(Not(a), Not(b))
case Not(Not(e)) => e
}
}
}
/**
* Simplifies binary comparisons with semantically-equal expressions:
* 1) Replace '<=>' with 'true' literal.
* 2) Replace '=', '<=', and '>=' with 'true' literal if both operands are non-nullable.
* 3) Replace '<' and '>' with 'false' literal if both operands are non-nullable.
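 * For example (illustrative), `a <=> a` is replaced by `true`, and `a < a` by `false` when `a` is
 * non-nullable.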
*/
object SimplifyBinaryComparison extends Rule[LogicalPlan] with PredicateHelper {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsUp {
// True with equality
case a EqualNullSafe b if a.semanticEquals(b) => TrueLiteral
case a EqualTo b if !a.nullable && !b.nullable && a.semanticEquals(b) => TrueLiteral
case a GreaterThanOrEqual b if !a.nullable && !b.nullable && a.semanticEquals(b) =>
TrueLiteral
case a LessThanOrEqual b if !a.nullable && !b.nullable && a.semanticEquals(b) => TrueLiteral
// False with inequality
case a GreaterThan b if !a.nullable && !b.nullable && a.semanticEquals(b) => FalseLiteral
case a LessThan b if !a.nullable && !b.nullable && a.semanticEquals(b) => FalseLiteral
}
}
}
/**
* Simplifies conditional expressions (if / case).
*/
object SimplifyConditionals extends Rule[LogicalPlan] with PredicateHelper {
private def falseOrNullLiteral(e: Expression): Boolean = e match {
case FalseLiteral => true
case Literal(null, _) => true
case _ => false
}
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsUp {
case If(TrueLiteral, trueValue, _) => trueValue
case If(FalseLiteral, _, falseValue) => falseValue
case If(Literal(null, _), _, falseValue) => falseValue
case e @ CaseWhen(branches, elseValue) if branches.exists(x => falseOrNullLiteral(x._1)) =>
// If there are branches that are always false, remove them.
// If there are no more branches left, just use the else value.
// Note that these two are handled together here in a single case statement because
// otherwise we cannot determine the data type for the elseValue if it is None (i.e. null).
val newBranches = branches.filter(x => !falseOrNullLiteral(x._1))
if (newBranches.isEmpty) {
elseValue.getOrElse(Literal.create(null, e.dataType))
} else {
e.copy(branches = newBranches)
}
case e @ CaseWhen(branches, _) if branches.headOption.map(_._1) == Some(TrueLiteral) =>
// If the first branch is a true literal, remove the entire CaseWhen and use the value
// from that. Note that CaseWhen.branches should never be empty, and as a result the
// headOption (rather than head) added above is just an extra (and unnecessary) safeguard.
branches.head._2
case CaseWhen(branches, _) if branches.exists(_._1 == TrueLiteral) =>
        // A branch with a true condition eliminates all following branches;
        // these branches can be pruned away.
        val (h, t) = branches.span(_._1 != TrueLiteral)
        CaseWhen(h :+ t.head, None)
}
}
}
/**
* Simplifies LIKE expressions that do not need full regular expressions to evaluate the condition.
* For example, when the expression is just checking to see if a string starts with a given
* pattern.
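 * For example (illustrative): a pattern like 'abc%' becomes a StartsWith check, '%abc' an
 * EndsWith check, '%abc%' a Contains check, and a pattern with no wildcards an EqualTo check.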
*/
object LikeSimplification extends Rule[LogicalPlan] {
// if guards below protect from escapes on trailing %.
// Cases like "something\\%" are not optimized, but this does not affect correctness.
private val startsWith = "([^_%]+)%".r
private val endsWith = "%([^_%]+)".r
private val startsAndEndsWith = "([^_%]+)%([^_%]+)".r
private val contains = "%([^_%]+)%".r
private val equalTo = "([^_%]*)".r
def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions {
case Like(input, Literal(pattern, StringType)) =>
pattern.toString match {
case startsWith(prefix) if !prefix.endsWith("\\\\") =>
StartsWith(input, Literal(prefix))
case endsWith(postfix) =>
EndsWith(input, Literal(postfix))
          // An 'a%a' pattern is basically the same as 'a%' && '%a'.
          // However, the additional `Length` condition is required to prevent 'a' from matching 'a%a'.
case startsAndEndsWith(prefix, postfix) if !prefix.endsWith("\\\\") =>
And(GreaterThanOrEqual(Length(input), Literal(prefix.size + postfix.size)),
And(StartsWith(input, Literal(prefix)), EndsWith(input, Literal(postfix))))
case contains(infix) if !infix.endsWith("\\\\") =>
Contains(input, Literal(infix))
case equalTo(str) =>
EqualTo(input, Literal(str))
case _ =>
Like(input, Literal.create(pattern, StringType))
}
}
}
/**
* Replaces [[Expression Expressions]] that can be statically evaluated with
 * equivalent [[Literal]] values. This rule specifically handles
 * null value propagation from the bottom to the top of the expression tree.
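 * For example (illustrative): 'Coalesce(null, a, null)' simplifies to 'a', and 'IsNull(c)'
 * becomes 'false' when 'c' is non-nullable.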
*/
case class NullPropagation(conf: SQLConf) extends Rule[LogicalPlan] {
private def isNullLiteral(e: Expression): Boolean = e match {
case Literal(null, _) => true
case _ => false
}
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsUp {
case e @ WindowExpression(Cast(Literal(0L, _), _, _), _) =>
Cast(Literal(0L), e.dataType, Option(conf.sessionLocalTimeZone))
case e @ AggregateExpression(Count(exprs), _, _, _) if exprs.forall(isNullLiteral) =>
Cast(Literal(0L), e.dataType, Option(conf.sessionLocalTimeZone))
case ae @ AggregateExpression(Count(exprs), _, false, _) if !exprs.exists(_.nullable) =>
// This rule should be only triggered when isDistinct field is false.
ae.copy(aggregateFunction = Count(Literal(1)))
case IsNull(c) if !c.nullable => Literal.create(false, BooleanType)
case IsNotNull(c) if !c.nullable => Literal.create(true, BooleanType)
case EqualNullSafe(Literal(null, _), r) => IsNull(r)
case EqualNullSafe(l, Literal(null, _)) => IsNull(l)
case AssertNotNull(c, _) if !c.nullable => c
// For Coalesce, remove null literals.
case e @ Coalesce(children) =>
val newChildren = children.filterNot(isNullLiteral)
if (newChildren.isEmpty) {
Literal.create(null, e.dataType)
} else if (newChildren.length == 1) {
newChildren.head
} else {
Coalesce(newChildren)
}
// If the value expression is NULL then transform the In expression to null literal.
case In(Literal(null, _), _) => Literal.create(null, BooleanType)
// Non-leaf NullIntolerant expressions will return null, if at least one of its children is
// a null literal.
case e: NullIntolerant if e.children.exists(isNullLiteral) =>
Literal.create(null, e.dataType)
}
}
}
/**
* Propagate foldable expressions:
* Replace attributes with aliases of the original foldable expressions if possible.
* Other optimizations will take advantage of the propagated foldable expressions.
*
* {{{
* SELECT 1.0 x, 'abc' y, Now() z ORDER BY x, y, 3
* ==> SELECT 1.0 x, 'abc' y, Now() z ORDER BY 1.0, 'abc', Now()
* }}}
*/
object FoldablePropagation extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = {
val foldableMap = AttributeMap(plan.flatMap {
case Project(projectList, _) => projectList.collect {
case a: Alias if a.child.foldable => (a.toAttribute, a)
}
case _ => Nil
})
val replaceFoldable: PartialFunction[Expression, Expression] = {
case a: AttributeReference if foldableMap.contains(a) => foldableMap(a)
}
if (foldableMap.isEmpty) {
plan
} else {
var stop = false
CleanupAliases(plan.transformUp {
// A leaf node should not stop the folding process (note that we are traversing up the
// tree, starting at the leaf nodes); so we are allowing it.
case l: LeafNode =>
l
// We can only propagate foldables for a subset of unary nodes.
case u: UnaryNode if !stop && canPropagateFoldables(u) =>
u.transformExpressions(replaceFoldable)
        // Allow inner joins. We do not allow outer joins: although their output attributes are
        // derived from their children, they are actually different attributes, because the output
        // of an outer join is not always picked from its children, but can also be null.
// TODO(cloud-fan): It seems more reasonable to use new attributes as the output attributes
// of outer join.
case j @ Join(_, _, Inner, _) if !stop =>
j.transformExpressions(replaceFoldable)
        // We can fold the projections that an Expand holds. However, Expand changes the output
        // columns and often reuses the underlying attributes, so we cannot assume that a column
        // is still foldable after the Expand has been applied.
// TODO(hvanhovell): Expand should use new attributes as the output attributes.
case expand: Expand if !stop =>
val newExpand = expand.copy(projections = expand.projections.map { projection =>
projection.map(_.transform(replaceFoldable))
})
stop = true
newExpand
case other =>
stop = true
other
})
}
}
/**
   * Whitelist of all [[UnaryNode]]s for which we allow foldable propagation.
*/
private def canPropagateFoldables(u: UnaryNode): Boolean = u match {
case _: Project => true
case _: Filter => true
case _: SubqueryAlias => true
case _: Aggregate => true
case _: Window => true
case _: Sample => true
case _: GlobalLimit => true
case _: LocalLimit => true
case _: Generate => true
case _: Distinct => true
case _: AppendColumns => true
case _: AppendColumnsWithObject => true
case _: BroadcastHint => true
case _: RepartitionByExpression => true
case _: Repartition => true
case _: Sort => true
case _: TypedFilter => true
case _ => false
}
}
/**
 * Optimizes `CaseWhen` expressions by switching them to codegen mode when the total number of
 * branches does not exceed the configured maximum.
*/
case class OptimizeCodegen(conf: SQLConf) extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions {
case e: CaseWhen if canCodegen(e) => e.toCodegen()
}
private def canCodegen(e: CaseWhen): Boolean = {
val numBranches = e.branches.size + e.elseValue.size
numBranches <= conf.maxCaseBranchesForCodegen
}
}
/**
* Removes [[Cast Casts]] that are unnecessary because the input is already the correct type.
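 * For example (illustrative): 'Cast(e, t)' is replaced by 'e' when 'e' already has data type 't'.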
*/
object SimplifyCasts extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions {
case Cast(e, dataType, _) if e.dataType == dataType => e
case c @ Cast(e, dataType, _) => (e.dataType, dataType) match {
case (ArrayType(from, false), ArrayType(to, true)) if from == to => e
case (MapType(fromKey, fromValue, false), MapType(toKey, toValue, true))
if fromKey == toKey && fromValue == toValue => e
case _ => c
}
}
}
/**
 * Removes expression nodes that are unnecessary, such as `UnaryPositive` and `PromotePrecision` wrappers.
*/
object RemoveDispensableExpressions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions {
case UnaryPositive(child) => child
case PromotePrecision(child) => child
}
}
/**
* Removes the inner case conversion expressions that are unnecessary because
* the inner conversion is overwritten by the outer one.
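 * For example (illustrative): 'Upper(Lower(child))' is rewritten to 'Upper(child)'.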
*/
object SimplifyCaseConversionExpressions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsUp {
case Upper(Upper(child)) => Upper(child)
case Upper(Lower(child)) => Upper(child)
case Lower(Upper(child)) => Lower(child)
case Lower(Lower(child)) => Lower(child)
}
}
}
| MLnick/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala | Scala | apache-2.0 | 21,986 |
package provingground.andrewscurtis
import provingground.translation.StringParse._
import cats.kernel._
import upickle.default.{ReadWriter => RW, macroRW}
/*
* Free group in n generators
* An element is represented as a word in integers, together with rank of the corresponding group
* The negative of a number represents the inverse generator
*
* Perhaps it would be wise to make the recursive
* calls tail recursive.
*/
object FreeGroups {
/**
* String for a letter, e.g. a, a! (for a inverse)
*/
def letterString(n: Int): String =
if (n > 0) ('a' + n - 1).toChar.toString + "."
else ('a' - n - 1).toChar.toString + "!."
/**
* unicode string for a letter, e.g. "a" or "\\bar{a}"
*/
def letterUnic(n: Int): String =
if (n > 0) ('a' + n - 1).toChar.toString
else ('a' - n - 1).toChar.toString + '\\u0305'.toString
object Word {
implicit val freeGroup: Group[Word] = new Group[Word] {
val empty = Word(Vector())
def combine(x: Word, y: Word): Word = x * y
def inverse(x: Word): Word = x.inv
}
implicit def rw: RW[Word] = macroRW
/**
      * Sanity checker for listFromChars.
      * Further checks may be added later.
*/
def isParsable(s: Vector[Char]): Boolean = {
if (s.isEmpty) true
else if ((s.head == '\\u0305') || (s.head == '!')) false
else true
}
/**
* helper for fromString
*/
def listFromChars(s: Vector[Char]): Vector[Int] = {
require(
isParsable(s),
"The list of characters is not well formed and should not be parsed.")
s match {
case Vector() => Vector()
case x +: '\\u0305' +: tail =>
(-(x - 'a' + 1)) +: listFromChars(tail)
case x +: '!' +: tail =>
(-(x - 'a' + 1)) +: listFromChars(tail)
case x +: tail =>
(x - 'a' + 1) +: listFromChars(tail)
}
}
/**
* word from a string.
*/
def fromString(s: String): Word =
if (s == "1") Word(Vector())
else
Word(
listFromChars(
s.replace("!", "\\u0305")
.replace(" ", "")
.replace(".", "")
.toVector))
/**
* word from a string.
*/
def apply(w: String): Word = fromString(w)
/**
* the identity
*/
val e = Word(Vector())
}
/**
* A word in a free group.
* @param ls letters of the words represented as integers; 1 represents a, -1 represents `a^{-1}`
*/
case class Word(ls: Vector[Int]) extends AnyVal {
/**
* returns reduced form of a word
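      * e.g. the word a.a!.b (a times a-inverse times b) reduces to b (illustrative example).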
*/
def reduce: Word = {
ls match {
case x +: ys =>
          Word(ys).reduce match {
case Word(head +: tail) if head == -x => Word(tail)
case that => x +: that
}
case _ => this
}
}
def isReduced: Boolean = (this == reduce)
/**
* string representation
*/
def toPlainString: String =
((ls map (letterString(_))).foldLeft("")(_ + _)).dropRight(1)
override def toString: String = if (ls.isEmpty) "1" else toUnicode
def ++(that: Word): Word = Word(ls ++ that.ls).reduce
/**
* unicode representation.
*/
def toUnicode: String = ((ls map (letterUnic(_))).foldLeft("")(_ + _))
/**
* letter prepended to word, not reduced
*/
def +:(let: Int): Word = Word(let +: ls)
def :+(let: Int): Word = Word(ls :+ let)
/**
* inverse
*/
def inv: Word = Word(ls.reverse map ((n) => -n))
/**
* inverse
*/
def ! : Word = inv
/**
* returns this to kth power.
*/
def pow: Int => Word = {
case 0 => Word(Vector())
case k if k > 0 => Word(Vector.fill(k)(ls).flatten).reduce
case k if k < 0 => this.inv.pow(-k)
}
/**
* raise to nth power.
*/
def ^(n: Int): Word = pow(n)
/**
* multiply and reduce
*/
def *(that: Word): Word = Word(ls ++ that.ls).reduce
/**
* conjugate
*/
def conj(that: Word): Word = (that.inv * this * that).reduce
/**
* conjugate
*/
def ^(that: Word): Word = conj(that)
/**
* conjugate by a generator (or its inverse)
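      * e.g. conjugating a.b by generator 1 gives a!.a.b.a, which reduces to b.a (illustrative example).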
*/
def conjGen(k: Int): Word = Word((-k) +: (ls :+ (k))).reduce
/**
* conjugate by a generator (or its inverse).
* @param k index of generator, starting at 1.
*/
def ^^(k: Int): Word = conjGen(k)
/**
* largest generator in the free group.
*/
def maxgen: Int = {
if (ls.isEmpty) 0
else (ls map ((x: Int) => x.abs)).max
}
/**
* remove generators of rank and above.
*/
def rmvtop(rank: Int): Word = Word(ls filter (_.abs < rank))
}
/**
* weight of a word, for a generation process where we extend with some probability, picking letters at random.
*/
def wordWeight(w: Word, wrdCntn: Double, rank: Double): Double =
(1 - wrdCntn) * math.pow(wrdCntn / (2 * rank), w.ls.length)
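  // Illustrative example: with wrdCntn = 0.5 and rank = 2, a word of length 2 has weight
  // (1 - 0.5) * (0.5 / 4)^2 = 0.5 * 0.015625 = 0.0078125.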
/*
   * Objects involved in Andrews-Curtis evolution: Presentations and Moves
*/
/**
* Finite presentation of a group
*
* @param rels relations
* @param rank number of generators.
*/
case class Presentation(rels: Vector[Word], rank: Int) {
require(maxgen <= rank, "There are more generators than the rank allows")
/**
* number of relations
*/
val sz: Int = rels.length
/**
* string without unicode.
*/
def toPlainString: String = {
val gens =
(for (j <- 0 to rank - 1) yield ('a' + j).toChar.toString).toVector
.mkString(",")
val relstring =
(for (rel <- rels) yield rel.toPlainString).toVector.mkString(",")
s"<$gens; $relstring>"
}
/**
* unicode string
*/
def toUnicode: String = {
val gens =
(for (j <- 0 to rank - 1) yield ('a' + j).toChar.toString).toVector
.mkString(",")
val relstring =
(for (rel <- rels) yield rel.toUnicode).toVector.mkString(",")
s"<$gens; $relstring>"
}
/**
* unicode string
*/
override def toString: String = toUnicode
/**
* defect of the presentation.
*/
val defect: Int = rank - sz
/**
* largest generator appearing in relation.
*/
def maxgen: Int = {
if (rels.isEmpty) 0
else (rels map ((x: Word) => x.maxgen)).max
}
/**
* returns presentation with ith element inverted.
*/
def inv(k: Int): Presentation = {
val result =
(0 to sz - 1) map { (i) =>
if (i == k) rels(i).inv else rels(i)
}
Presentation(result.toVector, rank)
}
/**
* presentation with kth relation multiplied on the right by the lth relation.
*/
def rtmult(k: Int, l: Int): Presentation = {
val result =
(0 to sz - 1) map { (i) =>
if (i == k) rels(k) * rels(l) else rels(i)
}
Presentation(result.toVector, rank)
}
def rtmultinv(k: Int, l: Int): Presentation = {
val result =
(0 to sz - 1) map { (i) =>
if (i == k) rels(k) * (rels(l).inv) else rels(i)
}
Presentation(result.toVector, rank)
}
/**
      * presentation with the kth relation multiplied on the left by the lth relation.
*/
def lftmult(k: Int, l: Int): Presentation = {
val result =
(0 to sz - 1) map { (i) =>
if (i == k) rels(l) * rels(k) else rels(i)
}
Presentation(result.toVector, rank)
}
def lftmultinv(k: Int, l: Int): Presentation = {
val result =
(0 to sz - 1) map { (i) =>
if (i == k) (rels(l).inv) * rels(k) else rels(i)
}
Presentation(result.toVector, rank)
}
def transpose(k: Int, l: Int): Presentation = {
def flipped: Int => Word = {
case `k` => rels(l)
case `l` => rels(k)
case j => rels(j)
}
val result = (0 to sz - 1) map (flipped)
Presentation(result.toVector, rank)
}
/**
* presentation with kth relation conjugated by generator with index l.
*/
def conj(k: Int, l: Int): Presentation = {
val result =
0 until sz map { (i) =>
if (i == k) rels(k) ^^ l else rels(i)
}
Presentation(result.toVector, rank)
}
/**
* presentation with the kth relation conjugated by the lth relation
*/
def conjRelations(k: Int, l: Int): Presentation = {
val result =
0 until sz map { (i) =>
if (i == k) rels(k) ^ rels(l) else rels(i)
}
Presentation(result.toVector, rank)
}
/**
* Andrews-Curtis stabilization
*/
def acStab: Presentation = Presentation(Word(Vector(rank + 1)) +: rels, rank + 1)
/**
* Tietze stabilization.
*/
def ttzStab: Presentation = Presentation(Word(Vector()) +: rels, rank)
/**
* returns whether Andrews-Curtis stabilized.
*/
def acStabilized: Boolean = rels contains (Word(Vector(rank)))
/**
* (unsafe) Andrews-Curtis destabilization.
*/
def acDestabilized: Presentation = {
val newrels =
rels filter ((w: Word) => w != Word(Vector(rank + 1))) map
(_.rmvtop(rank))
Presentation(newrels, rank - 1)
}
}
object Presentation {
implicit def rw: RW[Presentation] = macroRW
/**
* parses string to a presentation.
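      * e.g. fromString("<a,b; ab!, ba>") gives a rank-2 presentation with relations a.b! and b.a
      * (illustrative example using the "!" inverse notation of Word.fromString).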
*/
def fromString(s: String): Presentation = {
val ss = s.replaceAll("[ <>]", "")
val genWord +: relWord +: _ = ss.split(";").toVector
val rank = genWord.split(",").length
val rels = relWord.split(",") map (Word.fromString(_))
Presentation(rels.toVector, rank)
}
/**
* gives presentation from rank and strings for words.
*/
def apply(rank: Int, ws: String*): Presentation =
Presentation(ws.toVector map (Word.fromString), rank)
def balanced(ws: String*): Presentation = {
val rels = ws.toVector map (Word.fromString(_))
Presentation(rels, rels.length)
}
val empty = Presentation(Vector(), 0)
def trivial(n: Int) =
Presentation((1 to n).toVector map ((i) => Word(Vector(i))), n)
/**
* moves implemented as functions
*/
def id(pres: Presentation): Presentation = pres
def inv(pres: Presentation, k: Int): Presentation = pres.inv(k)
def rtmult(pres: Presentation, k: Int, l: Int): Presentation = pres.rtmult(k, l)
def lftmult(pres: Presentation, k: Int, l: Int): Presentation = pres.lftmult(k, l)
def conj(pres: Presentation, k: Int, l: Int): Presentation = pres.conj(k, l)
def conjRelators(pres: Presentation, k: Int, l: Int): Presentation =
pres.conjRelations(k, l)
def acStabilized(pres: Presentation): Presentation = pres.acStab
def ttzStab(pres: Presentation): Presentation = pres.ttzStab
def acDestabilized(pres: Presentation): Presentation = pres.acDestabilized
/**
* weight where number of relations is fixed.
*/
def weight(wrdCntn: Double): Presentation => Double =
(pres: Presentation) => {
val wordwts = pres.rels map (wordWeight(_, wrdCntn, pres.rank))
(wordwts.foldRight(1.0)(_ * _))
}
}
def presentationWeight(pres: Presentation,
presCntn: Double,
wrdCntn: Double): Double = {
val wordwts = pres.rels map (wordWeight(_, wrdCntn, pres.rank))
(1 - presCntn) * math.pow(presCntn, pres.sz) * (wordwts.foldRight(1.0)(_ * _))
}
/*
* Empty presentation
*/
val nullpres: Presentation = Presentation(Vector(), 0)
implicit def writeWord: WriteString[FreeGroups.Word] =
WriteString.simple[Word]
implicit def writePres: WriteString[FreeGroups.Presentation] =
WriteString.simple[Presentation]
implicit def readWord: ReadString[FreeGroups.Word] =
ReadString(Word.fromString)
implicit def readPres: ReadString[FreeGroups.Presentation] =
ReadString(Presentation.fromString)
}
| siddhartha-gadgil/ProvingGround | core/src/main/scala/provingground/andrewscurtis/FreeGroups.scala | Scala | mit | 12,174 |
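// Word count over README.md: split each line on whitespace, drop empty tokens, pair each word
// with 1, group by key into 2 partitions, sum the counts per word, and sort the collected
// result by descending count.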
val textFile = sc.textFile("README.md", 4)
val words = textFile.flatMap(line => line.split("[\\\\s]+"))
val realWords = words.filter(_.nonEmpty)
val wordTuple = realWords.map(word => (word, 1))
val groupBy = wordTuple.groupByKey(2)
val wordCount = groupBy.mapValues(value => value.reduce(_ + _))
wordCount.collect().sortBy(-_._2)
wordCount.toDebugString
wordCount.dependencies.head.rdd
textFile.dependencies.head.rdd
textFile.dependencies.head.rdd.dependencies
groupBy.collect()
| taewookeom/spark-shell-examples | Example05WordCountGroupByKey.scala | Scala | apache-2.0 | 477 |
package objsets
import common._
import TweetReader._
/**
* A class to represent tweets.
*/
class Tweet(val user: String, val text: String, val retweets: Int) {
override def toString: String =
"User: " + user + "\\n" +
"Text: " + text + " [" + retweets + "]"
}
/**
* This represents a set of objects of type `Tweet` in the form of a binary search
* tree. Every branch in the tree has two children (two `TweetSet`s). There is an
* invariant which always holds: for every branch `b`, all elements in the left
* subtree are smaller than the tweet at `b`. The elements in the right subtree are
* larger.
*
* Note that the above structure requires us to be able to compare two tweets (we
* need to be able to say which of two tweets is larger, or if they are equal). In
* this implementation, the equality / order of tweets is based on the tweet's text
* (see `def incl`). Hence, a `TweetSet` could not contain two tweets with the same
* text from different users.
*
*
* The advantage of representing sets as binary search trees is that the elements
* of the set can be found quickly. If you want to learn more you can take a look
* at the Wikipedia page [1], but this is not necessary in order to solve this
* assignment.
*
* [1] http://en.wikipedia.org/wiki/Binary_search_tree
*/
abstract class TweetSet {
/**
* This method takes a predicate and returns a subset of all the elements
* in the original set for which the predicate is true.
*
* Question: Can we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def filter(p: Tweet => Boolean): TweetSet = filterAcc(p, new Empty)
/**
* This is a helper method for `filter` that propagates the accumulated tweets.
*/
def filterAcc(p: Tweet => Boolean, acc: TweetSet): TweetSet
/**
* Returns a new `TweetSet` that is the union of `TweetSet`s `this` and `that`.
*
* Question: Should we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def union(that: TweetSet): TweetSet =
if (that.isEmpty) this
else incl(that.head) union that.tail
/**
* Returns the tweet from this set which has the greatest retweet count.
*
* Calling `mostRetweeted` on an empty set should throw an exception of
* type `java.util.NoSuchElementException`.
*
* Question: Should we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def mostRetweeted: Tweet = tail.findMostRetweeted(head)
def findMostRetweeted(curr: Tweet): Tweet =
if (isEmpty) curr
else if (curr.retweets > head.retweets) tail.findMostRetweeted(curr)
else tail.findMostRetweeted(head)
/**
* Returns a list containing all tweets of this set, sorted by retweet count
* in descending order. In other words, the head of the resulting list should
* have the highest retweet count.
*
* Hint: the method `remove` on TweetSet will be very useful.
* Question: Should we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def descendingByRetweet: TweetList = {
if (isEmpty) Nil
else new Cons(mostRetweeted, tail.descendingByRetweet)
}
/**
* The following methods are already implemented
*/
/**
   * Returns a new `TweetSet` which contains all elements of this set, and the
   * new element `tweet` in case it does not already exist in this set.
*
* If `this.contains(tweet)`, the current set is returned.
*/
def incl(tweet: Tweet): TweetSet
/**
* Returns a new `TweetSet` which excludes `tweet`.
*/
def remove(tweet: Tweet): TweetSet
/**
* Tests if `tweet` exists in this `TweetSet`.
*/
def contains(tweet: Tweet): Boolean
/**
* This method takes a function and applies it to every element in the set.
*/
def foreach(f: Tweet => Unit): Unit
def isEmpty: Boolean
def head: Tweet
def tail: TweetSet
}
class Empty extends TweetSet {
def filterAcc(p: Tweet => Boolean, acc: TweetSet): TweetSet = new Empty
/**
* The following methods are already implemented
*/
def contains(tweet: Tweet): Boolean = false
def incl(tweet: Tweet): TweetSet = new NonEmpty(tweet, new Empty, new Empty)
def remove(tweet: Tweet): TweetSet = this
def foreach(f: Tweet => Unit): Unit = ()
def isEmpty: Boolean = true
def head: Nothing = throw new NoSuchElementException("No head!")
def tail: Nothing = throw new NoSuchElementException("No tail!")
}
class NonEmpty(elem: Tweet, left: TweetSet, right: TweetSet) extends TweetSet {
def filterAcc(p: Tweet => Boolean, acc: TweetSet): TweetSet =
if (p(head))
if (tail.isEmpty) acc.incl(head)
else tail.filterAcc(p, acc.incl(head))
else if (!tail.isEmpty) tail.filterAcc(p, acc)
else acc
/**
* The following methods are already implemented
*/
def contains(x: Tweet): Boolean =
if (x.text < elem.text) left.contains(x)
else if (elem.text < x.text) right.contains(x)
else true
def incl(x: Tweet): TweetSet = {
if (x.text < elem.text) new NonEmpty(elem, left.incl(x), right)
else if (elem.text < x.text) new NonEmpty(elem, left, right.incl(x))
else this
}
def remove(tw: Tweet): TweetSet =
if (tw.text < elem.text) new NonEmpty(elem, left.remove(tw), right)
else if (elem.text < tw.text) new NonEmpty(elem, left, right.remove(tw))
else left.union(right)
def foreach(f: Tweet => Unit): Unit = {
f(elem)
left.foreach(f)
right.foreach(f)
}
def isEmpty: Boolean = false
def head: Tweet = if (left.isEmpty) elem else left.head
def tail: TweetSet = if (left.isEmpty) right else new NonEmpty(elem, left.tail, right)
}
trait TweetList {
def head: Tweet
def tail: TweetList
def isEmpty: Boolean
def foreach(f: Tweet => Unit): Unit =
if (!isEmpty) {
f(head)
tail.foreach(f)
}
}
object Nil extends TweetList {
def head = throw new java.util.NoSuchElementException("head of EmptyList")
def tail = throw new java.util.NoSuchElementException("tail of EmptyList")
def isEmpty = true
}
class Cons(val head: Tweet, val tail: TweetList) extends TweetList {
def isEmpty = false
}
object GoogleVsApple {
val google = List("android", "Android", "galaxy", "Galaxy", "nexus", "Nexus")
val apple = List("ios", "iOS", "iphone", "iPhone", "ipad", "iPad")
lazy val tweetSet: TweetSet = TweetReader.allTweets
lazy val googleTweets: TweetSet = tweetSet.filter((t: Tweet) => google.exists((s: String) => t.text.contains(s)))
lazy val appleTweets: TweetSet = tweetSet.filter((t: Tweet) => apple.exists((s: String) => t.text.contains(s)))
/**
* A list of all tweets mentioning a keyword from either apple or google,
* sorted by the number of retweets.
*/
lazy val trending: TweetList = googleTweets.union(appleTweets).descendingByRetweet
}
object Main extends App {
// Print the trending tweets
GoogleVsApple.trending foreach println
}
| clasnake/coursera-fp-scala | objsets/src/main/scala/objsets/TweetSet.scala | Scala | mit | 7,054 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.misc
import implicitbox.Not
import monix.execution.{CancelableFuture, FutureUtils}
import monix.execution.schedulers.TrampolineExecutionContext
import scala.annotation.implicitNotFound
import scala.concurrent.Future
/**
* Type class describing how [[Local]] binding works for specific data types.
*
* This is needed because asynchronous data types, like `Future`,
* that can be waited on, should also clear the modified context
* after completion.
*
* NOTE: this type class does not work for data types that suspend the
* execution, like `Coeval` or `Task`, because [[Local]] is meant to
* be used in a side effectful way. Instances of this type class
* can't be implemented for data types like `Task`, as a technical
* limitation, because `Task` would also need a suspended `Context`
* evaluation in `bindContext`.
*/
@implicitNotFound("""Cannot find an implicit value for CanBindLocals[${R}].
If ${R} is the result of a synchronous action, either build an implicit with
CanBindLocals.synchronous or import CanBindLocals.Implicits.synchronousAsDefault.""")
trait CanBindLocals[R] {
/** See [[monix.execution.misc.Local.bind[R](ctx* Local.bind]]. */
def bindContext(ctx: Local.Context)(f: => R): R
/** See [[monix.execution.misc.Local.bind[R](value* Local.bind]]. */
def bindKey[A](local: Local[A], value: Option[A])(f: => R): R =
bindContext(Local.getContext().bind(local.key, value))(f)
/** See [[Local.isolate]]. */
def isolate(f: => R): R =
bindContext(Local.getContext().isolate())(f)
}
object CanBindLocals extends CanIsolateInstancesLevel1 {
def apply[R](implicit R: CanBindLocals[R]): CanBindLocals[R] = R
}
private[misc] abstract class CanIsolateInstancesLevel1 extends CanIsolateInstancesLevel0 {
/**
* Instance for `monix.execution.CancelableFuture`.
*/
implicit def cancelableFuture[R]: CanBindLocals[CancelableFuture[R]] =
CancelableFutureInstance.asInstanceOf[CanBindLocals[CancelableFuture[R]]]
object Implicits {
/**
* Implicit instance for all things synchronous.
*
* Needs to be imported explicitly in scope. Will NOT override
* other `CanBindLocals` implicits that are already visible.
*/
@inline implicit def synchronousAsDefault[R](implicit ev: Not[CanBindLocals[R]]): CanBindLocals[R] =
CanBindLocals.synchronous[R]
}
}
private[misc] abstract class CanIsolateInstancesLevel0 {
/**
* Instance for `scala.concurrent.Future`.
*/
implicit def future[R]: CanBindLocals[Future[R]] =
FutureInstance.asInstanceOf[CanBindLocals[Future[R]]]
/**
* Instance for `Unit`.
*/
@inline implicit def forUnit: CanBindLocals[Unit] =
synchronous[Unit]
/**
* Builds an instance for synchronous execution.
*
* {{{
* import monix.execution.misc._
* implicit val ev = CanBindLocals.synchronous[String]
*
* // If not provided explicitly, it might trigger compilation error
* // due to requirement for CanBindLocals[String]
* Local.bindClear {
* "Hello!"
* }
* }}}
*/
def synchronous[R]: CanBindLocals[R] =
SynchronousInstance.asInstanceOf[CanBindLocals[R]]
/** Implementation for [[CanBindLocals.synchronous]]. */
protected object SynchronousInstance extends CanBindLocals[Any] {
override def bindContext(ctx: Local.Context)(f: => Any): Any = {
val prev = Local.getContext()
Local.setContext(ctx)
try f
finally Local.setContext(prev)
}
}
/** Implementation for [[CanBindLocals.cancelableFuture]]. */
protected object CancelableFutureInstance extends CanBindLocals[CancelableFuture[Any]] {
override def bindContext(ctx: Local.Context)(f: => CancelableFuture[Any]): CancelableFuture[Any] = {
val prev = Local.getContext()
Local.setContext(ctx)
try {
f.transform { result =>
Local.setContext(prev)
result
}(TrampolineExecutionContext.immediate)
} finally {
Local.setContext(prev)
}
}
}
/** Implementation for [[CanBindLocals.future]]. */
protected object FutureInstance extends CanBindLocals[Future[Any]] {
override def bindContext(ctx: Local.Context)(f: => Future[Any]): Future[Any] = {
val prev = Local.getContext()
Local.setContext(ctx)
try {
FutureUtils
.transform[Any, Any](f, result => {
Local.setContext(prev)
result
})(TrampolineExecutionContext.immediate)
} finally {
Local.setContext(prev)
}
}
}
}
| monifu/monifu | monix-execution/shared/src/main/scala/monix/execution/misc/CanBindLocals.scala | Scala | apache-2.0 | 5,266 |
package at.logic.gapt.provers.viper.grammars
import at.logic.gapt.expr._
import at.logic.gapt.expr.fol.folTermSize
import at.logic.gapt.proofs.Context
import at.logic.gapt.utils.NameGenerator
import scala.util.Random
object randomInstance {
def generate( tys: Seq[TBase] )( implicit ctx: Context ): Seq[Expr] = {
val nameGen = new NameGenerator( Set() )
tys.map( generate( _, nameGen ) )
}
def generate( ty: TBase )( implicit ctx: Context ): Expr = generate( ty, new NameGenerator( Set() ) )
def generate( ty: TBase, nameGen: NameGenerator )( implicit ctx: Context ): Expr = {
ctx.getConstructors( ty ) match {
case None =>
Var( nameGen freshWithIndex "x", ty )
case Some( ctrs ) =>
val ctr = ctrs( Random.nextInt( ctrs.size ) )
val FunctionType( _, argTypes ) = ctr.ty
val args = argTypes.map { at => generate( at.asInstanceOf[TBase], nameGen ) }
ctr( args: _* )
}
}
def generate( tys: Seq[TBase], cond: Float => Boolean )( implicit ctx: Context ): Seq[Expr] =
Stream.continually( generate( tys ) ).filter( insts => cond( folTermSize( insts ).toFloat / insts.size ) ).head
}
| gebner/gapt | core/src/main/scala/at/logic/gapt/provers/viper/grammars/randomInstance.scala | Scala | gpl-3.0 | 1,167 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.cluster
import akka.actor.ActorRef
import org.apache.gearpump.cluster.master.Master.MasterInfo
import org.apache.gearpump.cluster.scheduler.Resource
import org.apache.gearpump.cluster.worker.WorkerId
/**
* Cluster Bootup Flow
*/
object WorkerToMaster {
  /** When a worker is started, it sends RegisterNewWorker */
case object RegisterNewWorker
  /** When a worker loses connection with the master, it tries to register itself again with its old id. */
case class RegisterWorker(workerId: WorkerId)
  /** The worker is responsible for broadcasting its current status to the master */
case class ResourceUpdate(worker: ActorRef, workerId: WorkerId, resource: Resource)
}
object MasterToWorker {
  /** Master confirms the reception of RegisterNewWorker or RegisterWorker */
case class WorkerRegistered(workerId: WorkerId, masterInfo: MasterInfo)
  /** Worker has not received a reply from the master */
case class UpdateResourceFailed(reason: String = null, ex: Throwable = null)
/** Master is synced with worker on resource slots managed by current worker */
case object UpdateResourceSucceed
} | manuzhang/incubator-gearpump | core/src/main/scala/org/apache/gearpump/cluster/DaemonMessage.scala | Scala | apache-2.0 | 1,925 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streamingtest
/**
* A test suite to make sure all `implicit` functions work correctly.
*
* As `implicit` is a compiler feature, we don't need to run this class.
* What we need to do is making the compiler happy.
*/
class ImplicitSuite {
// We only want to test if `implicit` works well with the compiler,
// so we don't need a real DStream.
def mockDStream[T]: org.apache.spark.streaming.dstream.DStream[T] = null
def testToPairDStreamFunctions(): Unit = {
val dstream: org.apache.spark.streaming.dstream.DStream[(Int, Int)] = mockDStream
dstream.groupByKey()
}
}
| bravo-zhang/spark | streaming/src/test/scala/org/apache/spark/streamingtest/ImplicitSuite.scala | Scala | apache-2.0 | 1,417 |
package controllers.webservice
import helpers.Token
import models.admin.{Question, QuestionTable}
import models.webservice.{GameUserDAO, GameUser}
import play.api.Play
import play.api.Play.current
import play.api.db.slick.DatabaseConfigProvider
import play.api.libs.json.{Reads, JsPath, JsError, Json}
import play.api.mvc.{Action, Controller}
import slick.driver.JdbcProfile
import scala.slick.driver.MySQLDriver.simple._
import play.api.libs.json.Reads._ // Custom validation helpers
import play.api.libs.functional.syntax._ // Combinator syntax
/**
* Created by Murat.
*/
object Application extends Controller{
lazy val db = DatabaseConfigProvider.get[JdbcProfile](Play.current).db
implicit val uFormat = Json.format[GameUser]
implicit val userReads = (
(JsPath \\ "username").read[String] and
(JsPath \\ "password").read[String]
).tupled
def register = Action(parse.json){ request =>
request.body.validate[(String, String)].map{
case (username: String, password: String) =>
val user = GameUser(None, username, password, Some(1200))
GameUserDAO.register(user)
Ok(Json.obj("success" -> 1))
}.recoverTotal{
e => BadRequest(JsError.toJson(e))
}
}
def login = Action(parse.json){ request =>
request.body.validate[(String, String)].map{
case (username: String, password: String) =>
val exist = GameUserDAO.checkCredentials(username, password)
if(exist) {
val user = GameUserDAO.findByName(username)
val token = Token.createToken(user.get)
Ok(Json.obj("success" -> 1, "token" -> token, "userId" -> user.get.id))
}else {
Unauthorized(Json.obj("error" -> "wrong username or password"))
}
}.recoverTotal{
e => BadRequest(JsError.toJson(e))
}
}
}
| mustafin/ent-quiz-server | modules/webservice/app/controllers/webservice/Application.scala | Scala | apache-2.0 | 1,822 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2.json
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.connector.read.Scan
import org.apache.spark.sql.execution.datasources.PartitioningAwareFileIndex
import org.apache.spark.sql.execution.datasources.v2.FileScanBuilder
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
class JsonScanBuilder (
sparkSession: SparkSession,
fileIndex: PartitioningAwareFileIndex,
schema: StructType,
dataSchema: StructType,
options: CaseInsensitiveStringMap)
extends FileScanBuilder(sparkSession, fileIndex, dataSchema) {
override def build(): Scan = {
JsonScan(sparkSession, fileIndex, dataSchema, readDataSchema(), readPartitionSchema(), options)
}
}
| ConeyLiu/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/json/JsonScanBuilder.scala | Scala | apache-2.0 | 1,594 |
package epic.parser
package projections
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import org.junit.runner.RunWith
import org.scalatest._
import org.scalatest.junit._
import org.scalatest.prop._
import epic.trees.{TreeInstance, AnnotatedLabel}
/**
*
* @author dlwh
*/
@RunWith(classOf[JUnitRunner])
class EnumeratedAnchoringTest extends FunSuite with ParserTestHarness {
test("We can parse using simple anchoring") {
val gen = ParserTestHarness.simpleParser
val f = new AnchoredPCFGProjector[AnnotatedLabel, String](Double.NegativeInfinity)
val grammar = new ProjectingCoreGrammar(gen, f)
val chartParser = Parser(grammar)
val grammarNext = new ProjectingCoreGrammar(chartParser, f)
val chartNext = Parser(grammarNext)
for( TreeInstance(_, t, w) <- getTestTrees()) try {
val tree1 = chartParser(w)
val tree2 = chartNext(w)
assert(tree2 === tree1, "late")
} catch {
case e: Exception =>
throw new RuntimeException("Trouble with " + t.render(w), e)
}
}
test("Parsing kind of works using it") {
val gen = ParserTestHarness.simpleParser
val f = new AnchoredPCFGProjector[AnnotatedLabel, String](Double.NegativeInfinity)
val grammar = new ProjectingCoreGrammar(gen, f)
val chartParser = Parser(grammar)
val res = evalParser(getTestTrees(), chartParser)
assert(res.f1 > 0.5, res.f1)
}
} | langkilde/epic | src/test/scala/epic/parser/projections/EnumeratedAnchoringTest.scala | Scala | apache-2.0 | 1,913 |
/*
* Copyright (c) 2013-2014 Telefónica Investigación y Desarrollo S.A.U.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.tid.cosmos.api.controllers.cluster
import java.sql.Timestamp
import org.scalatest.FlatSpec
import org.scalatest.matchers.MustMatchers
import play.api.libs.json.Json
import es.tid.cosmos.api.profile.Cluster
import es.tid.cosmos.servicemanager.{ClusterName, ClusterUser}
import es.tid.cosmos.servicemanager.clusters._
import es.tid.cosmos.servicemanager.services.{Hdfs, MapReduce2}
class ClusterDetailsTest extends FlatSpec with MustMatchers {
val clusterOwner = ClusterUser(
username = "jsmith",
group = None,
publicKey = "ssh-rsa XXXXX [email protected]"
)
val extraUser = ClusterUser(
username = "pbanks",
group = Some("group"),
publicKey = "ssh-rsa ZZZZZ [email protected]"
)
val sampleDetails = ClusterDetails(
href = "http://host/path",
id = "001",
name = ClusterName("test cluster"),
size = 20,
state = "terminating",
stateDescription = "releasing resources",
master = Some(host(1)),
slaves = Some(Seq(host(2), host(3))),
users = Some(Seq(clusterOwner, extraUser)),
services = Set("ServiceB", "ServiceA"),
blockedPorts = Set(2, 1, 3),
shared = true
)
val sampleJson = Json.obj(
"href" -> "http://host/path",
"id" -> "001",
"name" -> "test cluster",
"size" -> 20,
"state" -> "terminating",
"stateDescription" -> "releasing resources",
"master" -> hostJson(1),
"slaves" -> Json.arr(hostJson(2), hostJson(3)),
"shared" -> true,
"users" -> Json.arr(
Json.obj(
"username" -> "jsmith",
"sshPublicKey" -> "ssh-rsa XXXXX [email protected]",
"isSudoer" -> false
),
Json.obj(
"username" -> "pbanks",
"sshPublicKey" -> "ssh-rsa ZZZZZ [email protected]",
"isSudoer" -> false
)
),
"services" -> Json.arr("ServiceA", "ServiceB"),
"blockedPorts" -> Json.arr(1, 2, 3)
)
"Cluster details" must "be serialized to JSON" in {
Json.toJson(sampleDetails) must be (sampleJson)
}
it must "omit master information when unavailable" in {
Json.toJson(sampleDetails.copy(master = None)) must equal (sampleJson - "master")
}
it must "omit slaves information when unavailable" in {
Json.toJson(sampleDetails.copy(slaves = None)) must equal (sampleJson - "slaves")
}
it must "omit user information when unavailable" in {
Json.toJson(sampleDetails.copy(users = None)) must equal (sampleJson - "users")
}
it must "not show removed users" in {
val removedUser = extraUser.copy(sshEnabled = false, hdfsEnabled = false)
val clusterWithRemovedUser = sampleDetails.copy(users = Some(Seq(clusterOwner, removedUser)))
val jsonWithRemovedUser = sampleJson ++ Json.obj("users" -> Json.arr(
Json.obj(
"username" -> "jsmith",
"sshPublicKey" -> "ssh-rsa XXXXX [email protected]",
"isSudoer" -> false
)
))
Json.toJson(clusterWithRemovedUser) must equal (jsonWithRemovedUser)
}
it must "not show hidden services" in {
val description = ImmutableClusterDescription(
id = ClusterId("id"),
name = ClusterName("mycluster"),
size = 2,
state = Running,
nameNode = None,
master = None,
slaves = Seq.empty,
users = None,
services = Set(Hdfs.name, MapReduce2.name) ++ ClusterDetails.unlistedServices,
blockedPorts = Set.empty
)
val assignment = Cluster(description.id, 13L, new Timestamp(0))
val details = ClusterDetails(description, assignment, "href")
val listedServices = (Json.toJson(details) \\ "services").as[Set[String]]
ClusterDetails.unlistedServices.foreach { serviceName =>
listedServices must not contain serviceName
}
}
private def host(index: Int) = HostDetails(s"cosmos0$index", s"192.168.0.$index")
private def hostJson(index: Int) = Json.obj(
"hostname" -> s"cosmos0$index",
"ipAddress" -> s"192.168.0.$index"
)
}
| telefonicaid/fiware-cosmos-platform | cosmos-api/test/es/tid/cosmos/api/controllers/cluster/ClusterDetailsTest.scala | Scala | apache-2.0 | 4,564 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.3
* @date Thu Apr 28 13:55:48 EDT 2016
* @see LICENSE (MIT style license file).
*/
package scalation.util
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `BinarySearch` object provides a method for binary search.
*/
object BinarySearch
{
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Find a key in the array/vector using binary search and return the position
* of the key (-1, if not found) along with the number of probes required.
* Works (switch comments) for both mutable `Array`s and immutable `Vector`s.
* @param a the sorted array/vector to be searched
* @param key the key to find in 'a'
*/
// def search [T <% Ordered [T]] (a: Vector [T], key: T): Tuple2 [Int, Int] =
def search [T <% Ordered [T]] (a: Array [T], key: T): Tuple2 [Int, Int] =
{
var probes = 0 // count number of probes
var min = 0 // lowest possible index
var max = a.length - 1 // highest possible index
while (min <= max) {
val m = (max + min) / 2 // compute mid-point
probes += 1 // increment probes count
if (a(m) == key) return (m, probes) // found the key
if (a(m) < key) min = m + 1 // a_m too low, search m+1 to max
else max = m - 1 // a_m too high, search min to m-1
} // while
(-1, probes)
} // search
} // BinarySearch object
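// Illustrative usage: BinarySearch.search (Array (1, 3, 5, 7, 9), 7) returns (3, 2), that is,
// the key 7 is found at index 3 after 2 probes.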
import scala.math.floor
import scalation.linalgebra.VectorD
import scalation.math.log2
import scalation.plot.Plot
import scalation.random.{Randi0, RandomVecI}
import BinarySearch.search
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `BinarySearchTest` object performs performance testing on `BinarySearch`.
* > run-main scalation.util.BinarySearchTest
*/
object BinarySearchTest extends App
{
val max_it = 1000000 // max number of iterations for a given size
val m = 1000 // 'k'th size
val nvec = new VectorD (m) // size vector
val pvec = new VectorD (m) // probes count vector
val evec = new VectorD (m) // probes estimate vector
val dvec = new VectorD (m) // probes estimate vector
for (k <- 0 until m) {
val n = (k + 1) * 1000
val rvg = RandomVecI (n, 10*n, unique = false)
val rng = Randi0 (n-1)
var total = 0
val av = rvg.igen; av.sort ()
// val a = av().toVector
val a = av().toArray
for (it <- 0 until max_it) {
val i = rng.igen
val key = a(i)
val (pos, probes) = search (a, key)
total += probes
// println (s"key = $key, i = $i, search: pos = $pos, probes = $probes")
} // for
// estimate of the number probes for the expected case
// @see Knuth, "The Art of Computer Programming," Vol. 3, p. 413
val ln = log2 (n)
val fln = floor (ln)
val estimate = ln - 1.04 + (fln + 2.0) / n.toDouble
val avg_probes = total / max_it.toDouble
nvec(k) = k
pvec(k) = avg_probes
evec(k) = estimate
val diff = estimate - avg_probes
dvec(k) = diff
println (s"$k: estimate = $estimate, avg-probes = $avg_probes, diff = $diff")
} // for
println (s"mean diff = ${dvec.mean}")
new Plot (nvec, pvec, evec)
} // BinarySearchTest object
| NBKlepp/fda | scalation_1.3/scalation_mathstat/src/main/scala/scalation/util/BinarySearch.scala | Scala | mit | 3,804 |
/* ********************************************************** *\\
** ___________ __
** \\__ ___/____/ |_____________
** | |_/ __ \\ __\\_ __ \\__ \\ Tetra Concepts LLC
** | |\\ ___/| | | | \\// __ \\_ tetraconcepts.com
** |____| \\___ >__| |__| (____ /
** \\/ \\/
\\* ********************************************************** */
/*
* Copyright (C) 2014 Tetra Concepts LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tetra.accumulo_scala.util
import com.tetra.accumulo_scala.UnitSpec
import org.apache.accumulo.core.client.Connector
import org.apache.accumulo.core.security.Authorizations
import org.apache.accumulo.core.client.TableNotFoundException
import org.easymock.EasyMock._
import org.apache.accumulo.core.client.Scanner
import org.apache.accumulo.core.client.mock.MockConnector
import org.apache.accumulo.core.client.mock.MockInstance
import java.util.UUID
import org.apache.accumulo.core.client.BatchWriterConfig
import org.apache.accumulo.core.data.Mutation
import scala.collection.JavaConversions._
import com.tetra.accumulo_scala.ConnectorOps._
import org.apache.accumulo.core.data.{ Range => AccumuloRange }
import org.apache.accumulo.core.security.ColumnVisibility
import org.apache.accumulo.core.data.Range
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.apache.hadoop.io.Text
import java.util.Map.Entry
import org.apache.accumulo.core.data.Key
import org.apache.accumulo.core.data.Value
import org.apache.accumulo.core.client.BatchScanner
@RunWith(classOf[JUnitRunner])
class BatchScannerProxySpec extends UnitSpec {
def fixture =
new {
val tableName = "xyz"
val connector = new MockInstance(UUID.randomUUID().toString).getConnector("", "")
connector.tableOperations().create(tableName)
var conf = new ScannerProxyConfig {
override val conn = connector
override val auths = Auths.EMPTY
override val tableName = "xyz"
}
//setup
private val a = new Mutation("a")
a.put("fa", "qa", "v")
private val h = new Mutation("h")
h.put("fh", "qh", new ColumnVisibility("hidden"), "v")
private val k = new Mutation("k")
k.put("fk", "qk1", "v")
k.put("fk", "qk2", "v")
private val z = new Mutation("z")
z.put("fz", "qz", "v")
private val writer = connector.createBatchWriter(tableName, new BatchWriterConfig)
writer.addMutations(List(a, h, k, z))
writer.flush()
writer.close()
}
"A BatchScannerPoxy" should "throw an exception if the table does not exist if strict" in {
val conf = new ScannerProxyConfig {
override val conn = new MockInstance(UUID.randomUUID().toString).getConnector("", "")
override val auths = Auths.EMPTY
override val tableName = "abc"
isStrict = true
}
val scanner = new BatchScannerProxy(conf, 2, List(new Range()).iterator)
intercept[TableNotFoundException] {
scanner.hasNext
}
}
it should "return an empty iterator if strict is set to false" in {
val conf = new ScannerProxyConfig {
override val conn = new MockInstance(UUID.randomUUID().toString).getConnector("", "")
override val auths = Auths.EMPTY
override val tableName = "abc"
isStrict = false
}
val scanner = new BatchScannerProxy(conf, 2, List(new Range()).iterator)
assert(!scanner.hasNext)
}
it should "do a full table scan if no range parameters are set" in {
val f = fixture
val bsp = new BatchScannerProxy(f.conf, 2, List(new Range()).iterator)
assert("akkz" == bsp.foldLeft("")((rows, e) => rows + e.getKey().getRow().toString()))
}
it should "skip a if starting at b" in {
val f = fixture
val bsp = new BatchScannerProxy(f.conf, 2, List(new Range("b", null)).iterator)
assert("kkz" == bsp.foldLeft("")((rows, e) => rows + e.getKey().getRow().toString()))
}
it should "skip z if ending at y" in {
val f = fixture
val bsp = new BatchScannerProxy(f.conf, 2, List(new Range(null, "y")).iterator)
assert("akk" == bsp.foldLeft("")((rows, e) => rows + e.getKey().getRow().toString()))
}
it should "get a and z if provided in ranges" in {
val f = fixture
val bsp = new BatchScannerProxy(f.conf, 2, List(new AccumuloRange("a"), new AccumuloRange("z")).iterator)
assert("az" == bsp.foldLeft("")((rows, e) => rows + e.getKey().getRow().toString()))
}
it should "used both ranges and to and from" in {
val f = fixture
val bsp = new BatchScannerProxy(f.conf, 2, List(new AccumuloRange("a"), new AccumuloRange("z"), new AccumuloRange("b", "y")).iterator)
assert("akkz" == (bsp.foldLeft("")((rows, e) => rows + e.getKey().getRow().toString())).sorted)
}
it should "get only 1 if take(1)" in {
val f = fixture
val bsp = new BatchScannerProxy(f.conf, 2, List(new Range()).iterator)
assert(1 == bsp.take(1).foldLeft(0)((count, _) => count + 1))
}
it should "get only 1 if drop(1) take(1)" in {
val f = fixture
val bsp = new BatchScannerProxy(f.conf, 2, List(new Range()).iterator)
assert(1 == bsp.drop(1).take(1).foldLeft(0)((count, _) => count + 1))
}
it should "get only 1 if drop(N-1)" in {
val f = fixture
val bsp = new BatchScannerProxy(f.conf, 2, List(new Range()).iterator)
assert(1 == bsp.drop(3).foldLeft(0)((count, _) => count + 1))
}
it should "everything if we have the auths" in {
val f = fixture
f.conf = new ScannerProxyConfig {
override val conn = f.conf.conn
override val auths = Auths.getAuths("hidden")
override val tableName = f.conf.tableName
}
val bsp = new BatchScannerProxy(f.conf, 2, List(new Range()).iterator)
assert(5 == bsp.foldLeft(0)((count, _) => count + 1))
}
it should "get only kk if filtering on family fk" in {
val f = fixture
f.conf.familyQualifiers = Some(List((new Text("fk"), null)))
val bsp = new BatchScannerProxy(f.conf, 2, List(new Range()).iterator)
assert("kk" == bsp.foldLeft("")((rows, e) => rows + e.getKey().getRow().toString()))
}
it should "get only k if filtering on family fk qualifier qk2" in {
val f = fixture
f.conf.familyQualifiers = Some(List((new Text("fk"), new Text("qk2"))))
val bsp = new BatchScannerProxy(f.conf, 2, List(new Range()).iterator)
assert("k" == bsp.foldLeft("")((rows, e) => rows + e.getKey().getRow().toString()))
}
it should "close the ranges when it closes" in {
val ranges = mock[CloseableIterator[Range]]
expecting {
ranges.close
}
val bsp = new ScannerProxy(null, null, null);
bsp.in(ranges)
whenExecuting(ranges) {
bsp.close
}
}
it should "close the ranges when it throws an exception during hasNext" in {
val conn = mock[Connector]
val scanner = mock[BatchScanner]
val iter = mock[Iterator[Entry[Key, Value]]]
//FIXME: how do I mock a GroupedIterator?
val ranges = new CloseableIterator[Range] {
val i = List(new Range()).iterator
var closed = false
def hasNext() = i.hasNext
def next() = i.next
def close() = {
closed = true
}
}
expecting {
conn.createBatchScanner(anyString(), anyObject(), anyObject()).andReturn(scanner)
scanner.setRanges(anyObject())
scanner.iterator().andReturn(iter)
iter.hasNext().andThrow(new RuntimeException)
}
val proxy = new ScannerProxy(conn, Auths.EMPTY, "abc")
val sp = new BatchScannerProxy(proxy, 2, ranges)
whenExecuting(conn, scanner, iter) {
try {
sp.hasNext
} catch {
case t: Throwable => {
assert(ranges.closed)
}
}
}
}
it should "close the ranges when it throws an exception during next" in {
val conn = mock[Connector]
val scanner = mock[BatchScanner]
val iter = mock[Iterator[Entry[Key, Value]]]
//FIXME: how do I mock a GroupedIterator?
val ranges = new CloseableIterator[Range] {
val i = List(new Range()).iterator
var closed = false
def hasNext() = i.hasNext
def next() = i.next
def close() = { closed = true }
}
expecting {
conn.createBatchScanner(anyString(), anyObject(), anyObject()).andReturn(scanner)
scanner.setRanges(anyObject())
scanner.iterator().andReturn(iter)
iter.hasNext().andReturn(true).anyTimes()
iter.next().andThrow(new RuntimeException)
}
val proxy = new ScannerProxy(conn, Auths.EMPTY, "abc")
val sp = new BatchScannerProxy(proxy, 2, ranges)
whenExecuting(conn, scanner, iter) {
try {
sp.next
} catch {
case t: Throwable => {
assert(ranges.closed)
}
}
}
}
} | tetra-concepts-llc/accumulo-scala | src/test/scala/com/tetra/accumulo_scala/util/BatchScannerProxySpec.scala | Scala | apache-2.0 | 9,320 |